raw_hashtable.h

  1. // Part of the Carbon Language project, under the Apache License v2.0 with LLVM
  2. // Exceptions. See /LICENSE for license information.
  3. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
  4. #ifndef CARBON_COMMON_RAW_HASHTABLE_H_
  5. #define CARBON_COMMON_RAW_HASHTABLE_H_
  6. #include <algorithm>
  7. #include <concepts>
  8. #include <cstddef>
  9. #include <cstring>
  10. #include <iterator>
  11. #include <new>
  12. #include <type_traits>
  13. #include <utility>
  14. #include "common/check.h"
  15. #include "common/hashing.h"
  16. #include "common/raw_hashtable_metadata_group.h"
  17. #include "llvm/Support/Compiler.h"
  18. #include "llvm/Support/MathExtras.h"
  19. // A namespace collecting a set of low-level utilities for building hashtable
  20. // data structures. These should only be used as implementation details of
  21. // higher-level data-structure APIs.
  22. //
  23. // The utilities here use the `hashtable_key_context.h` provided `KeyContext` to
  24. // support the necessary hashtable operations on keys: hashing and comparison.
  25. // This also serves as the customization point for hashtables built on this
  26. // infrastructure for those operations. See that header file for details.
  27. //
  28. // These utilities support hashtables following a *specific* API design pattern,
  29. // and using Small-Size Optimization, or "SSO", when desired. We expect there to
  30. // be three layers to any hashtable design:
  31. //
  32. // - A *view* type: a read-only view of the hashtable contents. This type should
  33. // be a value type and is expected to be passed by-value in APIs. However, it
  34. // will have `const`-reference semantics, much like a `std::string_view`. Note
  35. // that the *entries* will continue to be mutable, it is only the *table* that
  36. // is read-only.
  37. //
  38. // - A *base* type: a base class type of the actual hashtable, which allows
  39. // almost all mutable operations but erases any specific SSO buffer size.
  40. // Because this is a base of the actual hash table, it is designed to be
  41. // passed as a non-`const` reference or pointer.
  42. //
  43. // - A *table* type: the actual hashtable which derives from the base type and
  44. // adds any desired SSO storage buffer. Beyond the physical storage, it also
  45. // allows resetting the table to its initial state & allocated size, as well
  46. // as copying and moving the table.
  47. //
  48. // For complete examples of the API design, see `set.h` for a hashtable-based
  49. // set data structure, and `map.h` for a hashtable-based map data structure.
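//
// As a rough illustration of the pattern, a hashtable-based set might be
// assembled like this (a minimal sketch with hypothetical names such as
// `WidgetSetView` and `WidgetContext`; see `set.h` and `map.h` for the real
// types, and note that inheritance access details are elided here):
//
//   class WidgetSetView
//       : RawHashtable::ViewImpl<Widget, void, WidgetContext> {
//    public:
//     auto Contains(Widget w) const -> bool;  // Forwards to `LookupEntry`.
//   };
//   class WidgetSetBase
//       : protected RawHashtable::BaseImpl<Widget, void, WidgetContext> {
//    public:
//     auto Insert(Widget w) -> bool;  // Forwards to `InsertImpl`.
//   };
//   template <ssize_t SmallSize>
//   class WidgetSet : RawHashtable::TableImpl<WidgetSetBase, SmallSize> {
//    public:
//     auto Reset() -> void;  // Forwards to `ResetImpl`.
//   };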
  50. //
  51. // The hashtable design implemented here has several key invariants and design
  52. // elements that are essential to all three of the types above and the
  53. // functionality they provide.
  54. //
  55. // - The underlying hashtable uses [open addressing], a power-of-two table size,
  56. // and quadratic probing rather than closed addressing and chaining.
  57. //
  58. // [open addressing]: https://en.wikipedia.org/wiki/Open_addressing
  59. //
  60. // - Each _slot_ in the table corresponds to a key, a value, and one byte of
  61. // metadata. Each _entry_ is a key and value. The key and value for an entry
  62. // are stored together.
  63. //
  64. // - The allocated storage is organized into an array of metadata bytes followed
  65. // by an array of entry storage.
  66. //
  67. // - The metadata byte corresponding to each entry marks whether that entry is
  68. // empty, deleted, or present. When present, it also stores a 7-bit tag
  69. // extracted from the hash of the entry's key.
  70. //
  71. // - The storage for an entry is an internal type that should not be exposed to
  72. // users; only the underlying keys and values should be exposed.
  73. //
  74. // - The hash addressing and probing occurs over *groups* of slots rather than
  75. // individual entries. When inserting a new entry, it can be added to the
  76. // group it hashes to as long as that group is not full, and can even reuse a
  77. // slot left as a tombstone by a previously deleted entry. Only when the group is
  78. // full will it look at the next group in the probe sequence. As a result,
  79. // there may be entries in a group where a different group is the start of
  80. // that entry's probe sequence. Also, when performing a lookup, every group in
  81. // the probe sequence must be inspected for the lookup key until it is found
  82. // or the group has an empty slot.
  83. //
  84. // - Groups are scanned rapidly using the one-byte metadata for each entry in
  85. // the group and CPU instructions that allow comparing all of the metadata for
  86. // a group in parallel. For more details on the metadata group encoding and
  87. // scanning, see `raw_hashtable_metadata_group.h`.
  88. //
  89. // - `GroupSize` is a platform-specific relatively small power of two that fits
  90. // in some hardware register. However, `MaxGroupSize` is provided as a
  91. // portable max that is also a power of two. The table storage, whether
  92. // provided by an SSO buffer or allocated, is required to be a multiple of
  93. // `MaxGroupSize` to keep the requirement portable but sufficient for all
  94. // platforms.
  95. //
  96. // - There is *always* an allocated table sized to some multiple of `MaxGroupSize`.
  97. // This allows accesses to be branchless. When heap allocated, we proactively
  98. // allocate a table of at least a minimum size. When there is a small-size
  99. // optimization (SSO) buffer, that provides the initial allocation.
  100. //
  101. // - The table performs a minimal amount of bookkeeping that limits the APIs it
  102. // can support:
  103. // - `alloc_size` is the size of the table *allocated* (not *used*), and is
  104. // always a power of 2 at least as big as `MinAllocatedSize`.
  105. // - `storage` is a pointer to the storage for the `alloc_size` slots of the
  106. // table, and never null.
  107. // - `small_alloc_size` is the maximum `alloc_size` where the table is stored
  108. // in the object itself instead of separately on the heap. In this case,
  109. // `storage` points to `small_storage_`.
  110. // - `growth_budget` is the number of entries that may be added before the
  111. // table allocation is doubled. It is always
  112. // `GrowthThresholdForAllocSize(alloc_size)` minus the number of
  113. // non-empty (filled or deleted) slots. If it ever falls to 0, the table
  114. // is grown to keep it greater than 0.
  115. // There is also a "moved-from" state, in which `alloc_size` is 0 and
  116. // `storage` is null, where the table may only be reinitialized or destroyed.
  117. // Since the table doesn't track the exact number of filled entries,
  118. // it doesn't support a container-style `size` API.
  119. //
  120. // - There is no direct iterator support because of the complexity of embedding
  121. // the group-based metadata scanning into an iterator model. Instead, there is
  122. // just a for-each method that is passed a lambda to observe all entries. The
  123. // order of this observation is also not guaranteed.
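//
// For example, a user-facing map built on these utilities would typically
// expose a for-each style method rather than iterators. A hypothetical usage
// sketch (`widget_counts` and its `ForEach` method are illustrative, not APIs
// defined in this header):
//
//   widget_counts.ForEach([](const Widget& key, int& value) {
//     // Entries are mutable through the callback; visitation order is
//     // unspecified.
//     ++value;
//   });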
  124. namespace Carbon::RawHashtable {
  125. // If allocating storage, allocate a minimum of one cacheline of group metadata
  126. // or a minimum of one group, whichever is larger.
  127. constexpr ssize_t MinAllocatedSize = std::max<ssize_t>(64, MaxGroupSize);
  128. // An entry in the hashtable storage of a `KeyT` and `ValueT` object.
  129. //
  130. // Allows manual construction, destruction, and access to these values so we can
  131. // create arrays of the entries prior to populating them with actual keys and
  132. // values.
  133. template <typename KeyT, typename ValueT>
  134. struct StorageEntry {
  135. static constexpr bool IsTriviallyDestructible =
  136. std::is_trivially_destructible_v<KeyT> &&
  137. std::is_trivially_destructible_v<ValueT>;
  138. static constexpr bool IsTriviallyRelocatable =
  139. IsTriviallyDestructible && std::is_trivially_move_constructible_v<KeyT> &&
  140. std::is_trivially_move_constructible_v<ValueT>;
  141. auto key() const -> const KeyT& {
  142. // Ensure we don't need more alignment than available. Inside a method body
  143. // to apply to the complete type.
  144. static_assert(
  145. alignof(StorageEntry) <= MinAllocatedSize,
  146. "The minimum allocated size turns into the alignment of our array of "
  147. "storage entries as they follow the metadata byte array.");
  148. return *std::launder(reinterpret_cast<const KeyT*>(&key_storage));
  149. }
  150. auto key() -> KeyT& {
  151. return const_cast<KeyT&>(const_cast<const StorageEntry*>(this)->key());
  152. }
  153. auto value() const -> const ValueT& {
  154. return *std::launder(reinterpret_cast<const ValueT*>(&value_storage));
  155. }
  156. auto value() -> ValueT& {
  157. return const_cast<ValueT&>(const_cast<const StorageEntry*>(this)->value());
  158. }
  159. // We handle destruction and move manually as we only want to expose distinct
  160. // `KeyT` and `ValueT` subobjects to user code that may need to do in-place
  161. // construction. As a consequence, this struct only provides the storage and
  162. // we have to manually manage the construction, move, and destruction of the
  163. // objects.
  164. auto Destroy() -> void {
  165. static_assert(!IsTriviallyDestructible,
  166. "Should never instantiate when trivial!");
  167. key().~KeyT();
  168. value().~ValueT();
  169. }
  170. auto CopyFrom(const StorageEntry& entry) -> void {
  171. if constexpr (IsTriviallyRelocatable) {
  172. memcpy(this, &entry, sizeof(StorageEntry));
  173. } else {
  174. new (&key_storage) KeyT(entry.key());
  175. new (&value_storage) ValueT(entry.value());
  176. }
  177. }
  178. // Move from an expiring entry and destroy that entry's key and value.
  179. // Optimizes to directly use `memcpy` when correct.
  180. auto MoveFrom(StorageEntry&& entry) -> void {
  181. if constexpr (IsTriviallyRelocatable) {
  182. memcpy(this, &entry, sizeof(StorageEntry));
  183. } else {
  184. new (&key_storage) KeyT(std::move(entry.key()));
  185. entry.key().~KeyT();
  186. new (&value_storage) ValueT(std::move(entry.value()));
  187. entry.value().~ValueT();
  188. }
  189. }
  190. alignas(KeyT) std::byte key_storage[sizeof(KeyT)];
  191. alignas(ValueT) std::byte value_storage[sizeof(ValueT)];
  192. };
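// A minimal sketch of how the table machinery manages one of these entries
// (illustrative only; the real call sites are in the insert, erase, copy, and
// grow paths below):
//
//   StorageEntry<std::string, int> entry;
//   new (&entry.key_storage) std::string("widget");  // Construct key in place.
//   new (&entry.value_storage) int(42);              // Construct value in place.
//   CARBON_CHECK(entry.key() == "widget" && entry.value() == 42);
//   entry.Destroy();  // Manually destroy both subobjects when done.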
  193. // A specialization of the storage entry for sets without a distinct value type.
  194. // Somewhat duplicative of the key-value version, but C++ specialization makes
  195. // doing better difficult.
  196. template <typename KeyT>
  197. struct StorageEntry<KeyT, void> {
  198. static constexpr bool IsTriviallyDestructible =
  199. std::is_trivially_destructible_v<KeyT>;
  200. static constexpr bool IsTriviallyRelocatable =
  201. IsTriviallyDestructible && std::is_trivially_move_constructible_v<KeyT>;
  202. auto key() const -> const KeyT& {
  203. // Ensure we don't need more alignment than available.
  204. static_assert(
  205. alignof(StorageEntry) <= MinAllocatedSize,
  206. "The minimum allocated size turns into the alignment of our array of "
  207. "storage entries as they follow the metadata byte array.");
  208. return *std::launder(reinterpret_cast<const KeyT*>(&key_storage));
  209. }
  210. auto key() -> KeyT& {
  211. return const_cast<KeyT&>(const_cast<const StorageEntry*>(this)->key());
  212. }
  213. auto Destroy() -> void {
  214. static_assert(!IsTriviallyDestructible,
  215. "Should never instantiate when trivial!");
  216. key().~KeyT();
  217. }
  218. auto CopyFrom(const StorageEntry& entry) -> void {
  219. if constexpr (IsTriviallyRelocatable) {
  220. memcpy(this, &entry, sizeof(StorageEntry));
  221. } else {
  222. new (&key_storage) KeyT(entry.key());
  223. }
  224. }
  225. auto MoveFrom(StorageEntry&& entry) -> void {
  226. if constexpr (IsTriviallyRelocatable) {
  227. memcpy(this, &entry, sizeof(StorageEntry));
  228. } else {
  229. new (&key_storage) KeyT(std::move(entry.key()));
  230. entry.key().~KeyT();
  231. }
  232. }
  233. alignas(KeyT) std::byte key_storage[sizeof(KeyT)];
  234. };
  235. struct Metrics {
  236. // How many keys are present in the table.
  237. ssize_t key_count = 0;
  238. // How many slots of the table are reserved due to deleted markers required to
  239. // preserve probe sequences.
  240. ssize_t deleted_count = 0;
  241. // How many bytes of allocated storage are used by the table. Note, does not
  242. // include the table object or any small-size buffer.
  243. ssize_t storage_bytes = 0;
  244. // How many keys have required probing beyond the initial group. These are the
  245. // keys with a probe distance > 0.
  246. ssize_t probed_key_count = 0;
  247. // The probe distance averaged over every key. If every key is in its initial
  248. // group, this will be zero as no keys will have a larger probe distance. In
  249. // general, we want this to be as close to zero as possible.
  250. double probe_avg_distance = 0.0;
  251. // The maximum probe distance found for a single key in the table.
  252. ssize_t probe_max_distance = 0;
  253. // The average number of probing comparisons required to locate a specific key
  254. // in the table. This is how many comparisons are required *before* the key is
  255. // located, or the *failed* comparisons. We always have to do one successful
  256. // comparison at the end. This successful comparison isn't counted because
  257. // that focuses this metric on the overhead the table is introducing, and
  258. // keeps a "perfect" table with an average of `0.0` here similar to the
  259. // perfect average of `0.0` average probe distance.
  260. double probe_avg_compares = 0.0;
  261. // The maximum number of probing comparisons required to locate a specific
  262. // key in the table.
  263. ssize_t probe_max_compares = 0;
  264. };
  265. // A placeholder empty type used to model pointers to the allocated buffer of
  266. // storage.
  267. //
  268. // The allocated storage doesn't have a meaningful static layout -- it consists
  269. // of an array of metadata groups followed by an array of storage entries.
  270. // However, we want to be able to give pointers to this storage a distinct
  271. // type, and so we use pointers to this placeholder type as that signifier.
  272. //
  273. // This is a complete, empty type so that it can be used as a base class of a
  274. // specific concrete storage type for compile-time sized storage.
  275. struct Storage {};
  276. // Forward declaration to support friending, see the definition below.
  277. template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
  278. class BaseImpl;
  279. // Implementation helper for defining a read-only view type for a hashtable.
  280. //
  281. // A specific user-facing hashtable view type should derive privately from this
  282. // type, and forward the implementation of its interface to functions in this
  283. // type.
  284. //
  285. // The methods available to user-facing hashtable types are `protected`, and
  286. // where they are expected to directly map to a public API, named with an `Impl`
  287. // suffix. The suffix naming ensures types don't `using` in these low-level APIs
  288. // but declare their own and implement them by forwarding to these APIs. We
  289. // don't want users to have to read these implementation details to understand
  290. // their container's API, so none of these methods should be `using`-ed into the
  291. // user facing types.
  292. //
  293. // Some of the types are just convenience aliases and aren't important to
  294. // surface as part of the user-facing type API for readers and so those are
  295. // reasonable to add via a `using`.
  296. //
  297. // Some methods are used by other parts of the raw hashtable implementation.
  298. // Those are kept `private` and where necessary the other components of the raw
  299. // hashtable implementation are friended to give access to them.
  300. template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
  301. class ViewImpl {
  302. protected:
  303. using KeyT = InputKeyT;
  304. using ValueT = InputValueT;
  305. using KeyContextT = InputKeyContextT;
  306. using EntryT = StorageEntry<KeyT, ValueT>;
  307. using MetricsT = Metrics;
  308. friend class BaseImpl<KeyT, ValueT, KeyContextT>;
  309. template <typename InputBaseT, ssize_t SmallSize>
  310. friend class TableImpl;
  311. // Make more-`const` types friends to enable conversions that add `const`.
  312. friend class ViewImpl<const KeyT, ValueT, KeyContextT>;
  313. friend class ViewImpl<KeyT, const ValueT, KeyContextT>;
  314. friend class ViewImpl<const KeyT, const ValueT, KeyContextT>;
  315. ViewImpl() = default;
  316. // Support adding `const` to either key or value type of some other view.
  317. template <typename OtherKeyT, typename OtherValueT>
  318. // NOLINTNEXTLINE(google-explicit-constructor)
  319. ViewImpl(ViewImpl<OtherKeyT, OtherValueT, KeyContextT> other_view)
  320. requires(std::same_as<KeyT, OtherKeyT> ||
  321. std::same_as<KeyT, const OtherKeyT>) &&
  322. (std::same_as<ValueT, OtherValueT> ||
  323. std::same_as<ValueT, const OtherValueT>)
  324. : alloc_size_(other_view.alloc_size_), storage_(other_view.storage_) {}
  325. // Looks up an entry in the hashtable and returns its address or null if not
  326. // present.
  327. template <typename LookupKeyT>
  328. auto LookupEntry(LookupKeyT lookup_key, KeyContextT key_context) const
  329. -> EntryT*;
  330. // Calls `entry_callback` for each entry in the hashtable. All the entries
  331. // within a specific group are visited first, and then `group_callback` is
  332. // called on the group itself. The `group_callback` is typically only used by
  333. // the internals of the hashtable.
  334. template <typename EntryCallbackT, typename GroupCallbackT>
  335. auto ForEachEntry(EntryCallbackT entry_callback,
  336. GroupCallbackT group_callback) const -> void;
  337. // Returns a collection of informative metrics on the current state of the
  338. // table, useful for performance analysis. These include relatively slow to
  339. // compute metrics requiring deep inspection of the table's state.
  340. auto ComputeMetricsImpl(KeyContextT key_context) const -> MetricsT;
  341. private:
  342. ViewImpl(ssize_t alloc_size, Storage* storage)
  343. : alloc_size_(alloc_size), storage_(storage) {}
  344. // Computes the offset from the metadata array to the entries array for a
  345. // given size. This is trivial, but we use this routine to enforce invariants
  346. // on the sizes.
  347. static constexpr auto EntriesOffset(ssize_t alloc_size) -> ssize_t {
  348. CARBON_DCHECK(llvm::isPowerOf2_64(alloc_size))
  349. << "Size must be a power of two for a hashed buffer!";
  350. // The size is always a power of two. We prevent any too-small sizes so it
  351. // being a power of two provides the needed alignment. As a result, the
  352. // offset is exactly the size. We validate this here to catch alignment bugs
  353. // early.
  354. CARBON_DCHECK(static_cast<uint64_t>(alloc_size) ==
  355. llvm::alignTo<alignof(EntryT)>(alloc_size));
  356. return alloc_size;
  357. }
  358. // Compute the allocated table's byte size.
  359. static constexpr auto AllocByteSize(ssize_t alloc_size) -> ssize_t {
  360. return EntriesOffset(alloc_size) + sizeof(EntryT) * alloc_size;
  361. }
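// As a concrete example of the layout: with `alloc_size == 64`, the metadata
// bytes occupy offsets `[0, 64)`, the entries array begins at byte offset 64
// (the offset equals the size, as validated above), and the total allocation
// is `64 + 64 * sizeof(EntryT)` bytes.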
  362. auto metadata() const -> uint8_t* {
  363. return reinterpret_cast<uint8_t*>(storage_);
  364. }
  365. auto entries() const -> EntryT* {
  366. return reinterpret_cast<EntryT*>(reinterpret_cast<std::byte*>(storage_) +
  367. EntriesOffset(alloc_size_));
  368. }
  369. ssize_t alloc_size_;
  370. Storage* storage_;
  371. };
  372. // Implementation helper for defining a read-write base type for a hashtable
  373. // that type-erases any SSO buffer.
  374. //
  375. // A specific user-facing hashtable base type should derive using *`protected`*
  376. // inheritance from this type, and forward the implementation of its interface
  377. // to functions in this type.
  378. //
  379. // Other than the use of `protected` inheritance, the patterns for this type,
  380. // and how to build user-facing hashtable base types from it, mirror those of
  381. // `ViewImpl`. See its documentation for more details.
  382. template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
  383. class BaseImpl {
  384. protected:
  385. using KeyT = InputKeyT;
  386. using ValueT = InputValueT;
  387. using KeyContextT = InputKeyContextT;
  388. using ViewImplT = ViewImpl<KeyT, ValueT, KeyContextT>;
  389. using EntryT = typename ViewImplT::EntryT;
  390. using MetricsT = typename ViewImplT::MetricsT;
  391. BaseImpl(int small_alloc_size, Storage* small_storage)
  392. : small_alloc_size_(small_alloc_size) {
  393. CARBON_CHECK(small_alloc_size >= 0);
  394. Construct(small_storage);
  395. }
  396. // Only used for copying and moving, and leaves storage uninitialized.
  397. BaseImpl(ssize_t alloc_size, int growth_budget, int small_alloc_size)
  398. : view_impl_(alloc_size, nullptr),
  399. growth_budget_(growth_budget),
  400. small_alloc_size_(small_alloc_size) {}
  401. // Destruction must be handled by the table where it can destroy entries in
  402. // any small buffer, so make the base destructor protected but defaulted here.
  403. ~BaseImpl() = default;
  404. // NOLINTNEXTLINE(google-explicit-constructor): Designed to implicitly decay.
  405. operator ViewImplT() const { return view_impl(); }
  406. auto view_impl() const -> ViewImplT { return view_impl_; }
  407. // Looks up the provided key in the hashtable. If found, returns a pointer to
  408. // that entry and `false`.
  409. //
  410. // If not found, will locate an empty entry for inserting into, set the
  411. // metadata for that entry, and return a pointer to the entry and `true`. When
  412. // necessary, this will grow the hashtable to cause there to be sufficient
  413. // empty entries.
  414. template <typename LookupKeyT>
  415. auto InsertImpl(LookupKeyT lookup_key, KeyContextT key_context)
  416. -> std::pair<EntryT*, bool>;
  417. // Grow the table to specific allocation size.
  418. //
  419. // This will grow the table if necessary for it to have an allocation size
  420. // of `target_alloc_size` which must be a power of two. Note that this will
  421. // not allow that many keys to be inserted into the hashtable, but a smaller
  422. // number based on the load factor. If a specific number of insertions need to
  423. // be achieved without triggering growth, use the `GrowForInsertCountImpl`
  424. // method.
  425. auto GrowToAllocSizeImpl(ssize_t target_alloc_size, KeyContextT key_context)
  426. -> void;
  427. // Grow the table to allow inserting the specified number of keys.
  428. auto GrowForInsertCountImpl(ssize_t count, KeyContextT key_context) -> void;
  429. // Looks up the entry in the hashtable, and if found destroys the entry and
  430. // returns `true`. If not found, returns `false`.
  431. //
  432. // Does not release any memory, just leaves a tombstone behind so this entry
  433. // cannot be found and the slot can in theory be re-used.
  434. template <typename LookupKeyT>
  435. auto EraseImpl(LookupKeyT lookup_key, KeyContextT key_context) -> bool;
  436. // Erases all entries in the hashtable but leaves the allocated storage.
  437. auto ClearImpl() -> void;
  438. private:
  439. template <typename InputBaseT, ssize_t SmallSize>
  440. friend class TableImpl;
  441. static constexpr ssize_t Alignment = std::max<ssize_t>(
  442. {alignof(MetadataGroup), alignof(StorageEntry<KeyT, ValueT>)});
  443. // Implementation of inline small storage for the provided key type, value
  444. // type, and small size. Specialized for a zero small size to be an empty
  445. // struct.
  446. template <ssize_t SmallSize>
  447. struct SmallStorage : Storage {
  448. alignas(Alignment) uint8_t metadata[SmallSize];
  449. mutable StorageEntry<KeyT, ValueT> entries[SmallSize];
  450. };
  451. // Specialized storage with no inline buffer to avoid any extra alignment.
  452. template <>
  453. struct SmallStorage<0> {};
  454. static auto Allocate(ssize_t alloc_size) -> Storage*;
  455. static auto Deallocate(Storage* storage, ssize_t alloc_size) -> void;
  456. auto growth_budget() const -> ssize_t { return growth_budget_; }
  457. auto alloc_size() const -> ssize_t { return view_impl_.alloc_size_; }
  458. auto alloc_size() -> ssize_t& { return view_impl_.alloc_size_; }
  459. auto storage() const -> Storage* { return view_impl_.storage_; }
  460. auto storage() -> Storage*& { return view_impl_.storage_; }
  461. auto metadata() const -> uint8_t* { return view_impl_.metadata(); }
  462. auto entries() const -> EntryT* { return view_impl_.entries(); }
  463. auto small_alloc_size() const -> ssize_t {
  464. return static_cast<unsigned>(small_alloc_size_);
  465. }
  466. auto is_small() const -> bool {
  467. CARBON_DCHECK(alloc_size() >= small_alloc_size());
  468. return alloc_size() == small_alloc_size();
  469. }
  470. auto Construct(Storage* small_storage) -> void;
  471. auto Destroy() -> void;
  472. auto CopySlotsFrom(const BaseImpl& arg) -> void;
  473. auto MoveFrom(BaseImpl&& arg, Storage* small_storage) -> void;
  474. template <typename LookupKeyT>
  475. auto InsertIntoEmpty(LookupKeyT lookup_key, KeyContextT key_context)
  476. -> EntryT*;
  477. static auto ComputeNextAllocSize(ssize_t old_alloc_size) -> ssize_t;
  478. static auto GrowthThresholdForAllocSize(ssize_t alloc_size) -> ssize_t;
  479. auto GrowToNextAllocSize(KeyContextT key_context) -> void;
  480. template <typename LookupKeyT>
  481. auto GrowAndInsert(LookupKeyT lookup_key, KeyContextT key_context) -> EntryT*;
  482. ViewImplT view_impl_;
  483. int growth_budget_;
  484. int small_alloc_size_;
  485. };
  486. // Implementation helper for defining a hashtable type with an SSO buffer.
  487. //
  488. // A specific user-facing hashtable should derive privately from this
  489. // type, and forward the implementation of its interface to functions in this
  490. // type. It should provide the corresponding user-facing hashtable base type as
  491. // the `InputBaseT` type parameter (rather than a key/value pair), and this type
  492. // will in turn derive from that provided base type. This allows derived-to-base
  493. // conversion from the user-facing hashtable type to the user-facing hashtable
  494. // base type. And it does so keeping the inheritance linear. The resulting
  495. // linear inheritance hierarchy for a `Map<K, T>` type will look like:
  496. //
  497. // Map<K, T>
  498. // ↓
  499. // TableImpl<MapBase<K, T>>
  500. // ↓
  501. // MapBase<K, T>
  502. // ↓
  503. // BaseImpl<K, T>
  504. //
  505. // Other than this inheritance technique, the patterns for this type, and how to
  506. // build user-facing hashtable types from it, mirror those of `ViewImpl`. See
  507. // its documentation for more details.
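//
// For example, a user-facing map with an inline buffer for 32 entries might be
// declared roughly as follows (hypothetical names, with inheritance details
// elided; see `map.h` for the real types):
//
//   template <typename K, typename V>
//   class MyMap : /*...*/ TableImpl<MyMapBase<K, V>, /*SmallSize=*/32> {
//     // ...
//   };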
  508. template <typename InputBaseT, ssize_t SmallSize>
  509. class TableImpl : public InputBaseT {
  510. protected:
  511. using BaseT = InputBaseT;
  512. TableImpl() : BaseT(SmallSize, small_storage()) {}
  513. TableImpl(const TableImpl& arg);
  514. TableImpl(TableImpl&& arg) noexcept;
  515. auto operator=(const TableImpl& arg) -> TableImpl&;
  516. auto operator=(TableImpl&& arg) noexcept -> TableImpl&;
  517. ~TableImpl();
  518. // Resets the hashtable to its initial state, clearing all entries and
  519. // releasing all memory. If the hashtable had an SSO buffer, that is restored
  520. // as the storage. Otherwise, a minimum sized table storage is allocated.
  521. auto ResetImpl() -> void;
  522. private:
  523. using KeyT = BaseT::KeyT;
  524. using ValueT = BaseT::ValueT;
  525. using EntryT = BaseT::EntryT;
  526. using SmallStorage = BaseT::template SmallStorage<SmallSize>;
  527. auto small_storage() const -> Storage*;
  528. auto SetUpStorage() -> void;
  529. [[no_unique_address]] mutable SmallStorage small_storage_;
  530. };
  531. ////////////////////////////////////////////////////////////////////////////////
  532. //
  533. // Only implementation details below this point.
  534. //
  535. ////////////////////////////////////////////////////////////////////////////////
  536. // Computes a seed that provides a small amount of entropy from ASLR where
  537. // available with minimal cost. The priority is speed, and this computes the
  538. // entropy in a way that doesn't require loading from memory, merely accessing
  539. // entropy already available without accessing memory.
  540. inline auto ComputeSeed() -> uint64_t {
  541. // A global variable whose address is used as a seed. This allows ASLR to
  542. // introduce some variation in hashtable ordering when enabled via the code
  543. // model for globals.
  544. extern volatile std::byte global_addr_seed;
  545. return reinterpret_cast<uint64_t>(&global_addr_seed);
  546. }
  547. inline auto ComputeProbeMaskFromSize(ssize_t size) -> size_t {
  548. CARBON_DCHECK(llvm::isPowerOf2_64(size))
  549. << "Size must be a power of two for a hashed buffer!";
  550. // Since `size` is a power of two, we can make sure the probes are less
  551. // than `size` by making the mask `size - 1`. We also mask off the low
  552. // bits so the probes are a multiple of the size of the groups of entries.
  553. return (size - 1) & ~GroupMask;
  554. }
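// As a worked example: if `GroupSize` were 16 (it is platform-specific) and
// `size == 64`, the mask would be `(64 - 1) & ~15 == 48`, so masked probe
// indices land only on the group-aligned offsets {0, 16, 32, 48}.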
  555. // This class handles building a sequence of probe indices from a given
  556. // starting point, including both the quadratic growth and masking the index
  557. // to stay within the bucket array size. The starting point doesn't need to be
  558. // clamped to the size ahead of time (or even be positive); we will do it
  559. // internally.
  560. //
  561. // For reference on quadratic probing:
  562. // https://en.wikipedia.org/wiki/Quadratic_probing
  563. //
  564. // We compute the quadratic probe index incrementally, but we can also compute
  565. // it mathematically and will check that the incremental result matches our
  566. // mathematical expectation. We use the quadratic probing formula of:
  567. //
  568. // p(start, step) = (start + (step + step^2) / 2) (mod size / GroupSize)
  569. //
  570. // However, we compute it incrementally and scale all the variables by the group
  571. // size so it can be used as an index without an additional multiplication.
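//
// As a worked example (after scaling back up by `GroupSize`), the cumulative
// offsets from the starting group are the triangular numbers times the group
// size, all masked back into the table:
//
//   p(start, 0) = start
//   p(start, 1) = start + 1 * GroupSize
//   p(start, 2) = start + 3 * GroupSize
//   p(start, 3) = start + 6 * GroupSize
//   ...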
  572. class ProbeSequence {
  573. public:
  574. ProbeSequence(ssize_t start, ssize_t size) {
  575. mask_ = ComputeProbeMaskFromSize(size);
  576. p_ = start & mask_;
  577. #ifndef NDEBUG
  578. start_ = start & mask_;
  579. size_ = size;
  580. #endif
  581. }
  582. void Next() {
  583. step_ += GroupSize;
  584. p_ = (p_ + step_) & mask_;
  585. #ifndef NDEBUG
  586. // Verify against the quadratic formula we expect to be following by scaling
  587. // everything down by `GroupSize`.
  588. CARBON_DCHECK(
  589. (p_ / GroupSize) ==
  590. ((start_ / GroupSize +
  591. (step_ / GroupSize + (step_ / GroupSize) * (step_ / GroupSize)) / 2) %
  592. (size_ / GroupSize)))
  593. << "Index in probe sequence does not match the expected formula.";
  594. CARBON_DCHECK(step_ < size_) << "We necessarily visit all groups, so we "
  595. "can't have more probe steps than groups.";
  596. #endif
  597. }
  598. auto index() const -> ssize_t { return p_; }
  599. private:
  600. ssize_t step_ = 0;
  601. size_t mask_;
  602. ssize_t p_;
  603. #ifndef NDEBUG
  604. ssize_t start_;
  605. ssize_t size_;
  606. #endif
  607. };
  608. // TODO: Evaluate keeping this outlined to see if macro benchmarks observe the
  609. // same perf hit as micro benchmarks.
  610. template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
  611. template <typename LookupKeyT>
  612. auto ViewImpl<InputKeyT, InputValueT, InputKeyContextT>::LookupEntry(
  613. LookupKeyT lookup_key, KeyContextT key_context) const -> EntryT* {
  614. // Prefetch with a "low" temporal locality as we're primarily expecting a
  615. // brief use of the storage and then to return to application code.
  616. __builtin_prefetch(storage_, /*read*/ 0, /*low-locality*/ 1);
  617. ssize_t local_size = alloc_size_;
  618. CARBON_DCHECK(local_size > 0);
  619. uint8_t* local_metadata = metadata();
  620. HashCode hash = key_context.HashKey(lookup_key, ComputeSeed());
  621. auto [hash_index, tag] = hash.ExtractIndexAndTag<7>();
  622. EntryT* local_entries = entries();
  623. // Walk through groups of entries using a quadratic probe starting from
  624. // `hash_index`.
  625. ProbeSequence s(hash_index, local_size);
  626. do {
  627. ssize_t group_index = s.index();
  628. // For each group, match the tag against the metadata to extract the
  629. // potentially matching entries within the group.
  630. MetadataGroup g = MetadataGroup::Load(local_metadata, group_index);
  631. auto metadata_matched_range = g.Match(tag);
  632. if (LLVM_LIKELY(metadata_matched_range)) {
  633. // If any entries in this group potentially match based on their metadata,
  634. // walk each candidate and compare its key to see if we have definitively
  635. // found a match.
  636. EntryT* group_entries = &local_entries[group_index];
  637. auto byte_it = metadata_matched_range.begin();
  638. auto byte_end = metadata_matched_range.end();
  639. do {
  640. EntryT* entry = byte_it.index_ptr(group_entries);
  641. if (LLVM_LIKELY(key_context.KeyEq(lookup_key, entry->key()))) {
  642. __builtin_assume(entry != nullptr);
  643. return entry;
  644. }
  645. ++byte_it;
  646. } while (LLVM_UNLIKELY(byte_it != byte_end));
  647. }
  648. // We failed to find a matching entry in this bucket, so check if there are
  649. // empty slots as that indicates we're done probing -- no later probed index
  650. // could have a match.
  651. auto empty_byte_matched_range = g.MatchEmpty();
  652. if (LLVM_LIKELY(empty_byte_matched_range)) {
  653. return nullptr;
  654. }
  655. s.Next();
  656. // We use a weird construct of an "unlikely" condition of `true`. The goal
  657. // is to get the compiler to not prioritize the back edge of the loop for
  658. // code layout, and in at least some tests this seems to be an effective
  659. // construct for achieving this.
  660. } while (LLVM_UNLIKELY(true));
  661. }
  662. // Note that we force inlining here because we expect to be called with lambdas
  663. // that will in turn be inlined to form the loop body. We don't want function
  664. // boundaries within the loop for performance, and the degree of simplification
  665. // enabled by inlining these callbacks may be difficult for the compiler to
  666. // recognize automatically.
  667. template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
  668. template <typename EntryCallbackT, typename GroupCallbackT>
  669. [[clang::always_inline]] auto
  670. ViewImpl<InputKeyT, InputValueT, InputKeyContextT>::ForEachEntry(
  671. EntryCallbackT entry_callback, GroupCallbackT group_callback) const
  672. -> void {
  673. uint8_t* local_metadata = metadata();
  674. EntryT* local_entries = entries();
  675. ssize_t local_size = alloc_size_;
  676. for (ssize_t group_index = 0; group_index < local_size;
  677. group_index += GroupSize) {
  678. auto g = MetadataGroup::Load(local_metadata, group_index);
  679. auto present_matched_range = g.MatchPresent();
  680. if (!present_matched_range) {
  681. continue;
  682. }
  683. for (ssize_t byte_index : present_matched_range) {
  684. entry_callback(local_entries[group_index + byte_index]);
  685. }
  686. group_callback(&local_metadata[group_index]);
  687. }
  688. }
  689. template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
  690. auto ViewImpl<InputKeyT, InputValueT, InputKeyContextT>::ComputeMetricsImpl(
  691. KeyContextT key_context) const -> Metrics {
  692. uint8_t* local_metadata = metadata();
  693. EntryT* local_entries = entries();
  694. ssize_t local_size = alloc_size_;
  695. Metrics metrics;
  696. // Compute the ones we can directly.
  697. metrics.deleted_count = llvm::count(
  698. llvm::ArrayRef(local_metadata, local_size), MetadataGroup::Deleted);
  699. metrics.storage_bytes = AllocByteSize(local_size);
  700. // We want to process present slots specially to collect metrics on their
  701. // probing behavior.
  702. for (ssize_t group_index = 0; group_index < local_size;
  703. group_index += GroupSize) {
  704. auto g = MetadataGroup::Load(local_metadata, group_index);
  705. auto present_matched_range = g.MatchPresent();
  706. for (ssize_t byte_index : present_matched_range) {
  707. ++metrics.key_count;
  708. ssize_t index = group_index + byte_index;
  709. HashCode hash =
  710. key_context.HashKey(local_entries[index].key(), ComputeSeed());
  711. auto [hash_index, tag] = hash.ExtractIndexAndTag<7>();
  712. ProbeSequence s(hash_index, local_size);
  713. metrics.probed_key_count +=
  714. static_cast<ssize_t>(s.index() != group_index);
  715. // For each probed key, go through the probe sequence to find both the
  716. // probe distance and how many comparisons are required.
  717. ssize_t distance = 0;
  718. ssize_t compares = 0;
  719. for (; s.index() != group_index; s.Next()) {
  720. auto probe_g = MetadataGroup::Load(local_metadata, s.index());
  721. auto probe_matched_range = probe_g.Match(tag);
  722. compares += std::distance(probe_matched_range.begin(),
  723. probe_matched_range.end());
  724. distance += 1;
  725. }
  726. auto probe_g = MetadataGroup::Load(local_metadata, s.index());
  727. auto probe_matched_range = probe_g.Match(tag);
  728. CARBON_CHECK(!probe_matched_range.empty());
  729. for (ssize_t match_index : probe_matched_range) {
  730. if (match_index >= byte_index) {
  731. // Note we only count the compares that will *fail* as part of
  732. // probing. The last successful compare isn't interesting, it is
  733. // always needed.
  734. break;
  735. }
  736. compares += 1;
  737. }
  738. metrics.probe_avg_distance += distance;
  739. metrics.probe_max_distance =
  740. std::max(metrics.probe_max_distance, distance);
  741. metrics.probe_avg_compares += compares;
  742. metrics.probe_max_compares =
  743. std::max(metrics.probe_max_compares, compares);
  744. }
  745. }
  746. if (metrics.key_count > 0) {
  747. metrics.probe_avg_compares /= metrics.key_count;
  748. metrics.probe_avg_distance /= metrics.key_count;
  749. }
  750. return metrics;
  751. }
  752. // TODO: Evaluate whether it is worth forcing this out-of-line given the
  753. // reasonable ABI boundary it forms and large volume of code necessary to
  754. // implement it.
  755. template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
  756. template <typename LookupKeyT>
  757. auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::InsertImpl(
  758. LookupKeyT lookup_key, KeyContextT key_context)
  759. -> std::pair<EntryT*, bool> {
  760. CARBON_DCHECK(alloc_size() > 0);
  761. uint8_t* local_metadata = metadata();
  762. HashCode hash = key_context.HashKey(lookup_key, ComputeSeed());
  763. auto [hash_index, tag] = hash.ExtractIndexAndTag<7>();
  764. // We re-purpose the empty control byte to signal no insert is needed to the
  765. // caller. This is guaranteed to not be a control byte we're inserting.
  766. // constexpr uint8_t NoInsertNeeded = Group::Empty;
  767. ssize_t group_with_deleted_index;
  768. MetadataGroup::MatchIndex deleted_match = {};
  769. EntryT* local_entries = entries();
  770. auto return_insert_at_index = [&](ssize_t index) -> std::pair<EntryT*, bool> {
  771. // We'll need to insert at this index so set the control group byte to the
  772. // proper value.
  773. local_metadata[index] = tag | MetadataGroup::PresentMask;
  774. return {&local_entries[index], true};
  775. };
  776. for (ProbeSequence s(hash_index, alloc_size());; s.Next()) {
  777. ssize_t group_index = s.index();
  778. auto g = MetadataGroup::Load(local_metadata, group_index);
  779. auto control_byte_matched_range = g.Match(tag);
  780. if (control_byte_matched_range) {
  781. EntryT* group_entries = &local_entries[group_index];
  782. auto byte_it = control_byte_matched_range.begin();
  783. auto byte_end = control_byte_matched_range.end();
  784. do {
  785. EntryT* entry = byte_it.index_ptr(group_entries);
  786. if (LLVM_LIKELY(key_context.KeyEq(lookup_key, entry->key()))) {
  787. return {entry, false};
  788. }
  789. ++byte_it;
  790. } while (LLVM_UNLIKELY(byte_it != byte_end));
  791. }
  792. // Track the first group with a deleted entry that we could insert over.
  793. if (!deleted_match) {
  794. deleted_match = g.MatchDeleted();
  795. group_with_deleted_index = group_index;
  796. }
  797. // We failed to find a matching entry in this bucket, so check if there are
  798. // no empty slots. In that case, we'll continue probing.
  799. auto empty_match = g.MatchEmpty();
  800. if (!empty_match) {
  801. continue;
  802. }
  803. // Ok, we've finished probing without finding anything and need to insert
  804. // instead.
  805. // If we found a deleted slot, we don't need the probe sequence to insert
  806. // so just bail. We want to ensure building up a table is fast so we
  807. // de-prioritize this a bit. In practice this doesn't have too much of an
  808. // effect.
  809. if (LLVM_UNLIKELY(deleted_match)) {
  810. return return_insert_at_index(group_with_deleted_index +
  811. deleted_match.index());
  812. }
  813. // We're going to need to grow by inserting into an empty slot. Check that
  814. // we have the budget for that before we compute the exact index of the
  815. // empty slot. Without the growth budget we'll have to completely rehash and
  816. // so we can just bail here.
  817. if (LLVM_UNLIKELY(growth_budget_ == 0)) {
  818. return {GrowAndInsert(lookup_key, key_context), true};
  819. }
  820. --growth_budget_;
  821. CARBON_DCHECK(growth_budget() >= 0)
  822. << "Growth budget shouldn't have gone negative!";
  823. return return_insert_at_index(group_index + empty_match.index());
  824. }
  825. CARBON_FATAL() << "We should never finish probing without finding the entry "
  826. "or an empty slot.";
  827. }
  828. template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
  829. [[clang::noinline]] auto
  830. BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::GrowToAllocSizeImpl(
  831. ssize_t target_alloc_size, KeyContextT key_context) -> void {
  832. CARBON_CHECK(llvm::isPowerOf2_64(target_alloc_size));
  833. if (target_alloc_size <= alloc_size()) {
  834. return;
  835. }
  836. // If this is the next alloc size, we can use our optimized growth strategy.
  837. if (target_alloc_size == ComputeNextAllocSize(alloc_size())) {
  838. GrowToNextAllocSize(key_context);
  839. return;
  840. }
  841. // Create locals for the old state of the table.
  842. ssize_t old_size = alloc_size();
  843. CARBON_DCHECK(old_size > 0);
  844. bool old_small = is_small();
  845. Storage* old_storage = storage();
  846. uint8_t* old_metadata = metadata();
  847. EntryT* old_entries = entries();
  848. // Configure for the new size and allocate the new storage.
  849. alloc_size() = target_alloc_size;
  850. storage() = Allocate(target_alloc_size);
  851. std::memset(metadata(), 0, target_alloc_size);
  852. growth_budget_ = GrowthThresholdForAllocSize(target_alloc_size);
  853. // Just re-insert all the entries. As we're more than doubling the table size,
  854. // we don't bother with fancy optimizations here. Even using `memcpy` for the
  855. // entries seems unlikely to be a significant win given how sparse the
  856. // insertions will end up being.
  857. ssize_t count = 0;
  858. for (ssize_t group_index = 0; group_index < old_size;
  859. group_index += GroupSize) {
  860. auto g = MetadataGroup::Load(old_metadata, group_index);
  861. auto present_matched_range = g.MatchPresent();
  862. for (ssize_t byte_index : present_matched_range) {
  863. ++count;
  864. ssize_t index = group_index + byte_index;
  865. EntryT* new_entry =
  866. InsertIntoEmpty(old_entries[index].key(), key_context);
  867. new_entry->MoveFrom(std::move(old_entries[index]));
  868. }
  869. }
  870. growth_budget_ -= count;
  871. if (!old_small) {
  872. // Old isn't a small buffer, so we need to deallocate it.
  873. Deallocate(old_storage, old_size);
  874. }
  875. }
  876. template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
  877. auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::GrowForInsertCountImpl(
  878. ssize_t count, KeyContextT key_context) -> void {
  879. if (count < growth_budget_) {
  880. // Already space for the needed growth.
  881. return;
  882. }
  883. // Currently, we don't account for any tombstones marking deleted elements,
  884. // and just conservatively ensure the growth will create adequate growth
  885. // budget for insertions. We could make this more precise by instead walking
  886. // the table and only counting present slots, as once we grow we'll be able to
  887. // reclaim all of the deleted slots. But this adds complexity and it isn't
  888. // clear this is necessary so we do the simpler conservative thing.
  889. ssize_t used_budget =
  890. GrowthThresholdForAllocSize(alloc_size()) - growth_budget_;
  891. ssize_t budget_needed = used_budget + count;
  892. ssize_t space_needed = budget_needed + (budget_needed / 7);
  893. ssize_t target_alloc_size = llvm::NextPowerOf2(space_needed);
  894. CARBON_CHECK(GrowthThresholdForAllocSize(target_alloc_size) >
  895. (budget_needed));
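// As a worked example of the arithmetic above (assuming the growth threshold
// is roughly 7/8 of the allocation size, matching the 1/7 padding here): with
// `budget_needed == 100`, `space_needed` is `100 + 100 / 7 == 114`, so the
// table grows to the next power of two, 128, which leaves a growth threshold
// of about 112 and satisfies the check.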
  896. GrowToAllocSizeImpl(target_alloc_size, key_context);
  897. }
  898. template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
  899. template <typename LookupKeyT>
  900. auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::EraseImpl(
  901. LookupKeyT lookup_key, KeyContextT key_context) -> bool {
  902. EntryT* entry = view_impl_.LookupEntry(lookup_key, key_context);
  903. if (!entry) {
  904. return false;
  905. }
  906. // If there are empty slots in this group then nothing will probe past this
  907. // group looking for an entry so we can simply set this slot to empty as
  908. // well. However, if every slot in this group is full, it might be part of
  909. // a long probe chain that we can't disrupt. In that case we mark the slot's
  910. // metadata as deleted to keep probes continuing past it.
  911. //
  912. // If we mark the slot as empty, we'll also need to increase the growth
  913. // budget.
  914. uint8_t* local_metadata = metadata();
  915. EntryT* local_entries = entries();
  916. ssize_t index = entry - local_entries;
  917. ssize_t group_index = index & ~GroupMask;
  918. auto g = MetadataGroup::Load(local_metadata, group_index);
  919. auto empty_matched_range = g.MatchEmpty();
  920. if (empty_matched_range) {
  921. local_metadata[index] = MetadataGroup::Empty;
  922. ++growth_budget_;
  923. } else {
  924. local_metadata[index] = MetadataGroup::Deleted;
  925. }
  926. if constexpr (!EntryT::IsTriviallyDestructible) {
  927. entry->Destroy();
  928. }
  929. return true;
  930. }
  931. template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
  932. auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::ClearImpl() -> void {
  933. view_impl_.ForEachEntry(
  934. [](EntryT& entry) {
  935. if constexpr (!EntryT::IsTriviallyDestructible) {
  936. entry.Destroy();
  937. }
  938. },
  939. [](uint8_t* metadata_group) {
  940. // Clear the group.
  941. std::memset(metadata_group, 0, GroupSize);
  942. });
  943. growth_budget_ = GrowthThresholdForAllocSize(alloc_size());
  944. }
  945. // Allocates the appropriate memory layout for a table of the given
  946. // `alloc_size`, with space both for the metadata array and entries.
  947. //
  948. // The returned pointer *must* be deallocated by calling the below `Deallocate`
  949. // function with the same `alloc_size` as used here.
  950. template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
  951. auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::Allocate(
  952. ssize_t alloc_size) -> Storage* {
  953. return reinterpret_cast<Storage*>(__builtin_operator_new(
  954. ViewImplT::AllocByteSize(alloc_size),
  955. static_cast<std::align_val_t>(Alignment), std::nothrow_t()));
  956. }
  957. // Deallocates a table's storage that was allocated with the `Allocate`
  958. // function.
  959. template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
  960. auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::Deallocate(
  961. Storage* storage, ssize_t alloc_size) -> void {
  962. ssize_t allocated_size = ViewImplT::AllocByteSize(alloc_size);
  963. // The size is only needed for sized deallocation, but make sure it always compiles.
  964. static_cast<void>(allocated_size);
  965. __builtin_operator_delete(storage,
  966. #if __cpp_sized_deallocation
  967. allocated_size,
  968. #endif
  969. static_cast<std::align_val_t>(Alignment));
  970. }
  971. // Construct a table using the provided small storage if `small_alloc_size_` is
  972. // non-zero. If `small_alloc_size_` is zero, then `small_storage` won't be used
  973. // and can be null. Regardless, after this the storage pointer is non-null and
  974. // the size is non-zero so that we can directly begin inserting or querying the
  975. // table.
  976. template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
  977. auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::Construct(
  978. Storage* small_storage) -> void {
  979. if (small_alloc_size_ > 0) {
  980. alloc_size() = small_alloc_size_;
  981. storage() = small_storage;
  982. } else {
  983. // Directly allocate the initial buffer so that the hashtable is never in
  984. // an empty state.
  985. alloc_size() = MinAllocatedSize;
  986. storage() = Allocate(MinAllocatedSize);
  987. }
  988. std::memset(metadata(), 0, alloc_size());
  989. growth_budget_ = GrowthThresholdForAllocSize(alloc_size());
  990. }
  991. // Destroy the current table, releasing any memory used.
  992. template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
  993. auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::Destroy() -> void {
  994. // Check for a moved-from state and don't do anything. Only a moved-from table
  995. // has a zero size.
  996. if (alloc_size() == 0) {
  997. return;
  998. }
  999. // Destroy all the entries.
  1000. if constexpr (!EntryT::IsTriviallyDestructible) {
  1001. view_impl_.ForEachEntry([](EntryT& entry) { entry.Destroy(); },
  1002. [](auto...) {});
  1003. }
  1004. // If small, nothing to deallocate.
  1005. if (is_small()) {
  1006. return;
  1007. }
  1008. // Just deallocate the storage without updating anything when destroying the
  1009. // object.
  1010. Deallocate(storage(), alloc_size());
  1011. }
// Copy all of the slots over from another table that is exactly the same
// allocation size.
//
// This requires the current table to already have storage allocated and set up
// but not initialized (or already cleared). It directly overwrites the storage
// allocation of the table to match the incoming argument.
//
// Despite being used in construction, this shouldn't be called for a
// moved-from `arg` -- in practice it is better for callers to handle this when
// setting up storage.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::CopySlotsFrom(
    const BaseImpl& arg) -> void {
  CARBON_DCHECK(alloc_size() == arg.alloc_size());
  ssize_t local_size = alloc_size();

  // Preserve which slot every entry is in, including tombstones in the
  // metadata, in order to copy into the new table's storage without rehashing
  // all of the keys. This is especially important as we don't have an easy way
  // to access the key context needed for rehashing here.
  uint8_t* local_metadata = metadata();
  EntryT* local_entries = entries();
  const uint8_t* local_arg_metadata = arg.metadata();
  const EntryT* local_arg_entries = arg.entries();
  memcpy(local_metadata, local_arg_metadata, local_size);
  for (ssize_t group_index = 0; group_index < local_size;
       group_index += GroupSize) {
    auto g = MetadataGroup::Load(local_arg_metadata, group_index);
    for (ssize_t byte_index : g.MatchPresent()) {
      local_entries[group_index + byte_index].CopyFrom(
          local_arg_entries[group_index + byte_index]);
    }
  }
}

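// Explanatory note on `CopySlotsFrom` above (added for clarity): because the
// metadata bytes, including tombstones, are copied verbatim, every entry in
// the copy occupies exactly the same slot as in `arg` and lookups probe the
// same sequence of groups. That is what lets this routine avoid re-hashing
// keys for which it has no key context.
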
// Move from another table to this one.
//
// Note that the `small_storage` is *this* table's small storage pointer,
// provided from the `TableImpl` to this `BaseImpl` method as an argument.
//
// Requires the table to have its size and growth already set up, but otherwise
// the table must not yet be initialized. Notably, storage should either not
// yet be constructed or already destroyed. This both sets up the storage and
// handles any moving of slots that is needed.
//
// Note that because this is used in construction it needs to handle a
// moved-from `arg`.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::MoveFrom(
    BaseImpl&& arg, Storage* small_storage) -> void {
  ssize_t local_size = alloc_size();
  CARBON_DCHECK(local_size == arg.alloc_size());

  // If `arg` is moved-from, skip the rest as the local size is all we need.
  if (local_size == 0) {
    return;
  }

  if (arg.is_small()) {
    CARBON_DCHECK(local_size == small_alloc_size_);
    this->storage() = small_storage;
    // For small tables, we have to move the entries as we can't move the
    // tables themselves. We do this preserving their slots and even tombstones
    // to avoid rehashing.
    uint8_t* local_metadata = this->metadata();
    EntryT* local_entries = this->entries();
    uint8_t* local_arg_metadata = arg.metadata();
    EntryT* local_arg_entries = arg.entries();
    memcpy(local_metadata, local_arg_metadata, local_size);
    if (EntryT::IsTriviallyRelocatable) {
      memcpy(local_entries, local_arg_entries, local_size * sizeof(EntryT));
    } else {
      for (ssize_t group_index = 0; group_index < local_size;
           group_index += GroupSize) {
        auto g = MetadataGroup::Load(local_arg_metadata, group_index);
        for (ssize_t byte_index : g.MatchPresent()) {
          local_entries[group_index + byte_index].MoveFrom(
              std::move(local_arg_entries[group_index + byte_index]));
        }
      }
    }
  } else {
    // Just point to the allocated storage.
    storage() = arg.storage();
  }

  // Finally, put the incoming table into a moved-from state.
  arg.alloc_size() = 0;
  // Replace the pointer with null to ease debugging.
  arg.storage() = nullptr;
}

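// Explanatory note on the tail of `MoveFrom` above: after it returns, `arg` is
// in the moved-from state used throughout this file -- an `alloc_size()` of
// zero and a null storage pointer -- which is the state that `Destroy` (and
// `MoveFrom` itself, via its early `local_size == 0` return) explicitly checks
// for and handles.
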
// Optimized routine to insert a key into a table when that key *definitely*
// isn't present in the table and the table *definitely* has a viable empty
// slot (and growth space) to insert into before any deleted slots. When both
// of these are true, typically just after growth, we can dramatically simplify
// the insert position search.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
template <typename LookupKeyT>
[[clang::noinline]] auto
BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::InsertIntoEmpty(
    LookupKeyT lookup_key, KeyContextT key_context) -> EntryT* {
  HashCode hash = key_context.HashKey(lookup_key, ComputeSeed());
  auto [hash_index, tag] = hash.ExtractIndexAndTag<7>();
  uint8_t* local_metadata = metadata();
  EntryT* local_entries = entries();

  for (ProbeSequence s(hash_index, alloc_size());; s.Next()) {
    ssize_t group_index = s.index();
    auto g = MetadataGroup::Load(local_metadata, group_index);

    if (auto empty_match = g.MatchEmpty()) {
      ssize_t index = group_index + empty_match.index();
      local_metadata[index] = tag | MetadataGroup::PresentMask;
      return &local_entries[index];
    }

    // Otherwise we continue probing.
  }
}

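// Explanatory note on the metadata write in `InsertIntoEmpty` above:
// `ExtractIndexAndTag<7>` splits the hash into a starting probe index and a
// 7-bit tag, and the byte stored is `tag | MetadataGroup::PresentMask`. The
// mask bit marks the slot as present, while the remaining tag bits let group
// scans filter candidate matches without touching the entry storage.
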
// Apply our doubling growth strategy and (re-)check invariants around table
// size.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::ComputeNextAllocSize(
    ssize_t old_alloc_size) -> ssize_t {
  CARBON_DCHECK(llvm::isPowerOf2_64(old_alloc_size))
      << "Expected a power of two!";
  ssize_t new_alloc_size;
  bool overflow = __builtin_mul_overflow(old_alloc_size, 2, &new_alloc_size);
  CARBON_CHECK(!overflow) << "Computing the new size overflowed `ssize_t`!";
  return new_alloc_size;
}

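// For example (an explanatory addition): an `old_alloc_size` of 64 yields 128.
// The overflow check only fires if doubling would exceed the range of
// `ssize_t`.
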
// Compute the growth threshold for a given size.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
auto BaseImpl<InputKeyT, InputValueT,
              InputKeyContextT>::GrowthThresholdForAllocSize(ssize_t alloc_size)
    -> ssize_t {
  // We use a 7/8ths load factor to trigger growth.
  return alloc_size - alloc_size / 8;
}

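// For example (an explanatory addition): with an `alloc_size` of 64 the
// threshold is 64 - 64 / 8 == 56, so the table grows rather than letting more
// than 56 of its 64 slots become occupied.
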
// Optimized routine for growing to the next alloc size.
//
// A particularly common and important-to-optimize path is growing to the next
// alloc size, which will always be a doubling of the allocated size. This
// allows an important optimization -- we're adding exactly one more high bit
// to the hash-computed index for each entry. This in turn means we can
// classify every entry in the table into three cases:
//
// 1) The new high bit is zero, the entry is at the same index in the new
//    table as the old.
//
// 2) The new high bit is one, the entry is at the old index plus the old
//    size.
//
// 3) The entry's current index doesn't match the initial hash index because
//    it required some amount of probing to find an empty slot.
//
// The design of the hash table tries to minimize how many entries fall into
// case (3), so we expect the vast majority of entries to be in (1) or (2).
// This lets us model growth notionally as copying the hashtable twice into the
// lower and higher halves of the new allocation, clearing out the now-empty
// slots (from both deleted entries and entries in the other half of the table
// after growth), and inserting any probed elements. That model in turn is much
// more efficient than re-inserting all of the elements as it avoids the
// unnecessary parts of insertion and avoids interleaving random accesses for
// the probed elements. But most importantly, for trivially relocatable types
// it allows us to use `memcpy` rather than moving the elements individually.
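//
// A concrete illustration of the three cases (an explanatory addition): when
// growing from an old `alloc_size` of 64 to 128, an entry stored at slot
// `old_index` whose hash didn't require probing ends up in the new table at
// either `old_index` (case 1) or `old_index | 64` (case 2), and its metadata
// byte in the other half is cleared back to empty. Only entries that had
// probed away from their hash index (case 3) are collected and re-inserted
// afterwards via `InsertIntoEmpty`.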
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
auto BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::GrowToNextAllocSize(
    KeyContextT key_context) -> void {
  // We collect the probed elements in a small vector for re-insertion. It is
  // tempting to reuse the already allocated storage, but doing so appears to
  // be a (very slight) performance regression. These are relatively rare and
  // storing them into the existing storage creates stores to the same regions
  // of memory we're reading. Moreover, it requires moving both the key and the
  // value twice, and doing the `memcpy` widening for relocatable types before
  // the group walk rather than after the group walk. In practice, between the
  // statistical rareness and using a large small-size buffer here on the
  // stack, we can handle this most efficiently with temporary, additional
  // storage.
  llvm::SmallVector<ssize_t, 128> probed_indices;

  // Create locals for the old state of the table.
  ssize_t old_size = alloc_size();
  CARBON_DCHECK(old_size > 0);
  bool old_small = is_small();
  Storage* old_storage = storage();
  uint8_t* old_metadata = metadata();
  EntryT* old_entries = entries();

#ifndef NDEBUG
  // Count how many of the old table slots will end up being empty after we
  // grow the table. This includes both the currently empty slots and the
  // deleted slots, because we clear the latter to empty and re-insert
  // everything that had any probing.
  ssize_t debug_empty_count =
      llvm::count(llvm::ArrayRef(old_metadata, old_size), MetadataGroup::Empty);
  ssize_t debug_deleted_count = llvm::count(
      llvm::ArrayRef(old_metadata, old_size), MetadataGroup::Deleted);
  CARBON_DCHECK(debug_empty_count >=
                (old_size - GrowthThresholdForAllocSize(old_size)))
      << "debug_empty_count: " << debug_empty_count
      << ", debug_deleted_count: " << debug_deleted_count
      << ", size: " << old_size;
#endif

  // Configure for the new size and allocate the new storage.
  ssize_t new_size = ComputeNextAllocSize(old_size);
  alloc_size() = new_size;
  storage() = Allocate(new_size);
  growth_budget_ = GrowthThresholdForAllocSize(new_size);

  // Now extract the new components of the table.
  uint8_t* new_metadata = metadata();
  EntryT* new_entries = entries();

  // Walk the metadata groups, clearing deleted to empty, duplicating the
  // metadata for the low and high halves, and updating it based on where each
  // entry will go in the new table. The updated metadata group is written to
  // the new table, and for non-trivially relocatable entry types, the entry is
  // also moved to its new location.
  ssize_t count = 0;
  for (ssize_t group_index = 0; group_index < old_size;
       group_index += GroupSize) {
    auto low_g = MetadataGroup::Load(old_metadata, group_index);
    // Make sure to match present elements first to enable pipelining with
    // clearing.
    auto present_matched_range = low_g.MatchPresent();
    low_g.ClearDeleted();
    MetadataGroup high_g;
    if constexpr (MetadataGroup::FastByteClear) {
      // When we have a fast byte clear, we can update the metadata for the
      // growth in-register and store at the end.
      high_g = low_g;
    } else {
      // If we don't have a fast byte clear, we can store the metadata group
      // eagerly here and overwrite bytes with a byte store below instead of
      // clearing the byte in-register.
      low_g.Store(new_metadata, group_index);
      low_g.Store(new_metadata, group_index | old_size);
    }

    for (ssize_t byte_index : present_matched_range) {
      ++count;
      ssize_t old_index = group_index + byte_index;
      if constexpr (!MetadataGroup::FastByteClear) {
        CARBON_DCHECK(new_metadata[old_index] == old_metadata[old_index]);
        CARBON_DCHECK(new_metadata[old_index | old_size] ==
                      old_metadata[old_index]);
      }
      HashCode hash =
          key_context.HashKey(old_entries[old_index].key(), ComputeSeed());
      ssize_t old_hash_index = hash.ExtractIndexAndTag<7>().first &
                               ComputeProbeMaskFromSize(old_size);
      if (LLVM_UNLIKELY(old_hash_index != group_index)) {
        probed_indices.push_back(old_index);
        if constexpr (MetadataGroup::FastByteClear) {
          low_g.ClearByte(byte_index);
          high_g.ClearByte(byte_index);
        } else {
          new_metadata[old_index] = MetadataGroup::Empty;
          new_metadata[old_index | old_size] = MetadataGroup::Empty;
        }
        continue;
      }
      ssize_t new_index = hash.ExtractIndexAndTag<7>().first &
                          ComputeProbeMaskFromSize(new_size);
      CARBON_DCHECK(new_index == old_hash_index ||
                    new_index == (old_hash_index | old_size));
      // Toggle the newly added bit of the index to get to the other possible
      // target index.
      if constexpr (MetadataGroup::FastByteClear) {
        (new_index == old_hash_index ? high_g : low_g).ClearByte(byte_index);
        new_index += byte_index;
      } else {
        new_index += byte_index;
        new_metadata[new_index ^ old_size] = MetadataGroup::Empty;
      }

      // If we need to explicitly move (and destroy) the key or value, do so
      // here where we already know its target.
      if constexpr (!EntryT::IsTriviallyRelocatable) {
        new_entries[new_index].MoveFrom(std::move(old_entries[old_index]));
      }
    }
    if constexpr (MetadataGroup::FastByteClear) {
      low_g.Store(new_metadata, group_index);
      high_g.Store(new_metadata, (group_index | old_size));
    }
  }
  CARBON_DCHECK((count - static_cast<ssize_t>(probed_indices.size())) ==
                (new_size - llvm::count(llvm::ArrayRef(new_metadata, new_size),
                                        MetadataGroup::Empty)));
#ifndef NDEBUG
  CARBON_DCHECK((debug_empty_count + debug_deleted_count) ==
                (old_size - count));
  CARBON_DCHECK(llvm::count(llvm::ArrayRef(new_metadata, new_size),
                            MetadataGroup::Empty) ==
                debug_empty_count + debug_deleted_count +
                    static_cast<ssize_t>(probed_indices.size()) + old_size);
#endif

  // If the keys or values are trivially relocatable, we do a bulk memcpy of
  // them into place. This will copy them into both possible locations, which
  // is fine. One will be empty and clobbered if reused or ignored. The other
  // will be the one used. This might seem to require that making two copies of
  // an entry is valid, but it doesn't: the result is exactly the same storage
  // as if we had copied the entry into the wrong location first, and then
  // again into the correct location. Only one is live and only one is
  // destroyed.
  if constexpr (EntryT::IsTriviallyRelocatable) {
    memcpy(new_entries, old_entries, old_size * sizeof(EntryT));
    memcpy(new_entries + old_size, old_entries, old_size * sizeof(EntryT));
  }

  // We then need to do a normal insertion for anything that was probed before
  // growth, but we know we'll find an empty slot, so leverage that.
  for (ssize_t old_index : probed_indices) {
    EntryT* new_entry =
        InsertIntoEmpty(old_entries[old_index].key(), key_context);
    new_entry->MoveFrom(std::move(old_entries[old_index]));
  }
  CARBON_DCHECK(count ==
                (new_size - llvm::count(llvm::ArrayRef(new_metadata, new_size),
                                        MetadataGroup::Empty)));
  growth_budget_ -= count;
  CARBON_DCHECK(growth_budget_ ==
                (GrowthThresholdForAllocSize(new_size) -
                 (new_size - llvm::count(llvm::ArrayRef(new_metadata, new_size),
                                         MetadataGroup::Empty))));
  CARBON_DCHECK(growth_budget_ > 0 &&
                "Must still have a growth budget after rehash!");

  if (!old_small) {
    // Old isn't a small buffer, so we need to deallocate it.
    Deallocate(old_storage, old_size);
  }
}

// Grow the hashtable to create space and then insert into it. Returns the
// selected insertion entry. Never returns null. In addition to growing and
// selecting the insertion entry, this routine updates the metadata array so
// that this function can be directly called and the result returned from
// `InsertImpl`.
template <typename InputKeyT, typename InputValueT, typename InputKeyContextT>
template <typename LookupKeyT>
[[clang::noinline]] auto
BaseImpl<InputKeyT, InputValueT, InputKeyContextT>::GrowAndInsert(
    LookupKeyT lookup_key, KeyContextT key_context) -> EntryT* {
  GrowToNextAllocSize(key_context);

  // And insert the `lookup_key` into an entry in the newly grown table and
  // return that entry for use.
  --growth_budget_;
  return InsertIntoEmpty(lookup_key, key_context);
}

template <typename InputBaseT, ssize_t SmallSize>
TableImpl<InputBaseT, SmallSize>::TableImpl(const TableImpl& arg)
    : BaseT(arg.alloc_size(), arg.growth_budget_, SmallSize) {
  // Check for completely broken objects. These invariants should be true even
  // in a moved-from state.
  CARBON_DCHECK(arg.alloc_size() == 0 || !arg.is_small() ||
                arg.alloc_size() == SmallSize);
  CARBON_DCHECK(arg.small_alloc_size_ == SmallSize);
  CARBON_DCHECK(this->small_alloc_size_ == SmallSize);

  if (this->alloc_size() != 0) {
    SetUpStorage();
    this->CopySlotsFrom(arg);
  }
}

template <typename InputBaseT, ssize_t SmallSize>
auto TableImpl<InputBaseT, SmallSize>::operator=(const TableImpl& arg)
    -> TableImpl& {
  // Check for completely broken objects. These invariants should be true even
  // in a moved-from state.
  CARBON_DCHECK(arg.alloc_size() == 0 || !arg.is_small() ||
                arg.alloc_size() == SmallSize);
  CARBON_DCHECK(arg.small_alloc_size_ == SmallSize);
  CARBON_DCHECK(this->small_alloc_size_ == SmallSize);

  // We have to end up with an allocation size exactly matching the incoming
  // argument so that we can copy slots over without re-hashing every entry in
  // the table -- re-hashing isn't possible here without a key context.
  if (arg.alloc_size() == this->alloc_size()) {
    // There is no effective way for self-assignment to fall out of an
    // efficient implementation, so detect and bypass it here. Similarly, if
    // both are in a moved-from state, there is nothing to do.
    if (&arg == this || this->alloc_size() == 0) {
      return *this;
    }

    CARBON_DCHECK(arg.storage() != this->storage());
    if constexpr (!EntryT::IsTriviallyDestructible) {
      this->view_impl_.ForEachEntry([](EntryT& entry) { entry.Destroy(); },
                                    [](auto...) {});
    }
  } else {
    // The sizes don't match, so destroy everything and set up the table
    // storage again.
    this->Destroy();
    this->alloc_size() = arg.alloc_size();
    // If `arg` is moved-from, we've cleared out our elements and put ourselves
    // into a moved-from state. We're done.
    if (this->alloc_size() == 0) {
      return *this;
    }
    SetUpStorage();
  }
  this->growth_budget_ = arg.growth_budget_;
  this->CopySlotsFrom(arg);
  return *this;
}

// Puts the incoming table into a moved-from state that can be destroyed or
// re-initialized but must not be used otherwise.
template <typename InputBaseT, ssize_t SmallSize>
TableImpl<InputBaseT, SmallSize>::TableImpl(TableImpl&& arg) noexcept
    : BaseT(arg.alloc_size(), arg.growth_budget_, SmallSize) {
  // Check for completely broken objects. These invariants should be true even
  // in a moved-from state.
  CARBON_DCHECK(arg.alloc_size() == 0 || !arg.is_small() ||
                arg.alloc_size() == SmallSize);
  CARBON_DCHECK(arg.small_alloc_size_ == SmallSize);
  CARBON_DCHECK(this->small_alloc_size_ == SmallSize);

  this->MoveFrom(std::move(arg), small_storage());
}

template <typename InputBaseT, ssize_t SmallSize>
auto TableImpl<InputBaseT, SmallSize>::operator=(TableImpl&& arg) noexcept
    -> TableImpl& {
  // Check for completely broken objects. These invariants should be true even
  // in a moved-from state.
  CARBON_DCHECK(arg.alloc_size() == 0 || !arg.is_small() ||
                arg.alloc_size() == SmallSize);
  CARBON_DCHECK(arg.small_alloc_size_ == SmallSize);
  CARBON_DCHECK(this->small_alloc_size_ == SmallSize);

  // Destroy and deallocate our table.
  this->Destroy();

  // Defend against self-move by zeroing the size here before we start moving
  // out of `arg`.
  this->alloc_size() = 0;

  // Set up to match the argument and then finish the move.
  this->alloc_size() = arg.alloc_size();
  this->growth_budget_ = arg.growth_budget_;
  this->MoveFrom(std::move(arg), small_storage());
  return *this;
}

template <typename InputBaseT, ssize_t SmallSize>
TableImpl<InputBaseT, SmallSize>::~TableImpl() {
  this->Destroy();
}

// Reset a table to its original state, including releasing any allocated
// memory.
template <typename InputBaseT, ssize_t SmallSize>
auto TableImpl<InputBaseT, SmallSize>::ResetImpl() -> void {
  this->Destroy();
  // Re-initialize the whole thing.
  CARBON_DCHECK(this->small_alloc_size() == SmallSize);
  this->Construct(small_storage());
}

template <typename InputBaseT, ssize_t SmallSize>
auto TableImpl<InputBaseT, SmallSize>::small_storage() const -> Storage* {
  if constexpr (SmallSize > 0) {
    // Do a bunch of validation of the small size to establish our invariants
    // when we know we have a non-zero small size.
    static_assert(llvm::isPowerOf2_64(SmallSize),
                  "SmallSize must be a power of two for a hashed buffer!");
    static_assert(
        SmallSize >= MaxGroupSize,
        "We require all small sizes to be at least as large as the largest "
        "group size supported so that they can be used portably.");
    static_assert(
        (SmallSize % MaxGroupSize) == 0,
        "Small size must be a multiple of the max group size supported "
        "so that we can allocate a whole number of groups.");
    // Implied by the `MaxGroupSize` asserts above.
    static_assert(SmallSize >= GroupSize);
    static_assert((SmallSize % GroupSize) == 0);

    static_assert(SmallSize >= alignof(StorageEntry<KeyT, ValueT>),
                  "Requested a small size that would require padding between "
                  "metadata bytes and correctly aligned key and value types. "
                  "Either a larger small size or a zero small size and heap "
                  "allocation are required for this key and value type.");
    static_assert(offsetof(SmallStorage, entries) == SmallSize,
                  "Offset to entries in small size storage doesn't match "
                  "computed offset!");
    return &small_storage_;
  } else {
    static_assert(
        sizeof(TableImpl) == sizeof(BaseT),
        "Empty small storage caused a size difference and wasted space!");
    return nullptr;
  }
}

// Helper to set up the storage of a table when a specific size has already
// been set. If possible, this uses the small storage; otherwise it allocates.
template <typename InputBaseT, ssize_t SmallSize>
auto TableImpl<InputBaseT, SmallSize>::SetUpStorage() -> void {
  CARBON_DCHECK(this->small_alloc_size() == SmallSize);
  ssize_t local_size = this->alloc_size();
  CARBON_DCHECK(local_size != 0);
  if (local_size == SmallSize) {
    this->storage() = small_storage();
  } else {
    this->storage() = BaseT::Allocate(local_size);
  }
}

}  // namespace Carbon::RawHashtable

#endif  // CARBON_COMMON_RAW_HASHTABLE_H_