// Part of the Carbon Language project, under the Apache License v2.0 with LLVM
// Exceptions. See /LICENSE for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

#ifndef CARBON_COMMON_HASHING_H_
#define CARBON_COMMON_HASHING_H_

#include <array>
#include <concepts>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <string>
#include <string_view>
#include <tuple>
#include <type_traits>
#include <utility>

#include "common/check.h"
#include "common/ostream.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FormatVariadic.h"

#ifdef __ARM_ACLE
#include <arm_acle.h>
#endif
namespace Carbon {

// A 64-bit hash code produced by `Carbon::HashValue`.
//
// This provides methods for extracting high-quality bits from the hash code
// quickly.
//
// This class can also be a hashing input when recursively hashing more complex
// data structures.
class HashCode : public Printable<HashCode> {
 public:
  HashCode() = default;

  constexpr explicit HashCode(uint64_t value) : value_(value) {}

  friend constexpr auto operator==(HashCode lhs, HashCode rhs) -> bool {
    return lhs.value_ == rhs.value_;
  }
  friend constexpr auto operator!=(HashCode lhs, HashCode rhs) -> bool {
    return lhs.value_ != rhs.value_;
  }

  // Extracts an index from the hash code as a `ssize_t`. This index covers the
  // full range of that type, and may even be negative. Typical usage will
  // involve masking it down to some positive range using a bitand with a mask
  // computed from a power-of-two size. This routine doesn't do any masking to
  // ensure a positive index, to avoid redundant computation in the typical
  // user of the index.
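  //
  // A minimal usage sketch, where `buckets` is a hypothetical power-of-two
  // table size rather than part of this API:
  //
  // ```cpp
  // ssize_t bucket = hash.ExtractIndex() & (buckets - 1);
  // ```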
  constexpr auto ExtractIndex() -> ssize_t;

  // Extracts an index and a fixed `N`-bit tag from the hash code.
  //
  // This extracts these values from the position of the hash code which
  // maximizes the entropy in the tag and the low bits of the index, as typical
  // indices will be further masked down to fall in a smaller range.
  //
  // `N` must be in the range [1, 32]. The returned index will be in the range
  // [0, 2**(64-N)).
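  //
  // A usage sketch for a table with 7-bit per-entry tags, where `num_groups`
  // is a hypothetical power-of-two group count rather than part of this API:
  //
  // ```cpp
  // auto [index, tag] = hash.ExtractIndexAndTag<7>();
  // ssize_t group = index & (num_groups - 1);
  // // `tag` holds 7 bits usable as per-entry probe metadata.
  // ```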
  template <int N>
  constexpr auto ExtractIndexAndTag() -> std::pair<ssize_t, uint32_t>;

  // Extracts the full 64-bit hash code as an integer.
  //
  // The methods above should be preferred rather than directly manipulating
  // this integer. This is provided primarily to enable Merkle-tree hashing or
  // other recursive hashing where that is needed or more efficient.
  explicit operator uint64_t() const { return value_; }

  auto Print(llvm::raw_ostream& out) const -> void {
    out << llvm::formatv("{0:x16}", value_);
  }

 private:
  uint64_t value_ = 0;
};

// Computes a hash code for the provided value, incorporating the provided
// seed.
//
// The seed doesn't need to be of any particular high quality, but a zero seed
// has bad effects in several places. Prefer the unseeded routine rather than
// providing a zero here.
//
// This is **not** a cryptographically secure or stable hash -- it is only
// designed for use with in-memory hash table style data structures. Being fast
// and effective for that use case is the guiding principle of its design.
//
// There is no guarantee that the values produced are stable from execution to
// execution. For speed and quality reasons, the implementation does not
// introduce any variance to defend against accidental dependencies. As a
// consequence, it is strongly encouraged to use a seed that varies from
// execution to execution to avoid depending on specific values produced.
//
// The algorithm used is most heavily based on [Abseil's hashing algorithm][1],
// with some additional ideas and inspiration from the fallback hashing
// algorithm in [Rust's AHash][2] and the [FxHash][3] function. However, there
// are also *significant* changes introduced here.
//
// [1]: https://github.com/abseil/abseil-cpp/tree/master/absl/hash/internal
// [2]: https://github.com/tkaitchuck/aHash/wiki/AHash-fallback-algorithm
// [3]: https://docs.rs/fxhash/latest/fxhash/
//
// This hash algorithm does *not* defend against hash flooding. While it can be
// viewed as "keyed" on the seed, it is expected to be possible to craft inputs
// for some data types that cancel out the seed used and manufacture endlessly
// colliding sets of keys. In general, this function works to be *fast* for
// hash tables. If you need to defend against hash flooding, either directly
// use a data structure with strong worst-case guarantees, or a hash table
// which detects catastrophic collisions and falls back to such a data
// structure.
//
// This hash function is heavily optimized for *latency* over *quality*. Modern
// hash table designs can efficiently handle reasonable collision rates,
// including by using extra bits from the hash to avoid all efficiency coming
// from the same low bits. Because of this, low latency is significantly more
// important for performance than high quality, and this is heavily leveraged.
// The result is that the hash codes produced *do* have significant avalanche
// problems for small keys. The upside is that the latency for hashing
// integers, pointers, and small byte strings (up to 32 bytes) is exceptionally
// low, and essentially a small constant-time instruction sequence.
//
// No exotic instruction set extensions are required, and the state used is
// small. It does rely on being able to get the low and high 64-bit results of
// a 64-bit multiply efficiently.
//
// The function supports many typical data types such as primitives, string-ish
// views, and types composing primitives transparently like pairs, tuples, and
// array-ish views. It is also extensible to support user-defined types.
//
// The builtin support for string-like types includes:
// - `std::string_view`
// - `std::string`
// - `llvm::StringRef`
// - `llvm::SmallString`
//
// This function supports heterogeneous lookup between all of the string-like
// types. It also supports heterogeneous lookup between pointer types
// regardless of pointee type and `nullptr`.
//
// However, this is the only heterogeneous lookup supported, including for the
// builtin, standard, and LLVM types. Notably, each different size and
// signedness of integer type may hash differently for efficiency reasons. Hash
// tables should pick a single integer type in which to manage keys and do
// lookups.
//
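// As a sketch of what this means in practice (assuming some `seed` value), the
// string-like types hash identically, while differently sized integer types
// need not:
//
// ```cpp
// CARBON_CHECK(HashValue(std::string("widget"), seed) ==
//              HashValue(llvm::StringRef("widget"), seed));
// // But `HashValue(int32_t{42}, seed)` may differ from
// // `HashValue(int64_t{42}, seed)`.
// ```
//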
// To add support for your type, you need to implement a customization point --
// a free function that can be found by ADL for your type -- called
// `CarbonHashValue` with the following signature:
//
// ```cpp
// auto CarbonHashValue(const YourType& value, uint64_t seed) -> HashCode;
// ```
//
// The extension point needs to ensure that values that compare equal
// (including any comparisons with different types that might be used with a
// hash table of `YourType` keys) produce the same `HashCode` values.
//
// `HashCode` values should typically be produced using the `Hasher` helper
// type below. See its documentation for more details about implementing these
// customization points and how best to incorporate the value's state into a
// `HashCode`.
//
// For two input values that are almost but not quite equal, the extension
// point should maximize the probability of each bit of their resulting
// `HashCode`s differing. More formally, `HashCode`s should exhibit an
// [avalanche effect][4]. However, while this is desirable, it should be
// **secondary** to low latency. The intended use case of these functions is
// not cryptography but in-memory hashtables where the latency and overhead of
// computing the `HashCode` is *significantly* more important than achieving a
// particularly high quality. The goal is to have "just enough" avalanche
// effect, but there is no fixed criterion for how much is enough. That should
// be determined through practical experimentation with a hashtable and
// distribution of keys.
//
// [4]: https://en.wikipedia.org/wiki/Avalanche_effect
template <typename T>
inline auto HashValue(const T& value, uint64_t seed) -> HashCode;

// The same as the seeded version of `HashValue` but without callers needing to
// provide a seed.
//
// Generally prefer the seeded version, but this is available if there is no
// reasonable seed. In particular, this will behave better than using a seed of
// `0`. One important use case is for recursive hashing of sub-objects where
// appropriate or needed.
template <typename T>
inline auto HashValue(const T& value) -> HashCode;
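
// A brief usage sketch of both forms; the seed value here is arbitrary and
// would typically vary from execution to execution:
//
// ```cpp
// std::pair<int, int> key = {1, 2};
// HashCode seeded = Carbon::HashValue(key, /*seed=*/0x12345);
// HashCode unseeded = Carbon::HashValue(key);
// ```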

// An object and APIs that eventually produce a hash code.
//
// This type is primarily used by types to implement a customization point
// `CarbonHashValue` that will in turn be used by the `HashValue` function. See
// the `HashValue` function for details of that extension point.
//
// The methods on this type can be used to incorporate data from your
// user-defined type into its internal state, which can be converted to a
// `HashCode` at any time. These methods will only produce the same `HashCode`
// if they are called in the exact same order with the same arguments -- there
// are no guaranteed equivalences between calling different methods.
//
// Example usage:
// ```cpp
// auto CarbonHashValue(const MyType& value, uint64_t seed) -> HashCode {
//   Hasher hasher(seed);
//   hasher.Hash(value.x, value.y);
//   return static_cast<HashCode>(hasher);
// }
// ```
//
// This type's API also reflects the reality that high-performance hash tables
// are used with keys that are generally small and cheap to hash.
//
// To ensure this type's code is optimized effectively, it should typically be
// used as a local variable and not passed across function boundaries
// unnecessarily.
//
// The type also provides a number of static helper functions and static data
// members that may be used by authors of `CarbonHashValue` implementations to
// efficiently compute the inputs to the core `Hasher` methods, or even to
// manually do some amounts of hashing in performance-tuned ways outside of the
// methods provided.
class Hasher {
 public:
  Hasher() = default;
  explicit Hasher(uint64_t seed) : buffer(seed) {}

  Hasher(Hasher&& arg) = default;
  Hasher(const Hasher& arg) = delete;
  auto operator=(Hasher&& rhs) -> Hasher& = default;

  // Extracts the current state as a `HashCode` for use.
  explicit operator HashCode() const { return HashCode(buffer); }

  // Incorporates an object into the hasher's state by hashing its object
  // representation. Requires `value`'s type to have a unique object
  // representation. This is primarily useful for builtin and primitive types.
  //
  // This can be directly used for simple users combining some aggregation of
  // objects. However, when possible, prefer the variadic version below for
  // aggregating several primitive types into a hash.
  template <typename T>
    requires std::has_unique_object_representations_v<T>
  auto Hash(const T& value) -> void;

  // Incorporates a variable number of objects into the hasher's state in a
  // similar manner to applying the above function to each one in series. It
  // has the same requirements as the above function for each `value`.
  //
  // There is no guaranteed correspondence between the behavior of a single
  // call with multiple parameters and multiple calls. This routine is also
  // optimized for handling relatively small numbers of objects. For hashing
  // large aggregations, consider some Merkle-tree decomposition or arranging
  // for a byte buffer that can be hashed as a single buffer. However, hashing
  // large aggregations of data in this way rarely results in effective,
  // high-performance hash table data structures and so should generally be
  // avoided.
  template <typename... Ts>
    requires(... && std::has_unique_object_representations_v<Ts>)
  auto Hash(const Ts&... value) -> void;

  // Simpler and more primitive functions to incorporate state represented in
  // `uint64_t` values into the hasher's state.
  //
  // These may be slightly less efficient than the `Hash` method above for a
  // typical application code `uint64_t`, but are designed to work well even
  // when relevant data has been packed into the `uint64_t` parameters densely.
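  //
  // A brief sketch with hypothetical bit-packed words (`node_id`, `flags`, and
  // `word1` are illustrative, not part of this API):
  //
  // ```cpp
  // uint64_t word0 = (static_cast<uint64_t>(node_id) << 32) | flags;
  // hasher.HashDense(word0, word1);
  // ```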
  auto HashDense(uint64_t data) -> void;
  auto HashDense(uint64_t data0, uint64_t data1) -> void;

  // A heavily optimized routine for incorporating a dynamically sized sequence
  // of bytes into the hasher's state.
  //
  // This routine has carefully structured inline code paths for short byte
  // sequences and a reasonably high bandwidth code path for longer sequences.
  // The size of the byte sequence is always incorporated into the hasher's
  // state along with the contents.
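  //
  // A sketch of a `CarbonHashValue` implementation for a hypothetical `Symbol`
  // type with an integer `kind` member and a `std::string` `name` member:
  //
  // ```cpp
  // auto CarbonHashValue(const Symbol& value, uint64_t seed) -> HashCode {
  //   Hasher hasher(seed);
  //   hasher.Hash(value.kind);
  //   hasher.HashSizedBytes(llvm::ArrayRef(
  //       reinterpret_cast<const std::byte*>(value.name.data()),
  //       value.name.size()));
  //   return static_cast<HashCode>(hasher);
  // }
  // ```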
  auto HashSizedBytes(llvm::ArrayRef<std::byte> bytes) -> void;

  // An out-of-line, throughput-optimized routine for incorporating a
  // dynamically sized sequence when the sequence size is guaranteed to be >32.
  // The size is always incorporated into the state.
  auto HashSizedBytesLarge(llvm::ArrayRef<std::byte> bytes) -> void;

  // Utility functions to read data of various sizes efficiently into a
  // 64-bit value. These pointers need not be aligned, and can alias other
  // objects. The representation of the read data in the `uint64_t` returned is
  // not stable or guaranteed.
  static auto Read1(const std::byte* data) -> uint64_t;
  static auto Read2(const std::byte* data) -> uint64_t;
  static auto Read4(const std::byte* data) -> uint64_t;
  static auto Read8(const std::byte* data) -> uint64_t;

  // Similar to the `ReadN` functions, but supports reading a range of byte
  // counts given by `size` *without branching on the size*. The lack of
  // branches is often key, and the code in these routines works to be
  // efficient in extracting a *dynamic* size of bytes into the returned
  // `uint64_t`. There may be overlap between different routines, because these
  // routines are based on different implementation techniques that do have
  // some overlap in the range of sizes they can support. Which routine is the
  // most efficient for a size in the overlap isn't trivial, and so these
  // primitives are provided as-is and should be selected based on the
  // localized generated code and benchmarked performance.
  static auto Read1To3(const std::byte* data, ssize_t size) -> uint64_t;
  static auto Read4To8(const std::byte* data, ssize_t size) -> uint64_t;
  static auto Read8To16(const std::byte* data, ssize_t size)
      -> std::pair<uint64_t, uint64_t>;

  // Reads the underlying object representation of a type into a 64-bit integer
  // efficiently. Only supports types with unique object representation and at
  // most 8 bytes large. This is typically used to read primitive types.
  template <typename T>
    requires std::has_unique_object_representations_v<T> && (sizeof(T) <= 8)
  static auto ReadSmall(const T& value) -> uint64_t;

  // The core of the hash algorithm is this mix function. The specific
  // operations are not guaranteed to be stable but are described here for
  // hashing authors to understand what to expect.
  //
  // Currently, this uses the same "mix" operation as in Abseil, AHash, and
  // several other hashing algorithms. It takes two 64-bit integers, multiplies
  // them, capturing both the high 64-bit result and the low 64-bit result, and
  // then XORs those two halves together.
  //
  // A consequence of this operation is that a zero on either side will fail to
  // incorporate any bits from the other side. Often, this is an acceptable
  // rate of collision in practice. But it is worth being aware of and working
  // to avoid common paths encountering this. For example, used naively this
  // might cause different-length all-zero byte strings to hash the same,
  // essentially losing the length in the composition of the hash for a likely
  // important case of byte sequence.
  //
  // Another consequence of the particular implementation is that it is useful
  // to have a reasonable distribution of bits throughout both sides of the
  // multiplication. However, it is not *necessary* as we do capture the
  // complete 128-bit result. Where reasonable, the caller should XOR random
  // data into operands before calling `Mix` to try and increase the
  // distribution of bits feeding the multiply.
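  //
  // A sketch of that suggested pattern (the operand names are illustrative),
  // mirroring the dense-hash path below:
  //
  // ```cpp
  // uint64_t mixed = Mix(data0 ^ StaticRandomData[1], data1 ^ seed);
  // ```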
  static auto Mix(uint64_t lhs, uint64_t rhs) -> uint64_t;

  // An alternative to `Mix` that is significantly weaker but also lower
  // latency. It should not be used when the input `uint64_t` is densely packed
  // with data, but is a good option for hashing a single integer or pointer
  // where the full 64 bits are sparsely populated and especially the high bits
  // are often invariant between interestingly different values.
  //
  // This uses just the low 64-bit result of a multiply. It ensures the operand
  // is good at diffusing bits, but inherently the high bits of the input will
  // be (significantly) less often represented in the output. It also does some
  // reversal to ensure the *low* bits of the result are the most useful ones.
  static auto WeakMix(uint64_t value) -> uint64_t;

  // We have a 64-byte random data pool designed to fit on a single cache line.
  // This routine allows sampling it at byte indices, which allows getting
  // 64 - 8 different random 64-bit results. The offset must be in the range
  // [0, 56).
  static auto SampleRandomData(ssize_t offset) -> uint64_t {
    CARBON_DCHECK(offset + sizeof(uint64_t) < sizeof(StaticRandomData));
    uint64_t data;
    std::memcpy(&data,
                reinterpret_cast<const unsigned char*>(&StaticRandomData) +
                    offset,
                sizeof(data));
    return data;
  }

  // Random data taken from the hexadecimal digits of Pi's fractional
  // component, written in lexical order for convenience of reading. The
  // resulting byte-stream will be different due to little-endian integers.
  // These can be used directly for convenience rather than calling
  // `SampleRandomData`, but be aware that this is the underlying pool. The
  // goal is to reuse the same single cache-line of constant data.
  //
  // The initializers here can be generated with the following shell script,
  // which will generate 8 64-bit values and one more digit. The `bc` command's
  // decimal-based scaling means that, without rendering at least one extra hex
  // digit, there would be rounding that we don't want, so the script below
  // goes on to produce one more hex digit ensuring that the 8 initializers
  // aren't rounded in any way. Using a higher scale won't cause the 8
  // initializers here to change further.
  //
  // ```sh
  // echo 'obase=16; scale=155; 4*a(1)' | env BC_LINE_LENGTH=500 bc -l \
  //   | cut -c 3- | tr '[:upper:]' '[:lower:]' \
  //   | sed -e "s/.\{4\}/&'/g" \
  //   | sed -e "s/\(.\{4\}'.\{4\}'.\{4\}'.\{4\}\)'/0x\1,\n/g"
  // ```
  static inline constexpr std::array<uint64_t, 8> StaticRandomData = {
      0x243f'6a88'85a3'08d3, 0x1319'8a2e'0370'7344, 0xa409'3822'299f'31d0,
      0x082e'fa98'ec4e'6c89, 0x4528'21e6'38d0'1377, 0xbe54'66cf'34e9'0c6c,
      0xc0ac'29b7'c97c'50dd, 0x3f84'd5b5'b547'0917,
  };

  // We need a multiplicative hashing constant for both 64-bit multiplicative
  // hashing fast paths and some other 128-bit folded multiplies. We use an
  // empirically better constant compared to Knuth's, Rust's FxHash, and others
  // we've tried. It was found by a search of uniformly distributed odd numbers
  // and examining them for desirable properties when used as a multiplicative
  // hash; however, our search seems largely to have been lucky rather than
  // having a highly effective set of criteria. We evaluated this constant by
  // integrating this hash function with a hashtable and looking at the
  // collision rates of several different but very fundamental patterns of
  // keys: integers counting from 0, pointers allocated on the heap, and
  // strings with character and size distributions matching C-style ASCII
  // identifiers. Different constants found with this search worked better or
  // worse, but fairly consistently across the different types of keys. In the
  // end, far and away the best-behaved constant we found was one of the first
  // ones in the search and is what we use here.
  //
  // For reference, some other constants include one derived by dividing 2^64
  // by Phi: 0x9e37'79b9'7f4a'7c15U -- see these sites for details:
  // https://probablydance.com/2018/06/16/fibonacci-hashing-the-optimization-that-the-world-forgot-or-a-better-alternative-to-integer-modulo/
  // https://book.huihoo.com/data-structures-and-algorithms-with-object-oriented-design-patterns-in-c++/html/page214.html
  //
  // Another very good constant derived by minimizing repeating bit patterns is
  // 0xdcb2'2ca6'8cb1'34edU and its bit-reversed form. However, this constant
  // has shown frequent issues at roughly 4k pointer keys, connected to a
  // common hashtable seed also being a pointer. These issues appear to occur
  // both more often and have a larger impact relative to the number of keys
  // than the rare cases where some combinations of pointer seeds and pointer
  // keys create minor quality issues with the constant we use.
  static constexpr uint64_t MulConstant = 0x7924'f9e0'de1e'8cf5U;

 private:
  uint64_t buffer;
};

// A dedicated namespace for `CarbonHashValue` overloads that are not found by
// ADL with their associated types. For example, primitive type overloads or
// overloads for types in LLVM's libraries.
//
// Note that these are internal implementation details and **not** part of the
// public API. They should not be used directly by client code.
namespace InternalHashDispatch {

inline auto CarbonHashValue(llvm::ArrayRef<std::byte> bytes, uint64_t seed)
    -> HashCode {
  Hasher hasher(seed);
  hasher.HashSizedBytes(bytes);
  return static_cast<HashCode>(hasher);
}

// Hashing implementation for `llvm::StringRef`. We forward all the other
// string-like types that support heterogeneous lookup to this one.
inline auto CarbonHashValue(llvm::StringRef value, uint64_t seed) -> HashCode {
  return CarbonHashValue(
      llvm::ArrayRef(reinterpret_cast<const std::byte*>(value.data()),
                     value.size()),
      seed);
}

inline auto CarbonHashValue(std::string_view value, uint64_t seed)
    -> HashCode {
  return CarbonHashValue(llvm::StringRef(value.data(), value.size()), seed);
}

inline auto CarbonHashValue(const std::string& value, uint64_t seed)
    -> HashCode {
  return CarbonHashValue(llvm::StringRef(value.data(), value.size()), seed);
}

template <unsigned Length>
inline auto CarbonHashValue(const llvm::SmallString<Length>& value,
                            uint64_t seed) -> HashCode {
  return CarbonHashValue(llvm::StringRef(value.data(), value.size()), seed);
}

// C++ guarantees this is true for the unsigned variants, but we require it for
// signed variants and pointers.
static_assert(std::has_unique_object_representations_v<int8_t>);
static_assert(std::has_unique_object_representations_v<int16_t>);
static_assert(std::has_unique_object_representations_v<int32_t>);
static_assert(std::has_unique_object_representations_v<int64_t>);
static_assert(std::has_unique_object_representations_v<void*>);

// C++ uses `std::nullptr_t` but unfortunately doesn't give it a unique object
// representation. To address that, we need a function that converts `nullptr`
// back into a `void*`, which does have a unique object representation. This
// needs to be done by-value as we need to build a temporary object to return,
// which requires a separate overload rather than just a type function that
// could be used in parallel in the predicate below. Instead, we build the
// predicate independently of the mapping overload, but together they should
// produce the correct result.
template <typename T>
inline auto MapNullPtrToVoidPtr(const T& value) -> const T& {
  // This overload should never be selected for `std::nullptr_t`, so
  // static_assert to get some better compiler error messages.
  static_assert(!std::same_as<T, std::nullptr_t>);
  return value;
}
inline auto MapNullPtrToVoidPtr(std::nullptr_t /*value*/) -> const void* {
  return nullptr;
}

// Implementation detail predicate to be used in conjunction with a `nullptr`
// mapping routine like the above.
template <typename T>
concept NullPtrOrHasUniqueObjectRepresentations =
    std::same_as<T, std::nullptr_t> ||
    std::has_unique_object_representations_v<T>;

template <typename T>
  requires NullPtrOrHasUniqueObjectRepresentations<T>
inline auto CarbonHashValue(const T& value, uint64_t seed) -> HashCode {
  Hasher hasher(seed);
  hasher.Hash(MapNullPtrToVoidPtr(value));
  return static_cast<HashCode>(hasher);
}

template <typename... Ts>
  requires(... && NullPtrOrHasUniqueObjectRepresentations<Ts>)
inline auto CarbonHashValue(const std::tuple<Ts...>& value, uint64_t seed)
    -> HashCode {
  Hasher hasher(seed);
  std::apply(
      [&](const auto&... args) { hasher.Hash(MapNullPtrToVoidPtr(args)...); },
      value);
  return static_cast<HashCode>(hasher);
}

template <typename T, typename U>
  requires NullPtrOrHasUniqueObjectRepresentations<T> &&
           NullPtrOrHasUniqueObjectRepresentations<U> &&
           (sizeof(T) <= sizeof(uint64_t) && sizeof(U) <= sizeof(uint64_t))
inline auto CarbonHashValue(const std::pair<T, U>& value, uint64_t seed)
    -> HashCode {
  return CarbonHashValue(std::tuple(value.first, value.second), seed);
}

template <typename T>
  requires std::has_unique_object_representations_v<T>
inline auto CarbonHashValue(llvm::ArrayRef<T> objs, uint64_t seed) -> HashCode {
  return CarbonHashValue(
      llvm::ArrayRef(reinterpret_cast<const std::byte*>(objs.data()),
                     objs.size() * sizeof(T)),
      seed);
}

template <typename T>
inline auto DispatchImpl(const T& value, uint64_t seed) -> HashCode {
  // This unqualified call will find both the overloads in this namespace and
  // ADL-found functions in an associated namespace of `T`.
  return CarbonHashValue(value, seed);
}

}  // namespace InternalHashDispatch

template <typename T>
inline auto HashValue(const T& value, uint64_t seed) -> HashCode {
  return InternalHashDispatch::DispatchImpl(value, seed);
}

template <typename T>
inline auto HashValue(const T& value) -> HashCode {
  // When a seed isn't provided, use the last 64-bit chunk of random data.
  // Other chunks (especially the first) are more often XOR-ed with the seed
  // and risk cancelling each other out and feeding a zero to a `Mix` call in a
  // way that sharply increases collisions.
  return HashValue(value, Hasher::StaticRandomData[7]);
}

inline constexpr auto HashCode::ExtractIndex() -> ssize_t { return value_; }

template <int N>
inline constexpr auto HashCode::ExtractIndexAndTag()
    -> std::pair<ssize_t, uint32_t> {
  static_assert(N >= 1);
  static_assert(N <= 32);
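  // The tag is the low `N` bits and the index is the remaining high bits. For
  // example, with `value_ == 0x0123'4567'89ab'cdef` and `N == 8`, the tag is
  // `0xef` and the index is `0x0123'4567'89ab'cd`.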
  return {static_cast<ssize_t>(value_ >> N),
          static_cast<uint32_t>(value_ & ((uint64_t{1} << N) - 1))};
}

// Building with `-DCARBON_MCA_MARKERS` will enable `llvm-mca` annotations in
// the source code. These can interfere with optimization, but allow analyzing
// the generated `.s` file with the `llvm-mca` tool. Documentation for these
// markers is here:
// https://llvm.org/docs/CommandGuide/llvm-mca.html#using-markers-to-analyze-specific-code-blocks
#if CARBON_MCA_MARKERS
#define CARBON_MCA_BEGIN(NAME) \
  __asm volatile("# LLVM-MCA-BEGIN " NAME "" ::: "memory");
#define CARBON_MCA_END(NAME) \
  __asm volatile("# LLVM-MCA-END " NAME "" ::: "memory");
#else
#define CARBON_MCA_BEGIN(NAME)
#define CARBON_MCA_END(NAME)
#endif

inline auto Hasher::Read1(const std::byte* data) -> uint64_t {
  uint8_t result;
  std::memcpy(&result, data, sizeof(result));
  return result;
}

inline auto Hasher::Read2(const std::byte* data) -> uint64_t {
  uint16_t result;
  std::memcpy(&result, data, sizeof(result));
  return result;
}

inline auto Hasher::Read4(const std::byte* data) -> uint64_t {
  uint32_t result;
  std::memcpy(&result, data, sizeof(result));
  return result;
}

inline auto Hasher::Read8(const std::byte* data) -> uint64_t {
  uint64_t result;
  std::memcpy(&result, data, sizeof(result));
  return result;
}

inline auto Hasher::Read1To3(const std::byte* data, ssize_t size) -> uint64_t {
  // Use carefully crafted indexing to avoid branches on the exact size while
  // reading.
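  //
  // For `size == 1` all three reads hit byte 0; for `size == 2` they hit bytes
  // {0, 1, 1}; and for `size == 3` they hit bytes {0, 2, 1}. Every in-range
  // byte is read and no read is out of bounds, without any branch on `size`.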
  uint64_t byte0 = static_cast<uint8_t>(data[0]);
  uint64_t byte1 = static_cast<uint8_t>(data[size - 1]);
  uint64_t byte2 = static_cast<uint8_t>(data[size >> 1]);
  return byte0 | (byte1 << 16) | (byte2 << 8);
}
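
// Reads the first four and the last four bytes of the sequence. For sizes
// below 8 the two reads overlap; callers such as `HashSizedBytes` incorporate
// the size separately, so the overlap doesn't lose information.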
inline auto Hasher::Read4To8(const std::byte* data, ssize_t size) -> uint64_t {
  uint32_t low;
  std::memcpy(&low, data, sizeof(low));
  uint32_t high;
  std::memcpy(&high, data + size - sizeof(high), sizeof(high));
  return low | (static_cast<uint64_t>(high) << 32);
}

inline auto Hasher::Read8To16(const std::byte* data, ssize_t size)
    -> std::pair<uint64_t, uint64_t> {
  uint64_t low;
  std::memcpy(&low, data, sizeof(low));
  uint64_t high;
  std::memcpy(&high, data + size - sizeof(high), sizeof(high));
  return {low, high};
}

inline auto Hasher::Mix(uint64_t lhs, uint64_t rhs) -> uint64_t {
  // Use the C23 extended integer support that Clang provides as a general
  // language extension.
  using U128 = unsigned _BitInt(128);
  U128 result = static_cast<U128>(lhs) * static_cast<U128>(rhs);
  return static_cast<uint64_t>(result) ^ static_cast<uint64_t>(result >> 64);
}

inline auto Hasher::WeakMix(uint64_t value) -> uint64_t {
  value *= MulConstant;
#ifdef __ARM_ACLE
  // Arm has a fast bit-reversal that gives us the optimal distribution.
  value = __rbitll(value);
#else
  // Otherwise, assume an optimized BSWAP such as x86's. That's close enough.
  value = __builtin_bswap64(value);
#endif
  return value;
}

inline auto Hasher::HashDense(uint64_t data) -> void {
  // When hashing exactly one 64-bit entity, this is just multiplicative
  // hashing with `MulConstant`. The initial buffer is mixed on input to
  // pipeline with materializing the constant.
  buffer = Mix(data ^ buffer, MulConstant);
}

inline auto Hasher::HashDense(uint64_t data0, uint64_t data1) -> void {
  // When hashing two chunks of data at the same time, we XOR them with random
  // data to avoid common inputs having especially bad multiplicative effects.
  // We also XOR in the starting buffer as a seed or to chain. Note that we
  // don't use *consecutive* random data 64-bit values, to avoid a common
  // compiler "optimization" of loading both 64-bit chunks into a 128-bit
  // vector and doing the XOR in the vector unit. The latency of extracting the
  // data afterward eclipses any benefit. Callers will routinely have two
  // consecutive data values here, but using non-consecutive keys avoids making
  // vectorization tempting.
  //
  // XOR-ing both the incoming state and a random word over the second data is
  // done to pipeline with materializing the constants and is observed to have
  // better performance than XOR-ing after the mix.
  //
  // This roughly matches the mix pattern used in the larger mixing routines
  // from Abseil, which is a more minimal form than used in other algorithms
  // such as AHash and seems adequate for latency-optimized use cases.
  buffer =
      Mix(data0 ^ StaticRandomData[1], data1 ^ StaticRandomData[3] ^ buffer);
}

template <typename T>
  requires std::has_unique_object_representations_v<T> && (sizeof(T) <= 8)
inline auto Hasher::ReadSmall(const T& value) -> uint64_t {
  const auto* storage = reinterpret_cast<const std::byte*>(&value);
  if constexpr (sizeof(T) == 1) {
    return Read1(storage);
  } else if constexpr (sizeof(T) == 2) {
    return Read2(storage);
  } else if constexpr (sizeof(T) == 3) {
    return Read2(storage) | (Read1(&storage[2]) << 16);
  } else if constexpr (sizeof(T) == 4) {
    return Read4(storage);
  } else if constexpr (sizeof(T) == 5) {
    return Read4(storage) | (Read1(&storage[4]) << 32);
  } else if constexpr (sizeof(T) == 6 || sizeof(T) == 7) {
    // Use overlapping 4-byte reads for 6 and 7 bytes.
    return Read4(storage) | (Read4(&storage[sizeof(T) - 4]) << 32);
  } else if constexpr (sizeof(T) == 8) {
    return Read8(storage);
  } else {
    static_assert(sizeof(T) <= 8);
  }
}

template <typename T>
  requires std::has_unique_object_representations_v<T>
inline auto Hasher::Hash(const T& value) -> void {
  if constexpr (sizeof(T) <= 8) {
    // For types of 8 bytes and smaller being hashed directly (as opposed to
    // 8 bytes potentially bit-packed with data), we rarely expect the incoming
    // data to fully and densely populate all 8 bytes. For these cases we have
    // a `WeakMix` routine that is lower latency but lower quality.
    CARBON_MCA_BEGIN("fixed-8b");
    buffer = WeakMix(buffer ^ ReadSmall(value));
    CARBON_MCA_END("fixed-8b");
    return;
  }

  const auto* data_ptr = reinterpret_cast<const std::byte*>(&value);
  if constexpr (8 < sizeof(T) && sizeof(T) <= 16) {
    CARBON_MCA_BEGIN("fixed-16b");
    auto values = Read8To16(data_ptr, sizeof(T));
    HashDense(values.first, values.second);
    CARBON_MCA_END("fixed-16b");
    return;
  }

  if constexpr (16 < sizeof(T) && sizeof(T) <= 32) {
    CARBON_MCA_BEGIN("fixed-32b");
    // Essentially the same technique used for dynamically sized byte sequences
    // of this size, but we start with a fixed XOR of random data.
    buffer ^= StaticRandomData[0];
    uint64_t m0 = Mix(Read8(data_ptr) ^ StaticRandomData[1],
                      Read8(data_ptr + 8) ^ buffer);
    const std::byte* tail_16b_ptr = data_ptr + (sizeof(T) - 16);
    uint64_t m1 = Mix(Read8(tail_16b_ptr) ^ StaticRandomData[3],
                      Read8(tail_16b_ptr + 8) ^ buffer);
    buffer = m0 ^ m1;
    CARBON_MCA_END("fixed-32b");
    return;
  }

  // Hashing the size isn't relevant here, but is harmless, so fall back to a
  // common code path.
  HashSizedBytesLarge(llvm::ArrayRef<std::byte>(data_ptr, sizeof(T)));
}

template <typename... Ts>
  requires(... && std::has_unique_object_representations_v<Ts>)
inline auto Hasher::Hash(const Ts&... value) -> void {
  if constexpr (sizeof...(Ts) == 0) {
    buffer ^= StaticRandomData[0];
    return;
  }
  if constexpr (sizeof...(Ts) == 1) {
    Hash(value...);
    return;
  }
  if constexpr ((... && (sizeof(Ts) <= 8))) {
    if constexpr (sizeof...(Ts) == 2) {
      HashDense(ReadSmall(value)...);
      return;
    }

    // More than two, but all small -- read each one into a contiguous buffer
    // of data. This may be a bit memory wasteful by padding everything out to
    // 8-byte chunks, but for that regularity the hashing is likely faster.
    const uint64_t data[] = {ReadSmall(value)...};
    Hash(data);
    return;
  }

  // For larger objects, hash each one down to a hash code and then hash those
  // as a buffer.
  const uint64_t data[] = {static_cast<uint64_t>(HashValue(value))...};
  Hash(data);
}

inline auto Hasher::HashSizedBytes(llvm::ArrayRef<std::byte> bytes) -> void {
  const std::byte* data_ptr = bytes.data();
  const ssize_t size = bytes.size();

  // First handle short sequences under 8 bytes. We distribute the branches a
  // bit for short strings.
  if (size <= 8) {
    if (size >= 4) {
      CARBON_MCA_BEGIN("dynamic-8b");
      uint64_t data = Read4To8(data_ptr, size);
      // We optimize for latency on short strings by hashing both the data and
      // size in a single multiply here, using the small nature of size to
      // sample a specific sequence of bytes with well distributed bits into
      // one side of the multiply. This results in a *statistically* weak hash
      // function, but one with very low latency.
      //
      // Note that we don't drop to the `WeakMix` routine here because we want
      // to use sampled random data to encode the size, which may not be as
      // effective without the full 128-bit folded result.
      buffer = Mix(data ^ buffer, SampleRandomData(size));
      CARBON_MCA_END("dynamic-8b");
      return;
    }

    // When we only have 0-3 bytes of string, we can avoid the cost of `Mix`.
    // Instead, for empty strings we can just XOR some of our data against the
    // existing buffer. For 1-3 byte lengths we do 3 one-byte reads adjusted to
    // always read in-bounds without branching. Then we OR the size into the
    // 4th byte and use `WeakMix`.
    CARBON_MCA_BEGIN("dynamic-4b");
    if (size == 0) {
      buffer ^= StaticRandomData[0];
    } else {
      uint64_t data =
          Read1To3(data_ptr, size) | static_cast<uint64_t>(size) << 24;
      buffer = WeakMix(buffer ^ data);
    }
    CARBON_MCA_END("dynamic-4b");
    return;
  }

  if (size <= 16) {
    CARBON_MCA_BEGIN("dynamic-16b");
    // Similar to the above, we optimize primarily for latency here and spread
    // the incoming data across both ends of the multiply. Note that this does
    // have a drawback -- any time one half of the mix function becomes zero it
    // will fail to incorporate any bits from the other half. However, there is
    // exactly 1 in 2^64 values for each side that achieve this, and only when
    // the size is exactly 16 -- for smaller sizes there is an overlapping byte
    // that makes this impossible unless the seed is *also* incredibly unlucky.
    //
    // Because this hash function makes no attempt to defend against hash
    // flooding, we accept this risk in order to keep the latency low. If this
    // becomes a non-flooding problem, we can restrict the size to <16 and send
    // the 16-byte case down the next tier of cost.
    uint64_t size_hash = SampleRandomData(size);
    auto data = Read8To16(data_ptr, size);
    buffer = Mix(data.first ^ size_hash, data.second ^ buffer);
    CARBON_MCA_END("dynamic-16b");
    return;
  }

  if (size <= 32) {
    CARBON_MCA_BEGIN("dynamic-32b");
    // Do two mixes of overlapping 16-byte ranges in parallel to minimize
    // latency. We also incorporate the size by sampling random data into the
    // seed before both.
    buffer ^= SampleRandomData(size);
    uint64_t m0 = Mix(Read8(data_ptr) ^ StaticRandomData[1],
                      Read8(data_ptr + 8) ^ buffer);
    const std::byte* tail_16b_ptr = data_ptr + (size - 16);
    uint64_t m1 = Mix(Read8(tail_16b_ptr) ^ StaticRandomData[3],
                      Read8(tail_16b_ptr + 8) ^ buffer);
    // Just an XOR mix at the end is quite weak here, but we prefer that for
    // latency over a more robust approach. Doing another mix with the size
    // (the way longer string hashing does) increases the latency on x86-64
    // significantly (approx. 20%).
    buffer = m0 ^ m1;
    CARBON_MCA_END("dynamic-32b");
    return;
  }

  HashSizedBytesLarge(bytes);
}

}  // namespace Carbon

#endif  // CARBON_COMMON_HASHING_H_