// Part of the Carbon Language project, under the Apache License v2.0 with LLVM
// Exceptions. See /LICENSE for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

#ifndef CARBON_TOOLCHAIN_BASE_VALUE_STORE_H_
#define CARBON_TOOLCHAIN_BASE_VALUE_STORE_H_

#include <bit>
#include <cstddef>
#include <limits>
#include <memory>
#include <type_traits>
#include <utility>

#include "common/check.h"
#include "common/ostream.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/MemAlloc.h"
#include "toolchain/base/mem_usage.h"
#include "toolchain/base/value_store_types.h"
#include "toolchain/base/yaml.h"

namespace Carbon {

namespace Internal {
// Used as a parent class for non-printable types. This is just for
// std::conditional, not as an API.
class ValueStoreNotPrintable {};
}  // namespace Internal

// Applies a per-container tag to id indices so that an id accidentally used
// with the wrong container can be detected when the tag is removed. Indices
// below `initial_reserved_ids` are never tagged.
struct IdTag {
  IdTag() = default;

  explicit IdTag(int32_t id_index, int32_t initial_reserved_ids)
      :  // Shift down by 1 to get out of the high bit to avoid using any
         // negative ids, since they have special uses. Shift down by another 1
         // to free up the second highest bit for a marker to indicate whether
         // the index is tagged (& needs to be untagged) or not. Add one to the
         // index so it's not zero-based, to make it a bit less likely that
         // this collides with anything else (though with the
         // second-highest-bit tagging this might not be needed).
        id_tag_(llvm::reverseBits((((id_index + 1) << 1) | 1) << 1)),
        initial_reserved_ids_(initial_reserved_ids) {}

  auto Apply(int32_t index) const -> int32_t {
    if (index < initial_reserved_ids_) {
      return index;
    }
    // TODO: Assert that id_tag_ doesn't have the second highest bit set.
    return index ^ id_tag_;
  }

  auto Remove(int32_t tagged_index) const -> int32_t {
    if (!HasTag(tagged_index)) {
      CARBON_DCHECK(tagged_index < initial_reserved_ids_,
                    "This untagged index is outside the initial reserved ids "
                    "and should have been tagged.");
      return tagged_index;
    }
    auto index = tagged_index ^ id_tag_;
    CARBON_DCHECK(index >= initial_reserved_ids_,
                  "When removing tagging bits, found an index that "
                  "shouldn't have been tagged in the first place.");
    return index;
  }

  // Gets the value unique to this IdTag instance that is added to indices in
  // Apply, and removed in Remove.
  auto GetContainerTag() const -> int32_t {
    return (llvm::reverseBits(id_tag_) >> 2) - 1;
  }

  // Returns whether `tagged_index` has an IdTag applied to it, from this IdTag
  // instance or any other one.
  static auto HasTag(int32_t tagged_index) -> bool {
    return (llvm::reverseBits(2) & tagged_index) != 0;
  }

  template <typename TagT>
  struct TagAndIndex {
    TagT tag;
    int32_t index;
  };

  // Best-effort decomposition of `tagged_index` back into a tag and an
  // untagged index, used for diagnostics. Returns a tag of `TagT{-1}` when the
  // index is negative, untagged, or can't be decomposed with confidence.
  template <typename TagT>
  static auto DecomposeWithBestEffort(int32_t tagged_index)
      -> TagAndIndex<TagT> {
    if (tagged_index < 0) {
      // TODO: This should return TagT::None, but we need a fallback TagT other
      // than `int32_t`.
      return {TagT{-1}, tagged_index};
    }
    if (!HasTag(tagged_index)) {
      // TODO: This should return TagT::None, but we need a fallback TagT other
      // than `int32_t`.
      return {TagT{-1}, tagged_index};
    }

    // Find the longest run of zero bits; the bits below that run are assumed
    // to be the index, and the bits at and above it the tag.
    int length = 0;
    int location = 0;
    for (int i = 0; i != 32; ++i) {
      int current_run = 0;
      int location_of_current_run = i;
      while (i != 32 && (tagged_index & (1 << i)) == 0) {
        ++current_run;
        ++i;
      }
      if (current_run != 0) {
        --i;
      }
      if (current_run > length) {
        length = current_run;
        location = location_of_current_run;
      }
    }
    if (length < 8) {
      // TODO: This should return TagT::None, but we need a fallback TagT other
      // than `int32_t`.
      return {TagT{-1}, tagged_index};
    }

    auto index_mask = llvm::maskTrailingOnes<uint32_t>(location);
    auto tag = (llvm::reverseBits(tagged_index & ~index_mask) >> 2) - 1;
    auto index = tagged_index & index_mask;
    return {.tag = TagT{static_cast<int32_t>(tag)},
            .index = static_cast<int32_t>(index)};
  }

 private:
  int32_t id_tag_ = 0;
  int32_t initial_reserved_ids_ = std::numeric_limits<int32_t>::max();
};
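
// An illustrative sketch of `IdTag` behavior (a hypothetical usage, not taken
// from the Carbon sources). The tag is XORed into indices at or above
// `initial_reserved_ids`, so removing it is the same XOR:
// ```
// IdTag tag(/*id_index=*/3, /*initial_reserved_ids=*/2);
// CARBON_CHECK(tag.Apply(1) == 1);          // Reserved ids pass through.
// int32_t tagged = tag.Apply(100);          // High marker bits are set.
// CARBON_CHECK(IdTag::HasTag(tagged));
// CARBON_CHECK(tag.Remove(tagged) == 100);  // The XOR round-trips the index.
// ```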

// A simple wrapper for accumulating values, providing IDs to later retrieve
// the value. This does not do deduplication.
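//
// An illustrative sketch of basic use (`MyId` is a hypothetical id type that
// is constructible from an `int32_t` index and exposes an `index` member):
// ```
// ValueStore<MyId, int> store;
// MyId id = store.Add(42);
// int value = store.Get(id);
// ```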
template <typename IdT, typename ValueT>
class ValueStore
    : public std::conditional_t<std::is_base_of_v<Printable<ValueT>, ValueT>,
                                Yaml::Printable<ValueStore<IdT, ValueT>>,
                                Internal::ValueStoreNotPrintable> {
 public:
  using IdType = IdT;
  using ValueType = ValueStoreTypes<ValueT>::ValueType;
  using RefType = ValueStoreTypes<ValueT>::RefType;
  using ConstRefType = ValueStoreTypes<ValueT>::ConstRefType;

  // A range over references to the values in a ValueStore, returned from
  // `ValueStore::values()`. Hides the complex type name of the iterator
  // internally to provide a type name (`Range`) that can be referred to
  // without auto and templates.
  class Range {
   public:
    explicit Range(const ValueStore& store [[clang::lifetimebound]])
        : flattened_range_(MakeFlattenedRange(store)) {}

    auto begin() const -> auto { return flattened_range_.begin(); }
    auto end() const -> auto { return flattened_range_.end(); }

   private:
    // Flattens the range of `Chunk`s of `ValueType`s into a single
    // range of `ValueType`s.
    static auto MakeFlattenedRange(const ValueStore& store) -> auto {
      // Because indices into `ValueStore` are all sequential values from 0, we
      // can use llvm::seq to walk all indices in the store.
      return llvm::map_range(
          llvm::seq(store.size_),
          [&](int32_t i) -> ConstRefType { return store.Get(IdType(i)); });
    }

    using FlattenedRangeType =
        decltype(MakeFlattenedRange(std::declval<const ValueStore&>()));

    FlattenedRangeType flattened_range_;
  };

  ValueStore() = default;
  explicit ValueStore(IdTag tag) : tag_(tag) {}

  // Stores the value and returns an ID to reference it.
  auto Add(ValueType value) -> IdType {
    // This routine is especially hot, and the check here is relatively
    // expensive for the value it provides, so only do this in non-optimized
    // builds to make tracking down issues easier.
    CARBON_DCHECK(size_ < std::numeric_limits<int32_t>::max(), "Id overflow");

    IdType id(tag_.Apply(size_));
    auto [chunk_index, pos] = RawIndexToChunkIndices(size_);
    ++size_;
    CARBON_DCHECK(static_cast<size_t>(chunk_index) <= chunks_.size(),
                  "{0} <= {1}", chunk_index, chunks_.size());
    if (static_cast<size_t>(chunk_index) == chunks_.size()) {
      chunks_.emplace_back();
    }
    CARBON_DCHECK(pos == chunks_[chunk_index].size());
    chunks_[chunk_index].Add(std::move(value));
    return id;
  }

  // Returns a mutable value for an ID.
  auto Get(IdType id) -> RefType {
    CARBON_DCHECK(id.index >= 0, "{0}", id);
    auto [chunk_index, pos] = IdToChunkIndices(id);
    return chunks_[chunk_index].Get(pos);
  }

  // Returns the value for an ID.
  auto Get(IdType id) const -> ConstRefType {
    CARBON_DCHECK(id.index >= 0, "{0}", id);
    auto [chunk_index, pos] = IdToChunkIndices(id);
    return chunks_[chunk_index].Get(pos);
  }

  // Reserves chunk storage so that `size` values can be held without further
  // allocation. Does not change the number of values in the store.
  auto Reserve(int32_t size) -> void {
    if (size <= size_) {
      return;
    }
    auto [final_chunk_index, _] = RawIndexToChunkIndices(size - 1);
    chunks_.resize(final_chunk_index + 1);
  }

  // Grows the ValueStore to `size`. Fills entries with `default_value`.
  auto Resize(int32_t size, ConstRefType default_value) -> void {
    if (size <= size_) {
      return;
    }
    auto [begin_chunk_index, begin_pos] = RawIndexToChunkIndices(size_);
    // Use an inclusive range so that if `size` would be the next chunk, we
    // don't try doing something with it.
    auto [end_chunk_index, end_pos] = RawIndexToChunkIndices(size - 1);
    chunks_.resize(end_chunk_index + 1);

    // If the begin and end chunks are the same, we only fill from begin to
    // end.
    if (begin_chunk_index == end_chunk_index) {
      chunks_[begin_chunk_index].UninitializedFill(end_pos - begin_pos + 1,
                                                   default_value);
    } else {
      // Otherwise, we do partial fills on the begin and end chunk, and full
      // fills on intermediate chunks.
      chunks_[begin_chunk_index].UninitializedFill(
          Chunk::Capacity() - begin_pos, default_value);
      for (auto i = begin_chunk_index + 1; i < end_chunk_index; ++i) {
        chunks_[i].UninitializedFill(Chunk::Capacity(), default_value);
      }
      chunks_[end_chunk_index].UninitializedFill(end_pos + 1, default_value);
    }

    // Update size.
    size_ = size;
  }
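
  // An illustrative walk-through of the fill logic in `Resize` above
  // (a hypothetical case, assuming Chunk::Capacity() == 4): growing from
  // size_ == 6 to size == 14 gives begin indices {1, 2} and end indices
  // {3, 1}, so chunk 1 is filled from position 2 up to its capacity, chunk 2
  // is filled completely, and chunk 3 is filled through position 1.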

  // These are to support printable structures, and are not guaranteed.
  auto OutputYaml() const -> Yaml::OutputMapping {
    return Yaml::OutputMapping([&](Yaml::OutputMapping::Map map) {
      for (auto [id, value] : enumerate()) {
        map.Add(PrintToString(id), Yaml::OutputScalar(value));
      }
    });
  }

  // Collects memory usage of the values.
  auto CollectMemUsage(MemUsage& mem_usage, llvm::StringRef label) const
      -> void {
    mem_usage.Add(label.str(), size_ * sizeof(ValueType),
                  Chunk::CapacityBytes * chunks_.size());
  }

  auto size() const -> size_t { return size_; }

  // Makes an iterable range over references to all values in the ValueStore.
  auto values() [[clang::lifetimebound]] -> auto {
    return llvm::map_range(
        llvm::seq(size_),
        [&](int32_t i) -> RefType { return Get(IdType(i)); });
  }
  auto values() const [[clang::lifetimebound]] -> Range { return Range(*this); }
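
  // An illustrative sketch of iterating the store (`MyId` is a hypothetical
  // id type). The const overload returns the named `Range` type, so the range
  // can be stored without spelling out iterator types:
  // ```
  // for (const auto& value : store.values()) { ... }
  // ValueStore<MyId, int>::Range range = store.values();
  // ```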

  // Makes an iterable range over pairs of the index and a reference to the
  // value for each value in the store.
  //
  // The range is over references to the values in the store, even if used with
  // `auto` to destructure the pair. In this example, the `value` is a
  // `ConstRefType`:
  // ```
  // for (auto [id, value] : store.enumerate()) { ... }
  // ```
  auto enumerate() const [[clang::lifetimebound]] -> auto {
    // For `it->val`, writing `const std::pair` is required; otherwise
    // `mapped_iterator` incorrectly infers the pointer type for `PointerProxy`.
    // NOLINTNEXTLINE(readability-const-return-type)
    auto index_to_id =
        [&](int32_t i) -> const std::pair<IdType, ConstRefType> {
      IdType id(tag_.Apply(i));
      return std::pair<IdType, ConstRefType>(id, Get(id));
    };
    // Because indices into `ValueStore` are all sequential values from 0, we
    // can use llvm::seq to walk all indices in the store.
    return llvm::map_range(llvm::seq(size_), index_to_id);
  }

  auto GetIdTag() const -> IdTag { return tag_; }

  auto GetRawIndex(IdT id) const -> int32_t {
    auto index = tag_.Remove(id.index);
    CARBON_DCHECK(index >= 0, "{0}", index);
#ifndef NDEBUG
    if (index >= size_) {
      // Attempt to decompose id.index to include extra detail in the check
      // here.
      //
      // TODO: Teach ValueStore the type of the tag id with a template, then we
      // can print it with proper formatting instead of just as an integer.
      auto [id_tag, id_untagged_index] =
          IdTag::DecomposeWithBestEffort<int32_t>(id.index);
      CARBON_DCHECK(
          index < size_,
          "Untagged index was outside of container range. Tagged index {0}. "
          "Best-effort decomposition: Tag: {1}, Index: {2}. "
          "Container size: {3}. "
          "Expected Tag for this container: {4}.",
          id.index, id_tag, id_untagged_index, size_, tag_.GetContainerTag());
    }
#endif
    return index;
  }

 private:
  // A chunk of `ValueType`s which has a fixed capacity, but variable size.
  // Tracks the size internally for verifying bounds.
  struct Chunk {
   public:
    // The max size of each chunk allocation for `ValueStore`. This is based on
    // TLB page sizes for the target platform.
    //
    // See https://docs.kernel.org/admin-guide/mm/hugetlbpage.html
    //
    // A 4K chunk size outperforms a 1M chunk size on Linux-x64 and MacOS-arm64
    // in benchmarks and when running file_test.
    //
    // Linux-x64: x64 CPUs support 4K and 2M page sizes, but we see 1M is
    // slower than 4K with tcmalloc in opt builds for our tests.
    //
    // Mac-arm64: arm64 CPUs support 4K, 8K, 64K, 256K, 1M, 4M and up. Like for
    // Linux-x64, 4K outperformed 1M. We didn't try other sizes yet.
    //
    // TODO: Is there a more optimal size for Mac-arm64? What should
    // Linux-arm64 and Mac-x64 use? What should Windows use?
    //
    // TODO: The previous SmallVector<ValueType> seems to outperform 4K chunks
    // (chunking may be slower by up to 5%) in benchmarks. Find ways to make
    // chunking faster. Should successive chunks get larger in size? That will
    // greatly complicate the math for choosing a chunk though.
    static constexpr auto MaxAllocationBytes() -> int32_t {
#if !defined(NDEBUG) || LLVM_ADDRESS_SANITIZER_BUILD
      // Use a small size in unoptimized builds to ensure multiple chunks get
      // used. And do the same in ASAN builds to reduce bookkeeping overheads.
      // Using large allocations (e.g. 1M+) incurs a 10x runtime cost for our
      // tests under ASAN.
      return sizeof(ValueType) * 5;
#else
      return 4 * 1024;
#endif
    }

    // The number of elements stored in each chunk allocation.
    //
    // The number must be a power of two so that there are no unused values in
    // the bits indexing into the allocation.
    static constexpr auto Capacity() -> int32_t {
      constexpr auto MaxElements = MaxAllocationBytes() / sizeof(ValueType);
      return std::bit_floor(MaxElements);
    }

    // The number of bits needed to index each element in a chunk allocation.
    static constexpr auto IndexBits() -> int32_t {
      static_assert(Capacity() > 0);
      return std::bit_width(uint32_t{Capacity() - 1});
    }
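
    // An illustrative worked example of the Capacity() and IndexBits() math
    // above (hypothetical numbers, assuming an optimized build and
    // sizeof(ValueType) == 24): MaxElements is 4096 / 24 == 170, Capacity() is
    // bit_floor(170) == 128, and IndexBits() is bit_width(127) == 7.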

    static constexpr auto CapacityBytes = Capacity() * sizeof(ValueType);

    explicit Chunk()
        : buf_(reinterpret_cast<ValueType*>(
              llvm::allocate_buffer(CapacityBytes, alignof(ValueType)))) {}

    // Moving leaves nullptr behind in the moved-from object so that the
    // destructor is a no-op (preventing double free).
    Chunk(Chunk&& rhs) noexcept
        : buf_(std::exchange(rhs.buf_, nullptr)), num_(rhs.num_) {}
    auto operator=(Chunk&& rhs) noexcept -> Chunk& {
      buf_ = std::exchange(rhs.buf_, nullptr);
      num_ = rhs.num_;
      return *this;
    }

    ~Chunk() {
      if (buf_) {
        if constexpr (!std::is_trivially_destructible_v<ValueType>) {
          std::destroy_n(buf_, num_);
        }
        llvm::deallocate_buffer(buf_, CapacityBytes, alignof(ValueType));
      }
    }

    auto Get(int32_t i) -> ValueType& {
      CARBON_DCHECK(i < num_, "{0}", i);
      return buf_[i];
    }
    auto Get(int32_t i) const -> const ValueType& {
      CARBON_DCHECK(i < num_, "{0}", i);
      return buf_[i];
    }

    auto Add(ValueType&& value) -> void {
      CARBON_DCHECK(num_ < Capacity());
      std::construct_at(buf_ + num_, std::move(value));
      ++num_;
    }

    // Fills `fill_count` entries with `default_value`, increasing the size
    // accordingly.
    auto UninitializedFill(int32_t fill_count, ConstRefType default_value)
        -> void {
      CARBON_DCHECK(num_ + fill_count <= Capacity());
      std::uninitialized_fill_n(buf_ + num_, fill_count, default_value);
      num_ += fill_count;
    }

    auto size() const -> int32_t { return num_; }

   private:
    // Verify using an `int32_t` for `num_` is sound.
    static_assert(Capacity() <= std::numeric_limits<int32_t>::max());

    ValueType* buf_;
    int32_t num_ = 0;
  };

  // Converts a raw index into an index into the set of chunks, and an offset
  // into that specific chunk. Looks for index overflow in non-optimized
  // builds.
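  //
  // An illustrative worked example (hypothetical numbers, assuming
  // Chunk::Capacity() == 128, so LowBits == 7): raw index 1000 maps to chunk
  // 1000 >> 7 == 7 and position 1000 & 127 == 104.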
  static auto RawIndexToChunkIndices(int32_t index)
      -> std::pair<int32_t, int32_t> {
    constexpr auto LowBits = Chunk::IndexBits();
    // Verify there are no unused bits when indexing up to the `Capacity`. This
    // ensures that ids are contiguous values from 0, as if the values were all
    // stored in a single array, and allows using the ids to index into other
    // arrays.
    static_assert((1 << LowBits) == Chunk::Capacity());
    // Simple check to make sure nothing went wildly wrong with the `Capacity`,
    // and we have some room for a chunk index, and that shifting by the number
    // of bits won't be UB in an int32_t.
    static_assert(LowBits < 30);

    // The index of the chunk is the high bits.
    auto chunk = index >> LowBits;
    // The index into the chunk is the low bits.
    auto pos = index & ((1 << LowBits) - 1);
    return {chunk, pos};
  }

  // Converts an id into an index into the set of chunks, and an offset into
  // that specific chunk.
  auto IdToChunkIndices(IdType id) const -> std::pair<int32_t, int32_t> {
    return RawIndexToChunkIndices(GetRawIndex(id));
  }

  // Number of elements added to the store. The number should never exceed what
  // fits in an `int32_t`, which is checked in non-optimized builds in Add().
  int32_t size_ = 0;

  IdTag tag_;

  // Storage for the `ValueType` objects, indexed by the id. We use a vector of
  // chunks of `ValueType` instead of just a vector of `ValueType` so that
  // addresses of `ValueType` objects are stable. This allows the rest of the
  // toolchain to hold references into `ValueStore` without having to worry
  // about invalidation and use-after-free. We ensure at least one Chunk is
  // held inline so that in the case where there is only a single Chunk (i.e.
  // small files) we can avoid one indirection.
  llvm::SmallVector<Chunk, 1> chunks_;
};

}  // namespace Carbon

#endif  // CARBON_TOOLCHAIN_BASE_VALUE_STORE_H_