Kaynağa Gözat

Document abbreviation style things (#4996)

We had a long discussion of this, so trying to document what seems to be
the conclusion... and also clean up the exceptions that I could find.

---------

Co-authored-by: Dana Jansens <danakj@orodu.net>
Jon Ross-Perkins 1 yıl önce
ebeveyn
işleme
467e510d40

+ 1 - 1
common/hashing_test.cpp

@@ -23,7 +23,7 @@ using ::testing::Eq;
 using ::testing::Le;
 using ::testing::Ne;
 
-TEST(HashingTest, HashCodeAPI) {
+TEST(HashingTest, HashCodeApi) {
   // Manually compute a few hash codes where we can exercise the underlying API.
   HashCode empty = HashValue("");
   HashCode a = HashValue("a");

+ 6 - 6
common/hashtable_key_context.h

@@ -50,7 +50,7 @@ auto HashtableEq(const LeftT& lhs, const RightT& rhs) -> bool;
 // properties of hashes produced by the `hashing.h` infrastructure.
 //
 // The default for comparison is `operator==`. The `KeyEq` method is always
-// called with a key *stored in the hashtable* as the second or "RHS" parameter.
+// called with a key *stored in the hashtable* as the second or "Rhs" parameter.
 // This is to allow simplifying the set of overloads needed for heterogeneous
 // contexts: only the first, LHS, parameter needs to support different lookup
 // key types.
@@ -217,13 +217,13 @@ auto TranslatingKeyContext<DerivedT>::KeyEq(const AnyKeyT& lhs_key,
   const DerivedT& self = *static_cast<const DerivedT*>(this);
   // Because we don't want to make no-op calls and potentially struggle with
   // temporary lifetimes at runtime we have to fully expand the 4 states.
-  constexpr bool TranslateLHS = requires { self.TranslateKey(lhs_key); };
-  constexpr bool TranslateRHS = requires { self.TranslateKey(rhs_key); };
-  if constexpr (TranslateLHS && TranslateRHS) {
+  constexpr bool TranslateLhs = requires { self.TranslateKey(lhs_key); };
+  constexpr bool TranslateRhs = requires { self.TranslateKey(rhs_key); };
+  if constexpr (TranslateLhs && TranslateRhs) {
     return HashtableEq(self.TranslateKey(lhs_key), self.TranslateKey(rhs_key));
-  } else if constexpr (TranslateLHS) {
+  } else if constexpr (TranslateLhs) {
     return HashtableEq(self.TranslateKey(lhs_key), rhs_key);
-  } else if constexpr (TranslateRHS) {
+  } else if constexpr (TranslateRhs) {
     return HashtableEq(lhs_key, self.TranslateKey(rhs_key));
   } else {
     return HashtableEq(lhs_key, rhs_key);

+ 1 - 1
common/map_test.cpp

@@ -124,7 +124,7 @@ TYPED_TEST(MapTest, Basic) {
       m, MakeKeyValues([](int k) { return k * 100 + 1; }, llvm::seq(1, 512)));
 }
 
-TYPED_TEST(MapTest, FactoryAPI) {
+TYPED_TEST(MapTest, FactoryApi) {
   TypeParam m;
   EXPECT_TRUE(m.Insert(1, [] { return 100; }).is_inserted());
   ASSERT_TRUE(m.Contains(1));

+ 100 - 100
common/raw_hashtable_metadata_group.h

@@ -303,14 +303,14 @@ class BitIndexRange
 // auto MetadataGroup::Operation(...) -> ... {
 //   ... portable_result;
 //   ... simd_result;
-//   if constexpr (!UseSIMD || DebugSIMD) {
+//   if constexpr (!UseSimd || DebugSimd) {
 //     portable_result = PortableOperation(...);
 //   }
-//   if (UseSIMD || DebugSIMD) {
-//     simd_result = SIMDOperation(...)
+//   if (UseSimd || DebugSimd) {
+//     simd_result = SimdOperation(...)
 //     CARBON_DCHECK(result == portable_result, "{0}", ...);
 //   }
-//   return UseSIMD ? simd_result : portable_result;
+//   return UseSimd ? simd_result : portable_result;
 // }
 // ```
 class MetadataGroup : public Printable<MetadataGroup> {
@@ -344,7 +344,7 @@ class MetadataGroup : public Printable<MetadataGroup> {
   // Whether to use a SIMD implementation. Even when we *support* a SIMD
   // implementation, we do not always have to use it in the event that it is
   // less efficient than the portable version.
-  static constexpr bool UseSIMD =
+  static constexpr bool UseSimd =
 #if CARBON_X86_SIMD_SUPPORT
       true;
 #else
@@ -375,8 +375,8 @@ class MetadataGroup : public Printable<MetadataGroup> {
   static constexpr bool FastByteClear = Size == 8;
 
   // Most and least significant bits set.
-  static constexpr uint64_t MSBs = 0x8080'8080'8080'8080ULL;
-  static constexpr uint64_t LSBs = 0x0101'0101'0101'0101ULL;
+  static constexpr uint64_t Msbs = 0x8080'8080'8080'8080ULL;
+  static constexpr uint64_t Lsbs = 0x0101'0101'0101'0101ULL;
 
   using MatchIndex =
       BitIndex<std::conditional_t<ByteEncoding, uint64_t, uint32_t>,
@@ -389,16 +389,16 @@ class MetadataGroup : public Printable<MetadataGroup> {
   // We use specialized match range types for SIMD implementations to allow
   // deferring the masking operation where useful. When that optimization
   // doesn't apply, these will be the same type.
-  using SIMDMatchRange =
-      BitIndexRange<MatchIndex, /*ByteEncodingMask=*/ByteEncoding ? MSBs : 0>;
-  using SIMDMatchPresentRange = BitIndexRange<MatchIndex>;
+  using SimdMatchRange =
+      BitIndexRange<MatchIndex, /*ByteEncodingMask=*/ByteEncoding ? Msbs : 0>;
+  using SimdMatchPresentRange = BitIndexRange<MatchIndex>;
 
   // The public API range types can be either the portable or SIMD variations,
   // selected here.
   using MatchRange =
-      std::conditional_t<UseSIMD, SIMDMatchRange, PortableMatchRange>;
+      std::conditional_t<UseSimd, SimdMatchRange, PortableMatchRange>;
   using MatchPresentRange =
-      std::conditional_t<UseSIMD, SIMDMatchPresentRange, PortableMatchRange>;
+      std::conditional_t<UseSimd, SimdMatchPresentRange, PortableMatchRange>;
 
   union {
     uint8_t metadata_bytes[Size];
@@ -477,13 +477,13 @@ class MetadataGroup : public Printable<MetadataGroup> {
   // Two classes only defined in the benchmark code are allowed to directly call
   // the portable and SIMD implementations for benchmarking purposes.
   friend class BenchmarkPortableMetadataGroup;
-  friend class BenchmarkSIMDMetadataGroup;
+  friend class BenchmarkSimdMetadataGroup;
 
   // All SIMD variants that we have an implementation for should be enabled for
   // debugging. This lets us maintain a SIMD implementation even if it is not
   // used due to performance reasons, and easily re-enable it if the performance
   // changes.
-  static constexpr bool DebugSIMD =
+  static constexpr bool DebugSimd =
 #if !defined(NDEBUG) && (CARBON_NEON_SIMD_SUPPORT || CARBON_X86_SIMD_SUPPORT)
       true;
 #else
@@ -553,23 +553,23 @@ class MetadataGroup : public Printable<MetadataGroup> {
   //
   // These routines don't directly verify their results as we can build simpler
   // debug checks by comparing them against the verified portable results.
-  static auto SIMDLoad(const uint8_t* metadata, ssize_t index) -> MetadataGroup;
-  auto SIMDStore(uint8_t* metadata, ssize_t index) const -> void;
+  static auto SimdLoad(const uint8_t* metadata, ssize_t index) -> MetadataGroup;
+  auto SimdStore(uint8_t* metadata, ssize_t index) const -> void;
 
-  auto SIMDClearDeleted() -> void;
+  auto SimdClearDeleted() -> void;
 
-  auto SIMDMatch(uint8_t tag) const -> SIMDMatchRange;
-  auto SIMDMatchPresent() const -> SIMDMatchPresentRange;
+  auto SimdMatch(uint8_t tag) const -> SimdMatchRange;
+  auto SimdMatchPresent() const -> SimdMatchPresentRange;
 
-  auto SIMDMatchEmpty() const -> MatchIndex;
-  auto SIMDMatchDeleted() const -> MatchIndex;
+  auto SimdMatchEmpty() const -> MatchIndex;
+  auto SimdMatchDeleted() const -> MatchIndex;
 
-  static auto SIMDCompareEqual(MetadataGroup lhs, MetadataGroup rhs) -> bool;
+  static auto SimdCompareEqual(MetadataGroup lhs, MetadataGroup rhs) -> bool;
 
 #if CARBON_X86_SIMD_SUPPORT
   // A common routine for x86 SIMD matching that can be used for matching
   // present, empty, and deleted bytes with equal efficiency.
-  auto X86SIMDMatch(uint8_t match_byte) const -> SIMDMatchRange;
+  auto X86SimdMatch(uint8_t match_byte) const -> SimdMatchRange;
 #endif
 };
 
@@ -581,23 +581,23 @@ inline constexpr ssize_t GroupMask = MetadataGroup::Mask;
 inline auto MetadataGroup::Load(const uint8_t* metadata, ssize_t index)
     -> MetadataGroup {
   MetadataGroup portable_g;
-  if constexpr (!UseSIMD || DebugSIMD) {
+  if constexpr (!UseSimd || DebugSimd) {
     portable_g = PortableLoad(metadata, index);
-    if constexpr (!UseSIMD) {
+    if constexpr (!UseSimd) {
       return portable_g;
     }
   }
-  MetadataGroup g = SIMDLoad(metadata, index);
+  MetadataGroup g = SimdLoad(metadata, index);
   CARBON_DCHECK(g == portable_g);
   return g;
 }
 
 inline auto MetadataGroup::Store(uint8_t* metadata, ssize_t index) const
     -> void {
-  if constexpr (!UseSIMD) {
+  if constexpr (!UseSimd) {
     std::memcpy(metadata + index, &metadata_bytes, Size);
   } else {
-    SIMDStore(metadata, index);
+    SimdStore(metadata, index);
   }
   CARBON_DCHECK(0 == std::memcmp(metadata + index, &metadata_bytes, Size));
 }
@@ -615,17 +615,17 @@ inline auto MetadataGroup::ClearByte(ssize_t byte_index) -> void {
 inline auto MetadataGroup::ClearDeleted() -> void {
   MetadataGroup portable_g = *this;
   MetadataGroup simd_g = *this;
-  if constexpr (!UseSIMD || DebugSIMD) {
+  if constexpr (!UseSimd || DebugSimd) {
     portable_g.PortableClearDeleted();
   }
-  if constexpr (UseSIMD || DebugSIMD) {
-    simd_g.SIMDClearDeleted();
+  if constexpr (UseSimd || DebugSimd) {
+    simd_g.SimdClearDeleted();
     CARBON_DCHECK(
         simd_g == portable_g,
         "SIMD cleared group '{0}' doesn't match portable cleared group '{1}'",
         simd_g, portable_g);
   }
-  *this = UseSIMD ? simd_g : portable_g;
+  *this = UseSimd ? simd_g : portable_g;
 }
 
 inline auto MetadataGroup::Match(uint8_t tag) const -> MatchRange {
@@ -635,78 +635,78 @@ inline auto MetadataGroup::Match(uint8_t tag) const -> MatchRange {
   CARBON_DCHECK((tag & PresentMask) == 0, "{0:x}", tag);
 
   PortableMatchRange portable_result;
-  SIMDMatchRange simd_result;
-  if constexpr (!UseSIMD || DebugSIMD) {
+  SimdMatchRange simd_result;
+  if constexpr (!UseSimd || DebugSimd) {
     portable_result = PortableMatch(tag);
   }
-  if constexpr (UseSIMD || DebugSIMD) {
-    simd_result = SIMDMatch(tag);
+  if constexpr (UseSimd || DebugSimd) {
+    simd_result = SimdMatch(tag);
     CARBON_DCHECK(simd_result == portable_result,
                   "SIMD result '{0}' doesn't match portable result '{1}'",
                   simd_result, portable_result);
   }
   // Return whichever result we're using.
-  return ConstexprTernary<UseSIMD>(simd_result, portable_result);
+  return ConstexprTernary<UseSimd>(simd_result, portable_result);
 }
 
 inline auto MetadataGroup::MatchPresent() const -> MatchPresentRange {
   PortableMatchRange portable_result;
-  SIMDMatchPresentRange simd_result;
-  if constexpr (!UseSIMD || DebugSIMD) {
+  SimdMatchPresentRange simd_result;
+  if constexpr (!UseSimd || DebugSimd) {
     portable_result = PortableMatchPresent();
   }
-  if constexpr (UseSIMD || DebugSIMD) {
-    simd_result = SIMDMatchPresent();
+  if constexpr (UseSimd || DebugSimd) {
+    simd_result = SimdMatchPresent();
     CARBON_DCHECK(simd_result == portable_result,
                   "SIMD result '{0}' doesn't match portable result '{1}'",
                   simd_result, portable_result);
   }
   // Return whichever result we're using.
-  return ConstexprTernary<UseSIMD>(simd_result, portable_result);
+  return ConstexprTernary<UseSimd>(simd_result, portable_result);
 }
 
 inline auto MetadataGroup::MatchEmpty() const -> MatchIndex {
   MatchIndex portable_result;
   MatchIndex simd_result;
-  if constexpr (!UseSIMD || DebugSIMD) {
+  if constexpr (!UseSimd || DebugSimd) {
     portable_result = PortableMatchEmpty();
   }
-  if constexpr (UseSIMD || DebugSIMD) {
-    simd_result = SIMDMatchEmpty();
+  if constexpr (UseSimd || DebugSimd) {
+    simd_result = SimdMatchEmpty();
     CARBON_DCHECK(simd_result == portable_result,
                   "SIMD result '{0}' doesn't match portable result '{1}'",
                   simd_result, portable_result);
   }
-  return UseSIMD ? simd_result : portable_result;
+  return UseSimd ? simd_result : portable_result;
 }
 
 inline auto MetadataGroup::MatchDeleted() const -> MatchIndex {
   MatchIndex portable_result;
   MatchIndex simd_result;
-  if constexpr (!UseSIMD || DebugSIMD) {
+  if constexpr (!UseSimd || DebugSimd) {
     portable_result = PortableMatchDeleted();
   }
-  if constexpr (UseSIMD || DebugSIMD) {
-    simd_result = SIMDMatchDeleted();
+  if constexpr (UseSimd || DebugSimd) {
+    simd_result = SimdMatchDeleted();
     CARBON_DCHECK(simd_result == portable_result,
                   "SIMD result '{0}' doesn't match portable result '{1}'",
                   simd_result, portable_result);
   }
-  return UseSIMD ? simd_result : portable_result;
+  return UseSimd ? simd_result : portable_result;
 }
 
 inline auto MetadataGroup::CompareEqual(MetadataGroup lhs, MetadataGroup rhs)
     -> bool {
   bool portable_result;
   bool simd_result;
-  if constexpr (!UseSIMD || DebugSIMD) {
+  if constexpr (!UseSimd || DebugSimd) {
     portable_result = PortableCompareEqual(lhs, rhs);
   }
-  if constexpr (UseSIMD || DebugSIMD) {
-    simd_result = SIMDCompareEqual(lhs, rhs);
+  if constexpr (UseSimd || DebugSimd) {
+    simd_result = SimdCompareEqual(lhs, rhs);
     CARBON_DCHECK(simd_result == portable_result);
   }
-  return UseSIMD ? simd_result : portable_result;
+  return UseSimd ? simd_result : portable_result;
 }
 
 inline auto MetadataGroup::VerifyIndexBits(
@@ -798,10 +798,10 @@ inline auto MetadataGroup::PortableClearDeleted() -> void {
     // need to preserve are those of present bytes. The most significant bit of
     // every present byte is set, so we take the most significant bit of each
     // byte, shift it into the least significant bit position, and bit-or it
-    // with the compliment of `LSBs`. This will have ones for every bit but the
+    // with the complement of `Lsbs`. This will have ones for every bit but the
     // least significant bits, and ones for the least significant bits of every
     // present byte.
-    metadata_int &= (~LSBs | metadata_int >> 7);
+    metadata_int &= (~Lsbs | metadata_int >> 7);
   }
 }
 
@@ -834,13 +834,13 @@ inline auto MetadataGroup::PortableMatch(uint8_t tag) const -> MatchRange {
   // algorithm has a critical path height of 4 operations, and does 6
   // operations total on AArch64. The operation dependency graph is:
   //
-  //          group | MSBs        LSBs * match_byte + MSBs
+  //          group | Msbs        Lsbs * tag + Msbs
   //                 \                /
   //                 match_bits ^ broadcast
   //                            |
-  //   group & MSBs        MSBs - match_bits
+  //   group & Msbs        Msbs - match_bits
   //          \                /
-  //        group_MSBs & match_bits
+  //        group_Msbs & match_bits
   //
   // This diagram and the operation count are specific to AArch64 where we have
   // a fused *integer* multiply-add operation.
@@ -856,13 +856,13 @@ inline auto MetadataGroup::PortableMatch(uint8_t tag) const -> MatchRange {
   // and so always has this bit set as well, which means the xor below, in
   // addition to zeroing the low 7 bits of any byte that matches the tag, also
   // clears the high bit of every byte.
-  uint64_t match_bits = metadata_ints[0] | MSBs;
+  uint64_t match_bits = metadata_ints[0] | Msbs;
   // Broadcast the match byte to all bytes, and mask in the present bits in the
-  // MSBs of each byte. We structure this as a multiply and an add because we
+  // Msbs of each byte. We structure this as a multiply and an add because we
   // know that the add cannot carry, and this way it can be lowered using
   // combined multiply-add instructions if available.
-  uint64_t broadcast = LSBs * tag + MSBs;
-  CARBON_DCHECK(broadcast == (LSBs * tag | MSBs),
+  uint64_t broadcast = Lsbs * tag + Msbs;
+  CARBON_DCHECK(broadcast == (Lsbs * tag | Msbs),
                 "Unexpected carry from addition!");
 
   // Xor the broadcast byte pattern. This makes bytes with matches become 0, and
@@ -872,11 +872,11 @@ inline auto MetadataGroup::PortableMatch(uint8_t tag) const -> MatchRange {
   match_bits = match_bits ^ broadcast;
   // Subtract each byte of `match_bits` from `0x80` bytes. After this, the high
   // bit will be set only for those bytes that were zero.
-  match_bits = MSBs - match_bits;
+  match_bits = Msbs - match_bits;
   // Zero everything but the high bits, and also zero the high bits of any bytes
   // for "not present" slots in the original group. This avoids false positives
   // for `Empty` and `Deleted` bytes in the metadata.
-  match_bits &= (metadata_ints[0] & MSBs);
+  match_bits &= (metadata_ints[0] & Msbs);
 
   // At this point, `match_bits` has the high bit set for bytes where the
   // original group byte equals `tag` plus the high bit.
@@ -905,7 +905,7 @@ inline auto MetadataGroup::PortableMatchPresent() const -> MatchRange {
 
   // Want to keep the high bit of each byte, which indicates whether that byte
   // represents a present slot.
-  uint64_t match_bits = metadata_ints[0] & MSBs;
+  uint64_t match_bits = metadata_ints[0] & Msbs;
 
   CARBON_DCHECK(VerifyPortableRangeBits(
       match_bits, [&](uint8_t byte) { return (byte & PresentMask) != 0; }));
@@ -937,7 +937,7 @@ inline auto MetadataGroup::PortableMatchEmpty() const -> MatchIndex {
   //   cause the high bit to be set.
   uint64_t match_bits = metadata_ints[0] | (metadata_ints[0] << 7);
   // This inverts the high bits of the bytes, and clears the remaining bits.
-  match_bits = ~match_bits & MSBs;
+  match_bits = ~match_bits & Msbs;
 
   // The high bits of the bytes of `match_bits` are set if the corresponding
   // metadata byte is `Empty`.
@@ -971,7 +971,7 @@ inline auto MetadataGroup::PortableMatchDeleted() const -> MatchIndex {
   //   shifting left by 7 will have the high bit set.
   uint64_t match_bits = metadata_ints[0] | (~metadata_ints[0] << 7);
   // This inverts the high bits of the bytes, and clears the remaining bits.
-  match_bits = ~match_bits & MSBs;
+  match_bits = ~match_bits & Msbs;
 
   // The high bits of the bytes of `match_bits` are set if the corresponding
   // metadata byte is `Deleted`.
@@ -985,7 +985,7 @@ inline auto MetadataGroup::PortableCompareEqual(MetadataGroup lhs,
   return llvm::equal(lhs.metadata_bytes, rhs.metadata_bytes);
 }
 
-inline auto MetadataGroup::SIMDLoad(const uint8_t* metadata, ssize_t index)
+inline auto MetadataGroup::SimdLoad(const uint8_t* metadata, ssize_t index)
     -> MetadataGroup {
   MetadataGroup g;
 #if CARBON_NEON_SIMD_SUPPORT
@@ -994,33 +994,33 @@ inline auto MetadataGroup::SIMDLoad(const uint8_t* metadata, ssize_t index)
   g.metadata_vec =
       _mm_load_si128(reinterpret_cast<const __m128i*>(metadata + index));
 #else
-  static_assert(!UseSIMD, "Unimplemented SIMD operation");
+  static_assert(!UseSimd, "Unimplemented SIMD operation");
   static_cast<void>(metadata);
   static_cast<void>(index);
 #endif
   return g;
 }
 
-inline auto MetadataGroup::SIMDStore(uint8_t* metadata, ssize_t index) const
+inline auto MetadataGroup::SimdStore(uint8_t* metadata, ssize_t index) const
     -> void {
 #if CARBON_NEON_SIMD_SUPPORT
   vst1_u8(metadata + index, metadata_vec);
 #elif CARBON_X86_SIMD_SUPPORT
   _mm_store_si128(reinterpret_cast<__m128i*>(metadata + index), metadata_vec);
 #else
-  static_assert(!UseSIMD, "Unimplemented SIMD operation");
+  static_assert(!UseSimd, "Unimplemented SIMD operation");
   static_cast<void>(metadata);
   static_cast<void>(index);
 #endif
 }
 
-inline auto MetadataGroup::SIMDClearDeleted() -> void {
+inline auto MetadataGroup::SimdClearDeleted() -> void {
 #if CARBON_NEON_SIMD_SUPPORT
   // There is no good Neon operation to implement this, so do it using integer
   // code. This is reasonably fast, but unfortunate because it forces the group
   // out of a SIMD register and into a general purpose register, which can have
   // high latency.
-  metadata_ints[0] &= (~LSBs | metadata_ints[0] >> 7);
+  metadata_ints[0] &= (~Lsbs | metadata_ints[0] >> 7);
 #elif CARBON_X86_SIMD_SUPPORT
   // For each byte, use `metadata_vec` if the byte's high bit is set (indicating
   // it is present), otherwise (it is empty or deleted) replace it with zero
@@ -1028,49 +1028,49 @@ inline auto MetadataGroup::SIMDClearDeleted() -> void {
   metadata_vec =
       _mm_blendv_epi8(_mm_setzero_si128(), metadata_vec, metadata_vec);
 #else
-  static_assert(!UseSIMD && !DebugSIMD, "Unimplemented SIMD operation");
+  static_assert(!UseSimd && !DebugSimd, "Unimplemented SIMD operation");
 #endif
 }
 
-inline auto MetadataGroup::SIMDMatch(uint8_t tag) const -> SIMDMatchRange {
-  SIMDMatchRange result;
+inline auto MetadataGroup::SimdMatch(uint8_t tag) const -> SimdMatchRange {
+  SimdMatchRange result;
 #if CARBON_NEON_SIMD_SUPPORT
   // Broadcast byte we want to match to every byte in the vector.
   auto match_byte_vec = vdup_n_u8(tag | PresentMask);
   // Result bytes have all bits set for the bytes that match, so we have to
-  // clear everything but MSBs next.
+  // clear everything but Msbs next.
   auto match_byte_cmp_vec = vceq_u8(metadata_vec, match_byte_vec);
   uint64_t match_bits = vreinterpret_u64_u8(match_byte_cmp_vec)[0];
-  // Note that the range will lazily mask to the MSBs as part of incrementing.
-  result = SIMDMatchRange(match_bits);
+  // Note that the range will lazily mask to the Msbs as part of incrementing.
+  result = SimdMatchRange(match_bits);
 #elif CARBON_X86_SIMD_SUPPORT
-  result = X86SIMDMatch(tag | PresentMask);
+  result = X86SimdMatch(tag | PresentMask);
 #else
-  static_assert(!UseSIMD && !DebugSIMD, "Unimplemented SIMD operation");
+  static_assert(!UseSimd && !DebugSimd, "Unimplemented SIMD operation");
   static_cast<void>(tag);
 #endif
   return result;
 }
 
-inline auto MetadataGroup::SIMDMatchPresent() const -> SIMDMatchPresentRange {
-  SIMDMatchPresentRange result;
+inline auto MetadataGroup::SimdMatchPresent() const -> SimdMatchPresentRange {
+  SimdMatchPresentRange result;
 #if CARBON_NEON_SIMD_SUPPORT
   // Just extract the metadata directly.
   uint64_t match_bits = vreinterpret_u64_u8(metadata_vec)[0];
   // Even though the Neon SIMD range will do its own masking, we have to mask
   // here so that `empty` is correct.
-  result = SIMDMatchPresentRange(match_bits & MSBs);
+  result = SimdMatchPresentRange(match_bits & Msbs);
 #elif CARBON_X86_SIMD_SUPPORT
   // We arranged the byte vector so that present bytes have the high bit set,
   // which this instruction extracts.
-  result = SIMDMatchPresentRange(_mm_movemask_epi8(metadata_vec));
+  result = SimdMatchPresentRange(_mm_movemask_epi8(metadata_vec));
 #else
-  static_assert(!UseSIMD && !DebugSIMD, "Unimplemented SIMD operation");
+  static_assert(!UseSimd && !DebugSimd, "Unimplemented SIMD operation");
 #endif
   return result;
 }
 
-inline auto MetadataGroup::SIMDMatchEmpty() const -> MatchIndex {
+inline auto MetadataGroup::SimdMatchEmpty() const -> MatchIndex {
   MatchIndex result;
 #if CARBON_NEON_SIMD_SUPPORT
   // Compare all bytes with zero, as that is the empty byte value. Result will
@@ -1079,23 +1079,23 @@ inline auto MetadataGroup::SIMDMatchEmpty() const -> MatchIndex {
   auto cmp_vec = vceqz_u8(metadata_vec);
   uint64_t metadata_bits = vreinterpret_u64_u8(cmp_vec)[0];
   // The matched range is likely to be tested for zero by the caller, and that
-  // test can often be folded into masking the bits with `MSBs` when we do that
+  // test can often be folded into masking the bits with `Msbs` when we do that
   // mask in the scalar domain rather than the SIMD domain. So we do the mask
   // here rather than above prior to extracting the match bits.
-  result = MatchIndex(metadata_bits & MSBs);
+  result = MatchIndex(metadata_bits & Msbs);
 #elif CARBON_X86_SIMD_SUPPORT
   // Even though we only need the first match rather than all matches, we don't
   // have a more efficient way to compute this on x86 and so we reuse the
   // general match infrastructure that computes all matches in a bit-encoding.
   // We then convert it into a `MatchIndex` that just finds the first one.
-  result = static_cast<MatchIndex>(X86SIMDMatch(Empty));
+  result = static_cast<MatchIndex>(X86SimdMatch(Empty));
 #else
-  static_assert(!UseSIMD && !DebugSIMD, "Unimplemented SIMD operation");
+  static_assert(!UseSimd && !DebugSimd, "Unimplemented SIMD operation");
 #endif
   return result;
 }
 
-inline auto MetadataGroup::SIMDMatchDeleted() const -> MatchIndex {
+inline auto MetadataGroup::SimdMatchDeleted() const -> MatchIndex {
   MatchIndex result;
 #if CARBON_NEON_SIMD_SUPPORT
   // Broadcast the `Deleted` byte across the vector and compare the bytes of
@@ -1104,23 +1104,23 @@ inline auto MetadataGroup::SIMDMatchDeleted() const -> MatchIndex {
   auto cmp_vec = vceq_u8(metadata_vec, vdup_n_u8(Deleted));
   uint64_t match_bits = vreinterpret_u64_u8(cmp_vec)[0];
   // The matched range is likely to be tested for zero by the caller, and that
-  // test can often be folded into masking the bits with `MSBs` when we do that
+  // test can often be folded into masking the bits with `Msbs` when we do that
   // mask in the scalar domain rather than the SIMD domain. So we do the mask
   // here rather than above prior to extracting the match bits.
-  result = MatchIndex(match_bits & MSBs);
+  result = MatchIndex(match_bits & Msbs);
 #elif CARBON_X86_SIMD_SUPPORT
   // Even though we only need the first match rather than all matches, we don't
   // have a more efficient way to compute this on x86 and so we reuse the
   // general match infrastructure that computes all matches in a bit-encoding.
   // We then convert it into a `MatchIndex` that just finds the first one.
-  result = static_cast<MatchIndex>(X86SIMDMatch(Deleted));
+  result = static_cast<MatchIndex>(X86SimdMatch(Deleted));
 #else
-  static_assert(!UseSIMD && !DebugSIMD, "Unimplemented SIMD operation");
+  static_assert(!UseSimd && !DebugSimd, "Unimplemented SIMD operation");
 #endif
   return result;
 }
 
-inline auto MetadataGroup::SIMDCompareEqual(MetadataGroup lhs,
+inline auto MetadataGroup::SimdCompareEqual(MetadataGroup lhs,
                                             MetadataGroup rhs) -> bool {
 #if CARBON_NEON_SIMD_SUPPORT
   return vreinterpret_u64_u8(vceq_u8(lhs.metadata_vec, rhs.metadata_vec))[0] ==
@@ -1141,7 +1141,7 @@ inline auto MetadataGroup::SIMDCompareEqual(MetadataGroup lhs,
                                           rhs.metadata_vec)) == 0x0000'ffffU;
 #endif
 #else
-  static_assert(!UseSIMD && !DebugSIMD, "Unimplemented SIMD operation");
+  static_assert(!UseSimd && !DebugSimd, "Unimplemented SIMD operation");
   static_cast<void>(lhs);
   static_cast<void>(rhs);
   return false;
@@ -1149,7 +1149,7 @@ inline auto MetadataGroup::SIMDCompareEqual(MetadataGroup lhs,
 }
 
 #if CARBON_X86_SIMD_SUPPORT
-inline auto MetadataGroup::X86SIMDMatch(uint8_t match_byte) const
+inline auto MetadataGroup::X86SimdMatch(uint8_t match_byte) const
     -> MatchRange {
   // Broadcast the byte we're matching against to all bytes in a vector, and
   // compare those bytes with the metadata vector bytes.

+ 16 - 16
common/raw_hashtable_metadata_group_benchmark.cpp

@@ -41,29 +41,29 @@ class BenchmarkPortableMetadataGroup : public MetadataGroup {
 };
 
 // Override the core API with explicit use of the SIMD API.
-class BenchmarkSIMDMetadataGroup : public MetadataGroup {
+class BenchmarkSimdMetadataGroup : public MetadataGroup {
  public:
-  explicit BenchmarkSIMDMetadataGroup(MetadataGroup g) : MetadataGroup(g) {}
+  explicit BenchmarkSimdMetadataGroup(MetadataGroup g) : MetadataGroup(g) {}
 
   static auto Load(uint8_t* metadata, ssize_t index)
-      -> BenchmarkSIMDMetadataGroup {
-    return BenchmarkSIMDMetadataGroup(SIMDLoad(metadata, index));
+      -> BenchmarkSimdMetadataGroup {
+    return BenchmarkSimdMetadataGroup(SimdLoad(metadata, index));
   }
   auto Store(uint8_t* metadata, ssize_t index) const -> void {
-    SIMDStore(metadata, index);
+    SimdStore(metadata, index);
   }
 
-  auto ClearDeleted() -> void { SIMDClearDeleted(); }
+  auto ClearDeleted() -> void { SimdClearDeleted(); }
 
-  auto Match(uint8_t present_byte) const -> SIMDMatchRange {
-    return SIMDMatch(present_byte);
+  auto Match(uint8_t present_byte) const -> SimdMatchRange {
+    return SimdMatch(present_byte);
   }
-  auto MatchPresent() const -> SIMDMatchPresentRange {
-    return SIMDMatchPresent();
+  auto MatchPresent() const -> SimdMatchPresentRange {
+    return SimdMatchPresent();
   }
 
-  auto MatchEmpty() const -> MatchIndex { return SIMDMatchEmpty(); }
-  auto MatchDeleted() const -> MatchIndex { return SIMDMatchDeleted(); }
+  auto MatchEmpty() const -> MatchIndex { return SimdMatchEmpty(); }
+  auto MatchDeleted() const -> MatchIndex { return SimdMatchDeleted(); }
 };
 #endif
 
@@ -75,7 +75,7 @@ constexpr ssize_t BenchSize = 256;
 
 #if CARBON_NEON_SIMD_SUPPORT || CARBON_X86_SIMD_SUPPORT
 using PortableGroup = BenchmarkPortableMetadataGroup;
-using SIMDGroup = BenchmarkSIMDMetadataGroup;
+using SimdGroup = BenchmarkSimdMetadataGroup;
 #endif
 
 struct BenchMetadata {
@@ -251,9 +251,9 @@ BENCHMARK(BM_LoadMatch<BenchKind::Deleted>);
 BENCHMARK(BM_LoadMatch<BenchKind::Random, PortableGroup>);
 BENCHMARK(BM_LoadMatch<BenchKind::Empty, PortableGroup>);
 BENCHMARK(BM_LoadMatch<BenchKind::Deleted, PortableGroup>);
-BENCHMARK(BM_LoadMatch<BenchKind::Random, SIMDGroup>);
-BENCHMARK(BM_LoadMatch<BenchKind::Empty, SIMDGroup>);
-BENCHMARK(BM_LoadMatch<BenchKind::Deleted, SIMDGroup>);
+BENCHMARK(BM_LoadMatch<BenchKind::Random, SimdGroup>);
+BENCHMARK(BM_LoadMatch<BenchKind::Empty, SimdGroup>);
+BENCHMARK(BM_LoadMatch<BenchKind::Deleted, SimdGroup>);
 #endif
 
 // Benchmark that measures the speed of a match that is only found after at

+ 1 - 1
common/set_test.cpp

@@ -95,7 +95,7 @@ TYPED_TEST(SetTest, Basic) {
   ExpectSetElementsAre(s, MakeElements(llvm::seq(1, 512)));
 }
 
-TYPED_TEST(SetTest, FactoryAPI) {
+TYPED_TEST(SetTest, FactoryApi) {
   using SetT = TypeParam;
   SetT s;
   EXPECT_TRUE(s.Insert(1, [](int k, void* key_storage) {

+ 6 - 0
docs/project/cpp_style_guide.md

@@ -70,6 +70,12 @@ serves to simplify it.
 -   All other names use `snake_case`, including function parameters, and
     non-constant local and member variables.
     -   Private member variables should have a trailing `_`.
+-   For acronyms and initialisms, we generally follow the
+    [capitalization style](https://google.github.io/styleguide/cppguide.html#General_Naming_Rules)
+    (`Api` instead of `API`).
+    -   The exceptions are `LLVM` and `IR`, which we capitalize.
+-   For abbreviations, there is a list of
+    [common toolchain abbreviations](/toolchain/docs/idioms.md#abbreviations-used-in-the-code-aka-carbon-abbreviation-decoder-ring).
 
 ### File names
 

+ 1 - 1
testing/base/source_gen.cpp

@@ -309,7 +309,7 @@ static auto EstimateAvgClassDefLines(SourceGen::ClassParams params) -> double {
   return avg;
 }
 
-auto SourceGen::GenAPIFileDenseDecls(int target_lines,
+auto SourceGen::GenApiFileDenseDecls(int target_lines,
                                      const DenseDeclParams& params)
     -> std::string {
   RawStringOstream source;

+ 1 - 1
testing/base/source_gen.h

@@ -188,7 +188,7 @@ class SourceGen {
   // `target_lines`. Long term, the goal is to get as close as we can to any
   // automatically formatted code while still keeping the stability of
   // benchmarking.
-  auto GenAPIFileDenseDecls(int target_lines, const DenseDeclParams& params)
+  auto GenApiFileDenseDecls(int target_lines, const DenseDeclParams& params)
       -> std::string;
 
   // Get some number of randomly shuffled identifiers.

+ 1 - 1
testing/base/source_gen_main.cpp

@@ -95,7 +95,7 @@ auto Run(llvm::ArrayRef<llvm::StringRef> args) -> bool {
   }
 
   SourceGen gen(language);
-  *output << gen.GenAPIFileDenseDecls(lines, SourceGen::DenseDeclParams{});
+  *output << gen.GenApiFileDenseDecls(lines, SourceGen::DenseDeclParams{});
   output->flush();
   return true;
 }

+ 4 - 4
testing/base/source_gen_test.cpp

@@ -157,11 +157,11 @@ auto TestCompile(llvm::StringRef source) -> bool {
   return driver.RunCommand({"compile", "--phase=check", "test.carbon"}).success;
 }
 
-TEST(SourceGenTest, GenAPIFileDenseDeclsTest) {
+TEST(SourceGenTest, GenApiFileDenseDeclsTest) {
   SourceGen gen;
 
   std::string source =
-      gen.GenAPIFileDenseDecls(1000, SourceGen::DenseDeclParams{});
+      gen.GenApiFileDenseDecls(1000, SourceGen::DenseDeclParams{});
   // Should be within 1% of the requested line count.
   EXPECT_THAT(source, Contains('\n').Times(AllOf(Ge(950), Le(1050))));
 
@@ -169,13 +169,13 @@ TEST(SourceGenTest, GenAPIFileDenseDeclsTest) {
   EXPECT_TRUE(TestCompile(source));
 }
 
-TEST(SourceGenTest, GenAPIFileDenseDeclsCppTest) {
+TEST(SourceGenTest, GenApiFileDenseDeclsCppTest) {
   SourceGen gen(SourceGen::Language::Cpp);
 
   // Generate a 1000-line file which is enough to have a reasonably accurate
   // line count estimate and have a few classes.
   std::string source =
-      gen.GenAPIFileDenseDecls(1000, SourceGen::DenseDeclParams{});
+      gen.GenApiFileDenseDecls(1000, SourceGen::DenseDeclParams{});
   // Should be within 10% of the requested line count.
   EXPECT_THAT(source, Contains('\n').Times(AllOf(Ge(900), Le(1100))));
 

+ 2 - 2
toolchain/diagnostics/coverage_test.cpp

@@ -37,8 +37,8 @@ constexpr DiagnosticKind UntestedDiagnosticKinds[] = {
     // These aren't feasible to test with a normal testcase, but are tested in
     // lex/tokenized_buffer_test.cpp.
     DiagnosticKind::TooManyTokens,
-    DiagnosticKind::UnsupportedCRLineEnding,
-    DiagnosticKind::UnsupportedLFCRLineEnding,
+    DiagnosticKind::UnsupportedCrLineEnding,
+    DiagnosticKind::UnsupportedLfCrLineEnding,
 
     // This is a little long but is tested in lex/numeric_literal_test.cpp.
     DiagnosticKind::TooManyDigits,

+ 2 - 2
toolchain/diagnostics/diagnostic_kind.def

@@ -64,8 +64,8 @@ CARBON_DIAGNOSTIC_KIND(UnicodeEscapeSurrogate)
 CARBON_DIAGNOSTIC_KIND(UnicodeEscapeTooLarge)
 CARBON_DIAGNOSTIC_KIND(UnknownBaseSpecifier)
 CARBON_DIAGNOSTIC_KIND(UnknownEscapeSequence)
-CARBON_DIAGNOSTIC_KIND(UnsupportedCRLineEnding)
-CARBON_DIAGNOSTIC_KIND(UnsupportedLFCRLineEnding)
+CARBON_DIAGNOSTIC_KIND(UnsupportedCrLineEnding)
+CARBON_DIAGNOSTIC_KIND(UnsupportedLfCrLineEnding)
 CARBON_DIAGNOSTIC_KIND(UnmatchedOpening)
 CARBON_DIAGNOSTIC_KIND(UnmatchedClosing)
 CARBON_DIAGNOSTIC_KIND(UnrecognizedCharacters)

+ 1 - 1
toolchain/docs/adding_features.md

@@ -332,7 +332,7 @@ Adding an instruction will generally also require a handler in the Lower step.
 Most new instructions will automatically be formatted reasonably by the SemIR
 formatter. If not, then add a `FormatInst` overload to
 [`sem_ir/formatter.cpp`](/toolchain/sem_ir/formatter.cpp). If only the arguments
-need custom formatting, then a `FormatInstRHS` overload can be implemented
+need custom formatting, then a `FormatInstRhs` overload can be implemented
 instead.
 
 If the resulting SemIR needs a new built-in, add it to

+ 5 - 5
toolchain/driver/compile_benchmark.cpp

@@ -93,7 +93,7 @@ static auto ComputeFileCount(int target_lines) -> int {
 }
 
 template <Phase P>
-static auto BM_CompileAPIFileDenseDecls(benchmark::State& state) -> void {
+static auto BM_CompileApiFileDenseDecls(benchmark::State& state) -> void {
   CompileBenchmark bench;
   int target_lines = state.range(0);
   int num_files = ComputeFileCount(target_lines);
@@ -106,7 +106,7 @@ static auto BM_CompileAPIFileDenseDecls(benchmark::State& state) -> void {
   double total_tokens = 0.0;
   double total_lines = 0.0;
   for (std::string& source : sources) {
-    source = bench.gen().GenAPIFileDenseDecls(target_lines,
+    source = bench.gen().GenApiFileDenseDecls(target_lines,
                                               SourceGen::DenseDeclParams{});
     total_bytes += source.size();
     total_tokens += compile_helper.GetTokenizedBuffer(source).size();
@@ -151,13 +151,13 @@ static auto BM_CompileAPIFileDenseDecls(benchmark::State& state) -> void {
 
 // Benchmark from 256-line test cases through 256k line test cases, and for each
 // phase of compilation.
-BENCHMARK(BM_CompileAPIFileDenseDecls<Phase::Lex>)
+BENCHMARK(BM_CompileApiFileDenseDecls<Phase::Lex>)
     ->RangeMultiplier(4)
     ->Range(256, static_cast<int64_t>(256 * 1024));
-BENCHMARK(BM_CompileAPIFileDenseDecls<Phase::Parse>)
+BENCHMARK(BM_CompileApiFileDenseDecls<Phase::Parse>)
     ->RangeMultiplier(4)
     ->Range(256, static_cast<int64_t>(256 * 1024));
-BENCHMARK(BM_CompileAPIFileDenseDecls<Phase::Check>)
+BENCHMARK(BM_CompileApiFileDenseDecls<Phase::Check>)
     ->RangeMultiplier(4)
     ->Range(256, static_cast<int64_t>(256 * 1024));
 

+ 10 - 10
toolchain/lex/lex.cpp

@@ -230,23 +230,23 @@ class [[clang::internal_linkage]] Lexer {
 #if CARBON_USE_SIMD
 namespace {
 #if __ARM_NEON
-using SIMDMaskT = uint8x16_t;
+using SimdMaskT = uint8x16_t;
 #elif __x86_64__
-using SIMDMaskT = __m128i;
+using SimdMaskT = __m128i;
 #else
 #error "Unsupported SIMD architecture!"
 #endif
-using SIMDMaskArrayT = std::array<SIMDMaskT, sizeof(SIMDMaskT) + 1>;
+using SimdMaskArrayT = std::array<SimdMaskT, sizeof(SimdMaskT) + 1>;
 }  // namespace
 // A table of masks to include 0-16 bytes of an SSE register.
-static constexpr SIMDMaskArrayT PrefixMasks = []() constexpr {
-  SIMDMaskArrayT masks = {};
+static constexpr SimdMaskArrayT PrefixMasks = []() constexpr {
+  SimdMaskArrayT masks = {};
   for (int i = 1; i < static_cast<int>(masks.size()); ++i) {
     masks[i] =
         // The SIMD types and constexpr require a C-style cast.
         // NOLINTNEXTLINE(google-readability-casting)
-        (SIMDMaskT)(std::numeric_limits<unsigned __int128>::max() >>
-                    ((sizeof(SIMDMaskT) - i) * 8));
+        (SimdMaskT)(std::numeric_limits<unsigned __int128>::max() >>
+                    ((sizeof(SimdMaskT) - i) * 8));
   }
   return masks;
 }();
@@ -829,17 +829,17 @@ auto Lexer::LexCR(llvm::StringRef source_text, ssize_t& position) -> void {
     return;
   }
 
-  CARBON_DIAGNOSTIC(UnsupportedLFCRLineEnding, Error,
+  CARBON_DIAGNOSTIC(UnsupportedLfCrLineEnding, Error,
                     "the LF+CR line ending is not supported, only LF and CR+LF "
                     "are supported");
-  CARBON_DIAGNOSTIC(UnsupportedCRLineEnding, Error,
+  CARBON_DIAGNOSTIC(UnsupportedCrLineEnding, Error,
                     "a raw CR line ending is not supported, only LF and CR+LF "
                     "are supported");
   bool is_lfcr = position > 0 && source_text[position - 1] == '\n';
   // TODO: This diagnostic has an unfortunate snippet -- we should tweak the
   // snippet rendering to gracefully handle CRs.
   emitter_.Emit(source_text.begin() + position,
-                is_lfcr ? UnsupportedLFCRLineEnding : UnsupportedCRLineEnding);
+                is_lfcr ? UnsupportedLfCrLineEnding : UnsupportedCrLineEnding);
 
   // Recover by treating the CR as a horizontal whitespace. This should make our
   // whitespace rules largely work and parse cleanly without disrupting the line

+ 5 - 5
toolchain/lex/tokenized_buffer_test.cpp

@@ -86,7 +86,7 @@ TEST_F(LexerTest, TracksLinesAndColumns) {
       }));
 }
 
-TEST_F(LexerTest, TracksLinesAndColumnsCRLF) {
+TEST_F(LexerTest, TracksLinesAndColumnsCrLf) {
   auto& buffer = compile_helper_.GetTokenizedBuffer(
       "\r\n  ;;\r\n   ;;;\r\n   x\"foo\" '''baz\r\n  a\r\n ''' y");
   EXPECT_FALSE(buffer.has_errors());
@@ -146,7 +146,7 @@ TEST_F(LexerTest, InvalidCR) {
       }));
 }
 
-TEST_F(LexerTest, InvalidLFCR) {
+TEST_F(LexerTest, InvalidLfCr) {
   auto& buffer = compile_helper_.GetTokenizedBuffer("\n ;;\n\r ;\n   x");
   EXPECT_TRUE(buffer.has_errors());
   EXPECT_THAT(
@@ -1123,15 +1123,15 @@ TEST_F(LexerTest, DiagnosticInvalidDigit) {
 TEST_F(LexerTest, DiagnosticCR) {
   Testing::MockDiagnosticConsumer consumer;
   EXPECT_CALL(consumer, HandleDiagnostic(IsSingleDiagnostic(
-                            DiagnosticKind::UnsupportedCRLineEnding,
+                            DiagnosticKind::UnsupportedCrLineEnding,
                             DiagnosticLevel::Error, 1, 1, _)));
   compile_helper_.GetTokenizedBuffer("\r", &consumer);
 }
 
-TEST_F(LexerTest, DiagnosticLFCR) {
+TEST_F(LexerTest, DiagnosticLfCr) {
   Testing::MockDiagnosticConsumer consumer;
   EXPECT_CALL(consumer, HandleDiagnostic(IsSingleDiagnostic(
-                            DiagnosticKind::UnsupportedLFCRLineEnding,
+                            DiagnosticKind::UnsupportedLfCrLineEnding,
                             DiagnosticLevel::Error, 2, 1, _)));
   compile_helper_.GetTokenizedBuffer("\n\r", &consumer);
 }

+ 2 - 2
toolchain/parse/tree_test.cpp

@@ -85,7 +85,7 @@ TEST_F(TreeTest, AsAndTryAs) {
   EXPECT_TRUE(*any_decl_id == any_decl_id2);
 }
 
-TEST_F(TreeTest, PrintPostorderAsYAML) {
+TEST_F(TreeTest, PrintPostorderAsYaml) {
   auto [tokens, tree_and_subtrees] =
       compile_helper_.GetTokenizedBufferWithTreeAndSubtrees("fn F();");
   EXPECT_FALSE(tree_and_subtrees.tree().has_errors());
@@ -114,7 +114,7 @@ TEST_F(TreeTest, PrintPostorderAsYAML) {
               IsYaml(ElementsAre(root)));
 }
 
-TEST_F(TreeTest, PrintPreorderAsYAML) {
+TEST_F(TreeTest, PrintPreorderAsYaml) {
   auto [tokens, tree_and_subtrees] =
       compile_helper_.GetTokenizedBufferWithTreeAndSubtrees("fn F();");
   EXPECT_FALSE(tree_and_subtrees.tree().has_errors());

+ 39 - 39
toolchain/sem_ir/formatter.cpp

@@ -754,13 +754,13 @@ class FormatterImpl {
   template <typename InstT>
   auto FormatInst(InstId inst_id, InstT inst) -> void {
     Indent();
-    FormatInstLHS(inst_id, inst);
+    FormatInstLhs(inst_id, inst);
     out_ << InstT::Kind.ir_name();
     pending_constant_value_ = sem_ir_->constant_values().Get(inst_id);
     pending_constant_value_is_self_ =
         sem_ir_->constant_values().GetInstIdIfValid(pending_constant_value_) ==
         inst_id;
-    FormatInstRHS(inst);
+    FormatInstRhs(inst);
     FormatPendingConstantValue(AddSpace::Before);
     out_ << "\n";
   }
@@ -768,9 +768,9 @@ class FormatterImpl {
   // Don't print a constant for ImportRefUnloaded.
   auto FormatInst(InstId inst_id, ImportRefUnloaded inst) -> void {
     Indent();
-    FormatInstLHS(inst_id, inst);
+    FormatInstLhs(inst_id, inst);
     out_ << ImportRefUnloaded::Kind.ir_name();
-    FormatInstRHS(inst);
+    FormatInstRhs(inst);
     out_ << "\n";
   }
 
@@ -835,7 +835,7 @@ class FormatterImpl {
     pending_constant_value_ = ConstantId::NotConstant;
   }
 
-  auto FormatInstLHS(InstId inst_id, Inst inst) -> void {
+  auto FormatInstLhs(InstId inst_id, Inst inst) -> void {
     switch (inst.kind().value_kind()) {
       case InstValueKind::Typed:
         FormatName(inst_id);
@@ -863,26 +863,26 @@ class FormatterImpl {
   }
 
   // Format ImportCppDecl name.
-  auto FormatInstLHS(InstId inst_id, ImportCppDecl /*inst*/) -> void {
+  auto FormatInstLhs(InstId inst_id, ImportCppDecl /*inst*/) -> void {
     FormatName(inst_id);
     out_ << " = ";
   }
 
   // Format ImportDecl with its name.
-  auto FormatInstLHS(InstId inst_id, ImportDecl /*inst*/) -> void {
+  auto FormatInstLhs(InstId inst_id, ImportDecl /*inst*/) -> void {
     FormatName(inst_id);
     out_ << " = ";
   }
 
   // Print ImportRefUnloaded with type-like semantics even though it lacks a
   // type_id.
-  auto FormatInstLHS(InstId inst_id, ImportRefUnloaded /*inst*/) -> void {
+  auto FormatInstLhs(InstId inst_id, ImportRefUnloaded /*inst*/) -> void {
     FormatName(inst_id);
     out_ << " = ";
   }
 
   template <typename InstT>
-  auto FormatInstRHS(InstT inst) -> void {
+  auto FormatInstRhs(InstT inst) -> void {
     // By default, an instruction has a comma-separated argument list.
     using Info = Internal::InstLikeTypeInfo<InstT>;
     if constexpr (Info::NumArgs == 2) {
@@ -904,7 +904,7 @@ class FormatterImpl {
     }
   }
 
-  auto FormatInstRHS(BindSymbolicName inst) -> void {
+  auto FormatInstRhs(BindSymbolicName inst) -> void {
     // A BindSymbolicName with no value is a purely symbolic binding, such as
     // the `Self` in an interface. Don't print out `none` for the value.
     if (inst.value_id.has_value()) {
@@ -914,12 +914,12 @@ class FormatterImpl {
     }
   }
 
-  auto FormatInstRHS(BlockArg inst) -> void {
+  auto FormatInstRhs(BlockArg inst) -> void {
     out_ << " ";
     FormatLabel(inst.block_id);
   }
 
-  auto FormatInstRHS(Namespace inst) -> void {
+  auto FormatInstRhs(Namespace inst) -> void {
     if (inst.import_id.has_value()) {
       FormatArgs(inst.import_id, inst.name_scope_id);
     } else {
@@ -961,7 +961,7 @@ class FormatterImpl {
     in_terminator_sequence_ = false;
   }
 
-  auto FormatInstRHS(Call inst) -> void {
+  auto FormatInstRhs(Call inst) -> void {
     out_ << " ";
     FormatArg(inst.callee_id);
 
@@ -997,56 +997,56 @@ class FormatterImpl {
     }
   }
 
-  auto FormatInstRHS(ArrayInit inst) -> void {
+  auto FormatInstRhs(ArrayInit inst) -> void {
     FormatArgs(inst.inits_id);
     FormatReturnSlotArg(inst.dest_id);
   }
 
-  auto FormatInstRHS(InitializeFrom inst) -> void {
+  auto FormatInstRhs(InitializeFrom inst) -> void {
     FormatArgs(inst.src_id);
     FormatReturnSlotArg(inst.dest_id);
   }
 
-  auto FormatInstRHS(ValueParam inst) -> void {
+  auto FormatInstRhs(ValueParam inst) -> void {
     FormatArgs(inst.runtime_index);
     // Omit pretty_name because it's an implementation detail of
     // pretty-printing.
   }
 
-  auto FormatInstRHS(OutParam inst) -> void {
+  auto FormatInstRhs(OutParam inst) -> void {
     FormatArgs(inst.runtime_index);
     // Omit pretty_name because it's an implementation detail of
     // pretty-printing.
   }
 
-  auto FormatInstRHS(ReturnExpr ret) -> void {
+  auto FormatInstRhs(ReturnExpr ret) -> void {
     FormatArgs(ret.expr_id);
     if (ret.dest_id.has_value()) {
       FormatReturnSlotArg(ret.dest_id);
     }
   }
 
-  auto FormatInstRHS(ReturnSlot inst) -> void {
+  auto FormatInstRhs(ReturnSlot inst) -> void {
     // Omit inst.type_inst_id because it's not semantically significant.
     FormatArgs(inst.storage_id);
   }
 
-  auto FormatInstRHS(ReturnSlotPattern /*inst*/) -> void {
+  auto FormatInstRhs(ReturnSlotPattern /*inst*/) -> void {
     // No-op because type_id is the only semantically significant field,
     // and it's handled separately.
   }
 
-  auto FormatInstRHS(StructInit init) -> void {
+  auto FormatInstRhs(StructInit init) -> void {
     FormatArgs(init.elements_id);
     FormatReturnSlotArg(init.dest_id);
   }
 
-  auto FormatInstRHS(TupleInit init) -> void {
+  auto FormatInstRhs(TupleInit init) -> void {
     FormatArgs(init.elements_id);
     FormatReturnSlotArg(init.dest_id);
   }
 
-  auto FormatInstRHS(FunctionDecl inst) -> void {
+  auto FormatInstRhs(FunctionDecl inst) -> void {
     FormatArgs(inst.function_id);
     llvm::SaveAndRestore class_scope(
         scope_, inst_namer_->GetScopeFor(inst.function_id));
@@ -1055,7 +1055,7 @@ class FormatterImpl {
     FormatTrailingBlock(inst.decl_block_id);
   }
 
-  auto FormatInstRHS(ClassDecl inst) -> void {
+  auto FormatInstRhs(ClassDecl inst) -> void {
     FormatArgs(inst.class_id);
     llvm::SaveAndRestore class_scope(scope_,
                                      inst_namer_->GetScopeFor(inst.class_id));
@@ -1063,7 +1063,7 @@ class FormatterImpl {
     FormatTrailingBlock(inst.decl_block_id);
   }
 
-  auto FormatInstRHS(ImplDecl inst) -> void {
+  auto FormatInstRhs(ImplDecl inst) -> void {
     FormatArgs(inst.impl_id);
     llvm::SaveAndRestore class_scope(scope_,
                                      inst_namer_->GetScopeFor(inst.impl_id));
@@ -1071,7 +1071,7 @@ class FormatterImpl {
     FormatTrailingBlock(inst.decl_block_id);
   }
 
-  auto FormatInstRHS(InterfaceDecl inst) -> void {
+  auto FormatInstRhs(InterfaceDecl inst) -> void {
     FormatArgs(inst.interface_id);
     llvm::SaveAndRestore class_scope(
         scope_, inst_namer_->GetScopeFor(inst.interface_id));
@@ -1080,28 +1080,28 @@ class FormatterImpl {
     FormatTrailingBlock(inst.decl_block_id);
   }
 
-  auto FormatInstRHS(AssociatedConstantDecl inst) -> void {
+  auto FormatInstRhs(AssociatedConstantDecl inst) -> void {
     FormatArgs(inst.assoc_const_id);
     llvm::SaveAndRestore assoc_const_scope(
         scope_, inst_namer_->GetScopeFor(inst.assoc_const_id));
     FormatTrailingBlock(inst.decl_block_id);
   }
 
-  auto FormatInstRHS(IntValue inst) -> void {
+  auto FormatInstRhs(IntValue inst) -> void {
     out_ << " ";
     sem_ir_->ints()
         .Get(inst.int_id)
         .print(out_, sem_ir_->types().IsSignedInt(inst.type_id));
   }
 
-  auto FormatInstRHS(FloatLiteral inst) -> void {
+  auto FormatInstRhs(FloatLiteral inst) -> void {
     llvm::SmallVector<char, 16> buffer;
     sem_ir_->floats().Get(inst.float_id).toString(buffer);
     out_ << " " << buffer;
   }
 
   // Format the metadata in File for `import Cpp`.
-  auto FormatInstRHS(ImportCppDecl /*inst*/) -> void {
+  auto FormatInstRhs(ImportCppDecl /*inst*/) -> void {
     out_ << " ";
     OpenBrace();
     for (ImportCpp import_cpp : sem_ir_->import_cpps().array_ref()) {
@@ -1114,7 +1114,7 @@ class FormatterImpl {
     CloseBrace();
   }
 
-  auto FormatImportRefRHS(ImportIRInstId import_ir_inst_id,
+  auto FormatImportRefRhs(ImportIRInstId import_ir_inst_id,
                           EntityNameId entity_name_id,
                           llvm::StringLiteral loaded_label) -> void {
     out_ << " ";
@@ -1148,29 +1148,29 @@ class FormatterImpl {
     out_ << ", " << loaded_label;
   }
 
-  auto FormatInstRHS(ImportRefLoaded inst) -> void {
-    FormatImportRefRHS(inst.import_ir_inst_id, inst.entity_name_id, "loaded");
+  auto FormatInstRhs(ImportRefLoaded inst) -> void {
+    FormatImportRefRhs(inst.import_ir_inst_id, inst.entity_name_id, "loaded");
   }
 
-  auto FormatInstRHS(ImportRefUnloaded inst) -> void {
-    FormatImportRefRHS(inst.import_ir_inst_id, inst.entity_name_id, "unloaded");
+  auto FormatInstRhs(ImportRefUnloaded inst) -> void {
+    FormatImportRefRhs(inst.import_ir_inst_id, inst.entity_name_id, "unloaded");
   }
 
-  auto FormatInstRHS(NameBindingDecl inst) -> void {
+  auto FormatInstRhs(NameBindingDecl inst) -> void {
     FormatTrailingBlock(inst.pattern_block_id);
   }
 
-  auto FormatInstRHS(SpliceBlock inst) -> void {
+  auto FormatInstRhs(SpliceBlock inst) -> void {
     FormatArgs(inst.result_id);
     FormatTrailingBlock(inst.block_id);
   }
 
-  auto FormatInstRHS(WhereExpr inst) -> void {
+  auto FormatInstRhs(WhereExpr inst) -> void {
     FormatArgs(inst.period_self_id);
     FormatTrailingBlock(inst.requirements_id);
   }
 
-  auto FormatInstRHS(StructType inst) -> void {
+  auto FormatInstRhs(StructType inst) -> void {
     out_ << " {";
     llvm::ListSeparator sep;
     for (auto field : sem_ir_->struct_type_fields().Get(inst.fields_id)) {

+ 1 - 1
toolchain/sem_ir/yaml_test.cpp

@@ -29,7 +29,7 @@ using ::testing::SizeIs;
 
 namespace Yaml = ::Carbon::Testing::Yaml;
 
-TEST(SemIRTest, YAML) {
+TEST(SemIRTest, Yaml) {
   llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> fs =
       new llvm::vfs::InMemoryFileSystem;
   CARBON_CHECK(fs->addFile(