tokenized_buffer.cpp

// Part of the Carbon Language project, under the Apache License v2.0 with LLVM
// Exceptions. See /LICENSE for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

#include "toolchain/lex/tokenized_buffer.h"

#include <algorithm>
#include <array>
#include <cmath>

#include "common/check.h"
#include "common/string_helpers.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "toolchain/base/value_store.h"
#include "toolchain/lex/character_set.h"
#include "toolchain/lex/helpers.h"
#include "toolchain/lex/numeric_literal.h"
#include "toolchain/lex/string_literal.h"

#if __ARM_NEON
#include <arm_neon.h>
#define CARBON_USE_SIMD 1
#elif __x86_64__
#include <x86intrin.h>
#define CARBON_USE_SIMD 1
#else
#define CARBON_USE_SIMD 0
#endif

namespace Carbon::Lex {

// TODO: Move Overload and VariantMatch somewhere more central.

// Form an overload set from a list of functions. For example:
//
// ```
// auto overloaded = Overload{[] (int) {}, [] (float) {}};
// ```
template <typename... Fs>
struct Overload : Fs... {
  using Fs::operator()...;
};
template <typename... Fs>
Overload(Fs...) -> Overload<Fs...>;

// Pattern-match against the type of the value stored in the variant `V`. Each
// element of `fs` should be a function that takes one or more of the variant
// values in `V`.
template <typename V, typename... Fs>
auto VariantMatch(V&& v, Fs&&... fs) -> decltype(auto) {
  return std::visit(Overload{std::forward<Fs>(fs)...}, std::forward<V>(v));
}
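// For example, a minimal sketch of how `VariantMatch` is meant to be used
// (the variant and lambdas here are hypothetical, not from this file):
//
// ```
// std::variant<int, float> v = 42;
// int result = VariantMatch(
//     std::move(v), [](int i) { return i; },
//     [](float f) { return static_cast<int>(f); });
// ```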
#if CARBON_USE_SIMD
namespace {
#if __ARM_NEON
using SIMDMaskT = uint8x16_t;
#elif __x86_64__
using SIMDMaskT = __m128i;
#else
#error "Unsupported SIMD architecture!"
#endif
using SIMDMaskArrayT = std::array<SIMDMaskT, sizeof(SIMDMaskT) + 1>;
}  // namespace

// A table of masks to include 0-16 bytes of an SSE register.
static constexpr SIMDMaskArrayT PrefixMasks = []() constexpr {
  SIMDMaskArrayT masks = {};
  for (int i = 1; i < static_cast<int>(masks.size()); ++i) {
    // The SIMD types and constexpr require a C-style cast.
    // NOLINTNEXTLINE(google-readability-casting)
    masks[i] = (SIMDMaskT)(std::numeric_limits<unsigned __int128>::max() >>
                           ((sizeof(SIMDMaskT) - i) * 8));
  }
  return masks;
}();
#endif  // CARBON_USE_SIMD
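// For example (a sketch of the table's contents): `PrefixMasks[3]` has `0xFF`
// in its first three byte lanes and zero elsewhere, so ANDing it with a loaded
// 16-byte register keeps only the first three bytes; `PrefixMasks[0]` zeroes
// the whole register and `PrefixMasks[16]` keeps all of it.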
// A table of booleans that we can use to classify bytes as being valid
// identifier start characters. This is used by raw identifier detection.
constexpr std::array<bool, 256> IsIdStartByteTable = [] {
  std::array<bool, 256> table = {};
  for (char c = 'A'; c <= 'Z'; ++c) {
    table[c] = true;
  }
  for (char c = 'a'; c <= 'z'; ++c) {
    table[c] = true;
  }
  table['_'] = true;
  return table;
}();

// A table of booleans that we can use to classify bytes as being valid
// identifier (or keyword) characters. This is used in the generic,
// non-vectorized fallback code to scan for the length of an identifier.
constexpr std::array<bool, 256> IsIdByteTable = [] {
  std::array<bool, 256> table = IsIdStartByteTable;
  for (char c = '0'; c <= '9'; ++c) {
    table[c] = true;
  }
  return table;
}();
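// For example (a sketch): `IsIdStartByteTable['_']` and `IsIdByteTable['7']`
// are both true, while `IsIdStartByteTable['7']` is false -- digits may
// continue an identifier but cannot start one.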
// Baseline scalar version, also available for scalar-fallback in SIMD code.
// Uses `ssize_t` for performance when indexing in the loop.
//
// TODO: This assumes all Unicode characters are non-identifiers.
static auto ScanForIdentifierPrefixScalar(llvm::StringRef text, ssize_t i)
    -> llvm::StringRef {
  const ssize_t size = text.size();
  while (i < size && IsIdByteTable[static_cast<unsigned char>(text[i])]) {
    ++i;
  }
  return text.substr(0, i);
}
#if CARBON_USE_SIMD && __x86_64__

// The SIMD code path uses a scheme derived from the techniques in Geoff
// Langdale and Daniel Lemire's work on parsing JSON[1]. Specifically, that
// paper outlines a technique of using two 4-bit indexed in-register look-up
// tables (LUTs) to classify bytes in a branchless SIMD code sequence.
//
// [1]: https://arxiv.org/pdf/1902.08318.pdf
//
// The goal is to get a bit mask classifying different sets of bytes. For each
// input byte, we first test for a high bit indicating a UTF-8 encoded Unicode
// character. Otherwise, we want the mask bits to be set with the following
// logic derived by inspecting the high nibble and low nibble of the input:
// bit0 = 1 for `_`: high `0x5` and low `0xF`
// bit1 = 1 for `0-9`: high `0x3` and low `0x0` - `0x9`
// bit2 = 1 for `A-O` and `a-o`: high `0x4` or `0x6` and low `0x1` - `0xF`
// bit3 = 1 for `P-Z` and `p-z`: high `0x5` or `0x7` and low `0x0` - `0xA`
// bit4 = unused
// bit5 = unused
// bit6 = unused
// bit7 = unused
//
// No bits set means a definitively non-ID ASCII character.
//
// Bits 4-7 remain unused in case we need to classify more characters.
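//
// As a worked example (a sketch, not code from this file): the byte `G` is
// 0x47, so its high nibble `0x4` looks up `0b0000'0100` in the high LUT and
// its low nibble `0x7` looks up `0b1000'1110` in the low LUT. ANDing the two
// gives `0b0000'0100` (bit2), marking `G` as an identifier byte. For `=`
// (0x3D), the high LUT gives `0b0000'0010` and the low LUT `0b1000'0100`,
// which AND to zero: not an identifier byte.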
namespace {
// Struct used to implement the nibble LUT for SIMD implementations.
//
// Forced to 16-byte alignment to ensure we can load it easily in SIMD code.
struct alignas(16) NibbleLUT {
  auto Load() const -> __m128i {
    return _mm_load_si128(reinterpret_cast<const __m128i*>(this));
  }

  uint8_t nibble_0;
  uint8_t nibble_1;
  uint8_t nibble_2;
  uint8_t nibble_3;
  uint8_t nibble_4;
  uint8_t nibble_5;
  uint8_t nibble_6;
  uint8_t nibble_7;
  uint8_t nibble_8;
  uint8_t nibble_9;
  uint8_t nibble_a;
  uint8_t nibble_b;
  uint8_t nibble_c;
  uint8_t nibble_d;
  uint8_t nibble_e;
  uint8_t nibble_f;
};
}  // namespace
constexpr NibbleLUT HighLUT = {
    .nibble_0 = 0b0000'0000,
    .nibble_1 = 0b0000'0000,
    .nibble_2 = 0b0000'0000,
    .nibble_3 = 0b0000'0010,
    .nibble_4 = 0b0000'0100,
    .nibble_5 = 0b0000'1001,
    .nibble_6 = 0b0000'0100,
    .nibble_7 = 0b0000'1000,
    .nibble_8 = 0b1000'0000,
    .nibble_9 = 0b1000'0000,
    .nibble_a = 0b1000'0000,
    .nibble_b = 0b1000'0000,
    .nibble_c = 0b1000'0000,
    .nibble_d = 0b1000'0000,
    .nibble_e = 0b1000'0000,
    .nibble_f = 0b1000'0000,
};
constexpr NibbleLUT LowLUT = {
    .nibble_0 = 0b1000'1010,
    .nibble_1 = 0b1000'1110,
    .nibble_2 = 0b1000'1110,
    .nibble_3 = 0b1000'1110,
    .nibble_4 = 0b1000'1110,
    .nibble_5 = 0b1000'1110,
    .nibble_6 = 0b1000'1110,
    .nibble_7 = 0b1000'1110,
    .nibble_8 = 0b1000'1110,
    .nibble_9 = 0b1000'1110,
    .nibble_a = 0b1000'1100,
    .nibble_b = 0b1000'0100,
    .nibble_c = 0b1000'0100,
    .nibble_d = 0b1000'0100,
    .nibble_e = 0b1000'0100,
    .nibble_f = 0b1000'0101,
};
static auto ScanForIdentifierPrefixX86(llvm::StringRef text)
    -> llvm::StringRef {
  const auto high_lut = HighLUT.Load();
  const auto low_lut = LowLUT.Load();

  // Use `ssize_t` for performance here as we index memory in a tight loop.
  ssize_t i = 0;
  const ssize_t size = text.size();
  while ((i + 16) <= size) {
    __m128i input =
        _mm_loadu_si128(reinterpret_cast<const __m128i*>(text.data() + i));

    // The high bits of each byte indicate a non-ASCII character encoded using
    // UTF-8. Test those and fall back to the scalar code if present. These
    // bytes will also cause spurious zeros in the LUT results, but we can
    // ignore that because we track them independently here.
#if __SSE4_1__
    if (!_mm_test_all_zeros(_mm_set1_epi8(0x80), input)) {
      break;
    }
#else
    if (_mm_movemask_epi8(input) != 0) {
      break;
    }
#endif

    // Do two LUT lookups and mask the results together to get the results for
    // both low and high nibbles. Note that we don't need to mask out the high
    // bit of input here because we track that above for UTF-8 handling.
    __m128i low_mask = _mm_shuffle_epi8(low_lut, input);
    // Note that the input needs to be masked to only include the high nibble
    // or we could end up with bit7 set forcing the result to a zero byte.
    __m128i input_high =
        _mm_and_si128(_mm_srli_epi32(input, 4), _mm_set1_epi8(0x0f));
    __m128i high_mask = _mm_shuffle_epi8(high_lut, input_high);
    __m128i mask = _mm_and_si128(low_mask, high_mask);

    // Now compare to find the completely zero bytes.
    __m128i id_byte_mask_vec = _mm_cmpeq_epi8(mask, _mm_setzero_si128());
    int tail_ascii_mask = _mm_movemask_epi8(id_byte_mask_vec);

    // Check if there are bits in the tail mask, which means zero bytes and the
    // end of the identifier. We could do this without materializing the scalar
    // mask on more recent CPUs, but we generally expect the median length we
    // encounter to be <16 characters and so we avoid the extra instruction in
    // that case and predict this branch to succeed so it is laid out in a
    // reasonable way.
    if (LLVM_LIKELY(tail_ascii_mask != 0)) {
      // Move past the definitively classified bytes that are part of the
      // identifier, and return the complete identifier text.
      i += __builtin_ctz(tail_ascii_mask);
      return text.substr(0, i);
    }
    i += 16;
  }

  return ScanForIdentifierPrefixScalar(text, i);
}
#endif  // CARBON_USE_SIMD && __x86_64__
// Scans the provided text and returns the prefix `StringRef` of contiguous
// identifier characters.
//
// This is a performance-sensitive function and where profitable uses
// vectorized code sequences to optimize its scanning. When modifying, the
// identifier lexing benchmarks should be checked for regressions.
//
// Identifier characters here are currently the ASCII characters
// `[0-9A-Za-z_]`.
//
// TODO: Currently, this code does not implement Carbon's design for Unicode
// characters in identifiers. It does work on UTF-8 code unit sequences, but
// currently considers non-ASCII characters to be non-identifier characters.
// Some work has been done to ensure the hot loop, while optimized, retains
// enough information to add Unicode handling without completely destroying the
// relevant optimizations.
static auto ScanForIdentifierPrefix(llvm::StringRef text) -> llvm::StringRef {
  // Dispatch to an architecture-optimized routine.
#if CARBON_USE_SIMD && __x86_64__
  return ScanForIdentifierPrefixX86(text);
#elif CARBON_USE_SIMD && __ARM_NEON
  // Somewhat surprisingly, there is basically nothing worth doing in SIMD on
  // Arm to optimize this scan. The Neon SIMD operations end up requiring you
  // to move from the SIMD unit to the scalar unit in the critical path of
  // finding the offset of the end of an identifier. Current ARM cores make the
  // code sequences here (quite) unpleasant. For example, on Apple M1 and
  // similar cores, the latency is as much as 10 cycles just to extract from
  // the vector. SIMD might be more interesting on Neoverse cores, but it'd be
  // nice to avoid core-specific tunings at this point.
  //
  // If this proves problematic and critical to optimize, the current leading
  // theory is to have the newline searching code also create a bitmask for the
  // entire source file of identifier and non-identifier bytes, and then use
  // the bit-counting instructions here to do a fast scan of that bitmask.
  // However, crossing that bridge will add substantial complexity to the
  // newline scanner, and so currently we just use a boring scalar loop that
  // pipelines well.
#endif
  return ScanForIdentifierPrefixScalar(text, 0);
}
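// For example (a sketch): given the text `foo123 = bar;`, this returns the
// prefix `foo123`; given text that starts with a non-identifier byte such as
// `=`, it returns an empty `StringRef`.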
// Implementation of the lexer logic itself.
//
// The design is that lexing can loop over the source buffer, consuming it into
// tokens by calling into this API. This class handles the state and breaks
// down the different lexing steps that may be used. It directly updates the
// provided tokenized buffer with the lexed tokens.
class [[clang::internal_linkage]] TokenizedBuffer::Lexer {
 public:
  // Symbolic result of a lexing action. This indicates whether we successfully
  // lexed a token, or whether other lexing actions should be attempted.
  //
  // While it wraps a simple boolean state, its API both helps make the
  // failures more self-documenting, and by consuming the actual token
  // constructively when one is produced, it helps ensure the correct result is
  // returned.
  class LexResult {
   public:
    // Consumes (and discards) a valid token to construct a result
    // indicating a token has been produced. Relies on implicit conversions.
    // NOLINTNEXTLINE(google-explicit-constructor)
    LexResult(Token /*discarded_token*/) : LexResult(true) {}

    // Returns a result indicating no token was produced.
    static auto NoMatch() -> LexResult { return LexResult(false); }

    // Tests whether a token was produced by the lexing routine, and
    // the lexer can continue forming tokens.
    explicit operator bool() const { return formed_token_; }

   private:
    explicit LexResult(bool formed_token) : formed_token_(formed_token) {}

    bool formed_token_;
  };
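
  // For example, a minimal sketch of the intended pattern (a hypothetical
  // method, not one from this class):
  //
  // ```
  // auto LexSomething(llvm::StringRef source_text, ssize_t& position)
  //     -> LexResult {
  //   if (/* no match at `position` */) {
  //     return LexResult::NoMatch();
  //   }
  //   Token token = buffer_.AddToken(/* ... */);
  //   return token;  // Implicitly converts to a "formed token" result.
  // }
  // ```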
  Lexer(SharedValueStores& value_stores, SourceBuffer& source,
        DiagnosticConsumer& consumer)
      : buffer_(value_stores, source),
        consumer_(consumer),
        translator_(&buffer_),
        emitter_(translator_, consumer_),
        token_translator_(&buffer_),
        token_emitter_(token_translator_, consumer_) {}

  // Find all line endings and create the line data structures. Explicitly kept
  // out-of-line because this is a significant loop that is useful to have in
  // the profile, and it doesn't simplify by inlining at all. But because it
  // can be inlined, the compiler will otherwise flatten this into its caller.
  [[gnu::noinline]] auto CreateLines(llvm::StringRef source_text) -> void {
    // We currently use `memchr` here, which typically is well optimized to use
    // SIMD or is otherwise significantly faster than byte-wise scanning. We
    // also use carefully selected variables and the `ssize_t` type for
    // performance and code size of this hot loop.
    //
    // TODO: Eventually, we'll likely need to roll our own SIMD-optimized
    // routine here in order to handle CR+LF line endings, as we'll want those
    // to stay on the fast path. We'll also need to detect and diagnose Unicode
    // vertical whitespace. Starting with `memchr` should give us a strong
    // baseline performance target when adding those features.
    const char* const text = source_text.data();
    const ssize_t size = source_text.size();
    ssize_t start = 0;
    while (const char* nl = reinterpret_cast<const char*>(
               memchr(&text[start], '\n', size - start))) {
      ssize_t nl_index = nl - text;
      buffer_.AddLine(LineInfo(start, nl_index - start));
      start = nl_index + 1;
    }
    // The last line ends at the end of the file.
    buffer_.AddLine(LineInfo(start, size - start));

    // If the last line wasn't empty, the file ends with an unterminated line.
    // Add an extra blank line so that we never need to handle the special case
    // of being on the last line inside the lexer and needing to not increment
    // to the next line.
    if (start != size) {
      buffer_.AddLine(LineInfo(size, 0));
    }

    // Now that all the infos are allocated, get a fresh pointer to the first
    // info for use while lexing.
    line_index_ = 0;
  }
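
  // For example (a sketch): for the 8-byte input "ab\ncd\nef", this records
  // lines (start 0, length 2), (3, 2), and (6, 2), plus a trailing empty line
  // (8, 0) because the file does not end with a newline.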
  auto current_line() -> Line { return Line(line_index_); }
  auto current_line_info() -> LineInfo* {
    return &buffer_.line_infos_[line_index_];
  }

  auto ComputeColumn(ssize_t position) -> int {
    CARBON_DCHECK(position >= current_line_info()->start);
    return position - current_line_info()->start;
  }

  auto NoteWhitespace() -> void {
    buffer_.token_infos_.back().has_trailing_space = true;
  }

  auto SkipHorizontalWhitespace(llvm::StringRef source_text, ssize_t& position)
      -> void {
    // Handle adjacent whitespace quickly. This comes up frequently, for
    // example, due to indentation. We don't expect *huge* runs, so just use a
    // scalar loop. While still scalar, this avoids repeated table dispatch and
    // marking whitespace.
    while (position < static_cast<ssize_t>(source_text.size()) &&
           (source_text[position] == ' ' || source_text[position] == '\t')) {
      ++position;
    }
  }

  auto LexHorizontalWhitespace(llvm::StringRef source_text, ssize_t& position)
      -> void {
    CARBON_DCHECK(source_text[position] == ' ' ||
                  source_text[position] == '\t');
    NoteWhitespace();
    // Skip runs using an optimized code path.
    SkipHorizontalWhitespace(source_text, position);
  }

  auto LexVerticalWhitespace(llvm::StringRef source_text, ssize_t& position)
      -> void {
    NoteWhitespace();
    ++line_index_;
    auto* line_info = current_line_info();
    ssize_t line_start = line_info->start;
    position = line_start;
    SkipHorizontalWhitespace(source_text, position);
    line_info->indent = position - line_start;
  }
  auto LexCommentOrSlash(llvm::StringRef source_text, ssize_t& position)
      -> void {
    CARBON_DCHECK(source_text[position] == '/');

    // Both comments and slash symbols start with a `/`. We disambiguate with a
    // max-munch rule -- if the next character is another `/` then we lex it as
    // a comment start. If it isn't, then we lex as a slash. We also optimize
    // for the comment case as we expect that to be much more important for
    // overall lexer performance.
    if (LLVM_LIKELY(position + 1 < static_cast<ssize_t>(source_text.size()) &&
                    source_text[position + 1] == '/')) {
      LexComment(source_text, position);
      return;
    }

    // This code path should produce a token; make sure that happens.
    LexResult result = LexSymbolToken(source_text, position);
    CARBON_CHECK(result) << "Failed to form a token!";
  }

  auto LexComment(llvm::StringRef source_text, ssize_t& position) -> void {
    CARBON_DCHECK(source_text.substr(position).startswith("//"));

    // Any comment must be the only non-whitespace on the line.
    const auto* line_info = current_line_info();
    if (LLVM_UNLIKELY(position != line_info->start + line_info->indent)) {
      CARBON_DIAGNOSTIC(TrailingComment, Error,
                        "Trailing comments are not permitted.");
      emitter_.Emit(source_text.begin() + position, TrailingComment);

      // Note that we cannot fall through here as the logic below doesn't
      // handle trailing comments. For simplicity, we just consume the trailing
      // comment itself and let the normal lexer handle the newline as if there
      // weren't a comment at all.
      position = line_info->start + line_info->length;
      return;
    }

    // The introducer '//' must be followed by whitespace or EOF.
    bool is_valid_after_slashes = true;
    if (position + 2 < static_cast<ssize_t>(source_text.size()) &&
        LLVM_UNLIKELY(!IsSpace(source_text[position + 2]))) {
      CARBON_DIAGNOSTIC(NoWhitespaceAfterCommentIntroducer, Error,
                        "Whitespace is required after '//'.");
      emitter_.Emit(source_text.begin() + position + 2,
                    NoWhitespaceAfterCommentIntroducer);

      // We use this to tweak the lexing of blocks below.
      is_valid_after_slashes = false;
    }

    // Skip over this line.
    ssize_t line_index = line_index_;
    ++line_index;
    position = buffer_.line_infos_[line_index].start;

    // A very common pattern is a long block of comment lines all with the same
    // indent and comment start. We skip these comment blocks in bulk both for
    // speed and to reduce redundant diagnostics if each line has the same
    // erroneous comment start like `//!`.
    //
    // When we have SIMD support this is even more important for speed, as
    // short indents can be scanned extremely quickly with SIMD and we expect
    // these to be the dominant cases.
    //
    // TODO: We should extend this to 32-byte SIMD on platforms with support.
    constexpr int MaxIndent = 13;
    const int indent = line_info->indent;
    const ssize_t first_line_start = line_info->start;
    ssize_t prefix_size = indent + (is_valid_after_slashes ? 3 : 2);
    auto skip_to_next_line = [this, indent, &line_index, &position] {
      // We're guaranteed to have a line here even on a comment on the last
      // line, as we ensure there is an empty line structure at the end of
      // every file.
      ++line_index;
      auto* next_line_info = &buffer_.line_infos_[line_index];
      next_line_info->indent = indent;
      position = next_line_info->start;
    };
    if (CARBON_USE_SIMD &&
        position + 16 < static_cast<ssize_t>(source_text.size()) &&
        indent <= MaxIndent) {
      // Load a mask based on the amount of text we want to compare.
      auto mask = PrefixMasks[prefix_size];
#if __ARM_NEON
      // Load and mask the prefix of the current line.
      auto prefix = vld1q_u8(reinterpret_cast<const uint8_t*>(
          source_text.data() + first_line_start));
      prefix = vandq_u8(mask, prefix);
      do {
        // Load and mask the prefix of the next line to consider.
        auto next_prefix = vld1q_u8(
            reinterpret_cast<const uint8_t*>(source_text.data() + position));
        next_prefix = vandq_u8(mask, next_prefix);
        // Compare the two prefixes and if any lanes differ, break.
        auto compare = vceqq_u8(prefix, next_prefix);
        if (vminvq_u8(compare) == 0) {
          break;
        }
        skip_to_next_line();
      } while (position + 16 < static_cast<ssize_t>(source_text.size()));
#elif __x86_64__
      // Use the current line's prefix as the exemplar to compare against.
      // We don't mask here as we will mask when doing the comparison.
      auto prefix = _mm_loadu_si128(reinterpret_cast<const __m128i*>(
          source_text.data() + first_line_start));
      do {
        // Load the prefix of the next line to consider.
        auto next_prefix = _mm_loadu_si128(
            reinterpret_cast<const __m128i*>(source_text.data() + position));
        // Compute the difference between the next line and our exemplar.
        // Again, we don't mask the difference because the comparison below
        // will be masked.
        auto prefix_diff = _mm_xor_si128(prefix, next_prefix);
        // If we have any differences (non-zero bits) within the mask, we can't
        // skip the next line either.
        if (!_mm_test_all_zeros(mask, prefix_diff)) {
          break;
        }
        skip_to_next_line();
      } while (position + 16 < static_cast<ssize_t>(source_text.size()));
#else
#error "Unsupported SIMD architecture!"
#endif
      // TODO: If we finish the loop due to the position approaching the end of
      // the buffer we may fail to skip the last line in a comment block that
      // has an invalid initial sequence and thus emit extra diagnostics. We
      // should really fall through to the generic skipping logic, but the code
      // organization will need to change significantly to allow that.
    } else {
      while (position + prefix_size <
                 static_cast<ssize_t>(source_text.size()) &&
             memcmp(source_text.data() + first_line_start,
                    source_text.data() + position, prefix_size) == 0) {
        skip_to_next_line();
      }
    }

    // Now compute the indent of this next line before we finish.
    ssize_t line_start = position;
    SkipHorizontalWhitespace(source_text, position);

    // Now that we're done scanning, update to the latest line index and
    // indent.
    line_index_ = line_index;
    current_line_info()->indent = position - line_start;
  }
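
  // For example (a sketch): a block of lines such as
  //
  // ```
  //   //! missing space after the introducer
  //   //! missing space after the introducer
  // ```
  //
  // emits NoWhitespaceAfterCommentIntroducer once for the first line, and then
  // the prefix comparison (two spaces plus `//`) lets the remaining lines of
  // the block be skipped in bulk without re-diagnosing each one.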
  auto LexNumericLiteral(llvm::StringRef source_text, ssize_t& position)
      -> LexResult {
    std::optional<NumericLiteral> literal =
        NumericLiteral::Lex(source_text.substr(position));
    if (!literal) {
      return LexError(source_text, position);
    }

    int int_column = ComputeColumn(position);
    int token_size = literal->text().size();
    position += token_size;

    return VariantMatch(
        literal->ComputeValue(emitter_),
        [&](NumericLiteral::IntegerValue&& value) {
          auto token = buffer_.AddToken({.kind = TokenKind::IntegerLiteral,
                                         .token_line = current_line(),
                                         .column = int_column});
          buffer_.GetTokenInfo(token).integer_id =
              buffer_.value_stores_->integers().Add(std::move(value.value));
          return token;
        },
        [&](NumericLiteral::RealValue&& value) {
          auto token = buffer_.AddToken({.kind = TokenKind::RealLiteral,
                                         .token_line = current_line(),
                                         .column = int_column});
          buffer_.GetTokenInfo(token).real_id =
              buffer_.value_stores_->reals().Add(
                  Real{.mantissa = value.mantissa,
                       .exponent = value.exponent,
                       .is_decimal =
                           (value.radix == NumericLiteral::Radix::Decimal)});
          return token;
        },
        [&](NumericLiteral::UnrecoverableError) {
          auto token = buffer_.AddToken({
              .kind = TokenKind::Error,
              .token_line = current_line(),
              .column = int_column,
              .error_length = token_size,
          });
          return token;
        });
  }
  auto LexStringLiteral(llvm::StringRef source_text, ssize_t& position)
      -> LexResult {
    std::optional<StringLiteral> literal =
        StringLiteral::Lex(source_text.substr(position));
    if (!literal) {
      return LexError(source_text, position);
    }

    Line string_line = current_line();
    int string_column = ComputeColumn(position);
    ssize_t literal_size = literal->text().size();
    position += literal_size;

    // Update line and column information.
    if (literal->is_multi_line()) {
      while (current_line_info()->start + current_line_info()->length <
             position) {
        ++line_index_;
        current_line_info()->indent = string_column;
      }
      // Note that we've updated the current line at this point. That remains
      // correct because the last line of the multi-line literal *also* has its
      // indent set by the loop above.
    }

    if (literal->is_terminated()) {
      auto string_id = buffer_.value_stores_->strings().Add(
          literal->ComputeValue(buffer_.allocator_, emitter_));
      auto token = buffer_.AddToken({.kind = TokenKind::StringLiteral,
                                     .token_line = string_line,
                                     .column = string_column,
                                     .string_id = string_id});
      return token;
    } else {
      CARBON_DIAGNOSTIC(UnterminatedString, Error,
                        "String is missing a terminator.");
      emitter_.Emit(literal->text().begin(), UnterminatedString);
      return buffer_.AddToken(
          {.kind = TokenKind::Error,
           .token_line = string_line,
           .column = string_column,
           .error_length = static_cast<int32_t>(literal_size)});
    }
  }
  auto LexOneCharSymbolToken(llvm::StringRef source_text, TokenKind kind,
                             ssize_t& position) -> Token {
    // Verify in a debug build that the incoming token kind is correct.
    CARBON_DCHECK(kind != TokenKind::Error);
    CARBON_DCHECK(kind.fixed_spelling().size() == 1);
    CARBON_DCHECK(source_text[position] == kind.fixed_spelling().front())
        << "Source text starts with '" << source_text[position]
        << "' instead of the spelling '" << kind.fixed_spelling()
        << "' of the incoming token kind '" << kind << "'";

    Token token = buffer_.AddToken({.kind = kind,
                                    .token_line = current_line(),
                                    .column = ComputeColumn(position)});
    ++position;
    return token;
  }

  auto LexOpeningSymbolToken(llvm::StringRef source_text, TokenKind kind,
                             ssize_t& position) -> LexResult {
    Token token = LexOneCharSymbolToken(source_text, kind, position);
    open_groups_.push_back(token);
    return token;
  }

  auto LexClosingSymbolToken(llvm::StringRef source_text, TokenKind kind,
                             ssize_t& position) -> LexResult {
    auto unmatched_error = [&] {
      CARBON_DIAGNOSTIC(
          UnmatchedClosing, Error,
          "Closing symbol without a corresponding opening symbol.");
      emitter_.Emit(source_text.begin() + position, UnmatchedClosing);
      Token token = buffer_.AddToken({.kind = TokenKind::Error,
                                      .token_line = current_line(),
                                      .column = ComputeColumn(position),
                                      .error_length = 1});
      ++position;
      return token;
    };

    // If we have no open groups, this is an error.
    if (LLVM_UNLIKELY(open_groups_.empty())) {
      return unmatched_error();
    }

    Token opening_token = open_groups_.back();
    // Close any invalid open groups first.
    if (LLVM_UNLIKELY(buffer_.GetTokenInfo(opening_token).kind !=
                      kind.opening_symbol())) {
      CloseInvalidOpenGroups(kind, position);
      // This may exhaust the open groups, so re-check and re-error if needed.
      if (open_groups_.empty()) {
        return unmatched_error();
      }
      opening_token = open_groups_.back();
      CARBON_DCHECK(buffer_.GetTokenInfo(opening_token).kind ==
                    kind.opening_symbol());
    }
    open_groups_.pop_back();

    // Now that the groups are all matched up, lex the actual token.
    Token token = LexOneCharSymbolToken(source_text, kind, position);

    // Note that it is important to get fresh token infos here, as lexing the
    // closing token above may have invalidated any earlier pointers.
    buffer_.GetTokenInfo(opening_token).closing_token = token;
    buffer_.GetTokenInfo(token).opening_token = opening_token;
    return token;
  }
  auto LexSymbolToken(llvm::StringRef source_text, ssize_t& position)
      -> LexResult {
    // One-character symbols and grouping symbols are handled with dedicated
    // dispatch. We only lex the multi-character tokens here.
    TokenKind kind =
        llvm::StringSwitch<TokenKind>(source_text.substr(position))
#define CARBON_SYMBOL_TOKEN(Name, Spelling) \
  .StartsWith(Spelling, TokenKind::Name)
#define CARBON_ONE_CHAR_SYMBOL_TOKEN(TokenName, Spelling)
#define CARBON_OPENING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, ClosingName)
#define CARBON_CLOSING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, OpeningName)
#include "toolchain/lex/token_kind.def"
            .Default(TokenKind::Error);
    if (kind == TokenKind::Error) {
      return LexError(source_text, position);
    }

    Token token = buffer_.AddToken({.kind = kind,
                                    .token_line = current_line(),
                                    .column = ComputeColumn(position)});
    position += kind.fixed_spelling().size();
    return token;
  }

  // Given a word that has already been lexed, determine whether it is a type
  // literal and if so form the corresponding token.
  auto LexWordAsTypeLiteralToken(llvm::StringRef word, int column)
      -> LexResult {
    if (word.size() < 2) {
      // Too short to form one of these tokens.
      return LexResult::NoMatch();
    }
    if (word[1] < '1' || word[1] > '9') {
      // Doesn't start with a valid initial digit.
      return LexResult::NoMatch();
    }

    std::optional<TokenKind> kind;
    switch (word.front()) {
      case 'i':
        kind = TokenKind::IntegerTypeLiteral;
        break;
      case 'u':
        kind = TokenKind::UnsignedIntegerTypeLiteral;
        break;
      case 'f':
        kind = TokenKind::FloatingPointTypeLiteral;
        break;
      default:
        return LexResult::NoMatch();
    }

    llvm::StringRef suffix = word.substr(1);
    if (!CanLexInteger(emitter_, suffix)) {
      return buffer_.AddToken(
          {.kind = TokenKind::Error,
           .token_line = current_line(),
           .column = column,
           .error_length = static_cast<int32_t>(word.size())});
    }
    llvm::APInt suffix_value;
    if (suffix.getAsInteger(10, suffix_value)) {
      return LexResult::NoMatch();
    }

    auto token = buffer_.AddToken(
        {.kind = *kind, .token_line = current_line(), .column = column});
    buffer_.GetTokenInfo(token).integer_id =
        buffer_.value_stores_->integers().Add(std::move(suffix_value));
    return token;
  }
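
  // For example (a sketch): `i32` forms an IntegerTypeLiteral token whose
  // stored integer value is 32, and `f64` forms a FloatingPointTypeLiteral,
  // while words like `u03` or `int` produce no match here and fall through to
  // keyword and identifier handling.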
  // Closes all open groups that cannot remain open across a closing symbol.
  // Users may pass `Error` to close all open groups.
  [[gnu::noinline]] auto CloseInvalidOpenGroups(TokenKind kind,
                                                ssize_t position) -> void {
    CARBON_CHECK(kind.is_closing_symbol() || kind == TokenKind::Error);
    CARBON_CHECK(!open_groups_.empty());

    int column = ComputeColumn(position);

    do {
      Token opening_token = open_groups_.back();
      TokenKind opening_kind = buffer_.GetTokenInfo(opening_token).kind;
      if (kind == opening_kind.closing_symbol()) {
        return;
      }
      open_groups_.pop_back();

      CARBON_DIAGNOSTIC(
          MismatchedClosing, Error,
          "Closing symbol does not match most recent opening symbol.");
      token_emitter_.Emit(opening_token, MismatchedClosing);

      CARBON_CHECK(!buffer_.tokens().empty())
          << "Must have a prior opening token!";
      Token prev_token = buffer_.tokens().end()[-1];

      // TODO: do a smarter backwards scan for where to put the closing
      // token.
      Token closing_token = buffer_.AddToken(
          {.kind = opening_kind.closing_symbol(),
           .has_trailing_space = buffer_.HasTrailingWhitespace(prev_token),
           .is_recovery = true,
           .token_line = current_line(),
           .column = column});
      TokenInfo& opening_token_info = buffer_.GetTokenInfo(opening_token);
      TokenInfo& closing_token_info = buffer_.GetTokenInfo(closing_token);
      opening_token_info.closing_token = closing_token;
      closing_token_info.opening_token = opening_token;
    } while (!open_groups_.empty());
  }
  auto LexKeywordOrIdentifier(llvm::StringRef source_text, ssize_t& position)
      -> LexResult {
    if (static_cast<unsigned char>(source_text[position]) > 0x7F) {
      // TODO: Need to add support for Unicode lexing.
      return LexError(source_text, position);
    }
    CARBON_CHECK(IsIdStartByteTable[source_text[position]]);

    int column = ComputeColumn(position);

    // Take the valid characters off the front of the source buffer.
    llvm::StringRef identifier_text =
        ScanForIdentifierPrefix(source_text.substr(position));
    CARBON_CHECK(!identifier_text.empty())
        << "Must have at least one character!";
    position += identifier_text.size();

    // Check if the text is a type literal, and if so form such a literal.
    if (LexResult result =
            LexWordAsTypeLiteralToken(identifier_text, column)) {
      return result;
    }

    // Check if the text matches a keyword token, and if so use that.
    TokenKind kind = llvm::StringSwitch<TokenKind>(identifier_text)
#define CARBON_KEYWORD_TOKEN(Name, Spelling) .Case(Spelling, TokenKind::Name)
#include "toolchain/lex/token_kind.def"
                         .Default(TokenKind::Error);
    if (kind != TokenKind::Error) {
      return buffer_.AddToken(
          {.kind = kind, .token_line = current_line(), .column = column});
    }

    // Otherwise we have a generic identifier.
    return buffer_.AddToken(
        {.kind = TokenKind::Identifier,
         .token_line = current_line(),
         .column = column,
         .string_id = buffer_.value_stores_->strings().Add(identifier_text)});
  }

  auto LexKeywordOrIdentifierMaybeRaw(llvm::StringRef source_text,
                                      ssize_t& position) -> LexResult {
    CARBON_CHECK(source_text[position] == 'r');
    // Raw identifiers must look like `r#<valid identifier>`; otherwise it's an
    // identifier starting with the 'r'.
    // TODO: Need to add support for Unicode lexing.
    if (LLVM_LIKELY(position + 2 >= static_cast<ssize_t>(source_text.size()) ||
                    source_text[position + 1] != '#' ||
                    !IsIdStartByteTable[source_text[position + 2]])) {
      // TODO: Should this print a different error when there is `r#`, but it
      // isn't followed by identifier text? Or is it right to put it back so
      // that the `#` could be parsed as part of a raw string literal?
      return LexKeywordOrIdentifier(source_text, position);
    }

    int column = ComputeColumn(position);

    // Take the valid characters off the front of the source buffer.
    llvm::StringRef identifier_text =
        ScanForIdentifierPrefix(source_text.substr(position + 2));
    CARBON_CHECK(!identifier_text.empty())
        << "Must have at least one character!";
    position += identifier_text.size() + 2;

    // Unlike LexKeywordOrIdentifier, raw identifiers do not do keyword checks,
    // so we always have a raw identifier here.
    // TODO: This token doesn't carry any indicator that it's raw, so
    // diagnostics are unclear.
    return buffer_.AddToken(
        {.kind = TokenKind::Identifier,
         .token_line = current_line(),
         .column = column,
         .string_id = buffer_.value_stores_->strings().Add(identifier_text)});
  }
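
  // For example (a sketch): `r#fn` lexes as an Identifier token whose text is
  // `fn` rather than as the `fn` keyword, while `r2d2` has no `#` and simply
  // lexes as the ordinary identifier `r2d2`.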
  auto LexError(llvm::StringRef source_text, ssize_t& position) -> LexResult {
    llvm::StringRef error_text =
        source_text.substr(position).take_while([](char c) {
          if (IsAlnum(c)) {
            return false;
          }
          switch (c) {
            case '_':
            case '\t':
            case '\n':
              return false;
            default:
              break;
          }
          return llvm::StringSwitch<bool>(llvm::StringRef(&c, 1))
#define CARBON_SYMBOL_TOKEN(Name, Spelling) .StartsWith(Spelling, false)
#include "toolchain/lex/token_kind.def"
              .Default(true);
        });
    if (error_text.empty()) {
      // TODO: Reimplement this to use the lexer properly. In the meantime,
      // guarantee that we eat at least one byte.
      error_text = source_text.substr(position, 1);
    }

    auto token = buffer_.AddToken(
        {.kind = TokenKind::Error,
         .token_line = current_line(),
         .column = ComputeColumn(position),
         .error_length = static_cast<int32_t>(error_text.size())});
    CARBON_DIAGNOSTIC(UnrecognizedCharacters, Error,
                      "Encountered unrecognized characters while parsing.");
    emitter_.Emit(error_text.begin(), UnrecognizedCharacters);

    position += error_text.size();
    return token;
  }

  auto LexStartOfFile(llvm::StringRef source_text, ssize_t& position) -> void {
    // Before lexing any source text, add the start-of-file token so that code
    // can assume a non-empty token buffer for the rest of lexing. Note that
    // the start-of-file always has trailing space because it *is* whitespace.
    buffer_.AddToken({.kind = TokenKind::StartOfFile,
                      .has_trailing_space = true,
                      .token_line = current_line(),
                      .column = 0});

    // Also skip any horizontal whitespace and record the indentation of the
    // first line.
    SkipHorizontalWhitespace(source_text, position);
    auto* line_info = current_line_info();
    CARBON_CHECK(line_info->start == 0);
    line_info->indent = position;
  }
  auto LexEndOfFile(llvm::StringRef source_text, ssize_t position) -> void {
    CARBON_CHECK(position == static_cast<ssize_t>(source_text.size()));
    // Check if the last line is empty and not the first (and only) line. If
    // so, re-pin the last line to be the prior one so that diagnostics and
    // editors can treat newlines as terminators, even though we internally
    // handle them as separators in case of a missing newline on the last line.
    // We do this here instead of detecting this when we see the newline to
    // avoid more conditions along that fast path.
    if (position == current_line_info()->start && line_index_ != 0) {
      --line_index_;
      --position;
    } else {
      // Update the line length as this is also the end of a line.
      current_line_info()->length = ComputeColumn(position);
    }

    // The end-of-file token is always considered to be whitespace.
    NoteWhitespace();

    // Close any open groups. We do this after marking whitespace so that the
    // whitespace is preserved.
    if (!open_groups_.empty()) {
      CloseInvalidOpenGroups(TokenKind::Error, position);
    }

    buffer_.AddToken({.kind = TokenKind::EndOfFile,
                      .token_line = current_line(),
                      .column = ComputeColumn(position)});
  }
  // We use a collection of static member functions for table-based dispatch to
  // lexer methods. These are named static member functions so that they show
  // up helpfully in profiles and backtraces, but they tend to not contain the
  // interesting logic and simply delegate to the relevant methods. However,
  // all of their signatures need to be exactly the same in order to ensure we
  // can build efficient dispatch tables out of them. All of them end by doing
  // a must-tail return call to this routine. It handles continuing the
  // dispatch chain.
  static auto DispatchNext(Lexer& lexer, llvm::StringRef source_text,
                           ssize_t position) -> void {
    if (LLVM_LIKELY(position < static_cast<ssize_t>(source_text.size()))) {
      // The common case is to tail recurse based on the next character. Note
      // that because this is a must-tail return, this cannot fail to tail-call
      // and will not grow the stack. This is in essence a loop with dynamic
      // tail dispatch to the next stage of the loop.
      [[clang::musttail]] return DispatchTable[static_cast<unsigned char>(
          source_text[position])](lexer, source_text, position);
    }

    // When we finish the source text, stop recursing. We also hint this so
    // that the tail-dispatch is optimized as that's essentially the loop
    // back-edge and this is the loop exit.
    lexer.LexEndOfFile(source_text, position);
  }
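
  // For example (a sketch of one pass through the chain): for the input
  // `fn main`, `DispatchTable['f']` is DispatchLexKeywordOrIdentifier, which
  // lexes `fn` as a keyword and must-tail calls DispatchNext; that dispatches
  // on the space to DispatchLexHorizontalWhitespace, then on `m` back to
  // identifier lexing, and so on until the source text is exhausted and
  // LexEndOfFile runs.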
  // Define a set of dispatch functions that simply forward to a method that
  // lexes a token. This includes validating that an actual token was produced,
  // and continuing the dispatch.
#define CARBON_DISPATCH_LEX_TOKEN(LexMethod)                                  \
  static auto Dispatch##LexMethod(Lexer& lexer, llvm::StringRef source_text,  \
                                  ssize_t position)                           \
      ->void {                                                                \
    LexResult result = lexer.LexMethod(source_text, position);                \
    CARBON_CHECK(result) << "Failed to form a token!";                        \
    [[clang::musttail]] return DispatchNext(lexer, source_text, position);    \
  }
  CARBON_DISPATCH_LEX_TOKEN(LexError)
  CARBON_DISPATCH_LEX_TOKEN(LexSymbolToken)
  CARBON_DISPATCH_LEX_TOKEN(LexKeywordOrIdentifier)
  CARBON_DISPATCH_LEX_TOKEN(LexKeywordOrIdentifierMaybeRaw)
  CARBON_DISPATCH_LEX_TOKEN(LexNumericLiteral)
  CARBON_DISPATCH_LEX_TOKEN(LexStringLiteral)

  // Custom dispatch functions that pre-select the symbol token to lex.
#define CARBON_DISPATCH_LEX_SYMBOL_TOKEN(LexMethod)                           \
  static auto Dispatch##LexMethod##SymbolToken(                               \
      Lexer& lexer, llvm::StringRef source_text, ssize_t position)            \
      ->void {                                                                \
    LexResult result = lexer.LexMethod##SymbolToken(                          \
        source_text, OneCharTokenKindTable[source_text[position]], position); \
    CARBON_CHECK(result) << "Failed to form a token!";                        \
    [[clang::musttail]] return DispatchNext(lexer, source_text, position);    \
  }
  CARBON_DISPATCH_LEX_SYMBOL_TOKEN(LexOneChar)
  CARBON_DISPATCH_LEX_SYMBOL_TOKEN(LexOpening)
  CARBON_DISPATCH_LEX_SYMBOL_TOKEN(LexClosing)

  // Define a set of non-token dispatch functions that handle things like
  // whitespace and comments.
#define CARBON_DISPATCH_LEX_NON_TOKEN(LexMethod)                              \
  static auto Dispatch##LexMethod(Lexer& lexer, llvm::StringRef source_text,  \
                                  ssize_t position)                           \
      ->void {                                                                \
    lexer.LexMethod(source_text, position);                                   \
    [[clang::musttail]] return DispatchNext(lexer, source_text, position);    \
  }
  CARBON_DISPATCH_LEX_NON_TOKEN(LexHorizontalWhitespace)
  CARBON_DISPATCH_LEX_NON_TOKEN(LexVerticalWhitespace)
  CARBON_DISPATCH_LEX_NON_TOKEN(LexCommentOrSlash)
  // The main entry point for dispatching through the lexer's table. This
  // method should always fully consume the source text.
  auto Lex() && -> TokenizedBuffer {
    llvm::StringRef source_text = buffer_.source_->text();

    // First build up our line data structures.
    CreateLines(source_text);

    ssize_t position = 0;
    LexStartOfFile(source_text, position);

    // Manually enter the dispatch loop. This call will tail-recurse through
    // the dispatch table until everything from source_text is consumed.
    DispatchNext(*this, source_text, position);

    if (consumer_.seen_error()) {
      buffer_.has_errors_ = true;
    }

    return std::move(buffer_);
  }
 private:
  using DispatchFunctionT = auto(Lexer& lexer, llvm::StringRef source_text,
                                 ssize_t position) -> void;
  using DispatchTableT = std::array<DispatchFunctionT*, 256>;

  // Build a table of function pointers that we can use to dispatch to the
  // correct lexer routine based on the first byte of source text.
  //
  // While it is tempting to simply use a `switch` on the first byte and
  // dispatch with cases into this, in practice that doesn't produce great
  // code. There seem to be two issues that are the root cause.
  //
  // First, there are lots of different values of bytes that dispatch to a
  // fairly small set of routines, and then some byte values that dispatch
  // differently for each byte. This pattern isn't one that the compiler-based
  // lowering of switches works well with -- it tries to balance all the cases,
  // and in doing so emits several compares and other control flow rather than
  // a simple jump table.
  //
  // Second, with a `switch`, it isn't as obvious how to create a single,
  // uniform interface that is effective for *every* byte value, and thus makes
  // for a single consistent table-based dispatch. By forcing these to be
  // function pointers, we also coerce the code to use a strictly homogeneous
  // structure that can form a single dispatch table.
  //
  // These two actually interact -- the second issue is part of what makes the
  // non-table lowering in the first one desirable for many switches and cases.
  //
  // Ultimately, when table-based dispatch is such an important technique, we
  // get better results by taking full control and manually creating the
  // dispatch structures.
  //
  // The functions in this table also use tail-recursion to implement the loop
  // of the lexer. This is based on the technique described more fully for any
  // kind of byte-stream loop structure here:
  // https://blog.reverberate.org/2021/04/21/musttail-efficient-interpreters.html
  constexpr static auto MakeDispatchTable() -> DispatchTableT {
    DispatchTableT table = {};
    // First set the table entries to dispatch to our error token handler as
    // the base case. Everything valid comes from an override below.
    for (int i = 0; i < 256; ++i) {
      table[i] = &DispatchLexError;
    }

    // Symbols have some special dispatching. First, set the first character of
    // each symbol token spelling to dispatch to the symbol lexer. We don't
    // provide a pre-computed token here, so the symbol lexer will compute the
    // exact symbol token kind. We'll override this with more specific dispatch
    // below.
#define CARBON_SYMBOL_TOKEN(TokenName, Spelling) \
  table[(Spelling)[0]] = &DispatchLexSymbolToken;
#include "toolchain/lex/token_kind.def"

    // Now special-case single-character symbols that are guaranteed to not
    // join with another symbol. These are grouping symbols, terminators, or
    // separators in the grammar and have a good reason to be orthogonal to any
    // other punctuation. We do this separately because this needs to override
    // some of the generic handling above, and provide a custom token.
#define CARBON_ONE_CHAR_SYMBOL_TOKEN(TokenName, Spelling) \
  table[(Spelling)[0]] = &DispatchLexOneCharSymbolToken;
#define CARBON_OPENING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, ClosingName) \
  table[(Spelling)[0]] = &DispatchLexOpeningSymbolToken;
#define CARBON_CLOSING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, OpeningName) \
  table[(Spelling)[0]] = &DispatchLexClosingSymbolToken;
#include "toolchain/lex/token_kind.def"

    // Override the handling for `/` to consider comments as well as a `/`
    // symbol.
    table['/'] = &DispatchLexCommentOrSlash;

    table['_'] = &DispatchLexKeywordOrIdentifier;
    // Note that we don't use `llvm::seq` because this needs to be `constexpr`
    // evaluated.
    for (unsigned char c = 'a'; c <= 'z'; ++c) {
      table[c] = &DispatchLexKeywordOrIdentifier;
    }
    table['r'] = &DispatchLexKeywordOrIdentifierMaybeRaw;
    for (unsigned char c = 'A'; c <= 'Z'; ++c) {
      table[c] = &DispatchLexKeywordOrIdentifier;
    }
    // We dispatch all non-ASCII UTF-8 characters to the identifier lexing, as
    // whitespace characters should already have been skipped and the only
    // remaining valid Unicode characters would be part of an identifier. That
    // code can either accept or reject them.
    for (int i = 0x80; i < 0x100; ++i) {
      table[i] = &DispatchLexKeywordOrIdentifier;
    }

    for (unsigned char c = '0'; c <= '9'; ++c) {
      table[c] = &DispatchLexNumericLiteral;
    }

    table['\''] = &DispatchLexStringLiteral;
    table['"'] = &DispatchLexStringLiteral;
    table['#'] = &DispatchLexStringLiteral;

    table[' '] = &DispatchLexHorizontalWhitespace;
    table['\t'] = &DispatchLexHorizontalWhitespace;
    table['\n'] = &DispatchLexVerticalWhitespace;

    return table;
  }

  static const DispatchTableT DispatchTable;

  static const std::array<TokenKind, 256> OneCharTokenKindTable;

  TokenizedBuffer buffer_;
  ssize_t line_index_;
  llvm::SmallVector<Token> open_groups_;
  ErrorTrackingDiagnosticConsumer consumer_;
  SourceBufferLocationTranslator translator_;
  LexerDiagnosticEmitter emitter_;
  TokenLocationTranslator token_translator_;
  TokenDiagnosticEmitter token_emitter_;
};

constexpr TokenizedBuffer::Lexer::DispatchTableT
    TokenizedBuffer::Lexer::DispatchTable = MakeDispatchTable();

constexpr std::array<TokenKind, 256>
    TokenizedBuffer::Lexer::OneCharTokenKindTable = [] {
      std::array<TokenKind, 256> table = {};
#define CARBON_ONE_CHAR_SYMBOL_TOKEN(TokenName, Spelling) \
  table[(Spelling)[0]] = TokenKind::TokenName;
#define CARBON_OPENING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, ClosingName) \
  table[(Spelling)[0]] = TokenKind::TokenName;
#define CARBON_CLOSING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, OpeningName) \
  table[(Spelling)[0]] = TokenKind::TokenName;
#include "toolchain/lex/token_kind.def"
      return table;
    }();
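
// The immediately invoked constexpr lambda above forces the table to be built
// entirely at compile time; a lookup is then a single array index, for
// example (illustrative):
//
//   TokenKind kind = OneCharTokenKindTable[static_cast<unsigned char>(';')];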

auto TokenizedBuffer::Lex(SharedValueStores& value_stores, SourceBuffer& source,
                          DiagnosticConsumer& consumer) -> TokenizedBuffer {
  Lexer lexer(value_stores, source, consumer);
  return std::move(lexer).Lex();
}

auto TokenizedBuffer::GetKind(Token token) const -> TokenKind {
  return GetTokenInfo(token).kind;
}

auto TokenizedBuffer::GetLine(Token token) const -> Line {
  return GetTokenInfo(token).token_line;
}

auto TokenizedBuffer::GetLineNumber(Token token) const -> int {
  return GetLineNumber(GetLine(token));
}

auto TokenizedBuffer::GetColumnNumber(Token token) const -> int {
  return GetTokenInfo(token).column + 1;
}

auto TokenizedBuffer::GetTokenText(Token token) const -> llvm::StringRef {
  const auto& token_info = GetTokenInfo(token);
  llvm::StringRef fixed_spelling = token_info.kind.fixed_spelling();
  if (!fixed_spelling.empty()) {
    return fixed_spelling;
  }

  if (token_info.kind == TokenKind::Error) {
    const auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    return source_->text().substr(token_start, token_info.error_length);
  }

  // Refer back to the source text to preserve oddities like radix or digit
  // separators the author included.
  if (token_info.kind == TokenKind::IntegerLiteral ||
      token_info.kind == TokenKind::RealLiteral) {
    const auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    std::optional<NumericLiteral> relexed_token =
        NumericLiteral::Lex(source_->text().substr(token_start));
    CARBON_CHECK(relexed_token) << "Could not reform numeric literal token.";
    return relexed_token->text();
  }

  // Refer back to the source text to find the original spelling, including
  // escape sequences etc.
  if (token_info.kind == TokenKind::StringLiteral) {
    const auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    std::optional<StringLiteral> relexed_token =
        StringLiteral::Lex(source_->text().substr(token_start));
    CARBON_CHECK(relexed_token) << "Could not reform string literal token.";
    return relexed_token->text();
  }

  // Refer back to the source text to avoid needing to reconstruct the
  // spelling from the size.
  if (token_info.kind.is_sized_type_literal()) {
    const auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
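    // `suffix` below holds only the digits following the leading `i`, `u`,
    // or `f`, so we widen it one character to the left to re-include that
    // prefix in the returned spelling.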
    llvm::StringRef suffix =
        source_->text().substr(token_start + 1).take_while(IsDecimalDigit);
    return llvm::StringRef(suffix.data() - 1, suffix.size() + 1);
  }

  if (token_info.kind == TokenKind::StartOfFile ||
      token_info.kind == TokenKind::EndOfFile) {
    return llvm::StringRef();
  }

  CARBON_CHECK(token_info.kind == TokenKind::Identifier) << token_info.kind;
  return value_stores_->strings().Get(token_info.string_id);
}

auto TokenizedBuffer::GetIdentifier(Token token) const -> StringId {
  const auto& token_info = GetTokenInfo(token);
  CARBON_CHECK(token_info.kind == TokenKind::Identifier) << token_info.kind;
  return token_info.string_id;
}

auto TokenizedBuffer::GetIntegerLiteral(Token token) const -> IntegerId {
  const auto& token_info = GetTokenInfo(token);
  CARBON_CHECK(token_info.kind == TokenKind::IntegerLiteral) << token_info.kind;
  return token_info.integer_id;
}

auto TokenizedBuffer::GetRealLiteral(Token token) const -> RealId {
  const auto& token_info = GetTokenInfo(token);
  CARBON_CHECK(token_info.kind == TokenKind::RealLiteral) << token_info.kind;
  return token_info.real_id;
}

auto TokenizedBuffer::GetStringLiteral(Token token) const -> StringId {
  const auto& token_info = GetTokenInfo(token);
  CARBON_CHECK(token_info.kind == TokenKind::StringLiteral) << token_info.kind;
  return token_info.string_id;
}

auto TokenizedBuffer::GetTypeLiteralSize(Token token) const
    -> const llvm::APInt& {
  const auto& token_info = GetTokenInfo(token);
  CARBON_CHECK(token_info.kind.is_sized_type_literal()) << token_info.kind;
  return value_stores_->integers().Get(token_info.integer_id);
}

auto TokenizedBuffer::GetMatchedClosingToken(Token opening_token) const
    -> Token {
  const auto& opening_token_info = GetTokenInfo(opening_token);
  CARBON_CHECK(opening_token_info.kind.is_opening_symbol())
      << opening_token_info.kind;
  return opening_token_info.closing_token;
}

auto TokenizedBuffer::GetMatchedOpeningToken(Token closing_token) const
    -> Token {
  const auto& closing_token_info = GetTokenInfo(closing_token);
  CARBON_CHECK(closing_token_info.kind.is_closing_symbol())
      << closing_token_info.kind;
  return closing_token_info.opening_token;
}

auto TokenizedBuffer::HasLeadingWhitespace(Token token) const -> bool {
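  // The very first token is treated as having leading whitespace. Otherwise,
  // rather than storing a separate leading flag, we reuse the previous
  // token's trailing-whitespace flag.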
  auto it = TokenIterator(token);
  return it == tokens().begin() || GetTokenInfo(*(it - 1)).has_trailing_space;
}

auto TokenizedBuffer::HasTrailingWhitespace(Token token) const -> bool {
  return GetTokenInfo(token).has_trailing_space;
}

auto TokenizedBuffer::IsRecoveryToken(Token token) const -> bool {
  return GetTokenInfo(token).is_recovery;
}

auto TokenizedBuffer::GetLineNumber(Line line) const -> int {
  return line.index + 1;
}

auto TokenizedBuffer::GetNextLine(Line line) const -> Line {
  Line next(line.index + 1);
  CARBON_DCHECK(static_cast<size_t>(next.index) < line_infos_.size());
  return next;
}

auto TokenizedBuffer::GetPrevLine(Line line) const -> Line {
  CARBON_CHECK(line.index > 0);
  return Line(line.index - 1);
}

auto TokenizedBuffer::GetIndentColumnNumber(Line line) const -> int {
  return GetLineInfo(line).indent + 1;
}

auto TokenizedBuffer::PrintWidths::Widen(const PrintWidths& widths) -> void {
  index = std::max(widths.index, index);
  kind = std::max(widths.kind, kind);
  column = std::max(widths.column, column);
  line = std::max(widths.line, line);
  indent = std::max(widths.indent, indent);
}

// Compute the printed width of a number. When numbers are printed in decimal,
// the number of digits needed is one more than the floor of the log-base-10
// of the value. We handle a value of `zero` explicitly.
//
// This routine requires its argument to be *non-negative*.
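//
// For example, 999 has floor(log10(999)) = 2 and so prints with 3 digits,
// while 1000 has floor(log10(1000)) = 3 and prints with 4.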
static auto ComputeDecimalPrintedWidth(int number) -> int {
  CARBON_CHECK(number >= 0) << "Negative numbers are not supported.";
  if (number == 0) {
    return 1;
  }

  return static_cast<int>(std::log10(number)) + 1;
}

auto TokenizedBuffer::GetTokenPrintWidths(Token token) const -> PrintWidths {
  PrintWidths widths = {};
  widths.index = ComputeDecimalPrintedWidth(token_infos_.size());
  widths.kind = GetKind(token).name().size();
  widths.line = ComputeDecimalPrintedWidth(GetLineNumber(token));
  widths.column = ComputeDecimalPrintedWidth(GetColumnNumber(token));
  widths.indent =
      ComputeDecimalPrintedWidth(GetIndentColumnNumber(GetLine(token)));
  return widths;
}
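
// Prints the tokens in a YAML-like list form. For illustration, with
// hypothetical values, the output has the shape:
//
//   - filename: example.carbon
//     tokens: [
//       { index: 0, kind: 'Semi', line: 1, column: 1, indent: 1, spelling: ';' },
//       ...
//     ]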
auto TokenizedBuffer::Print(llvm::raw_ostream& output_stream) const -> void {
  if (tokens().begin() == tokens().end()) {
    return;
  }

  output_stream << "- filename: " << source_->filename() << "\n"
                << "  tokens: [\n";

  PrintWidths widths = {};
  widths.index = ComputeDecimalPrintedWidth(token_infos_.size());
  for (Token token : tokens()) {
    widths.Widen(GetTokenPrintWidths(token));
  }

  for (Token token : tokens()) {
    PrintToken(output_stream, token, widths);
    output_stream << "\n";
  }
  output_stream << "  ]\n";
}

auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream,
                                 Token token) const -> void {
  PrintToken(output_stream, token, {});
}

auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream, Token token,
                                 PrintWidths widths) const -> void {
  widths.Widen(GetTokenPrintWidths(token));
  int token_index = token.index;
  const auto& token_info = GetTokenInfo(token);
  llvm::StringRef token_text = GetTokenText(token);

  // Output the main chunk using one format string. We have to do the
  // justification manually in order to use the dynamically computed widths
  // and get the quotes included.
  output_stream << llvm::formatv(
      "    { index: {0}, kind: {1}, line: {2}, column: {3}, indent: {4}, "
      "spelling: '{5}'",
      llvm::format_decimal(token_index, widths.index),
      llvm::right_justify(llvm::formatv("'{0}'", token_info.kind.name()).str(),
                          widths.kind + 2),
      llvm::format_decimal(GetLineNumber(token_info.token_line), widths.line),
      llvm::format_decimal(GetColumnNumber(token), widths.column),
      llvm::format_decimal(GetIndentColumnNumber(token_info.token_line),
                           widths.indent),
      token_text);

  switch (token_info.kind) {
    case TokenKind::Identifier:
      output_stream << ", identifier: " << GetIdentifier(token).index;
      break;
    case TokenKind::IntegerLiteral:
      output_stream << ", value: `";
      value_stores_->integers()
          .Get(GetIntegerLiteral(token))
          .print(output_stream, /*isSigned=*/false);
      output_stream << "`";
      break;
    case TokenKind::RealLiteral:
      output_stream << ", value: `"
                    << value_stores_->reals().Get(GetRealLiteral(token)) << "`";
      break;
    case TokenKind::StringLiteral:
      output_stream << ", value: `"
                    << value_stores_->strings().Get(GetStringLiteral(token))
                    << "`";
      break;
    default:
      if (token_info.kind.is_opening_symbol()) {
        output_stream << ", closing_token: "
                      << GetMatchedClosingToken(token).index;
      } else if (token_info.kind.is_closing_symbol()) {
        output_stream << ", opening_token: "
                      << GetMatchedOpeningToken(token).index;
      }
      break;
  }

  if (token_info.has_trailing_space) {
    output_stream << ", has_trailing_space: true";
  }
  if (token_info.is_recovery) {
    output_stream << ", recovery: true";
  }

  output_stream << " },";
}

auto TokenizedBuffer::GetLineInfo(Line line) -> LineInfo& {
  return line_infos_[line.index];
}

auto TokenizedBuffer::GetLineInfo(Line line) const -> const LineInfo& {
  return line_infos_[line.index];
}

auto TokenizedBuffer::AddLine(LineInfo info) -> Line {
  line_infos_.push_back(info);
  return Line(static_cast<int>(line_infos_.size()) - 1);
}

auto TokenizedBuffer::GetTokenInfo(Token token) -> TokenInfo& {
  return token_infos_[token.index];
}

auto TokenizedBuffer::GetTokenInfo(Token token) const -> const TokenInfo& {
  return token_infos_[token.index];
}

auto TokenizedBuffer::AddToken(TokenInfo info) -> Token {
  token_infos_.push_back(info);
  expected_parse_tree_size_ += info.kind.expected_parse_tree_size();
  return Token(static_cast<int>(token_infos_.size()) - 1);
}

auto TokenIterator::Print(llvm::raw_ostream& output) const -> void {
  output << token_.index;
}

auto TokenizedBuffer::SourceBufferLocationTranslator::GetLocation(
    const char* loc) -> DiagnosticLocation {
  CARBON_CHECK(StringRefContainsPointer(buffer_->source_->text(), loc))
      << "location not within buffer";
  int64_t offset = loc - buffer_->source_->text().begin();

  // Find the first line starting after the given location. Note that we
  // can't inspect `line.length` here because it is not necessarily correct
  // for the final line during lexing (but will be correct later for the
  // parse tree).
  const auto* line_it = std::partition_point(
      buffer_->line_infos_.begin(), buffer_->line_infos_.end(),
      [offset](const LineInfo& line) { return line.start <= offset; });

  // Step back one line to find the line containing the given position.
  CARBON_CHECK(line_it != buffer_->line_infos_.begin())
      << "location precedes the start of the first line";
  --line_it;
  int line_number = line_it - buffer_->line_infos_.begin();
  int column_number = offset - line_it->start;
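
  // For example (hypothetical values): with line starts {0, 10, 25} and an
  // `offset` of 12, `partition_point` returns the line starting at 25;
  // stepping back yields the line starting at 10, so the zero-based line
  // number is 1 and the zero-based column is 2.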

  // Start by grabbing the line from the buffer. If the line isn't fully
  // lexed, the length will be npos and the line will be grabbed from the
  // known start to the end of the buffer; we'll then adjust the length.
  llvm::StringRef line =
      buffer_->source_->text().substr(line_it->start, line_it->length);
  if (line_it->length == static_cast<int32_t>(llvm::StringRef::npos)) {
    CARBON_CHECK(line.take_front(column_number).count('\n') == 0)
        << "Currently we assume no unlexed newlines prior to the error "
           "column, but there was one when erroring at "
        << buffer_->source_->filename() << ":" << line_number << ":"
        << column_number;
    // Look for the next newline since we don't know the length. We can start
    // at the column because prior newlines will have been lexed.
    auto end_newline_pos = line.find('\n', column_number);
    if (end_newline_pos != llvm::StringRef::npos) {
      line = line.take_front(end_newline_pos);
    }
  }

  return {.file_name = buffer_->source_->filename(),
          .line = line,
          .line_number = line_number + 1,
          .column_number = column_number + 1};
}

auto TokenLocationTranslator::GetLocation(Token token) -> DiagnosticLocation {
  // Map the token location into a position within the source buffer.
  const auto& token_info = buffer_->GetTokenInfo(token);
  const auto& line_info = buffer_->GetLineInfo(token_info.token_line);
  const char* token_start =
      buffer_->source_->text().begin() + line_info.start + token_info.column;

  // Find the corresponding file location.
  // TODO: Should we somehow indicate in the diagnostic location if this token
  // is a recovery token that doesn't correspond to the original source?
  return TokenizedBuffer::SourceBufferLocationTranslator(buffer_).GetLocation(
      token_start);
}

}  // namespace Carbon::Lex