// Part of the Carbon Language project, under the Apache License v2.0 with LLVM
// Exceptions. See /LICENSE for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

#include "toolchain/lex/tokenized_buffer.h"

#include <algorithm>
#include <array>
#include <cmath>

#include "common/check.h"
#include "common/string_helpers.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "toolchain/base/value_store.h"
#include "toolchain/lex/character_set.h"
#include "toolchain/lex/helpers.h"
#include "toolchain/lex/numeric_literal.h"
#include "toolchain/lex/string_literal.h"

#if __ARM_NEON
#include <arm_neon.h>
#define CARBON_USE_SIMD 1
#elif __x86_64__
#include <x86intrin.h>
#define CARBON_USE_SIMD 1
#else
#define CARBON_USE_SIMD 0
#endif

namespace Carbon::Lex {

// TODO: Move Overload and VariantMatch somewhere more central.

// Form an overload set from a list of functions. For example:
//
// ```
// auto overloaded = Overload{[] (int) {}, [] (float) {}};
// ```
template <typename... Fs>
struct Overload : Fs... {
  using Fs::operator()...;
};
template <typename... Fs>
Overload(Fs...) -> Overload<Fs...>;

// Pattern-match against the type of the value stored in the variant `V`. Each
// element of `fs` should be a function that takes one or more of the variant
// values in `V`.
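//
// For example, a small illustrative sketch (the variant and lambdas here are
// hypothetical, not taken from this file):
//
// ```
// std::variant<int, float> v = 42;
// VariantMatch(v, [](int i) { UseInt(i); }, [](float f) { UseFloat(f); });
// ```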
template <typename V, typename... Fs>
auto VariantMatch(V&& v, Fs&&... fs) -> decltype(auto) {
  return std::visit(Overload{std::forward<Fs&&>(fs)...}, std::forward<V&&>(v));
}

#if CARBON_USE_SIMD
namespace {
#if __ARM_NEON
using SIMDMaskT = uint8x16_t;
#elif __x86_64__
using SIMDMaskT = __m128i;
#else
#error "Unsupported SIMD architecture!"
#endif
using SIMDMaskArrayT = std::array<SIMDMaskT, sizeof(SIMDMaskT) + 1>;
}  // namespace

// A table of masks to include 0-16 bytes of an SSE register.
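// Entry `i` has its low `i` bytes set to 0xFF and the remaining bytes zero, so
// ANDing a loaded vector with `PrefixMasks[i]` keeps only the first `i` bytes.
// For example, `PrefixMasks[5]` would preserve a five-byte prefix such as a
// two-space indent followed by `// `.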
static constexpr SIMDMaskArrayT PrefixMasks = []() constexpr {
  SIMDMaskArrayT masks = {};
  for (int i = 1; i < static_cast<int>(masks.size()); ++i) {
    // The SIMD types and constexpr require a C-style cast.
    // NOLINTNEXTLINE(google-readability-casting)
    masks[i] = (SIMDMaskT)(std::numeric_limits<unsigned __int128>::max() >>
                           ((sizeof(SIMDMaskT) - i) * 8));
  }
  return masks;
}();
#endif  // CARBON_USE_SIMD

// A table of booleans that we can use to classify bytes as being valid
// identifier (or keyword) characters. This is used in the generic,
// non-vectorized fallback code to scan for length of an identifier.
constexpr std::array<bool, 256> IsIdByteTable = [] {
  std::array<bool, 256> table = {};
  for (char c = '0'; c <= '9'; ++c) {
    table[c] = true;
  }
  for (char c = 'A'; c <= 'Z'; ++c) {
    table[c] = true;
  }
  for (char c = 'a'; c <= 'z'; ++c) {
    table[c] = true;
  }
  table['_'] = true;
  return table;
}();

// Baseline scalar version, also available for scalar-fallback in SIMD code.
// Uses `ssize_t` for performance when indexing in the loop.
//
// TODO: This assumes all Unicode characters are non-identifiers.
static auto ScanForIdentifierPrefixScalar(llvm::StringRef text, ssize_t i)
    -> llvm::StringRef {
  const ssize_t size = text.size();
  while (i < size && IsIdByteTable[static_cast<unsigned char>(text[i])]) {
    ++i;
  }
  return text.substr(0, i);
}
#if CARBON_USE_SIMD && __x86_64__
// The SIMD code path uses a scheme derived from the techniques in Geoff
// Langdale and Daniel Lemire's work on parsing JSON[1]. Specifically, that
// paper outlines a technique of using two 4-bit indexed in-register look-up
// tables (LUTs) to classify bytes in a branchless SIMD code sequence.
//
// [1]: https://arxiv.org/pdf/1902.08318.pdf
//
// The goal is to get a bit mask classifying different sets of bytes. For each
// input byte, we first test for a high bit indicating a UTF-8 encoded Unicode
// character. Otherwise, we want the mask bits to be set with the following
// logic derived by inspecting the high nibble and low nibble of the input:
// bit0 = 1 for `_`: high `0x5` and low `0xF`
// bit1 = 1 for `0-9`: high `0x3` and low `0x0` - `0x9`
// bit2 = 1 for `A-O` and `a-o`: high `0x4` or `0x6` and low `0x1` - `0xF`
// bit3 = 1 for `P-Z` and `p-z`: high `0x5` or `0x7` and low `0x0` - `0xA`
// bit4 = unused
// bit5 = unused
// bit6 = unused
// bit7 = unused
//
// No bits set means a definitively non-ID ASCII character.
//
// Bits 4-7 remain unused in case we need to classify more characters.
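//
// As a worked example against the tables below: `g` is 0x67, so the high
// nibble `0x6` looks up 0b0000'0100 and the low nibble `0x7` looks up
// 0b1000'1110; their AND is nonzero, so `g` is an identifier byte. For `/`
// (0x2F), the high-nibble entry for `0x2` is zero, so the AND is zero and the
// byte is classified as a non-identifier character.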
namespace {
// Struct used to implement the nibble LUT for SIMD implementations.
//
// Forced to 16-byte alignment to ensure we can load it easily in SIMD code.
struct alignas(16) NibbleLUT {
  auto Load() const -> __m128i {
    return _mm_load_si128(reinterpret_cast<const __m128i*>(this));
  }

  uint8_t nibble_0;
  uint8_t nibble_1;
  uint8_t nibble_2;
  uint8_t nibble_3;
  uint8_t nibble_4;
  uint8_t nibble_5;
  uint8_t nibble_6;
  uint8_t nibble_7;
  uint8_t nibble_8;
  uint8_t nibble_9;
  uint8_t nibble_a;
  uint8_t nibble_b;
  uint8_t nibble_c;
  uint8_t nibble_d;
  uint8_t nibble_e;
  uint8_t nibble_f;
};
}  // namespace

constexpr NibbleLUT HighLUT = {
    .nibble_0 = 0b0000'0000,
    .nibble_1 = 0b0000'0000,
    .nibble_2 = 0b0000'0000,
    .nibble_3 = 0b0000'0010,
    .nibble_4 = 0b0000'0100,
    .nibble_5 = 0b0000'1001,
    .nibble_6 = 0b0000'0100,
    .nibble_7 = 0b0000'1000,
    .nibble_8 = 0b1000'0000,
    .nibble_9 = 0b1000'0000,
    .nibble_a = 0b1000'0000,
    .nibble_b = 0b1000'0000,
    .nibble_c = 0b1000'0000,
    .nibble_d = 0b1000'0000,
    .nibble_e = 0b1000'0000,
    .nibble_f = 0b1000'0000,
};
constexpr NibbleLUT LowLUT = {
    .nibble_0 = 0b1000'1010,
    .nibble_1 = 0b1000'1110,
    .nibble_2 = 0b1000'1110,
    .nibble_3 = 0b1000'1110,
    .nibble_4 = 0b1000'1110,
    .nibble_5 = 0b1000'1110,
    .nibble_6 = 0b1000'1110,
    .nibble_7 = 0b1000'1110,
    .nibble_8 = 0b1000'1110,
    .nibble_9 = 0b1000'1110,
    .nibble_a = 0b1000'1100,
    .nibble_b = 0b1000'0100,
    .nibble_c = 0b1000'0100,
    .nibble_d = 0b1000'0100,
    .nibble_e = 0b1000'0100,
    .nibble_f = 0b1000'0101,
};

static auto ScanForIdentifierPrefixX86(llvm::StringRef text)
    -> llvm::StringRef {
  const auto high_lut = HighLUT.Load();
  const auto low_lut = LowLUT.Load();

  // Use `ssize_t` for performance here as we index memory in a tight loop.
  ssize_t i = 0;
  const ssize_t size = text.size();
  while ((i + 16) <= size) {
    __m128i input =
        _mm_loadu_si128(reinterpret_cast<const __m128i*>(text.data() + i));

    // The high bits of each byte indicate a non-ASCII character encoded using
    // UTF-8. Test those and fall back to the scalar code if present. These
    // bytes will also cause spurious zeros in the LUT results, but we can
    // ignore that because we track them independently here.
#if __SSE4_1__
    if (!_mm_test_all_zeros(_mm_set1_epi8(0x80), input)) {
      break;
    }
#else
    if (_mm_movemask_epi8(input) != 0) {
      break;
    }
#endif

    // Do two LUT lookups and mask the results together to get the results for
    // both low and high nibbles. Note that we don't need to mask out the high
    // bit of input here because we track that above for UTF-8 handling.
    __m128i low_mask = _mm_shuffle_epi8(low_lut, input);
    // Note that the input needs to be masked to only include the high nibble
    // or we could end up with bit7 set forcing the result to a zero byte.
    __m128i input_high =
        _mm_and_si128(_mm_srli_epi32(input, 4), _mm_set1_epi8(0x0f));
    __m128i high_mask = _mm_shuffle_epi8(high_lut, input_high);
    __m128i mask = _mm_and_si128(low_mask, high_mask);

    // Now compare to find the completely zero bytes.
    __m128i id_byte_mask_vec = _mm_cmpeq_epi8(mask, _mm_setzero_si128());
    int tail_ascii_mask = _mm_movemask_epi8(id_byte_mask_vec);

    // Check if there are bits in the tail mask, which means zero bytes and the
    // end of the identifier. We could do this without materializing the scalar
    // mask on more recent CPUs, but we generally expect the median length we
    // encounter to be <16 characters and so we avoid the extra instruction in
    // that case and predict this branch to succeed so it is laid out in a
    // reasonable way.
    if (LLVM_LIKELY(tail_ascii_mask != 0)) {
      // Move past the definitively classified bytes that are part of the
      // identifier, and return the complete identifier text.
      i += __builtin_ctz(tail_ascii_mask);
      return text.substr(0, i);
    }
    i += 16;
  }

  return ScanForIdentifierPrefixScalar(text, i);
}
#endif  // CARBON_USE_SIMD && __x86_64__
// Scans the provided text and returns the prefix `StringRef` of contiguous
// identifier characters.
//
// This is a performance-sensitive function and, where profitable, it uses
// vectorized code sequences to optimize its scanning. When modifying, the
// identifier lexing benchmarks should be checked for regressions.
//
// Identifier characters here are currently the ASCII characters
// `[0-9A-Za-z_]`.
//
// TODO: Currently, this code does not implement Carbon's design for Unicode
// characters in identifiers. It does work on UTF-8 code unit sequences, but
// currently considers non-ASCII characters to be non-identifier characters.
// Some work has been done to ensure the hot loop, while optimized, retains
// enough information to add Unicode handling without completely destroying the
// relevant optimizations.
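//
// For example, given the text `foo_bar42 = 1;`, this returns the prefix
// `foo_bar42`.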
static auto ScanForIdentifierPrefix(llvm::StringRef text) -> llvm::StringRef {
  // Dispatch to an architecture-optimized routine where available.
#if CARBON_USE_SIMD && __x86_64__
  return ScanForIdentifierPrefixX86(text);
#elif CARBON_USE_SIMD && __ARM_NEON
  // Somewhat surprisingly, there is basically nothing worth doing in SIMD on
  // Arm to optimize this scan. The Neon SIMD operations end up requiring you
  // to move from the SIMD unit to the scalar unit in the critical path of
  // finding the offset of the end of an identifier. Current ARM cores make the
  // code sequences here quite unpleasant. For example, on Apple M1 and similar
  // cores, the latency is as much as 10 cycles just to extract from the
  // vector. SIMD might be more interesting on Neoverse cores, but it'd be nice
  // to avoid core-specific tunings at this point.
  //
  // If this proves problematic and critical to optimize, the current leading
  // theory is to have the newline searching code also create a bitmask for the
  // entire source file of identifier and non-identifier bytes, and then use
  // the bit-counting instructions here to do a fast scan of that bitmask.
  // However, crossing that bridge will add substantial complexity to the
  // newline scanner, and so currently we just use a boring scalar loop that
  // pipelines well.
#endif
  return ScanForIdentifierPrefixScalar(text, 0);
}
// Implementation of the lexer logic itself.
//
// The design is that lexing can loop over the source buffer, consuming it into
// tokens by calling into this API. This class handles the state and breaks
// down the different lexing steps that may be used. It directly updates the
// provided tokenized buffer with the lexed tokens.
class [[clang::internal_linkage]] TokenizedBuffer::Lexer {
 public:
  // Symbolic result of a lexing action. This indicates whether we successfully
  // lexed a token, or whether other lexing actions should be attempted.
  //
  // While it wraps a simple boolean state, its API both helps make the
  // failures more self-documenting and, by consuming the actual token when one
  // is produced, helps ensure the correct result is returned.
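  //
  // A typical use in a lexing routine looks roughly like the following sketch
  // (`LexSomething` is a hypothetical method, not one defined in this file):
  //
  // ```
  // if (LexResult result = LexSomething(source_text, position)) {
  //   return result;
  // }
  // return LexResult::NoMatch();
  // ```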
  class LexResult {
   public:
    // Consumes (and discards) a valid token to construct a result
    // indicating a token has been produced. Relies on implicit conversions.
    // NOLINTNEXTLINE(google-explicit-constructor)
    LexResult(Token /*discarded_token*/) : LexResult(true) {}

    // Returns a result indicating no token was produced.
    static auto NoMatch() -> LexResult { return LexResult(false); }

    // Tests whether a token was produced by the lexing routine, and whether
    // the lexer can continue forming tokens.
    explicit operator bool() const { return formed_token_; }

   private:
    explicit LexResult(bool formed_token) : formed_token_(formed_token) {}

    bool formed_token_;
  };
  Lexer(SharedValueStores& value_stores, SourceBuffer& source,
        DiagnosticConsumer& consumer)
      : buffer_(value_stores, source),
        consumer_(consumer),
        translator_(&buffer_),
        emitter_(translator_, consumer_),
        token_translator_(&buffer_),
        token_emitter_(token_translator_, consumer_) {}

  // Find all line endings and create the line data structures. Explicitly kept
  // out-of-line because this is a significant loop that is useful to have in
  // the profile, and it doesn't simplify by inlining at all. But because it
  // could be inlined, the compiler would otherwise flatten it into its caller.
  [[gnu::noinline]] auto CreateLines(llvm::StringRef source_text) -> void {
    // We currently use `memchr` here, which is typically well optimized to use
    // SIMD or other techniques significantly faster than byte-wise scanning.
    // We also use carefully selected variables and the `ssize_t` type for
    // performance and code size of this hot loop.
    //
    // TODO: Eventually, we'll likely need to roll our own SIMD-optimized
    // routine here in order to handle CR+LF line endings, as we'll want those
    // to stay on the fast path. We'll also need to detect and diagnose Unicode
    // vertical whitespace. Starting with `memchr` should give us a strong
    // baseline performance target when adding those features.
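    //
    // As a concrete illustration of the line structures built below: for the
    // ten-byte input "fn F() {}\n" we add a line covering offsets [0, 9) and
    // then a final empty line at offset 10. If the input instead lacked the
    // trailing newline, the extra empty line would still be appended so the
    // lexer never has to special-case being on the last line.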
    const char* const text = source_text.data();
    const ssize_t size = source_text.size();
    ssize_t start = 0;
    while (const char* nl = reinterpret_cast<const char*>(
               memchr(&text[start], '\n', size - start))) {
      ssize_t nl_index = nl - text;
      buffer_.AddLine(LineInfo(start, nl_index - start));
      start = nl_index + 1;
    }
    // The last line ends at the end of the file.
    buffer_.AddLine(LineInfo(start, size - start));

    // If the last line wasn't empty, the file ends with an unterminated line.
    // Add an extra blank line so that we never need to handle the special case
    // of being on the last line inside the lexer and needing to not increment
    // to the next line.
    if (start != size) {
      buffer_.AddLine(LineInfo(size, 0));
    }

    // Now that all the infos are allocated, get a fresh pointer to the first
    // info for use while lexing.
    line_index_ = 0;
  }

  auto current_line() -> Line { return Line(line_index_); }
  auto current_line_info() -> LineInfo* {
    return &buffer_.line_infos_[line_index_];
  }

  auto ComputeColumn(ssize_t position) -> int {
    CARBON_DCHECK(position >= current_line_info()->start);
    return position - current_line_info()->start;
  }

  auto NoteWhitespace() -> void {
    buffer_.token_infos_.back().has_trailing_space = true;
  }

  auto SkipHorizontalWhitespace(llvm::StringRef source_text, ssize_t& position)
      -> void {
    // Handle adjacent whitespace quickly. This comes up frequently for example
    // due to indentation. We don't expect *huge* runs, so just use a scalar
    // loop. While still scalar, this avoids repeated table dispatch and
    // marking whitespace.
    while (position < static_cast<ssize_t>(source_text.size()) &&
           (source_text[position] == ' ' || source_text[position] == '\t')) {
      ++position;
    }
  }

  auto LexHorizontalWhitespace(llvm::StringRef source_text, ssize_t& position)
      -> void {
    CARBON_DCHECK(source_text[position] == ' ' ||
                  source_text[position] == '\t');
    NoteWhitespace();
    // Skip runs using an optimized code path.
    SkipHorizontalWhitespace(source_text, position);
  }

  auto LexVerticalWhitespace(llvm::StringRef source_text, ssize_t& position)
      -> void {
    NoteWhitespace();
    ++line_index_;
    auto* line_info = current_line_info();
    ssize_t line_start = line_info->start;
    position = line_start;
    SkipHorizontalWhitespace(source_text, position);
    line_info->indent = position - line_start;
  }

  auto LexCommentOrSlash(llvm::StringRef source_text, ssize_t& position)
      -> void {
    CARBON_DCHECK(source_text[position] == '/');

    // Both comments and slash symbols start with a `/`. We disambiguate with a
    // max-munch rule -- if the next character is another `/` then we lex it as
    // a comment start. If it isn't, then we lex as a slash. We also optimize
    // for the comment case as we expect that to be much more important for
    // overall lexer performance.
    if (LLVM_LIKELY(position + 1 < static_cast<ssize_t>(source_text.size()) &&
                    source_text[position + 1] == '/')) {
      LexComment(source_text, position);
      return;
    }
    // This code path should produce a token; make sure that happens.
    LexResult result = LexSymbolToken(source_text, position);
    CARBON_CHECK(result) << "Failed to form a token!";
  }
  auto LexComment(llvm::StringRef source_text, ssize_t& position) -> void {
    CARBON_DCHECK(source_text.substr(position).startswith("//"));

    // Any comment must be the only non-whitespace on the line.
    const auto* line_info = current_line_info();
    if (LLVM_UNLIKELY(position != line_info->start + line_info->indent)) {
      CARBON_DIAGNOSTIC(TrailingComment, Error,
                        "Trailing comments are not permitted.");
      emitter_.Emit(source_text.begin() + position, TrailingComment);

      // Note that we cannot fall-through here as the logic below doesn't
      // handle trailing comments. For simplicity, we just consume the trailing
      // comment itself and let the normal lexer handle the newline as if there
      // weren't a comment at all.
      position = line_info->start + line_info->length;
      return;
    }

    // The introducer '//' must be followed by whitespace or EOF.
    bool is_valid_after_slashes = true;
    if (position + 2 < static_cast<ssize_t>(source_text.size()) &&
        LLVM_UNLIKELY(!IsSpace(source_text[position + 2]))) {
      CARBON_DIAGNOSTIC(NoWhitespaceAfterCommentIntroducer, Error,
                        "Whitespace is required after '//'.");
      emitter_.Emit(source_text.begin() + position + 2,
                    NoWhitespaceAfterCommentIntroducer);
      // We use this to tweak the lexing of blocks below.
      is_valid_after_slashes = false;
    }

    // Skip over this line.
    ssize_t line_index = line_index_;
    ++line_index;
    position = buffer_.line_infos_[line_index].start;

    // A very common pattern is a long block of comment lines all with the same
    // indent and comment start. We skip these comment blocks in bulk both for
    // speed and to reduce redundant diagnostics if each line has the same
    // erroneous comment start like `//!`.
    //
    // When we have SIMD support this is even more important for speed, as
    // short indents can be scanned extremely quickly with SIMD and we expect
    // these to be the dominant cases.
    //
    // TODO: We should extend this to 32-byte SIMD on platforms with support.
    constexpr int MaxIndent = 13;
    const int indent = line_info->indent;
    const ssize_t first_line_start = line_info->start;
    ssize_t prefix_size = indent + (is_valid_after_slashes ? 3 : 2);
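    // For example, with a two-space indent and a valid `// ` introducer,
    // `prefix_size` is 5 and the bytes compared across lines are the indent
    // plus the introducer (e.g. "  // ").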
    auto skip_to_next_line = [this, indent, &line_index, &position] {
      // We're guaranteed to have a line here even on a comment on the last
      // line as we ensure there is an empty line structure at the end of every
      // file.
      ++line_index;
      auto* next_line_info = &buffer_.line_infos_[line_index];
      next_line_info->indent = indent;
      position = next_line_info->start;
    };
    if (CARBON_USE_SIMD &&
        position + 16 < static_cast<ssize_t>(source_text.size()) &&
        indent <= MaxIndent) {
      // Load a mask based on the amount of text we want to compare.
      auto mask = PrefixMasks[prefix_size];
#if __ARM_NEON
      // Load and mask the prefix of the current line.
      auto prefix = vld1q_u8(reinterpret_cast<const uint8_t*>(
          source_text.data() + first_line_start));
      prefix = vandq_u8(mask, prefix);
      do {
        // Load and mask the prefix of the next line under consideration.
        auto next_prefix = vld1q_u8(
            reinterpret_cast<const uint8_t*>(source_text.data() + position));
        next_prefix = vandq_u8(mask, next_prefix);
        // Compare the two prefixes and if any lanes differ, break.
        auto compare = vceqq_u8(prefix, next_prefix);
        if (vminvq_u8(compare) == 0) {
          break;
        }
        skip_to_next_line();
      } while (position + 16 < static_cast<ssize_t>(source_text.size()));
#elif __x86_64__
      // Use the current line's prefix as the exemplar to compare against.
      // We don't mask here as we will mask when doing the comparison.
      auto prefix = _mm_loadu_si128(reinterpret_cast<const __m128i*>(
          source_text.data() + first_line_start));
      do {
        // Load the prefix of the next line under consideration.
        auto next_prefix = _mm_loadu_si128(
            reinterpret_cast<const __m128i*>(source_text.data() + position));
        // Compute the difference between the next line and our exemplar.
        // Again, we don't mask the difference because the comparison below
        // will be masked.
        auto prefix_diff = _mm_xor_si128(prefix, next_prefix);
        // If we have any differences (non-zero bits) within the mask, we can't
        // skip the next line as well.
        if (!_mm_test_all_zeros(mask, prefix_diff)) {
          break;
        }
        skip_to_next_line();
      } while (position + 16 < static_cast<ssize_t>(source_text.size()));
#else
#error "Unsupported SIMD architecture!"
#endif
      // TODO: If we finish the loop due to the position approaching the end of
      // the buffer we may fail to skip the last line in a comment block that
      // has an invalid initial sequence and thus emit extra diagnostics. We
      // should really fall through to the generic skipping logic, but the code
      // organization will need to change significantly to allow that.
    } else {
      while (position + prefix_size <
                 static_cast<ssize_t>(source_text.size()) &&
             memcmp(source_text.data() + first_line_start,
                    source_text.data() + position, prefix_size) == 0) {
        skip_to_next_line();
      }
    }

    // Now compute the indent of this next line before we finish.
    ssize_t line_start = position;
    SkipHorizontalWhitespace(source_text, position);

    // Now that we're done scanning, update to the latest line index and
    // indent.
    line_index_ = line_index;
    current_line_info()->indent = position - line_start;
  }
  auto LexNumericLiteral(llvm::StringRef source_text, ssize_t& position)
      -> LexResult {
    std::optional<NumericLiteral> literal =
        NumericLiteral::Lex(source_text.substr(position));
    if (!literal) {
      return LexError(source_text, position);
    }

    int int_column = ComputeColumn(position);
    int token_size = literal->text().size();
    position += token_size;

    return VariantMatch(
        literal->ComputeValue(emitter_),
        [&](NumericLiteral::IntegerValue&& value) {
          auto token = buffer_.AddToken({.kind = TokenKind::IntegerLiteral,
                                         .token_line = current_line(),
                                         .column = int_column});
          buffer_.GetTokenInfo(token).integer_id =
              buffer_.value_stores_->integers().Add(std::move(value.value));
          return token;
        },
        [&](NumericLiteral::RealValue&& value) {
          auto token = buffer_.AddToken({.kind = TokenKind::RealLiteral,
                                         .token_line = current_line(),
                                         .column = int_column});
          buffer_.GetTokenInfo(token).real_id =
              buffer_.value_stores_->reals().Add(
                  Real{.mantissa = value.mantissa,
                       .exponent = value.exponent,
                       .is_decimal =
                           (value.radix == NumericLiteral::Radix::Decimal)});
          return token;
        },
        [&](NumericLiteral::UnrecoverableError) {
          auto token = buffer_.AddToken({
              .kind = TokenKind::Error,
              .token_line = current_line(),
              .column = int_column,
              .error_length = token_size,
          });
          return token;
        });
  }
  auto LexStringLiteral(llvm::StringRef source_text, ssize_t& position)
      -> LexResult {
    std::optional<StringLiteral> literal =
        StringLiteral::Lex(source_text.substr(position));
    if (!literal) {
      return LexError(source_text, position);
    }

    Line string_line = current_line();
    int string_column = ComputeColumn(position);
    ssize_t literal_size = literal->text().size();
    position += literal_size;

    // Update line and column information.
    if (literal->is_multi_line()) {
      while (current_line_info()->start + current_line_info()->length <
             position) {
        ++line_index_;
        current_line_info()->indent = string_column;
      }
      // Note that we've updated the current line at this point, and the loop
      // above also set its indent. That remains correct because the last line
      // of the multi-line literal *also* has its indent set to the string's
      // column.
    }

    if (literal->is_terminated()) {
      auto string_id = buffer_.value_stores_->strings().Add(
          literal->ComputeValue(buffer_.allocator_, emitter_));
      auto token = buffer_.AddToken({.kind = TokenKind::StringLiteral,
                                     .token_line = string_line,
                                     .column = string_column,
                                     .string_id = string_id});
      return token;
    } else {
      CARBON_DIAGNOSTIC(UnterminatedString, Error,
                        "String is missing a terminator.");
      emitter_.Emit(literal->text().begin(), UnterminatedString);
      return buffer_.AddToken(
          {.kind = TokenKind::Error,
           .token_line = string_line,
           .column = string_column,
           .error_length = static_cast<int32_t>(literal_size)});
    }
  }
  auto LexOneCharSymbolToken(llvm::StringRef source_text, TokenKind kind,
                             ssize_t& position) -> Token {
    // Verify in a debug build that the incoming token kind is correct.
    CARBON_DCHECK(kind != TokenKind::Error);
    CARBON_DCHECK(kind.fixed_spelling().size() == 1);
    CARBON_DCHECK(source_text[position] == kind.fixed_spelling().front())
        << "Source text starts with '" << source_text[position]
        << "' instead of the spelling '" << kind.fixed_spelling()
        << "' of the incoming token kind '" << kind << "'";

    Token token = buffer_.AddToken({.kind = kind,
                                    .token_line = current_line(),
                                    .column = ComputeColumn(position)});
    ++position;
    return token;
  }

  auto LexOpeningSymbolToken(llvm::StringRef source_text, TokenKind kind,
                             ssize_t& position) -> LexResult {
    Token token = LexOneCharSymbolToken(source_text, kind, position);
    open_groups_.push_back(token);
    return token;
  }

  auto LexClosingSymbolToken(llvm::StringRef source_text, TokenKind kind,
                             ssize_t& position) -> LexResult {
    auto unmatched_error = [&] {
      CARBON_DIAGNOSTIC(
          UnmatchedClosing, Error,
          "Closing symbol without a corresponding opening symbol.");
      emitter_.Emit(source_text.begin() + position, UnmatchedClosing);
      Token token = buffer_.AddToken({.kind = TokenKind::Error,
                                      .token_line = current_line(),
                                      .column = ComputeColumn(position),
                                      .error_length = 1});
      ++position;
      return token;
    };

    // If we have no open groups, this is an error.
    if (LLVM_UNLIKELY(open_groups_.empty())) {
      return unmatched_error();
    }

    Token opening_token = open_groups_.back();
    // Close any invalid open groups first.
    if (LLVM_UNLIKELY(buffer_.GetTokenInfo(opening_token).kind !=
                      kind.opening_symbol())) {
      CloseInvalidOpenGroups(kind, position);
      // This may exhaust the open groups so re-check and re-error if needed.
      if (open_groups_.empty()) {
        return unmatched_error();
      }
      opening_token = open_groups_.back();
      CARBON_DCHECK(buffer_.GetTokenInfo(opening_token).kind ==
                    kind.opening_symbol());
    }
    open_groups_.pop_back();

    // Now that the groups are all matched up, lex the actual token.
    Token token = LexOneCharSymbolToken(source_text, kind, position);

    // Note that it is important to get fresh token infos here as lexing the
    // open token would invalidate any pointers.
    buffer_.GetTokenInfo(opening_token).closing_token = token;
    buffer_.GetTokenInfo(token).opening_token = opening_token;

    return token;
  }

  auto LexSymbolToken(llvm::StringRef source_text, ssize_t& position)
      -> LexResult {
    // One character symbols and grouping symbols are handled with dedicated
    // dispatch. We only lex the multi-character tokens here.
    TokenKind kind = llvm::StringSwitch<TokenKind>(source_text.substr(position))
#define CARBON_SYMBOL_TOKEN(Name, Spelling) \
  .StartsWith(Spelling, TokenKind::Name)
#define CARBON_ONE_CHAR_SYMBOL_TOKEN(TokenName, Spelling)
#define CARBON_OPENING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, ClosingName)
#define CARBON_CLOSING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, OpeningName)
#include "toolchain/lex/token_kind.def"
                         .Default(TokenKind::Error);
    if (kind == TokenKind::Error) {
      return LexError(source_text, position);
    }

    Token token = buffer_.AddToken({.kind = kind,
                                    .token_line = current_line(),
                                    .column = ComputeColumn(position)});
    position += kind.fixed_spelling().size();
    return token;
  }

  // Given a word that has already been lexed, determine whether it is a type
  // literal and if so form the corresponding token.
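  //
  // For example, `i32`, `u64`, and `f128` form type literal tokens here, while
  // words such as `i0` or `id` do not match and fall through to keyword and
  // identifier handling.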
  auto LexWordAsTypeLiteralToken(llvm::StringRef word, int column)
      -> LexResult {
    if (word.size() < 2) {
      // Too short to form one of these tokens.
      return LexResult::NoMatch();
    }
    if (word[1] < '1' || word[1] > '9') {
      // Doesn't start with a valid initial digit.
      return LexResult::NoMatch();
    }

    std::optional<TokenKind> kind;
    switch (word.front()) {
      case 'i':
        kind = TokenKind::IntegerTypeLiteral;
        break;
      case 'u':
        kind = TokenKind::UnsignedIntegerTypeLiteral;
        break;
      case 'f':
        kind = TokenKind::FloatingPointTypeLiteral;
        break;
      default:
        return LexResult::NoMatch();
    }

    llvm::StringRef suffix = word.substr(1);
    if (!CanLexInteger(emitter_, suffix)) {
      return buffer_.AddToken(
          {.kind = TokenKind::Error,
           .token_line = current_line(),
           .column = column,
           .error_length = static_cast<int32_t>(word.size())});
    }
    llvm::APInt suffix_value;
    if (suffix.getAsInteger(10, suffix_value)) {
      return LexResult::NoMatch();
    }

    auto token = buffer_.AddToken(
        {.kind = *kind, .token_line = current_line(), .column = column});
    buffer_.GetTokenInfo(token).integer_id =
        buffer_.value_stores_->integers().Add(std::move(suffix_value));
    return token;
  }

  // Closes all open groups that cannot remain open across a closing symbol.
  // Users may pass `Error` to close all open groups.
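  //
  // For example, if a `)` is lexed while the innermost open group is a `{`,
  // the `{` is diagnosed as mismatched and closed with a recovery `}` token
  // before the `)` is considered further.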
  [[gnu::noinline]] auto CloseInvalidOpenGroups(TokenKind kind,
                                                ssize_t position) -> void {
    CARBON_CHECK(kind.is_closing_symbol() || kind == TokenKind::Error);
    CARBON_CHECK(!open_groups_.empty());

    int column = ComputeColumn(position);

    do {
      Token opening_token = open_groups_.back();
      TokenKind opening_kind = buffer_.GetTokenInfo(opening_token).kind;
      if (kind == opening_kind.closing_symbol()) {
        return;
      }

      open_groups_.pop_back();
      CARBON_DIAGNOSTIC(
          MismatchedClosing, Error,
          "Closing symbol does not match most recent opening symbol.");
      token_emitter_.Emit(opening_token, MismatchedClosing);

      CARBON_CHECK(!buffer_.tokens().empty())
          << "Must have a prior opening token!";
      Token prev_token = buffer_.tokens().end()[-1];

      // TODO: do a smarter backwards scan for where to put the closing
      // token.
      Token closing_token = buffer_.AddToken(
          {.kind = opening_kind.closing_symbol(),
           .has_trailing_space = buffer_.HasTrailingWhitespace(prev_token),
           .is_recovery = true,
           .token_line = current_line(),
           .column = column});
      TokenInfo& opening_token_info = buffer_.GetTokenInfo(opening_token);
      TokenInfo& closing_token_info = buffer_.GetTokenInfo(closing_token);
      opening_token_info.closing_token = closing_token;
      closing_token_info.opening_token = opening_token;
    } while (!open_groups_.empty());
  }

  auto LexKeywordOrIdentifier(llvm::StringRef source_text, ssize_t& position)
      -> LexResult {
    if (static_cast<unsigned char>(source_text[position]) > 0x7F) {
      // TODO: Need to add support for Unicode lexing.
      return LexError(source_text, position);
    }
    CARBON_CHECK(IsAlpha(source_text[position]) ||
                 source_text[position] == '_');

    int column = ComputeColumn(position);

    // Take the valid characters off the front of the source buffer.
    llvm::StringRef identifier_text =
        ScanForIdentifierPrefix(source_text.substr(position));
    CARBON_CHECK(!identifier_text.empty())
        << "Must have at least one character!";
    position += identifier_text.size();

    // Check if the text is a type literal, and if so form such a literal.
    if (LexResult result = LexWordAsTypeLiteralToken(identifier_text, column)) {
      return result;
    }

    // Check if the text matches a keyword token, and if so use that.
    TokenKind kind = llvm::StringSwitch<TokenKind>(identifier_text)
#define CARBON_KEYWORD_TOKEN(Name, Spelling) .Case(Spelling, TokenKind::Name)
#include "toolchain/lex/token_kind.def"
                         .Default(TokenKind::Error);
    if (kind != TokenKind::Error) {
      return buffer_.AddToken(
          {.kind = kind, .token_line = current_line(), .column = column});
    }

    // Otherwise we have a generic identifier.
    return buffer_.AddToken(
        {.kind = TokenKind::Identifier,
         .token_line = current_line(),
         .column = column,
         .string_id = buffer_.value_stores_->strings().Add(identifier_text)});
  }

  auto LexError(llvm::StringRef source_text, ssize_t& position) -> LexResult {
    llvm::StringRef error_text =
        source_text.substr(position).take_while([](char c) {
          if (IsAlnum(c)) {
            return false;
          }
          switch (c) {
            case '_':
            case '\t':
            case '\n':
              return false;
            default:
              break;
          }
          return llvm::StringSwitch<bool>(llvm::StringRef(&c, 1))
#define CARBON_SYMBOL_TOKEN(Name, Spelling) .StartsWith(Spelling, false)
#include "toolchain/lex/token_kind.def"
              .Default(true);
        });
    if (error_text.empty()) {
      // TODO: Reimplement this to use the lexer properly. In the meantime,
      // guarantee that we eat at least one byte.
      error_text = source_text.substr(position, 1);
    }

    auto token = buffer_.AddToken(
        {.kind = TokenKind::Error,
         .token_line = current_line(),
         .column = ComputeColumn(position),
         .error_length = static_cast<int32_t>(error_text.size())});
    CARBON_DIAGNOSTIC(UnrecognizedCharacters, Error,
                      "Encountered unrecognized characters while parsing.");
    emitter_.Emit(error_text.begin(), UnrecognizedCharacters);

    position += error_text.size();
    return token;
  }
  auto LexStartOfFile(llvm::StringRef source_text, ssize_t& position) -> void {
    // Before lexing any source text, add the start-of-file token so that code
    // can assume a non-empty token buffer for the rest of lexing. Note that
    // the start-of-file always has trailing space because it *is* whitespace.
    buffer_.AddToken({.kind = TokenKind::StartOfFile,
                      .has_trailing_space = true,
                      .token_line = current_line(),
                      .column = 0});

    // Also skip any horizontal whitespace and record the indentation of the
    // first line.
    SkipHorizontalWhitespace(source_text, position);
    auto* line_info = current_line_info();
    CARBON_CHECK(line_info->start == 0);
    line_info->indent = position;
  }

  auto LexEndOfFile(llvm::StringRef source_text, ssize_t position) -> void {
    CARBON_CHECK(position == static_cast<ssize_t>(source_text.size()));
    // Check if the last line is empty and is not the first (and only) line. If
    // so, re-pin the last line to be the prior one so that diagnostics and
    // editors can treat newlines as terminators even though we internally
    // handle them as separators in case of a missing newline on the last line.
    // We do this here instead of detecting this when we see the newline to
    // avoid more conditions along that fast path.
    if (position == current_line_info()->start && line_index_ != 0) {
      --line_index_;
      --position;
    } else {
      // Update the line length as this is also the end of a line.
      current_line_info()->length = ComputeColumn(position);
    }

    // The end-of-file token is always considered to be whitespace.
    NoteWhitespace();

    // Close any open groups. We do this after marking whitespace so that it is
    // preserved.
    if (!open_groups_.empty()) {
      CloseInvalidOpenGroups(TokenKind::Error, position);
    }

    buffer_.AddToken({.kind = TokenKind::EndOfFile,
                      .token_line = current_line(),
                      .column = ComputeColumn(position)});
  }
  // We use a collection of static member functions for table-based dispatch to
  // lexer methods. These are named static member functions so that they show
  // up helpfully in profiles and backtraces, but they tend to not contain the
  // interesting logic and simply delegate to the relevant methods. All of
  // their signatures need to be exactly the same however in order to ensure we
  // can build efficient dispatch tables out of them. All of them end by doing
  // a must-tail return call to this routine. It handles continuing the
  // dispatch chain.
  static auto DispatchNext(Lexer& lexer, llvm::StringRef source_text,
                           ssize_t position) -> void {
    if (LLVM_LIKELY(position < static_cast<ssize_t>(source_text.size()))) {
      // The common case is to tail recurse based on the next character. Note
      // that because this is a must-tail return, this cannot fail to tail-call
      // and will not grow the stack. This is in essence a loop with dynamic
      // tail dispatch to the next stage of the loop.
      [[clang::musttail]] return DispatchTable[static_cast<unsigned char>(
          source_text[position])](lexer, source_text, position);
    }

    // When we finish the source text, stop recursing. We also hint this so
    // that the tail-dispatch is optimized as that's essentially the loop
    // back-edge and this is the loop exit.
    lexer.LexEndOfFile(source_text, position);
  }

  // Define a set of dispatch functions that simply forward to a method that
  // lexes a token. This includes validating that an actual token was produced,
  // and continuing the dispatch.
#define CARBON_DISPATCH_LEX_TOKEN(LexMethod) \
  static auto Dispatch##LexMethod(Lexer& lexer, llvm::StringRef source_text, \
                                  ssize_t position) \
      ->void { \
    LexResult result = lexer.LexMethod(source_text, position); \
    CARBON_CHECK(result) << "Failed to form a token!"; \
    [[clang::musttail]] return DispatchNext(lexer, source_text, position); \
  }
  CARBON_DISPATCH_LEX_TOKEN(LexError)
  CARBON_DISPATCH_LEX_TOKEN(LexSymbolToken)
  CARBON_DISPATCH_LEX_TOKEN(LexKeywordOrIdentifier)
  CARBON_DISPATCH_LEX_TOKEN(LexNumericLiteral)
  CARBON_DISPATCH_LEX_TOKEN(LexStringLiteral)
  // Custom dispatch functions that pre-select the symbol token to lex.
#define CARBON_DISPATCH_LEX_SYMBOL_TOKEN(LexMethod) \
  static auto Dispatch##LexMethod##SymbolToken( \
      Lexer& lexer, llvm::StringRef source_text, ssize_t position) \
      ->void { \
    LexResult result = lexer.LexMethod##SymbolToken( \
        source_text, OneCharTokenKindTable[source_text[position]], position); \
    CARBON_CHECK(result) << "Failed to form a token!"; \
    [[clang::musttail]] return DispatchNext(lexer, source_text, position); \
  }
  CARBON_DISPATCH_LEX_SYMBOL_TOKEN(LexOneChar)
  CARBON_DISPATCH_LEX_SYMBOL_TOKEN(LexOpening)
  CARBON_DISPATCH_LEX_SYMBOL_TOKEN(LexClosing)

  // Define a set of non-token dispatch functions that handle things like
  // whitespace and comments.
#define CARBON_DISPATCH_LEX_NON_TOKEN(LexMethod) \
  static auto Dispatch##LexMethod(Lexer& lexer, llvm::StringRef source_text, \
                                  ssize_t position) \
      ->void { \
    lexer.LexMethod(source_text, position); \
    [[clang::musttail]] return DispatchNext(lexer, source_text, position); \
  }
  CARBON_DISPATCH_LEX_NON_TOKEN(LexHorizontalWhitespace)
  CARBON_DISPATCH_LEX_NON_TOKEN(LexVerticalWhitespace)
  CARBON_DISPATCH_LEX_NON_TOKEN(LexCommentOrSlash)
  // The main entry point for dispatching through the lexer's table. This
  // method should always fully consume the source text.
  auto Lex() && -> TokenizedBuffer {
    llvm::StringRef source_text = buffer_.source_->text();

    // First build up our line data structures.
    CreateLines(source_text);

    ssize_t position = 0;
    LexStartOfFile(source_text, position);

    // Manually enter the dispatch loop. This call will tail-recurse through
    // the dispatch table until everything from source_text is consumed.
    DispatchNext(*this, source_text, position);

    if (consumer_.seen_error()) {
      buffer_.has_errors_ = true;
    }

    return std::move(buffer_);
  }

 private:
  using DispatchFunctionT = auto(Lexer& lexer, llvm::StringRef source_text,
                                 ssize_t position) -> void;
  using DispatchTableT = std::array<DispatchFunctionT*, 256>;

  // Build a table of function pointers that we can use to dispatch to the
  // correct lexer routine based on the first byte of source text.
  //
  // While it is tempting to simply use a `switch` on the first byte and
  // dispatch with cases into this, in practice that doesn't produce great
  // code. There seem to be two issues that are the root cause.
  //
  // First, there are lots of different values of bytes that dispatch to a
  // fairly small set of routines, and then some byte values that dispatch
  // differently for each byte. This pattern isn't one that the compiler-based
  // lowering of switches works well with -- it tries to balance all the cases,
  // and in doing so emits several compares and other control flow rather than
  // a simple jump table.
  //
  // Second, with a `case`, it isn't as obvious how to create a single, uniform
  // interface that is effective for *every* byte value, and thus makes for a
  // single consistent table-based dispatch. By forcing these to be function
  // pointers, we also coerce the code to use a strictly homogeneous structure
  // that can form a single dispatch table.
  //
  // These two actually interact -- the second issue is part of what makes the
  // non-table lowering in the first one desirable for many switches and cases.
  //
  // Ultimately, when table-based dispatch is such an important technique, we
  // get better results by taking full control and manually creating the
  // dispatch structures.
  //
  // The functions in this table also use tail-recursion to implement the loop
  // of the lexer. This is based on the technique described more fully for any
  // kind of byte-stream loop structure here:
  // https://blog.reverberate.org/2021/04/21/musttail-efficient-interpreters.html
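  //
  // For example, in the table built below the byte `'7'` dispatches to
  // `DispatchLexNumericLiteral` and the byte `'a'` dispatches to
  // `DispatchLexKeywordOrIdentifier`, each reached through a single indirect
  // call rather than a chain of comparisons.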
  constexpr static auto MakeDispatchTable() -> DispatchTableT {
    DispatchTableT table = {};
    // First set the table entries to dispatch to our error token handler as
    // the base case. Everything valid comes from an override below.
    for (int i = 0; i < 256; ++i) {
      table[i] = &DispatchLexError;
    }

    // Symbols have some special dispatching. First, set the first character of
    // each symbol token spelling to dispatch to the symbol lexer. We don't
    // provide a pre-computed token here, so the symbol lexer will compute the
    // exact symbol token kind. We'll override this with more specific dispatch
    // below.
#define CARBON_SYMBOL_TOKEN(TokenName, Spelling) \
  table[(Spelling)[0]] = &DispatchLexSymbolToken;
#include "toolchain/lex/token_kind.def"

    // Now special-case single-character symbols that are guaranteed to not
    // join with another symbol. These are grouping symbols, terminators,
    // or separators in the grammar and have a good reason to be
    // orthogonal to any other punctuation. We do this separately because this
    // needs to override some of the generic handling above, and provide a
    // custom token.
#define CARBON_ONE_CHAR_SYMBOL_TOKEN(TokenName, Spelling) \
  table[(Spelling)[0]] = &DispatchLexOneCharSymbolToken;
#define CARBON_OPENING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, ClosingName) \
  table[(Spelling)[0]] = &DispatchLexOpeningSymbolToken;
#define CARBON_CLOSING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, OpeningName) \
  table[(Spelling)[0]] = &DispatchLexClosingSymbolToken;
#include "toolchain/lex/token_kind.def"

    // Override the handling for `/` to consider comments as well as a `/`
    // symbol.
    table['/'] = &DispatchLexCommentOrSlash;

    table['_'] = &DispatchLexKeywordOrIdentifier;
    // Note that we don't use `llvm::seq` because this needs to be `constexpr`
    // evaluated.
    for (unsigned char c = 'a'; c <= 'z'; ++c) {
      table[c] = &DispatchLexKeywordOrIdentifier;
    }
    for (unsigned char c = 'A'; c <= 'Z'; ++c) {
      table[c] = &DispatchLexKeywordOrIdentifier;
    }
    // We dispatch all non-ASCII UTF-8 characters to the identifier lexing
    // as whitespace characters should already have been skipped and the
    // only remaining valid Unicode characters would be part of an
    // identifier. That code can either accept or reject.
    for (int i = 0x80; i < 0x100; ++i) {
      table[i] = &DispatchLexKeywordOrIdentifier;
    }

    for (unsigned char c = '0'; c <= '9'; ++c) {
      table[c] = &DispatchLexNumericLiteral;
    }

    table['\''] = &DispatchLexStringLiteral;
    table['"'] = &DispatchLexStringLiteral;
    table['#'] = &DispatchLexStringLiteral;

    table[' '] = &DispatchLexHorizontalWhitespace;
    table['\t'] = &DispatchLexHorizontalWhitespace;
    table['\n'] = &DispatchLexVerticalWhitespace;

    return table;
  };

  static const DispatchTableT DispatchTable;
  static const std::array<TokenKind, 256> OneCharTokenKindTable;

  TokenizedBuffer buffer_;
  ssize_t line_index_;
  llvm::SmallVector<Token> open_groups_;
  ErrorTrackingDiagnosticConsumer consumer_;
  SourceBufferLocationTranslator translator_;
  LexerDiagnosticEmitter emitter_;
  TokenLocationTranslator token_translator_;
  TokenDiagnosticEmitter token_emitter_;
};
  1067. constexpr TokenizedBuffer::Lexer::DispatchTableT
  1068. TokenizedBuffer::Lexer::DispatchTable = MakeDispatchTable();
  1069. constexpr std::array<TokenKind, 256>
  1070. TokenizedBuffer::Lexer::OneCharTokenKindTable = [] {
  1071. std::array<TokenKind, 256> table = {};
  1072. #define CARBON_ONE_CHAR_SYMBOL_TOKEN(TokenName, Spelling) \
  1073. table[(Spelling)[0]] = TokenKind::TokenName;
  1074. #define CARBON_OPENING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, ClosingName) \
  1075. table[(Spelling)[0]] = TokenKind::TokenName;
  1076. #define CARBON_CLOSING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, OpeningName) \
  1077. table[(Spelling)[0]] = TokenKind::TokenName;
  1078. #include "toolchain/lex/token_kind.def"
  1079. return table;
  1080. }();
auto TokenizedBuffer::Lex(SharedValueStores& value_stores, SourceBuffer& source,
                          DiagnosticConsumer& consumer) -> TokenizedBuffer {
  Lexer lexer(value_stores, source, consumer);
  return std::move(lexer).Lex();
}

auto TokenizedBuffer::GetKind(Token token) const -> TokenKind {
  return GetTokenInfo(token).kind;
}

auto TokenizedBuffer::GetLine(Token token) const -> Line {
  return GetTokenInfo(token).token_line;
}

auto TokenizedBuffer::GetLineNumber(Token token) const -> int {
  return GetLineNumber(GetLine(token));
}

auto TokenizedBuffer::GetColumnNumber(Token token) const -> int {
  return GetTokenInfo(token).column + 1;
}
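
// Tokens with a fixed spelling return that spelling directly; other kinds
// recover their text either by re-examining the source buffer or by looking up
// the stored identifier string.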
auto TokenizedBuffer::GetTokenText(Token token) const -> llvm::StringRef {
  const auto& token_info = GetTokenInfo(token);
  llvm::StringRef fixed_spelling = token_info.kind.fixed_spelling();
  if (!fixed_spelling.empty()) {
    return fixed_spelling;
  }

  if (token_info.kind == TokenKind::Error) {
    const auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    return source_->text().substr(token_start, token_info.error_length);
  }

  // Refer back to the source text to preserve oddities like radix or digit
  // separators the author included.
  if (token_info.kind == TokenKind::IntegerLiteral ||
      token_info.kind == TokenKind::RealLiteral) {
    const auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    std::optional<NumericLiteral> relexed_token =
        NumericLiteral::Lex(source_->text().substr(token_start));
    CARBON_CHECK(relexed_token) << "Could not reform numeric literal token.";
    return relexed_token->text();
  }

  // Refer back to the source text to find the original spelling, including
  // escape sequences etc.
  if (token_info.kind == TokenKind::StringLiteral) {
    const auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    std::optional<StringLiteral> relexed_token =
        StringLiteral::Lex(source_->text().substr(token_start));
    CARBON_CHECK(relexed_token) << "Could not reform string literal token.";
    return relexed_token->text();
  }

  // Refer back to the source text to avoid needing to reconstruct the
  // spelling from the size.
  if (token_info.kind.is_sized_type_literal()) {
    const auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    llvm::StringRef suffix =
        source_->text().substr(token_start + 1).take_while(IsDecimalDigit);
    return llvm::StringRef(suffix.data() - 1, suffix.size() + 1);
  }

  if (token_info.kind == TokenKind::StartOfFile ||
      token_info.kind == TokenKind::EndOfFile) {
    return llvm::StringRef();
  }

  CARBON_CHECK(token_info.kind == TokenKind::Identifier) << token_info.kind;
  return value_stores_->strings().Get(token_info.string_id);
}
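
// The typed accessors below each CHECK that the token has the expected kind
// before returning the id stored in its TokenInfo.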
auto TokenizedBuffer::GetIdentifier(Token token) const -> StringId {
  const auto& token_info = GetTokenInfo(token);
  CARBON_CHECK(token_info.kind == TokenKind::Identifier) << token_info.kind;
  return token_info.string_id;
}

auto TokenizedBuffer::GetIntegerLiteral(Token token) const -> IntegerId {
  const auto& token_info = GetTokenInfo(token);
  CARBON_CHECK(token_info.kind == TokenKind::IntegerLiteral) << token_info.kind;
  return token_info.integer_id;
}

auto TokenizedBuffer::GetRealLiteral(Token token) const -> RealId {
  const auto& token_info = GetTokenInfo(token);
  CARBON_CHECK(token_info.kind == TokenKind::RealLiteral) << token_info.kind;
  return token_info.real_id;
}

auto TokenizedBuffer::GetStringLiteral(Token token) const -> StringId {
  const auto& token_info = GetTokenInfo(token);
  CARBON_CHECK(token_info.kind == TokenKind::StringLiteral) << token_info.kind;
  return token_info.string_id;
}

auto TokenizedBuffer::GetTypeLiteralSize(Token token) const
    -> const llvm::APInt& {
  const auto& token_info = GetTokenInfo(token);
  CARBON_CHECK(token_info.kind.is_sized_type_literal()) << token_info.kind;
  return value_stores_->integers().Get(token_info.integer_id);
}

auto TokenizedBuffer::GetMatchedClosingToken(Token opening_token) const
    -> Token {
  const auto& opening_token_info = GetTokenInfo(opening_token);
  CARBON_CHECK(opening_token_info.kind.is_opening_symbol())
      << opening_token_info.kind;
  return opening_token_info.closing_token;
}

auto TokenizedBuffer::GetMatchedOpeningToken(Token closing_token) const
    -> Token {
  const auto& closing_token_info = GetTokenInfo(closing_token);
  CARBON_CHECK(closing_token_info.kind.is_closing_symbol())
      << closing_token_info.kind;
  return closing_token_info.opening_token;
}
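
// A token has leading whitespace if the previous token was marked as having
// trailing whitespace; the first token in the buffer is treated as having
// leading whitespace.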
auto TokenizedBuffer::HasLeadingWhitespace(Token token) const -> bool {
  auto it = TokenIterator(token);
  return it == tokens().begin() || GetTokenInfo(*(it - 1)).has_trailing_space;
}

auto TokenizedBuffer::HasTrailingWhitespace(Token token) const -> bool {
  return GetTokenInfo(token).has_trailing_space;
}

auto TokenizedBuffer::IsRecoveryToken(Token token) const -> bool {
  return GetTokenInfo(token).is_recovery;
}

auto TokenizedBuffer::GetLineNumber(Line line) const -> int {
  return line.index + 1;
}

auto TokenizedBuffer::GetNextLine(Line line) const -> Line {
  Line next(line.index + 1);
  CARBON_DCHECK(static_cast<size_t>(next.index) < line_infos_.size());
  return next;
}

auto TokenizedBuffer::GetPrevLine(Line line) const -> Line {
  CARBON_CHECK(line.index > 0);
  return Line(line.index - 1);
}

auto TokenizedBuffer::GetIndentColumnNumber(Line line) const -> int {
  return GetLineInfo(line).indent + 1;
}

auto TokenizedBuffer::PrintWidths::Widen(const PrintWidths& widths) -> void {
  index = std::max(widths.index, index);
  kind = std::max(widths.kind, kind);
  column = std::max(widths.column, column);
  line = std::max(widths.line, line);
  indent = std::max(widths.indent, indent);
}

// Compute the printed width of a number. When numbers are printed in decimal,
// the number of digits needed is one more than the log-base-10 of the value.
// We handle a value of `zero` explicitly.
//
// This routine requires its argument to be *non-negative*.
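//
// For example (illustrative): ComputeDecimalPrintedWidth(0) == 1,
// ComputeDecimalPrintedWidth(9) == 1, ComputeDecimalPrintedWidth(10) == 2, and
// ComputeDecimalPrintedWidth(999) == 3.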
static auto ComputeDecimalPrintedWidth(int number) -> int {
  CARBON_CHECK(number >= 0) << "Negative numbers are not supported.";
  if (number == 0) {
    return 1;
  }
  return static_cast<int>(std::log10(number)) + 1;
}

auto TokenizedBuffer::GetTokenPrintWidths(Token token) const -> PrintWidths {
  PrintWidths widths = {};
  widths.index = ComputeDecimalPrintedWidth(token_infos_.size());
  widths.kind = GetKind(token).name().size();
  widths.line = ComputeDecimalPrintedWidth(GetLineNumber(token));
  widths.column = ComputeDecimalPrintedWidth(GetColumnNumber(token));
  widths.indent =
      ComputeDecimalPrintedWidth(GetIndentColumnNumber(GetLine(token)));
  return widths;
}
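
// Prints the buffer as a YAML-like token list. Illustrative output (spacing
// depends on the computed widths; `example.carbon` is a placeholder filename):
//
//   - filename: example.carbon
//     tokens: [
//       { index: 0, kind: 'StartOfFile', line: 1, column: 1, indent: 1, spelling: '' },
//       ...
//     ]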
auto TokenizedBuffer::Print(llvm::raw_ostream& output_stream) const -> void {
  if (tokens().begin() == tokens().end()) {
    return;
  }

  output_stream << "- filename: " << source_->filename() << "\n"
                << " tokens: [\n";

  PrintWidths widths = {};
  widths.index = ComputeDecimalPrintedWidth(token_infos_.size());
  for (Token token : tokens()) {
    widths.Widen(GetTokenPrintWidths(token));
  }

  for (Token token : tokens()) {
    PrintToken(output_stream, token, widths);
    output_stream << "\n";
  }

  output_stream << " ]\n";
}

auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream,
                                 Token token) const -> void {
  PrintToken(output_stream, token, {});
}

auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream, Token token,
                                 PrintWidths widths) const -> void {
  widths.Widen(GetTokenPrintWidths(token));
  int token_index = token.index;
  const auto& token_info = GetTokenInfo(token);
  llvm::StringRef token_text = GetTokenText(token);

  // Output the main chunk using one format string. We have to do the
  // justification manually in order to use the dynamically computed widths
  // and get the quotes included.
  output_stream << llvm::formatv(
      " { index: {0}, kind: {1}, line: {2}, column: {3}, indent: {4}, "
      "spelling: '{5}'",
      llvm::format_decimal(token_index, widths.index),
      llvm::right_justify(llvm::formatv("'{0}'", token_info.kind.name()).str(),
                          widths.kind + 2),
      llvm::format_decimal(GetLineNumber(token_info.token_line), widths.line),
      llvm::format_decimal(GetColumnNumber(token), widths.column),
      llvm::format_decimal(GetIndentColumnNumber(token_info.token_line),
                           widths.indent),
      token_text);

  switch (token_info.kind) {
    case TokenKind::Identifier:
      output_stream << ", identifier: " << GetIdentifier(token).index;
      break;
    case TokenKind::IntegerLiteral:
      output_stream << ", value: `";
      value_stores_->integers()
          .Get(GetIntegerLiteral(token))
          .print(output_stream, /*isSigned=*/false);
      output_stream << "`";
      break;
    case TokenKind::RealLiteral:
      output_stream << ", value: `"
                    << value_stores_->reals().Get(GetRealLiteral(token)) << "`";
      break;
    case TokenKind::StringLiteral:
      output_stream << ", value: `"
                    << value_stores_->strings().Get(GetStringLiteral(token))
                    << "`";
      break;
    default:
      if (token_info.kind.is_opening_symbol()) {
        output_stream << ", closing_token: "
                      << GetMatchedClosingToken(token).index;
      } else if (token_info.kind.is_closing_symbol()) {
        output_stream << ", opening_token: "
                      << GetMatchedOpeningToken(token).index;
      }
      break;
  }

  if (token_info.has_trailing_space) {
    output_stream << ", has_trailing_space: true";
  }
  if (token_info.is_recovery) {
    output_stream << ", recovery: true";
  }

  output_stream << " },";
}

auto TokenizedBuffer::GetLineInfo(Line line) -> LineInfo& {
  return line_infos_[line.index];
}

auto TokenizedBuffer::GetLineInfo(Line line) const -> const LineInfo& {
  return line_infos_[line.index];
}

auto TokenizedBuffer::AddLine(LineInfo info) -> Line {
  line_infos_.push_back(info);
  return Line(static_cast<int>(line_infos_.size()) - 1);
}

auto TokenizedBuffer::GetTokenInfo(Token token) -> TokenInfo& {
  return token_infos_[token.index];
}

auto TokenizedBuffer::GetTokenInfo(Token token) const -> const TokenInfo& {
  return token_infos_[token.index];
}

auto TokenizedBuffer::AddToken(TokenInfo info) -> Token {
  token_infos_.push_back(info);
  expected_parse_tree_size_ += info.kind.expected_parse_tree_size();
  return Token(static_cast<int>(token_infos_.size()) - 1);
}

auto TokenIterator::Print(llvm::raw_ostream& output) const -> void {
  output << token_.index;
}
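
// Maps a raw character pointer within the source buffer to a
// DiagnosticLocation. The containing line is found with a binary search
// (`std::partition_point`) over the line start offsets recorded so far, and
// the resulting line and column numbers are converted to 1-based values.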
auto TokenizedBuffer::SourceBufferLocationTranslator::GetLocation(
    const char* loc) -> DiagnosticLocation {
  CARBON_CHECK(StringRefContainsPointer(buffer_->source_->text(), loc))
      << "location not within buffer";
  int64_t offset = loc - buffer_->source_->text().begin();

  // Find the first line starting after the given location. Note that we can't
  // inspect `line.length` here because it is not necessarily correct for the
  // final line during lexing (but will be correct later for the parse tree).
  const auto* line_it = std::partition_point(
      buffer_->line_infos_.begin(), buffer_->line_infos_.end(),
      [offset](const LineInfo& line) { return line.start <= offset; });

  // Step back one line to find the line containing the given position.
  CARBON_CHECK(line_it != buffer_->line_infos_.begin())
      << "location precedes the start of the first line";
  --line_it;
  int line_number = line_it - buffer_->line_infos_.begin();
  int column_number = offset - line_it->start;

  // Start by grabbing the line from the buffer. If the line isn't fully lexed,
  // the length will be npos and the line will be grabbed from the known start
  // to the end of the buffer; we'll then adjust the length.
  llvm::StringRef line =
      buffer_->source_->text().substr(line_it->start, line_it->length);
  if (line_it->length == static_cast<int32_t>(llvm::StringRef::npos)) {
    CARBON_CHECK(line.take_front(column_number).count('\n') == 0)
        << "Currently we assume no unlexed newlines prior to the error column, "
           "but there was one when erroring at "
        << buffer_->source_->filename() << ":" << line_number << ":"
        << column_number;
    // Look for the next newline since we don't know the length. We can start
    // at the column because prior newlines will have been lexed.
    auto end_newline_pos = line.find('\n', column_number);
    if (end_newline_pos != llvm::StringRef::npos) {
      line = line.take_front(end_newline_pos);
    }
  }

  return {.file_name = buffer_->source_->filename(),
          .line = line,
          .line_number = line_number + 1,
          .column_number = column_number + 1};
}

auto TokenLocationTranslator::GetLocation(Token token) -> DiagnosticLocation {
  // Map the token location into a position within the source buffer.
  const auto& token_info = buffer_->GetTokenInfo(token);
  const auto& line_info = buffer_->GetLineInfo(token_info.token_line);
  const char* token_start =
      buffer_->source_->text().begin() + line_info.start + token_info.column;

  // Find the corresponding file location.
  // TODO: Should we somehow indicate in the diagnostic location if this token
  // is a recovery token that doesn't correspond to the original source?
  return TokenizedBuffer::SourceBufferLocationTranslator(buffer_).GetLocation(
      token_start);
}

}  // namespace Carbon::Lex