// Part of the Carbon Language project, under the Apache License v2.0 with LLVM
// Exceptions. See /LICENSE for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

#include "toolchain/lex/tokenized_buffer.h"

#include <algorithm>
#include <array>
#include <cmath>

#include "common/check.h"
#include "common/string_helpers.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "toolchain/lex/character_set.h"
#include "toolchain/lex/helpers.h"
#include "toolchain/lex/numeric_literal.h"
#include "toolchain/lex/string_literal.h"

#if __x86_64__
#include <x86intrin.h>
#endif

namespace Carbon::Lex {

// TODO: Move Overload and VariantMatch somewhere more central.

// Form an overload set from a list of functions. For example:
//
// ```
// auto overloaded = Overload{[](int) {}, [](float) {}};
// ```
template <typename... Fs>
struct Overload : Fs... {
  using Fs::operator()...;
};
template <typename... Fs>
Overload(Fs...) -> Overload<Fs...>;

// Pattern-match against the type of the value stored in the variant `V`. Each
// element of `fs` should be a function that takes one or more of the variant
// values in `V`.
template <typename V, typename... Fs>
auto VariantMatch(V&& v, Fs&&... fs) -> decltype(auto) {
  return std::visit(Overload{std::forward<Fs>(fs)...}, std::forward<V>(v));
}
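
// For example, a hypothetical use over a `std::variant` (illustrative only,
// not part of this file's logic):
//
// ```
// std::variant<int, float> v = 42;
// VariantMatch(
//     v, [](int i) { llvm::errs() << "int: " << i << "\n"; },
//     [](float f) { llvm::errs() << "float: " << f << "\n"; });
// ```
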
// Scans the provided text and returns the prefix `StringRef` of contiguous
// identifier characters.
//
// This is a performance-sensitive function and so uses vectorized code
// sequences to optimize its scanning. When modifying, the identifier lexing
// benchmarks should be checked for regressions.
//
// Identifier characters here are currently the ASCII characters
// `[0-9A-Za-z_]`.
//
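// For example (illustrative): given the text `foo_123 = bar`, this returns
// the prefix `foo_123`.
//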
// TODO: Currently, this code does not implement Carbon's design for Unicode
// characters in identifiers. It does work on UTF-8 code unit sequences, but
// currently considers non-ASCII characters to be non-identifier characters.
// Some work has been done to ensure the hot loop, while optimized, retains
// enough information to add Unicode handling without completely destroying the
// relevant optimizations.
static auto ScanForIdentifierPrefix(llvm::StringRef text) -> llvm::StringRef {
  // A table of booleans that we can use to classify bytes as being valid
  // identifier (or keyword) characters. This is used in the generic,
  // non-vectorized fallback code to scan for the length of an identifier.
  static constexpr std::array<bool, 256> IsIdByteTable = ([]() constexpr {
    std::array<bool, 256> table = {};
    for (char c = '0'; c <= '9'; ++c) {
      table[c] = true;
    }
    for (char c = 'A'; c <= 'Z'; ++c) {
      table[c] = true;
    }
    for (char c = 'a'; c <= 'z'; ++c) {
      table[c] = true;
    }
    table['_'] = true;
    return table;
  })();

#if __x86_64__
  // This code uses a scheme derived from the techniques in Geoff Langdale and
  // Daniel Lemire's work on parsing JSON[1]. Specifically, that paper outlines
  // a technique of using two 4-bit indexed in-register look-up tables (LUTs)
  // to classify bytes in a branchless SIMD code sequence.
  //
  // [1]: https://arxiv.org/pdf/1902.08318.pdf
  //
  // The goal is to get a bit mask classifying different sets of bytes. For
  // each input byte, we first test for a high bit indicating a UTF-8 encoded
  // Unicode character. Otherwise, we want the mask bits to be set with the
  // following logic derived by inspecting the high nibble and low nibble of
  // the input:
  // bit0 = 1 for `_`: high `0x5` and low `0xF`
  // bit1 = 1 for `0-9`: high `0x3` and low `0x0` - `0x9`
  // bit2 = 1 for `A-O` and `a-o`: high `0x4` or `0x6` and low `0x1` - `0xF`
  // bit3 = 1 for `P-Z` and `p-z`: high `0x5` or `0x7` and low `0x0` - `0xA`
  // bit4 = unused
  // bit5 = unused
  // bit6 = unused
  // bit7 = unused
  //
  // No bits set means a definitively non-ID ASCII character.
  //
  // Bits 4-7 remain available if we need to classify more characters.
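  //
  // As a worked example against the tables below: `A` is 0x41, so the high
  // LUT yields 0b0100 for nibble 0x4 and the low LUT yields 0b1110 for nibble
  // 0x1; ANDing gives 0b0100, a non-zero mask marking an identifier byte. `.`
  // is 0x2E, and the high LUT yields 0 for nibble 0x2, so the AND is 0 and
  // the byte is classified as non-identifier.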
  const auto high_lut = _mm_setr_epi8(
      /* __b0=*/0b0000'0000,
      /* __b1=*/0b0000'0000,
      /* __b2=*/0b0000'0000,
      /* __b3=*/0b0000'0010,
      /* __b4=*/0b0000'0100,
      /* __b5=*/0b0000'1001,
      /* __b6=*/0b0000'0100,
      /* __b7=*/0b0000'1000,
      /* __b8=*/0b0000'0000,
      /* __b9=*/0b0000'0000,
      /*__b10=*/0b0000'0000,
      /*__b11=*/0b0000'0000,
      /*__b12=*/0b0000'0000,
      /*__b13=*/0b0000'0000,
      /*__b14=*/0b0000'0000,
      /*__b15=*/0b0000'0000);
  const auto low_lut = _mm_setr_epi8(
      /* __b0=*/0b0000'1010,
      /* __b1=*/0b0000'1110,
      /* __b2=*/0b0000'1110,
      /* __b3=*/0b0000'1110,
      /* __b4=*/0b0000'1110,
      /* __b5=*/0b0000'1110,
      /* __b6=*/0b0000'1110,
      /* __b7=*/0b0000'1110,
      /* __b8=*/0b0000'1110,
      /* __b9=*/0b0000'1110,
      /*__b10=*/0b0000'1100,
      /*__b11=*/0b0000'0100,
      /*__b12=*/0b0000'0100,
      /*__b13=*/0b0000'0100,
      /*__b14=*/0b0000'0100,
      /*__b15=*/0b0000'0101);

  // Use `ssize_t` for performance here as we index memory in a tight loop.
  ssize_t i = 0;
  const ssize_t size = text.size();
  while ((i + 16) <= size) {
    __m128i input =
        _mm_loadu_si128(reinterpret_cast<const __m128i*>(text.data() + i));

    // The high bits of each byte indicate a non-ASCII character encoded using
    // UTF-8. Test those and fall back to the scalar code if present. These
    // bytes will also cause spurious zeros in the LUT results, but we can
    // ignore that because we track them independently here.
#if __SSE4_1__
    if (!_mm_test_all_zeros(_mm_set1_epi8(0x80), input)) {
      break;
    }
#else
    if (_mm_movemask_epi8(input) != 0) {
      break;
    }
#endif

    // Do two LUT lookups and mask the results together to get the results for
    // both low and high nibbles. Note that we don't need to mask out the high
    // bit of input here because we track that above for UTF-8 handling.
    __m128i low_mask = _mm_shuffle_epi8(low_lut, input);
    // Note that the input needs to be masked to only include the high nibble,
    // or we could end up with bit7 set forcing the result to a zero byte.
    __m128i input_high =
        _mm_and_si128(_mm_srli_epi32(input, 4), _mm_set1_epi8(0x0f));
    __m128i high_mask = _mm_shuffle_epi8(high_lut, input_high);
    __m128i mask = _mm_and_si128(low_mask, high_mask);

    // Now compare to find the completely zero bytes.
    __m128i id_byte_mask_vec = _mm_cmpeq_epi8(mask, _mm_setzero_si128());
    int tail_ascii_mask = _mm_movemask_epi8(id_byte_mask_vec);

    // Check if there are bits in the tail mask, which means we found zero
    // bytes and so the end of the identifier. We could do this without
    // materializing the scalar mask on more recent CPUs, but we generally
    // expect the median length we encounter to be <16 characters, and so we
    // avoid the extra instruction in that case and predict this branch to
    // succeed so it is laid out in a reasonable way.
    if (LLVM_LIKELY(tail_ascii_mask != 0)) {
      // Move past the definitively classified bytes that are part of the
      // identifier, and return the complete identifier text.
      i += __builtin_ctz(tail_ascii_mask);
      return text.substr(0, i);
    }
    i += 16;
  }

  // Fall back to a scalar loop. We only end up here when we don't have >=16
  // bytes left to scan or we found a UTF-8 Unicode character.
  // TODO: This assumes all Unicode characters are non-identifiers.
  while (i < size && IsIdByteTable[static_cast<unsigned char>(text[i])]) {
    ++i;
  }

  return text.substr(0, i);
#else
  // TODO: Optimize this with SIMD for other architectures.
  return text.take_while(
      [](char c) { return IsIdByteTable[static_cast<unsigned char>(c)]; });
#endif
}

// Implementation of the lexer logic itself.
//
// The design is that lexing can loop over the source buffer, consuming it into
// tokens by calling into this API. This class handles the state and breaks
// down the different lexing steps that may be used. It directly updates the
// provided tokenized buffer with the lexed tokens.
class [[clang::internal_linkage]] TokenizedBuffer::Lexer {
 public:
  // Symbolic result of a lexing action. This indicates whether we successfully
  // lexed a token, or whether other lexing actions should be attempted.
  //
  // While it wraps a simple boolean state, its API both helps make the
  // failures more self-documenting and, by consuming the actual token when one
  // is produced, helps ensure the correct result is returned.
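  //
  // Illustrative use (mirroring the dispatch functions later in this class):
  //
  // ```
  // LexResult result = lexer.LexSymbolToken(source_text, position);
  // CARBON_CHECK(result) << "Failed to form a token!";
  // ```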
  class LexResult {
   public:
    // Consumes (and discards) a valid token to construct a result indicating
    // that a token has been produced. Relies on implicit conversions.
    // NOLINTNEXTLINE(google-explicit-constructor)
    LexResult(Token /*discarded_token*/) : LexResult(true) {}

    // Returns a result indicating no token was produced.
    static auto NoMatch() -> LexResult { return LexResult(false); }

    // Tests whether a token was produced by the lexing routine, and whether
    // the lexer can continue forming tokens.
    explicit operator bool() const { return formed_token_; }

   private:
    explicit LexResult(bool formed_token) : formed_token_(formed_token) {}

    bool formed_token_;
  };

  Lexer(SourceBuffer& source, DiagnosticConsumer& consumer)
      : buffer_(source),
        consumer_(consumer),
        translator_(&buffer_),
        emitter_(translator_, consumer_),
        token_translator_(&buffer_),
        token_emitter_(token_translator_, consumer_) {}

  // Find all line endings and create the line data structures. Explicitly kept
  // out-of-line because this is a significant loop that is useful to have in
  // the profile, and it doesn't simplify by inlining at all. But because it
  // *can* be inlined, the compiler would otherwise flatten it into its caller.
  [[gnu::noinline]] auto CreateLines(llvm::StringRef source_text) -> void {
    // We currently use `memchr` here, which is typically well optimized to
    // use SIMD or other approaches significantly faster than byte-wise
    // scanning. We also use carefully selected variables and the `ssize_t`
    // type for the performance and code size of this hot loop.
    //
    // TODO: Eventually, we'll likely need to roll our own SIMD-optimized
    // routine here in order to handle CR+LF line endings, as we'll want those
    // to stay on the fast path. We'll also need to detect and diagnose Unicode
    // vertical whitespace. Starting with `memchr` should give us a strong
    // baseline performance target when adding those features.
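    //
    // For example (illustrative): lexing the text "a\nbc" produces two lines,
    // `LineInfo(/*start=*/0, /*length=*/1)` for "a" and
    // `LineInfo(/*start=*/2, /*length=*/2)` for "bc".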
    const char* const text = source_text.data();
    const ssize_t size = source_text.size();
    ssize_t start = 0;
    while (const char* nl = reinterpret_cast<const char*>(
               memchr(&text[start], '\n', size - start))) {
      ssize_t nl_index = nl - text;
      buffer_.AddLine(LineInfo(start, nl_index - start));
      start = nl_index + 1;
    }
    // The last line ends at the end of the file.
    buffer_.AddLine(LineInfo(start, size - start));

    // Now that all the infos are allocated, reset to the first line for use
    // while lexing.
    line_index_ = 0;
  }

  auto current_line() -> Line { return Line(line_index_); }

  auto current_line_info() -> LineInfo* {
    return &buffer_.line_infos_[line_index_];
  }

  auto ComputeColumn(ssize_t position) -> int {
    CARBON_DCHECK(position >= current_line_info()->start);
    return position - current_line_info()->start;
  }

  // Perform the necessary bookkeeping to step past a newline at the current
  // line and column.
  auto HandleNewline(llvm::StringRef source_text, ssize_t& position) -> void {
    CARBON_DCHECK(source_text[position] == '\n');
    CARBON_DCHECK(current_line_info()->start + current_line_info()->length ==
                  position);

    ++position;
    ++line_index_;
    CARBON_DCHECK(current_line_info()->start == position);
    set_indent_ = false;
  }

  auto NoteWhitespace() -> void {
    buffer_.token_infos_.back().has_trailing_space = true;
  }

  auto LexHorizontalWhitespace(llvm::StringRef source_text, ssize_t& position)
      -> void {
    CARBON_DCHECK(source_text[position] == ' ' ||
                  source_text[position] == '\t');
    NoteWhitespace();
    // Handle adjacent whitespace quickly. This comes up frequently, for
    // example, due to indentation. We don't expect *huge* runs, so just use a
    // scalar loop. While still scalar, this avoids repeated table dispatch and
    // marking whitespace. We use `ssize_t` in the loop for performance.
    while (position < static_cast<ssize_t>(source_text.size()) &&
           (source_text[position] == ' ' || source_text[position] == '\t')) {
      ++position;
    }
  }

  auto LexVerticalWhitespace(llvm::StringRef source_text, ssize_t& position)
      -> void {
    NoteWhitespace();
    HandleNewline(source_text, position);
  }

  auto LexCommentOrSlash(llvm::StringRef source_text, ssize_t& position)
      -> void {
    CARBON_DCHECK(source_text[position] == '/');

    // Both comments and slash symbols start with a `/`. We disambiguate with a
    // max-munch rule -- if the next character is another `/` then we lex it as
    // a comment start. If it isn't, then we lex it as a slash.
    if (position + 1 < static_cast<ssize_t>(source_text.size()) &&
        source_text[position + 1] == '/') {
      LexComment(source_text, position);
      return;
    }

    // This code path should produce a token; make sure that happens.
    LexResult result = LexSymbolToken(source_text, position);
    CARBON_CHECK(result) << "Failed to form a token!";
  }

  auto LexComment(llvm::StringRef source_text, ssize_t& position) -> void {
    CARBON_DCHECK(source_text.substr(position).startswith("//"));

    // Any comment must be the only non-whitespace on the line.
    if (set_indent_) {
      CARBON_DIAGNOSTIC(TrailingComment, Error,
                        "Trailing comments are not permitted.");
      emitter_.Emit(source_text.begin() + position, TrailingComment);
    }

    // The introducer `//` must be followed by whitespace or EOF.
    if (position + 2 < static_cast<ssize_t>(source_text.size()) &&
        !IsSpace(source_text[position + 2])) {
      CARBON_DIAGNOSTIC(NoWhitespaceAfterCommentIntroducer, Error,
                        "Whitespace is required after '//'.");
      emitter_.Emit(source_text.begin() + position + 2,
                    NoWhitespaceAfterCommentIntroducer);
    }

    // Use the current line info to jump to the end of the line.
    position = current_line_info()->start + current_line_info()->length;

    // This may be the end of the file, in which case we immediately return.
    if (position == static_cast<ssize_t>(source_text.size())) {
      // Finished lexing.
      return;
    }

    // Otherwise, lex the newline.
    LexVerticalWhitespace(source_text, position);
  }

  auto LexNumericLiteral(llvm::StringRef source_text, ssize_t& position)
      -> LexResult {
    std::optional<NumericLiteral> literal =
        NumericLiteral::Lex(source_text.substr(position));
    if (!literal) {
      return LexError(source_text, position);
    }

    int int_column = ComputeColumn(position);
    int token_size = literal->text().size();
    position += token_size;
    if (!set_indent_) {
      current_line_info()->indent = int_column;
      set_indent_ = true;
    }

    return VariantMatch(
        literal->ComputeValue(emitter_),
        [&](NumericLiteral::IntegerValue&& value) {
          auto token = buffer_.AddToken({.kind = TokenKind::IntegerLiteral,
                                         .token_line = current_line(),
                                         .column = int_column});
          buffer_.GetTokenInfo(token).literal_index =
              buffer_.literal_int_storage_.size();
          buffer_.literal_int_storage_.push_back(std::move(value.value));
          return token;
        },
        [&](NumericLiteral::RealValue&& value) {
          auto token = buffer_.AddToken({.kind = TokenKind::RealLiteral,
                                         .token_line = current_line(),
                                         .column = int_column});
          buffer_.GetTokenInfo(token).literal_index =
              buffer_.literal_int_storage_.size();
          buffer_.literal_int_storage_.push_back(std::move(value.mantissa));
          buffer_.literal_int_storage_.push_back(std::move(value.exponent));
          CARBON_CHECK(buffer_.GetRealLiteral(token).is_decimal ==
                       (value.radix == NumericLiteral::Radix::Decimal));
          return token;
        },
        [&](NumericLiteral::UnrecoverableError) {
          auto token = buffer_.AddToken({
              .kind = TokenKind::Error,
              .token_line = current_line(),
              .column = int_column,
              .error_length = token_size,
          });
          return token;
        });
  }

  auto LexStringLiteral(llvm::StringRef source_text, ssize_t& position)
      -> LexResult {
    std::optional<StringLiteral> literal =
        StringLiteral::Lex(source_text.substr(position));
    if (!literal) {
      return LexError(source_text, position);
    }

    Line string_line = current_line();
    int string_column = ComputeColumn(position);
    ssize_t literal_size = literal->text().size();
    position += literal_size;
    if (!set_indent_) {
      current_line_info()->indent = string_column;
      set_indent_ = true;
    }

    // Update line and column information.
    if (literal->is_multi_line()) {
      while (current_line_info()->start + current_line_info()->length <
             position) {
        ++line_index_;
        current_line_info()->indent = string_column;
      }
      // Note that we've updated the current line at this point, but
      // `set_indent_` is already true from above. That remains correct as the
      // last line of the multi-line literal *also* has its indent set.
    }

    if (literal->is_terminated()) {
      auto token =
          buffer_.AddToken({.kind = TokenKind::StringLiteral,
                            .token_line = string_line,
                            .column = string_column,
                            .literal_index = static_cast<int32_t>(
                                buffer_.literal_string_storage_.size())});
      buffer_.literal_string_storage_.push_back(
          literal->ComputeValue(emitter_));
      return token;
    } else {
      CARBON_DIAGNOSTIC(UnterminatedString, Error,
                        "String is missing a terminator.");
      emitter_.Emit(literal->text().begin(), UnterminatedString);
      return buffer_.AddToken(
          {.kind = TokenKind::Error,
           .token_line = string_line,
           .column = string_column,
           .error_length = static_cast<int32_t>(literal_size)});
    }
  }

  auto LexOneCharSymbolToken(llvm::StringRef source_text, TokenKind kind,
                             ssize_t& position) -> Token {
    // Verify in a debug build that the incoming token kind is correct.
    CARBON_DCHECK(kind != TokenKind::Error);
    CARBON_DCHECK(kind.fixed_spelling().size() == 1);
    CARBON_DCHECK(source_text[position] == kind.fixed_spelling().front())
        << "Source text starts with '" << source_text[position]
        << "' instead of the spelling '" << kind.fixed_spelling()
        << "' of the incoming token kind '" << kind << "'";

    int column = ComputeColumn(position);
    if (!set_indent_) {
      current_line_info()->indent = column;
      set_indent_ = true;
    }
    Token token = buffer_.AddToken(
        {.kind = kind, .token_line = current_line(), .column = column});
    ++position;
    return token;
  }

  auto LexOpeningSymbolToken(llvm::StringRef source_text, TokenKind kind,
                             ssize_t& position) -> LexResult {
    Token token = LexOneCharSymbolToken(source_text, kind, position);
    open_groups_.push_back(token);
    return token;
  }

  auto LexClosingSymbolToken(llvm::StringRef source_text, TokenKind kind,
                             ssize_t& position) -> LexResult {
    auto unmatched_error = [&] {
      int column = ComputeColumn(position);
      // We still need to set the indent here, and can't use the common code
      // used by the non-error path.
      if (!set_indent_) {
        current_line_info()->indent = column;
        set_indent_ = true;
      }
      CARBON_DIAGNOSTIC(
          UnmatchedClosing, Error,
          "Closing symbol without a corresponding opening symbol.");
      emitter_.Emit(source_text.begin() + position, UnmatchedClosing);
      Token token = buffer_.AddToken({.kind = TokenKind::Error,
                                      .token_line = current_line(),
                                      .column = column,
                                      .error_length = 1});
      ++position;
      return token;
    };

    // If we have no open groups, this is an error.
    if (LLVM_UNLIKELY(open_groups_.empty())) {
      return unmatched_error();
    }

    Token opening_token = open_groups_.back();
    // Close any invalid open groups first.
    if (LLVM_UNLIKELY(buffer_.GetTokenInfo(opening_token).kind !=
                      kind.opening_symbol())) {
      CloseInvalidOpenGroups(kind, position);
      // This may exhaust the open groups, so re-check and re-error if needed.
      if (open_groups_.empty()) {
        return unmatched_error();
      }
      opening_token = open_groups_.back();
      CARBON_DCHECK(buffer_.GetTokenInfo(opening_token).kind ==
                    kind.opening_symbol());
    }
    open_groups_.pop_back();

    // Now that the groups are all matched up, lex the actual token.
    Token token = LexOneCharSymbolToken(source_text, kind, position);

    // Note that it is important to get fresh token infos here, as lexing the
    // closing token above would invalidate any prior pointers.
    buffer_.GetTokenInfo(opening_token).closing_token = token;
    buffer_.GetTokenInfo(token).opening_token = opening_token;
    return token;
  }

  auto LexSymbolToken(llvm::StringRef source_text, ssize_t& position)
      -> LexResult {
    // One-character symbols and grouping symbols are handled with dedicated
    // dispatch. We only lex the multi-character tokens here.
    TokenKind kind =
        llvm::StringSwitch<TokenKind>(source_text.substr(position))
#define CARBON_SYMBOL_TOKEN(Name, Spelling) \
  .StartsWith(Spelling, TokenKind::Name)
#define CARBON_ONE_CHAR_SYMBOL_TOKEN(TokenName, Spelling)
#define CARBON_OPENING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, ClosingName)
#define CARBON_CLOSING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, OpeningName)
#include "toolchain/lex/token_kind.def"
            .Default(TokenKind::Error);
    if (kind == TokenKind::Error) {
      return LexError(source_text, position);
    }

    int column = ComputeColumn(position);
    if (!set_indent_) {
      current_line_info()->indent = column;
      set_indent_ = true;
    }
    Token token = buffer_.AddToken(
        {.kind = kind, .token_line = current_line(), .column = column});
    position += kind.fixed_spelling().size();
    return token;
  }
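
  // Note on the `token_kind.def` includes used above and elsewhere in this
  // file: it is an X-macro file. Each `CARBON_*_TOKEN` macro defined before
  // the `#include` expands once per token kind, and the `.def` file takes
  // care of undefining the macros afterwards. A minimal sketch of the idiom
  // with hypothetical names:
  //
  // ```
  // // kinds.def
  // MY_KIND(Foo, "foo")
  // MY_KIND(Bar, "bar")
  // #undef MY_KIND
  //
  // // Use site: expands to `.Case("foo", Kind::Foo).Case("bar", Kind::Bar)`.
  // #define MY_KIND(Name, Spelling) .Case(Spelling, Kind::Name)
  // #include "kinds.def"
  // ```
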
  // Given a word that has already been lexed, determine whether it is a type
  // literal and if so form the corresponding token.
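  //
  // For example (illustrative): `i32` forms an `IntegerTypeLiteral` token
  // whose stored size value is 32, while `x32` and `i0` produce no match.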
  auto LexWordAsTypeLiteralToken(llvm::StringRef word, int column)
      -> LexResult {
    if (word.size() < 2) {
      // Too short to form one of these tokens.
      return LexResult::NoMatch();
    }
    if (word[1] < '1' || word[1] > '9') {
      // Doesn't start with a valid initial digit.
      return LexResult::NoMatch();
    }

    std::optional<TokenKind> kind;
    switch (word.front()) {
      case 'i':
        kind = TokenKind::IntegerTypeLiteral;
        break;
      case 'u':
        kind = TokenKind::UnsignedIntegerTypeLiteral;
        break;
      case 'f':
        kind = TokenKind::FloatingPointTypeLiteral;
        break;
      default:
        return LexResult::NoMatch();
    }

    llvm::StringRef suffix = word.substr(1);
    if (!CanLexInteger(emitter_, suffix)) {
      return buffer_.AddToken(
          {.kind = TokenKind::Error,
           .token_line = current_line(),
           .column = column,
           .error_length = static_cast<int32_t>(word.size())});
    }
    llvm::APInt suffix_value;
    if (suffix.getAsInteger(10, suffix_value)) {
      return LexResult::NoMatch();
    }

    auto token = buffer_.AddToken(
        {.kind = *kind, .token_line = current_line(), .column = column});
    buffer_.GetTokenInfo(token).literal_index =
        buffer_.literal_int_storage_.size();
    buffer_.literal_int_storage_.push_back(std::move(suffix_value));
    return token;
  }

  // Closes all open groups that cannot remain open across a closing symbol.
  // Users may pass `Error` to close all open groups.
  [[gnu::noinline]] auto CloseInvalidOpenGroups(TokenKind kind,
                                                ssize_t position) -> void {
    CARBON_CHECK(kind.is_closing_symbol() || kind == TokenKind::Error);
    CARBON_CHECK(!open_groups_.empty());
    int column = ComputeColumn(position);

    do {
      Token opening_token = open_groups_.back();
      TokenKind opening_kind = buffer_.GetTokenInfo(opening_token).kind;
      if (kind == opening_kind.closing_symbol()) {
        return;
      }
      open_groups_.pop_back();

      CARBON_DIAGNOSTIC(
          MismatchedClosing, Error,
          "Closing symbol does not match most recent opening symbol.");
      token_emitter_.Emit(opening_token, MismatchedClosing);

      CARBON_CHECK(!buffer_.tokens().empty())
          << "Must have a prior opening token!";
      Token prev_token = buffer_.tokens().end()[-1];

      // TODO: do a smarter backwards scan for where to put the closing
      // token.
      Token closing_token = buffer_.AddToken(
          {.kind = opening_kind.closing_symbol(),
           .has_trailing_space = buffer_.HasTrailingWhitespace(prev_token),
           .is_recovery = true,
           .token_line = current_line(),
           .column = column});
      TokenInfo& opening_token_info = buffer_.GetTokenInfo(opening_token);
      TokenInfo& closing_token_info = buffer_.GetTokenInfo(closing_token);
      opening_token_info.closing_token = closing_token;
      closing_token_info.opening_token = opening_token;
    } while (!open_groups_.empty());
  }

  auto GetOrCreateIdentifier(llvm::StringRef text) -> Identifier {
    auto insert_result = buffer_.identifier_map_.insert(
        {text, Identifier(buffer_.identifier_infos_.size())});
    if (insert_result.second) {
      buffer_.identifier_infos_.push_back({text});
    }
    return insert_result.first->second;
  }

  auto LexKeywordOrIdentifier(llvm::StringRef source_text, ssize_t& position)
      -> LexResult {
    if (static_cast<unsigned char>(source_text[position]) > 0x7F) {
      // TODO: Need to add support for Unicode lexing.
      return LexError(source_text, position);
    }
    CARBON_CHECK(IsAlpha(source_text[position]) ||
                 source_text[position] == '_');

    int column = ComputeColumn(position);
    if (!set_indent_) {
      current_line_info()->indent = column;
      set_indent_ = true;
    }

    // Take the valid characters off the front of the source buffer.
    llvm::StringRef identifier_text =
        ScanForIdentifierPrefix(source_text.substr(position));
    CARBON_CHECK(!identifier_text.empty())
        << "Must have at least one character!";
    position += identifier_text.size();

    // Check if the text is a type literal, and if so form such a literal.
    if (LexResult result =
            LexWordAsTypeLiteralToken(identifier_text, column)) {
      return result;
    }

    // Check if the text matches a keyword token, and if so use that.
    TokenKind kind = llvm::StringSwitch<TokenKind>(identifier_text)
#define CARBON_KEYWORD_TOKEN(Name, Spelling) .Case(Spelling, TokenKind::Name)
#include "toolchain/lex/token_kind.def"
                         .Default(TokenKind::Error);
    if (kind != TokenKind::Error) {
      return buffer_.AddToken(
          {.kind = kind, .token_line = current_line(), .column = column});
    }

    // Otherwise we have a generic identifier.
    return buffer_.AddToken({.kind = TokenKind::Identifier,
                             .token_line = current_line(),
                             .column = column,
                             .id = GetOrCreateIdentifier(identifier_text)});
  }

  auto LexError(llvm::StringRef source_text, ssize_t& position) -> LexResult {
    llvm::StringRef error_text =
        source_text.substr(position).take_while([](char c) {
          if (IsAlnum(c)) {
            return false;
          }
          switch (c) {
            case '_':
            case '\t':
            case '\n':
              return false;
            default:
              break;
          }
          return llvm::StringSwitch<bool>(llvm::StringRef(&c, 1))
#define CARBON_SYMBOL_TOKEN(Name, Spelling) .StartsWith(Spelling, false)
#include "toolchain/lex/token_kind.def"
              .Default(true);
        });
    if (error_text.empty()) {
      // TODO: Reimplement this to use the lexer properly. In the meantime,
      // guarantee that we eat at least one byte.
      error_text = source_text.substr(position, 1);
    }

    auto token = buffer_.AddToken(
        {.kind = TokenKind::Error,
         .token_line = current_line(),
         .column = ComputeColumn(position),
         .error_length = static_cast<int32_t>(error_text.size())});
    CARBON_DIAGNOSTIC(UnrecognizedCharacters, Error,
                      "Encountered unrecognized characters while parsing.");
    emitter_.Emit(error_text.begin(), UnrecognizedCharacters);

    position += error_text.size();
    return token;
  }

  auto LexStartOfFile(llvm::StringRef /*source_text*/) -> void {
    // Before lexing any source text, add the start-of-file token so that code
    // can assume a non-empty token buffer for the rest of lexing. Note that
    // the start-of-file always has trailing space because it *is* whitespace.
    buffer_.AddToken({.kind = TokenKind::StartOfFile,
                      .has_trailing_space = true,
                      .token_line = current_line(),
                      .column = 0});
  }

  auto LexEndOfFile(llvm::StringRef source_text, ssize_t position) -> void {
    CARBON_CHECK(position == static_cast<ssize_t>(source_text.size()));
    // Check if the last line is empty and isn't also the first (and only)
    // line. If so, re-pin the last line to be the prior one so that
    // diagnostics and editors can treat newlines as terminators even though
    // we internally handle them as separators in case of a missing newline on
    // the last line. We do this here instead of detecting this when we see
    // the newline to avoid more conditions along that fast path.
    if (position == current_line_info()->start && line_index_ != 0) {
      --line_index_;
      --position;
    } else {
      // Update the line length as this is also the end of a line.
      current_line_info()->length = ComputeColumn(position);
    }

    // The end-of-file token is always considered to be whitespace.
    NoteWhitespace();

    // Close any open groups. We do this after marking whitespace, as that
    // marking will be preserved.
    if (!open_groups_.empty()) {
      CloseInvalidOpenGroups(TokenKind::Error, position);
    }

    buffer_.AddToken({.kind = TokenKind::EndOfFile,
                      .token_line = current_line(),
                      .column = ComputeColumn(position)});
  }

  // We use a collection of static member functions for table-based dispatch to
  // lexer methods. These are named static member functions so that they show
  // up helpfully in profiles and backtraces, but they tend not to contain the
  // interesting logic and simply delegate to the relevant methods. All of
  // their signatures need to be exactly the same, however, in order to ensure
  // we can build efficient dispatch tables out of them. All of them end by
  // doing a must-tail return call to this routine. It handles continuing the
  // dispatch chain.
  static auto DispatchNext(Lexer& lexer, llvm::StringRef source_text,
                           ssize_t position) -> void {
    if (LLVM_LIKELY(position < static_cast<ssize_t>(source_text.size()))) {
      // The common case is to tail recurse based on the next character. Note
      // that because this is a must-tail return, this cannot fail to tail-call
      // and will not grow the stack. This is in essence a loop with dynamic
      // tail dispatch to the next stage of the loop.
      [[clang::musttail]] return DispatchTable[static_cast<unsigned char>(
          source_text[position])](lexer, source_text, position);
    }

    // When we finish the source text, stop recursing. We also hint this so
    // that the tail-dispatch is optimized, as that's essentially the loop
    // back-edge and this is the loop exit.
    lexer.LexEndOfFile(source_text, position);
  }

  // Define a set of dispatch functions that simply forward to a method that
  // lexes a token. This includes validating that an actual token was produced,
  // and continuing the dispatch.
#define CARBON_DISPATCH_LEX_TOKEN(LexMethod)                                  \
  static auto Dispatch##LexMethod(Lexer& lexer, llvm::StringRef source_text, \
                                  ssize_t position)                          \
      ->void {                                                               \
    LexResult result = lexer.LexMethod(source_text, position);               \
    CARBON_CHECK(result) << "Failed to form a token!";                       \
    [[clang::musttail]] return DispatchNext(lexer, source_text, position);   \
  }
  CARBON_DISPATCH_LEX_TOKEN(LexError)
  CARBON_DISPATCH_LEX_TOKEN(LexSymbolToken)
  CARBON_DISPATCH_LEX_TOKEN(LexKeywordOrIdentifier)
  CARBON_DISPATCH_LEX_TOKEN(LexNumericLiteral)
  CARBON_DISPATCH_LEX_TOKEN(LexStringLiteral)

  // Custom dispatch functions that pre-select the symbol token to lex.
#define CARBON_DISPATCH_LEX_SYMBOL_TOKEN(LexMethod)                           \
  static auto Dispatch##LexMethod##SymbolToken(                               \
      Lexer& lexer, llvm::StringRef source_text, ssize_t position)            \
      ->void {                                                                \
    LexResult result = lexer.LexMethod##SymbolToken(                          \
        source_text, OneCharTokenKindTable[source_text[position]], position); \
    CARBON_CHECK(result) << "Failed to form a token!";                        \
    [[clang::musttail]] return DispatchNext(lexer, source_text, position);    \
  }
  CARBON_DISPATCH_LEX_SYMBOL_TOKEN(LexOneChar)
  CARBON_DISPATCH_LEX_SYMBOL_TOKEN(LexOpening)
  CARBON_DISPATCH_LEX_SYMBOL_TOKEN(LexClosing)

  // Define a set of non-token dispatch functions that handle things like
  // whitespace and comments.
#define CARBON_DISPATCH_LEX_NON_TOKEN(LexMethod)                              \
  static auto Dispatch##LexMethod(Lexer& lexer, llvm::StringRef source_text, \
                                  ssize_t position)                          \
      ->void {                                                               \
    lexer.LexMethod(source_text, position);                                  \
    [[clang::musttail]] return DispatchNext(lexer, source_text, position);   \
  }
  CARBON_DISPATCH_LEX_NON_TOKEN(LexHorizontalWhitespace)
  CARBON_DISPATCH_LEX_NON_TOKEN(LexVerticalWhitespace)
  CARBON_DISPATCH_LEX_NON_TOKEN(LexCommentOrSlash)

  // The main entry point for dispatching through the lexer's table. This
  // method should always fully consume the source text.
  auto Lex() && -> TokenizedBuffer {
    llvm::StringRef source_text = buffer_.source_->text();

    // First build up our line data structures.
    CreateLines(source_text);

    LexStartOfFile(source_text);

    // Manually enter the dispatch loop. This call will tail-recurse through
    // the dispatch table until everything from `source_text` is consumed.
    DispatchNext(*this, source_text, /*position=*/0);

    if (consumer_.seen_error()) {
      buffer_.has_errors_ = true;
    }

    return std::move(buffer_);
  }

 private:
  using DispatchFunctionT = auto(Lexer& lexer, llvm::StringRef source_text,
                                 ssize_t position) -> void;
  using DispatchTableT = std::array<DispatchFunctionT*, 256>;

  // Build a table of function pointers that we can use to dispatch to the
  // correct lexer routine based on the first byte of source text.
  //
  // While it is tempting to simply use a `switch` on the first byte and
  // dispatch with cases into this, in practice that doesn't produce great
  // code. There seem to be two issues that are the root cause.
  //
  // First, there are lots of different values of bytes that dispatch to a
  // fairly small set of routines, and then some byte values that dispatch
  // differently for each byte. This pattern isn't one that the compiler-based
  // lowering of switches works well with -- it tries to balance all the cases,
  // and in doing so emits several compares and other control flow rather than
  // a simple jump table.
  //
  // Second, with a `case`, it isn't as obvious how to create a single, uniform
  // interface that is effective for *every* byte value, and thus makes for a
  // single consistent table-based dispatch. By forcing these to be function
  // pointers, we also coerce the code to use a strictly homogeneous structure
  // that can form a single dispatch table.
  //
  // These two actually interact -- the second issue is part of what makes the
  // non-table lowering in the first one desirable for many switches and cases.
  //
  // Ultimately, when table-based dispatch is such an important technique, we
  // get better results by taking full control and manually creating the
  // dispatch structures.
  //
  // The functions in this table also use tail-recursion to implement the loop
  // of the lexer. This is based on the technique described more fully for any
  // kind of byte-stream loop structure here:
  // https://blog.reverberate.org/2021/04/21/musttail-efficient-interpreters.html
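  //
  // As a minimal illustration of that pattern in isolation (hypothetical
  // names, not this lexer's actual types):
  //
  // ```
  // using Handler = auto(const char* text, ssize_t position) -> void;
  // extern Handler* const kTable[256];
  // auto Next(const char* text, ssize_t position) -> void {
  //   // Each handler consumes some input and tail-calls back here.
  //   [[clang::musttail]] return kTable[static_cast<unsigned char>(
  //       text[position])](text, position);
  // }
  // ```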
  constexpr static auto MakeDispatchTable() -> DispatchTableT {
    DispatchTableT table = {};
    // First set the table entries to dispatch to our error token handler as
    // the base case. Everything valid comes from an override below.
    for (int i = 0; i < 256; ++i) {
      table[i] = &DispatchLexError;
    }

    // Symbols have some special dispatching. First, set the first character of
    // each symbol token spelling to dispatch to the symbol lexer. We don't
    // provide a pre-computed token here, so the symbol lexer will compute the
    // exact symbol token kind. We'll override this with more specific dispatch
    // below.
#define CARBON_SYMBOL_TOKEN(TokenName, Spelling) \
  table[(Spelling)[0]] = &DispatchLexSymbolToken;
#include "toolchain/lex/token_kind.def"

    // Now special case single-character symbols that are guaranteed not to
    // join with another symbol. These are grouping symbols, terminators, or
    // separators in the grammar and have a good reason to be orthogonal to
    // any other punctuation. We do this separately because this needs to
    // override some of the generic handling above, and provide a custom
    // token.
#define CARBON_ONE_CHAR_SYMBOL_TOKEN(TokenName, Spelling) \
  table[(Spelling)[0]] = &DispatchLexOneCharSymbolToken;
#define CARBON_OPENING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, ClosingName) \
  table[(Spelling)[0]] = &DispatchLexOpeningSymbolToken;
#define CARBON_CLOSING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, OpeningName) \
  table[(Spelling)[0]] = &DispatchLexClosingSymbolToken;
#include "toolchain/lex/token_kind.def"

    // Override the handling for `/` to consider comments as well as a `/`
    // symbol.
    table['/'] = &DispatchLexCommentOrSlash;

    table['_'] = &DispatchLexKeywordOrIdentifier;
    // Note that we don't use `llvm::seq` because this needs to be `constexpr`
    // evaluated.
    for (unsigned char c = 'a'; c <= 'z'; ++c) {
      table[c] = &DispatchLexKeywordOrIdentifier;
    }
    for (unsigned char c = 'A'; c <= 'Z'; ++c) {
      table[c] = &DispatchLexKeywordOrIdentifier;
    }
    // We dispatch all non-ASCII UTF-8 characters to the identifier lexing, as
    // whitespace characters should already have been skipped and the only
    // remaining valid Unicode characters would be part of an identifier. That
    // code can either accept or reject them.
    for (int i = 0x80; i < 0x100; ++i) {
      table[i] = &DispatchLexKeywordOrIdentifier;
    }

    for (unsigned char c = '0'; c <= '9'; ++c) {
      table[c] = &DispatchLexNumericLiteral;
    }

    table['\''] = &DispatchLexStringLiteral;
    table['"'] = &DispatchLexStringLiteral;
    table['#'] = &DispatchLexStringLiteral;

    table[' '] = &DispatchLexHorizontalWhitespace;
    table['\t'] = &DispatchLexHorizontalWhitespace;
    table['\n'] = &DispatchLexVerticalWhitespace;

    return table;
  }

  static const DispatchTableT DispatchTable;

  static const std::array<TokenKind, 256> OneCharTokenKindTable;

  TokenizedBuffer buffer_;

  ssize_t line_index_;

  bool set_indent_ = false;

  llvm::SmallVector<Token> open_groups_;

  ErrorTrackingDiagnosticConsumer consumer_;

  SourceBufferLocationTranslator translator_;
  LexerDiagnosticEmitter emitter_;

  TokenLocationTranslator token_translator_;
  TokenDiagnosticEmitter token_emitter_;
};

constexpr TokenizedBuffer::Lexer::DispatchTableT
    TokenizedBuffer::Lexer::DispatchTable = MakeDispatchTable();

constexpr std::array<TokenKind, 256>
    TokenizedBuffer::Lexer::OneCharTokenKindTable = [] {
      std::array<TokenKind, 256> table = {};
#define CARBON_ONE_CHAR_SYMBOL_TOKEN(TokenName, Spelling) \
  table[(Spelling)[0]] = TokenKind::TokenName;
#define CARBON_OPENING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, ClosingName) \
  table[(Spelling)[0]] = TokenKind::TokenName;
#define CARBON_CLOSING_GROUP_SYMBOL_TOKEN(TokenName, Spelling, OpeningName) \
  table[(Spelling)[0]] = TokenKind::TokenName;
#include "toolchain/lex/token_kind.def"
      return table;
    }();

auto TokenizedBuffer::Lex(SourceBuffer& source, DiagnosticConsumer& consumer)
    -> TokenizedBuffer {
  Lexer lexer(source, consumer);
  return std::move(lexer).Lex();
}

auto TokenizedBuffer::GetKind(Token token) const -> TokenKind {
  return GetTokenInfo(token).kind;
}

auto TokenizedBuffer::GetLine(Token token) const -> Line {
  return GetTokenInfo(token).token_line;
}

auto TokenizedBuffer::GetLineNumber(Token token) const -> int {
  return GetLineNumber(GetLine(token));
}

auto TokenizedBuffer::GetColumnNumber(Token token) const -> int {
  return GetTokenInfo(token).column + 1;
}

auto TokenizedBuffer::GetTokenText(Token token) const -> llvm::StringRef {
  const auto& token_info = GetTokenInfo(token);
  llvm::StringRef fixed_spelling = token_info.kind.fixed_spelling();
  if (!fixed_spelling.empty()) {
    return fixed_spelling;
  }

  if (token_info.kind == TokenKind::Error) {
    const auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    return source_->text().substr(token_start, token_info.error_length);
  }

  // Refer back to the source text to preserve oddities like radix or digit
  // separators the author included.
  if (token_info.kind == TokenKind::IntegerLiteral ||
      token_info.kind == TokenKind::RealLiteral) {
    const auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    std::optional<NumericLiteral> relexed_token =
        NumericLiteral::Lex(source_->text().substr(token_start));
    CARBON_CHECK(relexed_token) << "Could not reform numeric literal token.";
    return relexed_token->text();
  }

  // Refer back to the source text to find the original spelling, including
  // escape sequences etc.
  if (token_info.kind == TokenKind::StringLiteral) {
    const auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    std::optional<StringLiteral> relexed_token =
        StringLiteral::Lex(source_->text().substr(token_start));
    CARBON_CHECK(relexed_token) << "Could not reform string literal token.";
    return relexed_token->text();
  }

  // Refer back to the source text to avoid needing to reconstruct the
  // spelling from the size.
  if (token_info.kind.is_sized_type_literal()) {
    const auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    llvm::StringRef suffix =
        source_->text().substr(token_start + 1).take_while(IsDecimalDigit);
    return llvm::StringRef(suffix.data() - 1, suffix.size() + 1);
  }

  if (token_info.kind == TokenKind::StartOfFile ||
      token_info.kind == TokenKind::EndOfFile) {
    return llvm::StringRef();
  }

  CARBON_CHECK(token_info.kind == TokenKind::Identifier) << token_info.kind;
  return GetIdentifierText(token_info.id);
}

auto TokenizedBuffer::GetIdentifier(Token token) const -> Identifier {
  const auto& token_info = GetTokenInfo(token);
  CARBON_CHECK(token_info.kind == TokenKind::Identifier) << token_info.kind;
  return token_info.id;
}

auto TokenizedBuffer::GetIntegerLiteral(Token token) const
    -> const llvm::APInt& {
  const auto& token_info = GetTokenInfo(token);
  CARBON_CHECK(token_info.kind == TokenKind::IntegerLiteral)
      << token_info.kind;
  return literal_int_storage_[token_info.literal_index];
}

auto TokenizedBuffer::GetRealLiteral(Token token) const -> RealLiteralValue {
  const auto& token_info = GetTokenInfo(token);
  CARBON_CHECK(token_info.kind == TokenKind::RealLiteral) << token_info.kind;

  // Note that every real literal is at least three characters long, so we can
  // safely look at the second character to determine whether we have a
  // decimal or hexadecimal literal.
  const auto& line_info = GetLineInfo(token_info.token_line);
  int64_t token_start = line_info.start + token_info.column;
  char second_char = source_->text()[token_start + 1];
  bool is_decimal = second_char != 'x' && second_char != 'b';

  return {.mantissa = literal_int_storage_[token_info.literal_index],
          .exponent = literal_int_storage_[token_info.literal_index + 1],
          .is_decimal = is_decimal};
}

auto TokenizedBuffer::GetStringLiteral(Token token) const -> llvm::StringRef {
  const auto& token_info = GetTokenInfo(token);
  CARBON_CHECK(token_info.kind == TokenKind::StringLiteral)
      << token_info.kind;
  return literal_string_storage_[token_info.literal_index];
}

auto TokenizedBuffer::GetTypeLiteralSize(Token token) const
    -> const llvm::APInt& {
  const auto& token_info = GetTokenInfo(token);
  CARBON_CHECK(token_info.kind.is_sized_type_literal()) << token_info.kind;
  return literal_int_storage_[token_info.literal_index];
}

auto TokenizedBuffer::GetMatchedClosingToken(Token opening_token) const
    -> Token {
  const auto& opening_token_info = GetTokenInfo(opening_token);
  CARBON_CHECK(opening_token_info.kind.is_opening_symbol())
      << opening_token_info.kind;
  return opening_token_info.closing_token;
}

auto TokenizedBuffer::GetMatchedOpeningToken(Token closing_token) const
    -> Token {
  const auto& closing_token_info = GetTokenInfo(closing_token);
  CARBON_CHECK(closing_token_info.kind.is_closing_symbol())
      << closing_token_info.kind;
  return closing_token_info.opening_token;
}

auto TokenizedBuffer::HasLeadingWhitespace(Token token) const -> bool {
  auto it = TokenIterator(token);
  return it == tokens().begin() || GetTokenInfo(*(it - 1)).has_trailing_space;
}

auto TokenizedBuffer::HasTrailingWhitespace(Token token) const -> bool {
  return GetTokenInfo(token).has_trailing_space;
}

auto TokenizedBuffer::IsRecoveryToken(Token token) const -> bool {
  return GetTokenInfo(token).is_recovery;
}

auto TokenizedBuffer::GetLineNumber(Line line) const -> int {
  return line.index + 1;
}

auto TokenizedBuffer::GetNextLine(Line line) const -> Line {
  Line next(line.index + 1);
  CARBON_DCHECK(static_cast<size_t>(next.index) < line_infos_.size());
  return next;
}

auto TokenizedBuffer::GetPrevLine(Line line) const -> Line {
  CARBON_CHECK(line.index > 0);
  return Line(line.index - 1);
}

auto TokenizedBuffer::GetIndentColumnNumber(Line line) const -> int {
  return GetLineInfo(line).indent + 1;
}

auto TokenizedBuffer::GetIdentifierText(Identifier identifier) const
    -> llvm::StringRef {
  return identifier_infos_[identifier.index].text;
}

auto TokenizedBuffer::PrintWidths::Widen(const PrintWidths& widths) -> void {
  index = std::max(widths.index, index);
  kind = std::max(widths.kind, kind);
  column = std::max(widths.column, column);
  line = std::max(widths.line, line);
  indent = std::max(widths.indent, indent);
}

// Compute the printed width of a number. When numbers are printed in decimal,
// the number of digits needed is one more than the log-base-10 of the value.
// We handle a value of `zero` explicitly.
//
// This routine requires its argument to be *non-negative*.
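//
// For example, `ComputeDecimalPrintedWidth(0)`,
// `ComputeDecimalPrintedWidth(9)`, and `ComputeDecimalPrintedWidth(100)`
// return 1, 1, and 3 respectively.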
static auto ComputeDecimalPrintedWidth(int number) -> int {
  CARBON_CHECK(number >= 0) << "Negative numbers are not supported.";
  if (number == 0) {
    return 1;
  }

  return static_cast<int>(std::log10(number)) + 1;
}

auto TokenizedBuffer::GetTokenPrintWidths(Token token) const -> PrintWidths {
  PrintWidths widths = {};
  widths.index = ComputeDecimalPrintedWidth(token_infos_.size());
  widths.kind = GetKind(token).name().size();
  widths.line = ComputeDecimalPrintedWidth(GetLineNumber(token));
  widths.column = ComputeDecimalPrintedWidth(GetColumnNumber(token));
  widths.indent =
      ComputeDecimalPrintedWidth(GetIndentColumnNumber(GetLine(token)));
  return widths;
}

auto TokenizedBuffer::Print(llvm::raw_ostream& output_stream) const -> void {
  if (tokens().begin() == tokens().end()) {
    return;
  }

  output_stream << "- filename: " << source_->filename() << "\n"
                << "  tokens: [\n";

  PrintWidths widths = {};
  widths.index = ComputeDecimalPrintedWidth(token_infos_.size());
  for (Token token : tokens()) {
    widths.Widen(GetTokenPrintWidths(token));
  }

  for (Token token : tokens()) {
    PrintToken(output_stream, token, widths);
    output_stream << "\n";
  }
  output_stream << "  ]\n";
}

auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream,
                                 Token token) const -> void {
  PrintToken(output_stream, token, {});
}

auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream, Token token,
                                 PrintWidths widths) const -> void {
  widths.Widen(GetTokenPrintWidths(token));
  int token_index = token.index;
  const auto& token_info = GetTokenInfo(token);
  llvm::StringRef token_text = GetTokenText(token);

  // Output the main chunk using one format string. We have to do the
  // justification manually in order to use the dynamically computed widths
  // and get the quotes included. Note that a literal `{` must be escaped as
  // `{{` in a `formatv` format string.
  output_stream << llvm::formatv(
      "    {{ index: {0}, kind: {1}, line: {2}, column: {3}, indent: {4}, "
      "spelling: '{5}'",
      llvm::format_decimal(token_index, widths.index),
      llvm::right_justify(llvm::formatv("'{0}'", token_info.kind.name()).str(),
                          widths.kind + 2),
      llvm::format_decimal(GetLineNumber(token_info.token_line), widths.line),
      llvm::format_decimal(GetColumnNumber(token), widths.column),
      llvm::format_decimal(GetIndentColumnNumber(token_info.token_line),
                           widths.indent),
      token_text);

  switch (token_info.kind) {
    case TokenKind::Identifier:
      output_stream << ", identifier: " << GetIdentifier(token).index;
      break;
    case TokenKind::IntegerLiteral:
      output_stream << ", value: `";
      GetIntegerLiteral(token).print(output_stream, /*isSigned=*/false);
      output_stream << "`";
      break;
    case TokenKind::RealLiteral:
      output_stream << ", value: `" << GetRealLiteral(token) << "`";
      break;
    case TokenKind::StringLiteral:
      output_stream << ", value: `" << GetStringLiteral(token) << "`";
      break;
    default:
      if (token_info.kind.is_opening_symbol()) {
        output_stream << ", closing_token: "
                      << GetMatchedClosingToken(token).index;
      } else if (token_info.kind.is_closing_symbol()) {
        output_stream << ", opening_token: "
                      << GetMatchedOpeningToken(token).index;
      }
      break;
  }

  if (token_info.has_trailing_space) {
    output_stream << ", has_trailing_space: true";
  }
  if (token_info.is_recovery) {
    output_stream << ", recovery: true";
  }

  output_stream << " },";
}

auto TokenizedBuffer::GetLineInfo(Line line) -> LineInfo& {
  return line_infos_[line.index];
}

auto TokenizedBuffer::GetLineInfo(Line line) const -> const LineInfo& {
  return line_infos_[line.index];
}

auto TokenizedBuffer::AddLine(LineInfo info) -> Line {
  line_infos_.push_back(info);
  return Line(static_cast<int>(line_infos_.size()) - 1);
}

auto TokenizedBuffer::GetTokenInfo(Token token) -> TokenInfo& {
  return token_infos_[token.index];
}

auto TokenizedBuffer::GetTokenInfo(Token token) const -> const TokenInfo& {
  return token_infos_[token.index];
}

auto TokenizedBuffer::AddToken(TokenInfo info) -> Token {
  token_infos_.push_back(info);
  expected_parse_tree_size_ += info.kind.expected_parse_tree_size();
  return Token(static_cast<int>(token_infos_.size()) - 1);
}

auto TokenIterator::Print(llvm::raw_ostream& output) const -> void {
  output << token_.index;
}

auto TokenizedBuffer::SourceBufferLocationTranslator::GetLocation(
    const char* loc) -> DiagnosticLocation {
  CARBON_CHECK(StringRefContainsPointer(buffer_->source_->text(), loc))
      << "location not within buffer";
  int64_t offset = loc - buffer_->source_->text().begin();

  // Find the first line starting after the given location. Note that we can't
  // inspect `line.length` here because it is not necessarily correct for the
  // final line during lexing (but will be correct later for the parse tree).
  const auto* line_it = std::partition_point(
      buffer_->line_infos_.begin(), buffer_->line_infos_.end(),
      [offset](const LineInfo& line) { return line.start <= offset; });
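  // For example (illustrative): with lines starting at offsets {0, 10, 25}
  // and `offset` 12, the partition point is the line starting at 25, and
  // stepping back one line below yields the line starting at 10, which
  // contains the offset.
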
  // Step back one line to find the line containing the given position.
  CARBON_CHECK(line_it != buffer_->line_infos_.begin())
      << "location precedes the start of the first line";
  --line_it;
  int line_number = line_it - buffer_->line_infos_.begin();
  int column_number = offset - line_it->start;

  // Start by grabbing the line from the buffer. If the line isn't fully
  // lexed, the length will be npos and the line will be grabbed from the
  // known start to the end of the buffer; we'll then adjust the length.
  llvm::StringRef line =
      buffer_->source_->text().substr(line_it->start, line_it->length);
  if (line_it->length == static_cast<int32_t>(llvm::StringRef::npos)) {
    CARBON_CHECK(line.take_front(column_number).count('\n') == 0)
        << "Currently we assume no unlexed newlines prior to the error "
           "column, but there was one when erroring at "
        << buffer_->source_->filename() << ":" << line_number << ":"
        << column_number;
    // Look for the next newline since we don't know the length. We can start
    // at the column because prior newlines will have been lexed.
    auto end_newline_pos = line.find('\n', column_number);
    if (end_newline_pos != llvm::StringRef::npos) {
      line = line.take_front(end_newline_pos);
    }
  }

  return {.file_name = buffer_->source_->filename(),
          .line = line,
          .line_number = line_number + 1,
          .column_number = column_number + 1};
}

auto TokenLocationTranslator::GetLocation(Token token) -> DiagnosticLocation {
  // Map the token location into a position within the source buffer.
  const auto& token_info = buffer_->GetTokenInfo(token);
  const auto& line_info = buffer_->GetLineInfo(token_info.token_line);
  const char* token_start =
      buffer_->source_->text().begin() + line_info.start + token_info.column;

  // Find the corresponding file location.
  // TODO: Should we somehow indicate in the diagnostic location if this token
  // is a recovery token that doesn't correspond to the original source?
  return TokenizedBuffer::SourceBufferLocationTranslator(buffer_).GetLocation(
      token_start);
}

}  // namespace Carbon::Lex