tokenized_buffer.cpp

// Part of the Carbon Language project, under the Apache License v2.0 with LLVM
// Exceptions. See /LICENSE for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

#include "toolchain/lexer/tokenized_buffer.h"

#include <algorithm>
#include <array>
#include <cassert>
#include <cmath>
#include <cstdint>
#include <iterator>
#include <limits>
#include <string>
#include <utility>
#include <variant>

#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "toolchain/lexer/character_set.h"
#include "toolchain/lexer/numeric_literal.h"
#include "toolchain/lexer/string_literal.h"

namespace Carbon {

struct TrailingComment : SimpleDiagnostic<TrailingComment> {
  static constexpr llvm::StringLiteral ShortName = "syntax-comments";
  static constexpr llvm::StringLiteral Message =
      "Trailing comments are not permitted.";
};

struct NoWhitespaceAfterCommentIntroducer
    : SimpleDiagnostic<NoWhitespaceAfterCommentIntroducer> {
  static constexpr llvm::StringLiteral ShortName = "syntax-comments";
  static constexpr llvm::StringLiteral Message =
      "Whitespace is required after '//'.";
};

struct UnmatchedClosing : SimpleDiagnostic<UnmatchedClosing> {
  static constexpr llvm::StringLiteral ShortName = "syntax-balanced-delimiters";
  static constexpr llvm::StringLiteral Message =
      "Closing symbol without a corresponding opening symbol.";
};

struct MismatchedClosing : SimpleDiagnostic<MismatchedClosing> {
  static constexpr llvm::StringLiteral ShortName = "syntax-balanced-delimiters";
  static constexpr llvm::StringLiteral Message =
      "Closing symbol does not match most recent opening symbol.";
};

struct UnrecognizedCharacters : SimpleDiagnostic<UnrecognizedCharacters> {
  static constexpr llvm::StringLiteral ShortName =
      "syntax-unrecognized-characters";
  static constexpr llvm::StringLiteral Message =
      "Encountered unrecognized characters while parsing.";
};

// TODO: Move Overload and VariantMatch somewhere more central.

// Form an overload set from a list of functions. For example:
//
// ```
// auto overloaded = Overload{[] (int) {}, [] (float) {}};
// ```
template <typename... Fs>
struct Overload : Fs... {
  using Fs::operator()...;
};
template <typename... Fs>
Overload(Fs...) -> Overload<Fs...>;

// Pattern-match against the type of the value stored in the variant `V`. Each
// element of `fs` should be a function that takes one or more of the variant
// values in `V`.
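//
// For example (a minimal sketch; this variant and these handlers are
// hypothetical and not used elsewhere in this file):
//
// ```
// std::variant<int, float> v = 1.5f;
// VariantMatch(
//     v, [](int i) { /* handle the int alternative */ },
//     [](float f) { /* handle the float alternative */ });
// ```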
template <typename V, typename... Fs>
auto VariantMatch(V&& v, Fs&&... fs) -> decltype(auto) {
  return std::visit(Overload{std::forward<Fs>(fs)...}, std::forward<V>(v));
}

// Implementation of the lexer logic itself.
//
// The design is that lexing can loop over the source buffer, consuming it into
// tokens by calling into this API. This class handles the state and breaks
// down the different lexing steps that may be used. It directly updates the
// provided tokenized buffer with the lexed tokens.
class TokenizedBuffer::Lexer {
  TokenizedBuffer& buffer;

  SourceBufferLocationTranslator translator;
  LexerDiagnosticEmitter emitter;

  TokenLocationTranslator token_translator;
  TokenDiagnosticEmitter token_emitter;

  Line current_line;
  LineInfo* current_line_info;

  int current_column = 0;
  bool set_indent = false;

  llvm::SmallVector<Token, 8> open_groups;

 public:
  Lexer(TokenizedBuffer& buffer, DiagnosticConsumer& consumer)
      : buffer(buffer),
        translator(buffer),
        emitter(translator, consumer),
        token_translator(buffer),
        token_emitter(token_translator, consumer),
        current_line(buffer.AddLine({0, 0, 0})),
        current_line_info(&buffer.GetLineInfo(current_line)) {}

  // Symbolic result of a lexing action. This indicates whether we successfully
  // lexed a token, or whether other lexing actions should be attempted.
  //
  // While it wraps a simple boolean state, its API both helps make the
  // failures more self documenting, and by consuming the actual token
  // constructively when one is produced, it helps ensure the correct result
  // is returned.
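  //
  // The intended usage pattern chains lexing attempts until one forms a token
  // (a sketch mirroring the driver loop in `Lex` below; `text` stands in for
  // the remaining source):
  //
  // ```
  // if (LexResult result = LexSymbolToken(text)) {
  //   return result;
  // }
  // return LexError(text);
  // ```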
  class LexResult {
    bool formed_token;

    explicit LexResult(bool formed_token) : formed_token(formed_token) {}

   public:
    // Consumes (and discards) a valid token to construct a result
    // indicating a token has been produced. Relies on implicit conversions.
    // NOLINTNEXTLINE(google-explicit-constructor)
    LexResult(Token) : LexResult(true) {}

    // Returns a result indicating no token was produced.
    static auto NoMatch() -> LexResult { return LexResult(false); }

    // Tests whether a token was produced by the lexing routine, and
    // the lexer can continue forming tokens.
    explicit operator bool() const { return formed_token; }
  };

  // Perform the necessary bookkeeping to step past a newline at the current
  // line and column.
  auto HandleNewline() -> void {
    current_line_info->length = current_column;

    current_line =
        buffer.AddLine({current_line_info->start + current_column + 1, 0, 0});
    current_line_info = &buffer.GetLineInfo(current_line);
    current_column = 0;
    set_indent = false;
  }

  auto NoteWhitespace() -> void {
    if (!buffer.token_infos.empty()) {
      buffer.token_infos.back().has_trailing_space = true;
    }
  }

  auto SkipWhitespace(llvm::StringRef& source_text) -> bool {
    const char* const whitespace_start = source_text.begin();

    while (!source_text.empty()) {
      // We only support line-oriented commenting and lex comments as if they
      // were whitespace.
      if (source_text.startswith("//")) {
        // Any comment must be the only non-whitespace on the line.
        if (set_indent) {
          emitter.EmitError<TrailingComment>(source_text.begin());
        }
        // The introducer '//' must be followed by whitespace or EOF.
        if (source_text.size() > 2 && !IsSpace(source_text[2])) {
          emitter.EmitError<NoWhitespaceAfterCommentIntroducer>(
              source_text.begin() + 2);
        }
        while (!source_text.empty() && source_text.front() != '\n') {
          ++current_column;
          source_text = source_text.drop_front();
        }
        if (source_text.empty()) {
          break;
        }
      }

      switch (source_text.front()) {
        default:
          // If we find a non-whitespace character without exhausting the
          // buffer, return true to continue lexing.
          assert(!IsSpace(source_text.front()));
          if (whitespace_start != source_text.begin()) {
            NoteWhitespace();
          }
          return true;

        case '\n':
          // If this is the last character in the source, directly return here
          // to avoid creating an empty line.
          source_text = source_text.drop_front();
          if (source_text.empty()) {
            current_line_info->length = current_column;
            return false;
          }

          // Otherwise, add a line and set up to continue lexing.
          HandleNewline();
          continue;

        case ' ':
        case '\t':
          // Skip other forms of whitespace while tracking column.
          // FIXME: This obviously needs looooots more work to handle unicode
          // whitespace as well as special handling to allow better
          // tokenization of operators. This is just a stub to check that our
          // column management works.
          ++current_column;
          source_text = source_text.drop_front();
          continue;
      }
    }

    assert(source_text.empty() && "Cannot reach here w/o finishing the text!");

    // Update the line length as this is also the end of a line.
    current_line_info->length = current_column;
    return false;
  }

  auto LexNumericLiteral(llvm::StringRef& source_text) -> LexResult {
    llvm::Optional<LexedNumericLiteral> literal =
        LexedNumericLiteral::Lex(source_text);
    if (!literal) {
      return LexResult::NoMatch();
    }

    int int_column = current_column;
    int token_size = literal->Text().size();
    current_column += token_size;
    source_text = source_text.drop_front(token_size);

    if (!set_indent) {
      current_line_info->indent = int_column;
      set_indent = true;
    }

    return VariantMatch(
        literal->ComputeValue(emitter),
        [&](LexedNumericLiteral::IntegerValue&& value) {
          auto token = buffer.AddToken({.kind = TokenKind::IntegerLiteral(),
                                        .token_line = current_line,
                                        .column = int_column});
          buffer.GetTokenInfo(token).literal_index =
              buffer.literal_int_storage.size();
          buffer.literal_int_storage.push_back(std::move(value.value));
          return token;
        },
        [&](LexedNumericLiteral::RealValue&& value) {
          auto token = buffer.AddToken({.kind = TokenKind::RealLiteral(),
                                        .token_line = current_line,
                                        .column = int_column});
          buffer.GetTokenInfo(token).literal_index =
              buffer.literal_int_storage.size();
          buffer.literal_int_storage.push_back(std::move(value.mantissa));
          buffer.literal_int_storage.push_back(std::move(value.exponent));
          assert(buffer.GetRealLiteral(token).IsDecimal() ==
                 (value.radix == 10));
          return token;
        },
        [&](LexedNumericLiteral::UnrecoverableError) {
          auto token = buffer.AddToken({
              .kind = TokenKind::Error(),
              .token_line = current_line,
              .column = int_column,
              .error_length = token_size,
          });
          return token;
        });
  }

  auto LexStringLiteral(llvm::StringRef& source_text) -> LexResult {
    llvm::Optional<LexedStringLiteral> literal =
        LexedStringLiteral::Lex(source_text);
    if (!literal) {
      return LexResult::NoMatch();
    }

    Line string_line = current_line;
    int string_column = current_column;
    int literal_size = literal->Text().size();
    source_text = source_text.drop_front(literal_size);

    if (!set_indent) {
      current_line_info->indent = string_column;
      set_indent = true;
    }

    // Update line and column information.
    if (!literal->IsMultiLine()) {
      current_column += literal_size;
    } else {
      for (char c : literal->Text()) {
        if (c == '\n') {
          HandleNewline();
          // The indentation of all lines in a multi-line string literal is
          // that of the first line.
          current_line_info->indent = string_column;
          set_indent = true;
        } else {
          ++current_column;
        }
      }
    }

    auto token = buffer.AddToken({.kind = TokenKind::StringLiteral(),
                                  .token_line = string_line,
                                  .column = string_column});
    buffer.GetTokenInfo(token).literal_index =
        buffer.literal_string_storage.size();
    buffer.literal_string_storage.push_back(literal->ComputeValue(emitter));
    return token;
  }

  auto LexSymbolToken(llvm::StringRef& source_text) -> LexResult {
    TokenKind kind = llvm::StringSwitch<TokenKind>(source_text)
#define CARBON_SYMBOL_TOKEN(Name, Spelling) \
  .StartsWith(Spelling, TokenKind::Name())
#include "toolchain/lexer/token_registry.def"
                         .Default(TokenKind::Error());
    if (kind == TokenKind::Error()) {
      return LexResult::NoMatch();
    }

    if (!set_indent) {
      current_line_info->indent = current_column;
      set_indent = true;
    }

    CloseInvalidOpenGroups(kind);

    const char* location = source_text.begin();
    Token token = buffer.AddToken(
        {.kind = kind, .token_line = current_line, .column = current_column});
    current_column += kind.GetFixedSpelling().size();
    source_text = source_text.drop_front(kind.GetFixedSpelling().size());

    // Opening symbols just need to be pushed onto our queue of opening groups.
    if (kind.IsOpeningSymbol()) {
      open_groups.push_back(token);
      return token;
    }

    // Only closing symbols need further special handling.
    if (!kind.IsClosingSymbol()) {
      return token;
    }

    TokenInfo& closing_token_info = buffer.GetTokenInfo(token);

    // Check that there is a matching opening symbol before we consume this as
    // a closing symbol.
    if (open_groups.empty()) {
      closing_token_info.kind = TokenKind::Error();
      closing_token_info.error_length = kind.GetFixedSpelling().size();

      emitter.EmitError<UnmatchedClosing>(location);
      // Note that this still returns true as we do consume a symbol.
      return token;
    }

    // Finally can handle a normal closing symbol.
    Token opening_token = open_groups.pop_back_val();
    TokenInfo& opening_token_info = buffer.GetTokenInfo(opening_token);
    opening_token_info.closing_token = token;
    closing_token_info.opening_token = opening_token;
    return token;
  }

  // Given a word that has already been lexed, determine whether it is a type
  // literal and if so form the corresponding token.
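  //
  // For example (a sketch of the expected results, not an exhaustive spec;
  // `c` stands in for the column argument):
  //
  // ```
  // LexWordAsTypeLiteralToken("i32", c)   // IntegerTypeLiteral, size 32
  // LexWordAsTypeLiteralToken("u8", c)    // UnsignedIntegerTypeLiteral, size 8
  // LexWordAsTypeLiteralToken("f64", c)   // FloatingPointTypeLiteral, size 64
  // LexWordAsTypeLiteralToken("i032", c)  // NoMatch: first digit must be 1-9
  // ```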
  auto LexWordAsTypeLiteralToken(llvm::StringRef word, int column)
      -> LexResult {
    if (word.size() < 2) {
      // Too short to form one of these tokens.
      return LexResult::NoMatch();
    }
    if (!('1' <= word[1] && word[1] <= '9')) {
      // Doesn't start with a valid initial digit.
      return LexResult::NoMatch();
    }

    llvm::Optional<TokenKind> kind;
    switch (word.front()) {
      case 'i':
        kind = TokenKind::IntegerTypeLiteral();
        break;
      case 'u':
        kind = TokenKind::UnsignedIntegerTypeLiteral();
        break;
      case 'f':
        kind = TokenKind::FloatingPointTypeLiteral();
        break;
      default:
        return LexResult::NoMatch();
    }

    llvm::StringRef suffix = word.substr(1);
    llvm::APInt suffix_value;
    if (suffix.getAsInteger(10, suffix_value)) {
      return LexResult::NoMatch();
    }

    auto token = buffer.AddToken(
        {.kind = *kind, .token_line = current_line, .column = column});
    buffer.GetTokenInfo(token).literal_index =
        buffer.literal_int_storage.size();
    buffer.literal_int_storage.push_back(std::move(suffix_value));
    return token;
  }

  // Closes all open groups that cannot remain open across the symbol `kind`.
  // Users may pass `Error` to close all open groups.
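  //
  // For example (a sketch of the recovery behavior): if `]` is lexed while
  // the most recent open group is `(`, the `(` is diagnosed as mismatched and
  // a recovery `)` token is inserted here; the `]` itself is then left with
  // no opening symbol and is handled by `LexSymbolToken` as an unmatched
  // closing symbol.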
  auto CloseInvalidOpenGroups(TokenKind kind) -> void {
    if (!kind.IsClosingSymbol() && kind != TokenKind::Error()) {
      return;
    }

    while (!open_groups.empty()) {
      Token opening_token = open_groups.back();
      TokenKind opening_kind = buffer.GetTokenInfo(opening_token).kind;
      if (kind == opening_kind.GetClosingSymbol()) {
        return;
      }

      open_groups.pop_back();
      token_emitter.EmitError<MismatchedClosing>(opening_token);

      assert(!buffer.Tokens().empty() && "Must have a prior opening token!");
      Token prev_token = buffer.Tokens().end()[-1];

      // TODO: do a smarter backwards scan for where to put the closing
      // token.
      Token closing_token = buffer.AddToken(
          {.kind = opening_kind.GetClosingSymbol(),
           .has_trailing_space = buffer.HasTrailingWhitespace(prev_token),
           .is_recovery = true,
           .token_line = current_line,
           .column = current_column});
      TokenInfo& opening_token_info = buffer.GetTokenInfo(opening_token);
      TokenInfo& closing_token_info = buffer.GetTokenInfo(closing_token);
      opening_token_info.closing_token = closing_token;
      closing_token_info.opening_token = opening_token;
    }
  }

  auto GetOrCreateIdentifier(llvm::StringRef text) -> Identifier {
    auto insert_result = buffer.identifier_map.insert(
        {text, Identifier(buffer.identifier_infos.size())});
    if (insert_result.second) {
      buffer.identifier_infos.push_back({text});
    }
    return insert_result.first->second;
  }

  auto LexKeywordOrIdentifier(llvm::StringRef& source_text) -> LexResult {
    if (!IsAlpha(source_text.front()) && source_text.front() != '_') {
      return LexResult::NoMatch();
    }

    if (!set_indent) {
      current_line_info->indent = current_column;
      set_indent = true;
    }

    // Take the valid characters off the front of the source buffer.
    llvm::StringRef identifier_text =
        source_text.take_while([](char c) { return IsAlnum(c) || c == '_'; });
    assert(!identifier_text.empty() && "Must have at least one character!");
    int identifier_column = current_column;
    current_column += identifier_text.size();
    source_text = source_text.drop_front(identifier_text.size());

    // Check if the text is a type literal, and if so form such a literal.
    if (LexResult result =
            LexWordAsTypeLiteralToken(identifier_text, identifier_column)) {
      return result;
    }

    // Check if the text matches a keyword token, and if so use that.
    TokenKind kind = llvm::StringSwitch<TokenKind>(identifier_text)
#define CARBON_KEYWORD_TOKEN(Name, Spelling) .Case(Spelling, TokenKind::Name())
#include "toolchain/lexer/token_registry.def"
                         .Default(TokenKind::Error());
    if (kind != TokenKind::Error()) {
      return buffer.AddToken({.kind = kind,
                              .token_line = current_line,
                              .column = identifier_column});
    }

    // Otherwise we have a generic identifier.
    return buffer.AddToken({.kind = TokenKind::Identifier(),
                            .token_line = current_line,
                            .column = identifier_column,
                            .id = GetOrCreateIdentifier(identifier_text)});
  }

  auto LexError(llvm::StringRef& source_text) -> LexResult {
    llvm::StringRef error_text = source_text.take_while([](char c) {
      if (IsAlnum(c)) {
        return false;
      }
      switch (c) {
        case '_':
        case '\t':
        case '\n':
          return false;
      }
      return llvm::StringSwitch<bool>(llvm::StringRef(&c, 1))
#define CARBON_SYMBOL_TOKEN(Name, Spelling) .StartsWith(Spelling, false)
#include "toolchain/lexer/token_registry.def"
          .Default(true);
    });
    if (error_text.empty()) {
      // TODO: Reimplement this to use the lexer properly. In the meantime,
      // guarantee that we eat at least one byte.
      error_text = source_text.take_front(1);
    }

    // Longer errors get to be two tokens.
    error_text = error_text.substr(0, std::numeric_limits<int32_t>::max());
    auto token = buffer.AddToken(
        {.kind = TokenKind::Error(),
         .token_line = current_line,
         .column = current_column,
         .error_length = static_cast<int32_t>(error_text.size())});
    emitter.EmitError<UnrecognizedCharacters>(error_text.begin());

    current_column += error_text.size();
    source_text = source_text.drop_front(error_text.size());
    return token;
  }

  auto AddEndOfFileToken() -> void {
    buffer.AddToken({.kind = TokenKind::EndOfFile(),
                     .token_line = current_line,
                     .column = current_column});
  }
};

auto TokenizedBuffer::Lex(SourceBuffer& source, DiagnosticConsumer& consumer)
    -> TokenizedBuffer {
  TokenizedBuffer buffer(source);
  ErrorTrackingDiagnosticConsumer error_tracking_consumer(consumer);
  Lexer lexer(buffer, error_tracking_consumer);

  llvm::StringRef source_text = source.Text();
  while (lexer.SkipWhitespace(source_text)) {
    // Each time we find non-whitespace characters, try each kind of token we
    // support lexing, from simplest to most complex.
    Lexer::LexResult result = lexer.LexSymbolToken(source_text);
    if (!result) {
      result = lexer.LexKeywordOrIdentifier(source_text);
    }
    if (!result) {
      result = lexer.LexNumericLiteral(source_text);
    }
    if (!result) {
      result = lexer.LexStringLiteral(source_text);
    }
    if (!result) {
      result = lexer.LexError(source_text);
    }
    assert(result && "No token was lexed.");
  }

  // The end-of-file token is always considered to be whitespace.
  lexer.NoteWhitespace();
  lexer.CloseInvalidOpenGroups(TokenKind::Error());
  lexer.AddEndOfFileToken();

  if (error_tracking_consumer.SeenError()) {
    buffer.has_errors = true;
  }

  return buffer;
}

auto TokenizedBuffer::GetKind(Token token) const -> TokenKind {
  return GetTokenInfo(token).kind;
}

auto TokenizedBuffer::GetLine(Token token) const -> Line {
  return GetTokenInfo(token).token_line;
}

auto TokenizedBuffer::GetLineNumber(Token token) const -> int {
  return GetLineNumber(GetLine(token));
}

auto TokenizedBuffer::GetColumnNumber(Token token) const -> int {
  return GetTokenInfo(token).column + 1;
}

auto TokenizedBuffer::GetTokenText(Token token) const -> llvm::StringRef {
  auto& token_info = GetTokenInfo(token);
  llvm::StringRef fixed_spelling = token_info.kind.GetFixedSpelling();
  if (!fixed_spelling.empty()) {
    return fixed_spelling;
  }

  if (token_info.kind == TokenKind::Error()) {
    auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    return source->Text().substr(token_start, token_info.error_length);
  }

  // Refer back to the source text to preserve oddities like radix or digit
  // separators the author included.
  if (token_info.kind == TokenKind::IntegerLiteral() ||
      token_info.kind == TokenKind::RealLiteral()) {
    auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    llvm::Optional<LexedNumericLiteral> relexed_token =
        LexedNumericLiteral::Lex(source->Text().substr(token_start));
    assert(relexed_token && "Could not reform numeric literal token.");
    return relexed_token->Text();
  }

  // Refer back to the source text to find the original spelling, including
  // escape sequences etc.
  if (token_info.kind == TokenKind::StringLiteral()) {
    auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    llvm::Optional<LexedStringLiteral> relexed_token =
        LexedStringLiteral::Lex(source->Text().substr(token_start));
    assert(relexed_token && "Could not reform string literal token.");
    return relexed_token->Text();
  }

  // Refer back to the source text to avoid needing to reconstruct the
  // spelling from the size.
  if (token_info.kind.IsSizedTypeLiteral()) {
    auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    llvm::StringRef suffix =
        source->Text().substr(token_start + 1).take_while(IsDecimalDigit);
    return llvm::StringRef(suffix.data() - 1, suffix.size() + 1);
  }

  if (token_info.kind == TokenKind::EndOfFile()) {
    return llvm::StringRef();
  }

  assert(token_info.kind == TokenKind::Identifier() &&
         "Only identifiers have stored text!");
  return GetIdentifierText(token_info.id);
}

auto TokenizedBuffer::GetIdentifier(Token token) const -> Identifier {
  auto& token_info = GetTokenInfo(token);
  assert(token_info.kind == TokenKind::Identifier() &&
         "The token must be an identifier!");
  return token_info.id;
}

auto TokenizedBuffer::GetIntegerLiteral(Token token) const
    -> const llvm::APInt& {
  auto& token_info = GetTokenInfo(token);
  assert(token_info.kind == TokenKind::IntegerLiteral() &&
         "The token must be an integer literal!");
  return literal_int_storage[token_info.literal_index];
}

auto TokenizedBuffer::GetRealLiteral(Token token) const -> RealLiteralValue {
  auto& token_info = GetTokenInfo(token);
  assert(token_info.kind == TokenKind::RealLiteral() &&
         "The token must be a real literal!");

  // Note that every real literal is at least three characters long, so we can
  // safely look at the second character to determine whether we have a
  // decimal or hexadecimal literal.
  auto& line_info = GetLineInfo(token_info.token_line);
  int64_t token_start = line_info.start + token_info.column;
  char second_char = source->Text()[token_start + 1];
  bool is_decimal = second_char != 'x' && second_char != 'b';

  return RealLiteralValue(this, token_info.literal_index, is_decimal);
}

auto TokenizedBuffer::GetStringLiteral(Token token) const -> llvm::StringRef {
  auto& token_info = GetTokenInfo(token);
  assert(token_info.kind == TokenKind::StringLiteral() &&
         "The token must be a string literal!");
  return literal_string_storage[token_info.literal_index];
}

auto TokenizedBuffer::GetTypeLiteralSize(Token token) const
    -> const llvm::APInt& {
  auto& token_info = GetTokenInfo(token);
  assert(token_info.kind.IsSizedTypeLiteral() &&
         "The token must be a sized type literal!");
  return literal_int_storage[token_info.literal_index];
}

auto TokenizedBuffer::GetMatchedClosingToken(Token opening_token) const
    -> Token {
  auto& opening_token_info = GetTokenInfo(opening_token);
  assert(opening_token_info.kind.IsOpeningSymbol() &&
         "The token must be an opening group symbol!");
  return opening_token_info.closing_token;
}

auto TokenizedBuffer::GetMatchedOpeningToken(Token closing_token) const
    -> Token {
  auto& closing_token_info = GetTokenInfo(closing_token);
  assert(closing_token_info.kind.IsClosingSymbol() &&
         "The token must be a closing group symbol!");
  return closing_token_info.opening_token;
}

auto TokenizedBuffer::HasLeadingWhitespace(Token token) const -> bool {
  auto it = TokenIterator(token);
  return it == Tokens().begin() || GetTokenInfo(*(it - 1)).has_trailing_space;
}

auto TokenizedBuffer::HasTrailingWhitespace(Token token) const -> bool {
  return GetTokenInfo(token).has_trailing_space;
}

auto TokenizedBuffer::IsRecoveryToken(Token token) const -> bool {
  return GetTokenInfo(token).is_recovery;
}

auto TokenizedBuffer::GetLineNumber(Line line) const -> int {
  return line.index + 1;
}

auto TokenizedBuffer::GetIndentColumnNumber(Line line) const -> int {
  return GetLineInfo(line).indent + 1;
}

auto TokenizedBuffer::GetIdentifierText(Identifier identifier) const
    -> llvm::StringRef {
  return identifier_infos[identifier.index].text;
}

auto TokenizedBuffer::PrintWidths::Widen(const PrintWidths& widths) -> void {
  index = std::max(widths.index, index);
  kind = std::max(widths.kind, kind);
  column = std::max(widths.column, column);
  line = std::max(widths.line, line);
  indent = std::max(widths.indent, indent);
}

// Compute the printed width of a number. When numbers are printed in decimal,
// the number of digits needed is one more than the log-base-10 of the value.
// We handle a value of `zero` explicitly.
//
// This routine requires its argument to be *non-negative*.
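//
// For example (a worked check of the edge cases):
//
// ```
// ComputeDecimalPrintedWidth(0) == 1
// ComputeDecimalPrintedWidth(9) == 1
// ComputeDecimalPrintedWidth(10) == 2
// ComputeDecimalPrintedWidth(999) == 3
// ```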
static auto ComputeDecimalPrintedWidth(int number) -> int {
  assert(number >= 0 && "Negative numbers are not supported.");
  if (number == 0) {
    return 1;
  }

  return static_cast<int>(std::log10(number)) + 1;
}

auto TokenizedBuffer::GetTokenPrintWidths(Token token) const -> PrintWidths {
  PrintWidths widths = {};
  widths.index = ComputeDecimalPrintedWidth(token_infos.size());
  widths.kind = GetKind(token).Name().size();
  widths.line = ComputeDecimalPrintedWidth(GetLineNumber(token));
  widths.column = ComputeDecimalPrintedWidth(GetColumnNumber(token));
  widths.indent =
      ComputeDecimalPrintedWidth(GetIndentColumnNumber(GetLine(token)));
  return widths;
}

auto TokenizedBuffer::Print(llvm::raw_ostream& output_stream) const -> void {
  if (Tokens().begin() == Tokens().end()) {
    return;
  }

  PrintWidths widths = {};
  widths.index = ComputeDecimalPrintedWidth(token_infos.size());
  for (Token token : Tokens()) {
    widths.Widen(GetTokenPrintWidths(token));
  }

  for (Token token : Tokens()) {
    PrintToken(output_stream, token, widths);
    output_stream << "\n";
  }
}

auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream,
                                 Token token) const -> void {
  PrintToken(output_stream, token, {});
}

auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream, Token token,
                                 PrintWidths widths) const -> void {
  widths.Widen(GetTokenPrintWidths(token));
  int token_index = token.index;
  auto& token_info = GetTokenInfo(token);
  llvm::StringRef token_text = GetTokenText(token);

  // Output the main chunk using one format string. We have to do the
  // justification manually in order to use the dynamically computed widths
  // and get the quotes included.
  output_stream << llvm::formatv(
      "token: {{ index: {0}, kind: {1}, line: {2}, column: {3}, indent: {4}, "
      "spelling: '{5}'",
      llvm::format_decimal(token_index, widths.index),
      llvm::right_justify(
          (llvm::Twine("'") + token_info.kind.Name() + "'").str(),
          widths.kind + 2),
      llvm::format_decimal(GetLineNumber(token_info.token_line), widths.line),
      llvm::format_decimal(GetColumnNumber(token), widths.column),
      llvm::format_decimal(GetIndentColumnNumber(token_info.token_line),
                           widths.indent),
      token_text);

  if (token_info.kind == TokenKind::Identifier()) {
    output_stream << ", identifier: " << GetIdentifier(token).index;
  } else if (token_info.kind.IsOpeningSymbol()) {
    output_stream << ", closing_token: "
                  << GetMatchedClosingToken(token).index;
  } else if (token_info.kind.IsClosingSymbol()) {
    output_stream << ", opening_token: "
                  << GetMatchedOpeningToken(token).index;
  } else if (token_info.kind == TokenKind::StringLiteral()) {
    output_stream << ", value: `" << GetStringLiteral(token) << "`";
  }
  // TODO: Include value for numeric literals.

  if (token_info.has_trailing_space) {
    output_stream << ", has_trailing_space: true";
  }
  if (token_info.is_recovery) {
    output_stream << ", recovery: true";
  }

  output_stream << " }";
}

auto TokenizedBuffer::GetLineInfo(Line line) -> LineInfo& {
  return line_infos[line.index];
}

auto TokenizedBuffer::GetLineInfo(Line line) const -> const LineInfo& {
  return line_infos[line.index];
}

auto TokenizedBuffer::AddLine(LineInfo info) -> Line {
  line_infos.push_back(info);
  return Line(static_cast<int>(line_infos.size()) - 1);
}

auto TokenizedBuffer::GetTokenInfo(Token token) -> TokenInfo& {
  return token_infos[token.index];
}

auto TokenizedBuffer::GetTokenInfo(Token token) const -> const TokenInfo& {
  return token_infos[token.index];
}

auto TokenizedBuffer::AddToken(TokenInfo info) -> Token {
  token_infos.push_back(info);
  return Token(static_cast<int>(token_infos.size()) - 1);
}

auto TokenizedBuffer::TokenIterator::Print(llvm::raw_ostream& output) const
    -> void {
  output << token.index;
}

auto TokenizedBuffer::SourceBufferLocationTranslator::GetLocation(
    const char* loc) -> Diagnostic::Location {
  assert(llvm::is_sorted(std::array{buffer_->source->Text().begin(), loc,
                                    buffer_->source->Text().end()}) &&
         "location not within buffer");
  int64_t offset = loc - buffer_->source->Text().begin();

  // Find the first line starting after the given location. Note that we can't
  // inspect `line.length` here because it is not necessarily correct for the
  // final line.
  auto line_it = std::partition_point(
      buffer_->line_infos.begin(), buffer_->line_infos.end(),
      [offset](const LineInfo& line) { return line.start <= offset; });
  bool incomplete_line_info = line_it == buffer_->line_infos.end();

  // Step back one line to find the line containing the given position.
  assert(line_it != buffer_->line_infos.begin() &&
         "location precedes the start of the first line");
  --line_it;
  int line_number = line_it - buffer_->line_infos.begin();
  int column_number = offset - line_it->start;

  // We might still be lexing the last line. If so, check to see if there are
  // any newline characters between the start of this line and the given
  // location.
  if (incomplete_line_info) {
    column_number = 0;
    for (int64_t i = line_it->start; i != offset; ++i) {
      if (buffer_->source->Text()[i] == '\n') {
        ++line_number;
        column_number = 0;
      } else {
        ++column_number;
      }
    }
  }

  return {.file_name = buffer_->source->Filename().str(),
          .line_number = line_number + 1,
          .column_number = column_number + 1};
}

auto TokenizedBuffer::TokenLocationTranslator::GetLocation(Token token)
    -> Diagnostic::Location {
  // Map the token location into a position within the source buffer.
  auto& token_info = buffer_->GetTokenInfo(token);
  auto& line_info = buffer_->GetLineInfo(token_info.token_line);
  const char* token_start =
      buffer_->source->Text().begin() + line_info.start + token_info.column;

  // Find the corresponding file location.
  // TODO: Should we somehow indicate in the diagnostic location if this token
  // is a recovery token that doesn't correspond to the original source?
  return SourceBufferLocationTranslator(*buffer_).GetLocation(token_start);
}

}  // namespace Carbon