tokenized_buffer.cpp

// Part of the Carbon Language project, under the Apache License v2.0 with LLVM
// Exceptions. See /LICENSE for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

#include "toolchain/lexer/tokenized_buffer.h"

#include <algorithm>
#include <array>
#include <cassert>
#include <cmath>
#include <cstdint>
#include <iterator>
#include <limits>
#include <string>
#include <variant>

#include "common/check.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "toolchain/lexer/character_set.h"
#include "toolchain/lexer/numeric_literal.h"
#include "toolchain/lexer/string_literal.h"

namespace Carbon {

struct TrailingComment : SimpleDiagnostic<TrailingComment> {
  static constexpr llvm::StringLiteral ShortName = "syntax-comments";
  static constexpr llvm::StringLiteral Message =
      "Trailing comments are not permitted.";
};

struct NoWhitespaceAfterCommentIntroducer
    : SimpleDiagnostic<NoWhitespaceAfterCommentIntroducer> {
  static constexpr llvm::StringLiteral ShortName = "syntax-comments";
  static constexpr llvm::StringLiteral Message =
      "Whitespace is required after '//'.";
};

struct UnmatchedClosing : SimpleDiagnostic<UnmatchedClosing> {
  static constexpr llvm::StringLiteral ShortName = "syntax-balanced-delimiters";
  static constexpr llvm::StringLiteral Message =
      "Closing symbol without a corresponding opening symbol.";
};

struct MismatchedClosing : SimpleDiagnostic<MismatchedClosing> {
  static constexpr llvm::StringLiteral ShortName = "syntax-balanced-delimiters";
  static constexpr llvm::StringLiteral Message =
      "Closing symbol does not match most recent opening symbol.";
};

struct UnrecognizedCharacters : SimpleDiagnostic<UnrecognizedCharacters> {
  static constexpr llvm::StringLiteral ShortName =
      "syntax-unrecognized-characters";
  static constexpr llvm::StringLiteral Message =
      "Encountered unrecognized characters while parsing.";
};

// TODO: Move Overload and VariantMatch somewhere more central.

// Form an overload set from a list of functions. For example:
//
// ```
// auto overloaded = Overload{[] (int) {}, [] (float) {}};
// ```
template <typename... Fs>
struct Overload : Fs... {
  using Fs::operator()...;
};
template <typename... Fs>
Overload(Fs...) -> Overload<Fs...>;

// Pattern-match against the type of the value stored in the variant `V`. Each
// element of `fs` should be a function that takes one or more of the variant
// values in `V`.
template <typename V, typename... Fs>
auto VariantMatch(V&& v, Fs&&... fs) -> decltype(auto) {
  return std::visit(Overload{std::forward<Fs>(fs)...}, std::forward<V>(v));
}
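
// For example, `VariantMatch` dispatches on a variant's active alternative
// (an illustrative sketch, not code used in this file):
//
// ```
// std::variant<int, float> v = 42;
// VariantMatch(
//     v, [](int i) { /* handle the int case */ },
//     [](float f) { /* handle the float case */ });
// ```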

// Implementation of the lexer logic itself.
//
// The design is that lexing can loop over the source buffer, consuming it into
// tokens by calling into this API. This class handles the state and breaks
// down the different lexing steps that may be used. It directly updates the
// provided tokenized buffer with the lexed tokens.
class TokenizedBuffer::Lexer {
  TokenizedBuffer& buffer;

  SourceBufferLocationTranslator translator;
  LexerDiagnosticEmitter emitter;

  TokenLocationTranslator token_translator;
  TokenDiagnosticEmitter token_emitter;

  Line current_line;
  LineInfo* current_line_info;

  int current_column = 0;
  bool set_indent = false;

  llvm::SmallVector<Token, 8> open_groups;

 public:
  Lexer(TokenizedBuffer& buffer, DiagnosticConsumer& consumer)
      : buffer(buffer),
        translator(buffer),
        emitter(translator, consumer),
        token_translator(buffer),
        token_emitter(token_translator, consumer),
        current_line(buffer.AddLine({0, 0, 0})),
        current_line_info(&buffer.GetLineInfo(current_line)) {}

  // Symbolic result of a lexing action. This indicates whether we successfully
  // lexed a token, or whether other lexing actions should be attempted.
  //
  // While it wraps a simple boolean state, its API helps make failures more
  // self-documenting, and by consuming the actual token when one is produced,
  // it helps ensure the correct result is returned.
  class LexResult {
    bool formed_token;

    explicit LexResult(bool formed_token) : formed_token(formed_token) {}

   public:
    // Consumes (and discards) a valid token to construct a result
    // indicating a token has been produced. Relies on implicit conversions.
    // NOLINTNEXTLINE(google-explicit-constructor)
    LexResult(Token) : LexResult(true) {}

    // Returns a result indicating no token was produced.
    static auto NoMatch() -> LexResult { return LexResult(false); }

    // Tests whether a token was produced by the lexing routine, and
    // the lexer can continue forming tokens.
    explicit operator bool() const { return formed_token; }
  };
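
  // Lexing routines are tried in order, falling through on `NoMatch` (an
  // illustrative sketch mirroring the loop in `TokenizedBuffer::Lex` below):
  //
  // ```
  // LexResult result = lexer.LexSymbolToken(source_text);
  // if (!result) {
  //   result = lexer.LexKeywordOrIdentifier(source_text);
  // }
  // ```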

  // Perform the necessary bookkeeping to step past a newline at the current
  // line and column.
  auto HandleNewline() -> void {
    current_line_info->length = current_column;
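    // The next line starts one character past the newline that ends the
    // current line; it begins empty, with length and indent filled in as it
    // is lexed.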
    current_line =
        buffer.AddLine({current_line_info->start + current_column + 1, 0, 0});
    current_line_info = &buffer.GetLineInfo(current_line);
    current_column = 0;
    set_indent = false;
  }

  auto NoteWhitespace() -> void {
    if (!buffer.token_infos_.empty()) {
      buffer.token_infos_.back().has_trailing_space = true;
    }
  }

  auto SkipWhitespace(llvm::StringRef& source_text) -> bool {
    const char* const whitespace_start = source_text.begin();

    while (!source_text.empty()) {
      // We only support line-oriented commenting and lex comments as if they
      // were whitespace.
      if (source_text.startswith("//")) {
        // Any comment must be the only non-whitespace on the line.
        if (set_indent) {
          emitter.EmitError<TrailingComment>(source_text.begin());
        }
        // The introducer '//' must be followed by whitespace or EOF.
        if (source_text.size() > 2 && !IsSpace(source_text[2])) {
          emitter.EmitError<NoWhitespaceAfterCommentIntroducer>(
              source_text.begin() + 2);
        }
        while (!source_text.empty() && source_text.front() != '\n') {
          ++current_column;
          source_text = source_text.drop_front();
        }
        if (source_text.empty()) {
          break;
        }
      }

      switch (source_text.front()) {
        default:
          // If we find a non-whitespace character without exhausting the
          // buffer, return true to continue lexing.
          assert(!IsSpace(source_text.front()));
          if (whitespace_start != source_text.begin()) {
            NoteWhitespace();
          }
          return true;

        case '\n':
          // If this is the last character in the source, directly return here
          // to avoid creating an empty line.
          source_text = source_text.drop_front();
          if (source_text.empty()) {
            current_line_info->length = current_column;
            return false;
          }

          // Otherwise, add a line and set up to continue lexing.
          HandleNewline();
          continue;

        case ' ':
        case '\t':
          // Skip other forms of whitespace while tracking column.
          // FIXME: This obviously needs looooots more work to handle unicode
          // whitespace as well as special handling to allow better
          // tokenization of operators. This is just a stub to check that our
          // column management works.
          ++current_column;
          source_text = source_text.drop_front();
          continue;
      }
    }

    CHECK(source_text.empty()) << "Cannot reach here w/o finishing the text!";

    // Update the line length as this is also the end of a line.
    current_line_info->length = current_column;
    return false;
  }

  auto LexNumericLiteral(llvm::StringRef& source_text) -> LexResult {
    llvm::Optional<LexedNumericLiteral> literal =
        LexedNumericLiteral::Lex(source_text);
    if (!literal) {
      return LexResult::NoMatch();
    }

    int int_column = current_column;
    int token_size = literal->Text().size();
    current_column += token_size;
    source_text = source_text.drop_front(token_size);

    if (!set_indent) {
      current_line_info->indent = int_column;
      set_indent = true;
    }

    return VariantMatch(
        literal->ComputeValue(emitter),
        [&](LexedNumericLiteral::IntegerValue&& value) {
          auto token = buffer.AddToken({.kind = TokenKind::IntegerLiteral(),
                                        .token_line = current_line,
                                        .column = int_column});
          buffer.GetTokenInfo(token).literal_index =
              buffer.literal_int_storage_.size();
          buffer.literal_int_storage_.push_back(std::move(value.value));
          return token;
        },
        [&](LexedNumericLiteral::RealValue&& value) {
          auto token = buffer.AddToken({.kind = TokenKind::RealLiteral(),
                                        .token_line = current_line,
                                        .column = int_column});
          buffer.GetTokenInfo(token).literal_index =
              buffer.literal_int_storage_.size();
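          // The mantissa and exponent occupy adjacent slots of
          // `literal_int_storage_`, with `literal_index` pointing at the
          // mantissa; `GetRealLiteral` relies on this layout when it rebuilds
          // a `RealLiteralValue`.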
          buffer.literal_int_storage_.push_back(std::move(value.mantissa));
          buffer.literal_int_storage_.push_back(std::move(value.exponent));
          assert(buffer.GetRealLiteral(token).IsDecimal() ==
                 (value.radix == 10));
          return token;
        },
        [&](LexedNumericLiteral::UnrecoverableError) {
          auto token = buffer.AddToken({
              .kind = TokenKind::Error(),
              .token_line = current_line,
              .column = int_column,
              .error_length = token_size,
          });
          return token;
        });
  }

  auto LexStringLiteral(llvm::StringRef& source_text) -> LexResult {
    llvm::Optional<LexedStringLiteral> literal =
        LexedStringLiteral::Lex(source_text);
    if (!literal) {
      return LexResult::NoMatch();
    }

    Line string_line = current_line;
    int string_column = current_column;
    int literal_size = literal->Text().size();
    source_text = source_text.drop_front(literal_size);

    if (!set_indent) {
      current_line_info->indent = string_column;
      set_indent = true;
    }

    // Update line and column information.
    if (!literal->IsMultiLine()) {
      current_column += literal_size;
    } else {
      for (char c : literal->Text()) {
        if (c == '\n') {
          HandleNewline();
          // The indentation of all lines in a multi-line string literal is
          // that of the first line.
          current_line_info->indent = string_column;
          set_indent = true;
        } else {
          ++current_column;
        }
      }
    }

    auto token = buffer.AddToken({.kind = TokenKind::StringLiteral(),
                                  .token_line = string_line,
                                  .column = string_column});
    buffer.GetTokenInfo(token).literal_index =
        buffer.literal_string_storage_.size();
    buffer.literal_string_storage_.push_back(literal->ComputeValue(emitter));
    return token;
  }

  auto LexSymbolToken(llvm::StringRef& source_text) -> LexResult {
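    // Note that `StringSwitch::StartsWith` takes the first matching case, so
    // the token registry is expected to list longer spellings before their
    // prefixes (for example, a two-character symbol before its one-character
    // prefix).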
    TokenKind kind = llvm::StringSwitch<TokenKind>(source_text)
#define CARBON_SYMBOL_TOKEN(Name, Spelling) \
  .StartsWith(Spelling, TokenKind::Name())
#include "toolchain/lexer/token_registry.def"
                         .Default(TokenKind::Error());
    if (kind == TokenKind::Error()) {
      return LexResult::NoMatch();
    }

    if (!set_indent) {
      current_line_info->indent = current_column;
      set_indent = true;
    }

    CloseInvalidOpenGroups(kind);

    const char* location = source_text.begin();
    Token token = buffer.AddToken(
        {.kind = kind, .token_line = current_line, .column = current_column});
    current_column += kind.GetFixedSpelling().size();
    source_text = source_text.drop_front(kind.GetFixedSpelling().size());

    // Opening symbols just need to be pushed onto our queue of opening groups.
    if (kind.IsOpeningSymbol()) {
      open_groups.push_back(token);
      return token;
    }

    // Only closing symbols need further special handling.
    if (!kind.IsClosingSymbol()) {
      return token;
    }

    TokenInfo& closing_token_info = buffer.GetTokenInfo(token);

    // Check that there is a matching opening symbol before we consume this as
    // a closing symbol.
    if (open_groups.empty()) {
      closing_token_info.kind = TokenKind::Error();
      closing_token_info.error_length = kind.GetFixedSpelling().size();
      emitter.EmitError<UnmatchedClosing>(location);
      // Note that this still returns true as we do consume a symbol.
      return token;
    }

    // Finally can handle a normal closing symbol.
    Token opening_token = open_groups.pop_back_val();
    TokenInfo& opening_token_info = buffer.GetTokenInfo(opening_token);
    opening_token_info.closing_token = token;
    closing_token_info.opening_token = opening_token;
    return token;
  }

  // Given a word that has already been lexed, determine whether it is a type
  // literal and if so form the corresponding token.
  auto LexWordAsTypeLiteralToken(llvm::StringRef word, int column)
      -> LexResult {
    if (word.size() < 2) {
      // Too short to form one of these tokens.
      return LexResult::NoMatch();
    }
    if (!('1' <= word[1] && word[1] <= '9')) {
      // Doesn't start with a valid initial digit.
      return LexResult::NoMatch();
    }
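    // For example, `i32` passes these checks, while a word like `i0` is
    // rejected here and falls back to being lexed as an identifier.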
    llvm::Optional<TokenKind> kind;
    switch (word.front()) {
      case 'i':
        kind = TokenKind::IntegerTypeLiteral();
        break;
      case 'u':
        kind = TokenKind::UnsignedIntegerTypeLiteral();
        break;
      case 'f':
        kind = TokenKind::FloatingPointTypeLiteral();
        break;
      default:
        return LexResult::NoMatch();
    }

    llvm::StringRef suffix = word.substr(1);
    llvm::APInt suffix_value;
    if (suffix.getAsInteger(10, suffix_value)) {
      return LexResult::NoMatch();
    }

    auto token = buffer.AddToken(
        {.kind = *kind, .token_line = current_line, .column = column});
    buffer.GetTokenInfo(token).literal_index =
        buffer.literal_int_storage_.size();
    buffer.literal_int_storage_.push_back(std::move(suffix_value));
    return token;
  }

  // Closes all open groups that cannot remain open across the given symbol
  // `kind`. Users may pass `Error` to close all open groups.
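  //
  // For example, if `}` is lexed while an open `(` group is innermost, the
  // `(` is diagnosed as mismatched and closed with a recovery `)` token
  // before the `}` is processed.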
  auto CloseInvalidOpenGroups(TokenKind kind) -> void {
    if (!kind.IsClosingSymbol() && kind != TokenKind::Error()) {
      return;
    }

    while (!open_groups.empty()) {
      Token opening_token = open_groups.back();
      TokenKind opening_kind = buffer.GetTokenInfo(opening_token).kind;
      if (kind == opening_kind.GetClosingSymbol()) {
        return;
      }

      open_groups.pop_back();
      token_emitter.EmitError<MismatchedClosing>(opening_token);

      CHECK(!buffer.Tokens().empty()) << "Must have a prior opening token!";
      Token prev_token = buffer.Tokens().end()[-1];

      // TODO: do a smarter backwards scan for where to put the closing
      // token.
      Token closing_token = buffer.AddToken(
          {.kind = opening_kind.GetClosingSymbol(),
           .has_trailing_space = buffer.HasTrailingWhitespace(prev_token),
           .is_recovery = true,
           .token_line = current_line,
           .column = current_column});
      TokenInfo& opening_token_info = buffer.GetTokenInfo(opening_token);
      TokenInfo& closing_token_info = buffer.GetTokenInfo(closing_token);
      opening_token_info.closing_token = closing_token;
      closing_token_info.opening_token = opening_token;
    }
  }

  auto GetOrCreateIdentifier(llvm::StringRef text) -> Identifier {
    auto insert_result = buffer.identifier_map_.insert(
        {text, Identifier(buffer.identifier_infos_.size())});
    if (insert_result.second) {
      buffer.identifier_infos_.push_back({text});
    }
    return insert_result.first->second;
  }

  auto LexKeywordOrIdentifier(llvm::StringRef& source_text) -> LexResult {
    if (!IsAlpha(source_text.front()) && source_text.front() != '_') {
      return LexResult::NoMatch();
    }

    if (!set_indent) {
      current_line_info->indent = current_column;
      set_indent = true;
    }

    // Take the valid characters off the front of the source buffer.
    llvm::StringRef identifier_text =
        source_text.take_while([](char c) { return IsAlnum(c) || c == '_'; });
    CHECK(!identifier_text.empty()) << "Must have at least one character!";
    int identifier_column = current_column;
    current_column += identifier_text.size();
    source_text = source_text.drop_front(identifier_text.size());

    // Check if the text is a type literal, and if so form such a literal.
    if (LexResult result =
            LexWordAsTypeLiteralToken(identifier_text, identifier_column)) {
      return result;
    }

    // Check if the text matches a keyword token, and if so use that.
    TokenKind kind = llvm::StringSwitch<TokenKind>(identifier_text)
#define CARBON_KEYWORD_TOKEN(Name, Spelling) .Case(Spelling, TokenKind::Name())
#include "toolchain/lexer/token_registry.def"
                         .Default(TokenKind::Error());
    if (kind != TokenKind::Error()) {
      return buffer.AddToken({.kind = kind,
                              .token_line = current_line,
                              .column = identifier_column});
    }

    // Otherwise we have a generic identifier.
    return buffer.AddToken({.kind = TokenKind::Identifier(),
                            .token_line = current_line,
                            .column = identifier_column,
                            .id = GetOrCreateIdentifier(identifier_text)});
  }

  auto LexError(llvm::StringRef& source_text) -> LexResult {
    llvm::StringRef error_text = source_text.take_while([](char c) {
      if (IsAlnum(c)) {
        return false;
      }
      switch (c) {
        case '_':
        case '\t':
        case '\n':
          return false;
      }
      return llvm::StringSwitch<bool>(llvm::StringRef(&c, 1))
#define CARBON_SYMBOL_TOKEN(Name, Spelling) .StartsWith(Spelling, false)
#include "toolchain/lexer/token_registry.def"
          .Default(true);
    });
    if (error_text.empty()) {
      // TODO: Reimplement this to use the lexer properly. In the meantime,
      // guarantee that we eat at least one byte.
      error_text = source_text.take_front(1);
    }

    // Longer errors get to be two tokens.
    error_text = error_text.substr(0, std::numeric_limits<int32_t>::max());
    auto token = buffer.AddToken(
        {.kind = TokenKind::Error(),
         .token_line = current_line,
         .column = current_column,
         .error_length = static_cast<int32_t>(error_text.size())});
    emitter.EmitError<UnrecognizedCharacters>(error_text.begin());

    current_column += error_text.size();
    source_text = source_text.drop_front(error_text.size());
    return token;
  }

  auto AddEndOfFileToken() -> void {
    buffer.AddToken({.kind = TokenKind::EndOfFile(),
                     .token_line = current_line,
                     .column = current_column});
  }
};

auto TokenizedBuffer::Lex(SourceBuffer& source, DiagnosticConsumer& consumer)
    -> TokenizedBuffer {
  TokenizedBuffer buffer(source);
  ErrorTrackingDiagnosticConsumer error_tracking_consumer(consumer);
  Lexer lexer(buffer, error_tracking_consumer);

  llvm::StringRef source_text = source.Text();
  while (lexer.SkipWhitespace(source_text)) {
    // Each time we find non-whitespace characters, try each kind of token we
    // support lexing, from simplest to most complex.
    Lexer::LexResult result = lexer.LexSymbolToken(source_text);
    if (!result) {
      result = lexer.LexKeywordOrIdentifier(source_text);
    }
    if (!result) {
      result = lexer.LexNumericLiteral(source_text);
    }
    if (!result) {
      result = lexer.LexStringLiteral(source_text);
    }
    if (!result) {
      result = lexer.LexError(source_text);
    }
    CHECK(result) << "No token was lexed.";
  }

  // The end-of-file token is always considered to be whitespace.
  lexer.NoteWhitespace();
  lexer.CloseInvalidOpenGroups(TokenKind::Error());
  lexer.AddEndOfFileToken();

  if (error_tracking_consumer.SeenError()) {
    buffer.has_errors_ = true;
  }

  return buffer;
}
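
// Typical usage, as an illustrative sketch (the setup of the source buffer
// and diagnostic consumer is assumed):
//
// ```
// TokenizedBuffer buffer = TokenizedBuffer::Lex(source, consumer);
// for (auto token : buffer.Tokens()) {
//   llvm::outs() << buffer.GetKind(token).Name() << "\n";
// }
// ```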

auto TokenizedBuffer::GetKind(Token token) const -> TokenKind {
  return GetTokenInfo(token).kind;
}

auto TokenizedBuffer::GetLine(Token token) const -> Line {
  return GetTokenInfo(token).token_line;
}

auto TokenizedBuffer::GetLineNumber(Token token) const -> int {
  return GetLineNumber(GetLine(token));
}

auto TokenizedBuffer::GetColumnNumber(Token token) const -> int {
  return GetTokenInfo(token).column + 1;
}

auto TokenizedBuffer::GetTokenText(Token token) const -> llvm::StringRef {
  auto& token_info = GetTokenInfo(token);
  llvm::StringRef fixed_spelling = token_info.kind.GetFixedSpelling();
  if (!fixed_spelling.empty()) {
    return fixed_spelling;
  }

  if (token_info.kind == TokenKind::Error()) {
    auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    return source_->Text().substr(token_start, token_info.error_length);
  }

  // Refer back to the source text to preserve oddities like radix or digit
  // separators the author included.
  if (token_info.kind == TokenKind::IntegerLiteral() ||
      token_info.kind == TokenKind::RealLiteral()) {
    auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    llvm::Optional<LexedNumericLiteral> relexed_token =
        LexedNumericLiteral::Lex(source_->Text().substr(token_start));
    CHECK(relexed_token) << "Could not reform numeric literal token.";
    return relexed_token->Text();
  }

  // Refer back to the source text to find the original spelling, including
  // escape sequences etc.
  if (token_info.kind == TokenKind::StringLiteral()) {
    auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    llvm::Optional<LexedStringLiteral> relexed_token =
        LexedStringLiteral::Lex(source_->Text().substr(token_start));
    CHECK(relexed_token) << "Could not reform string literal token.";
    return relexed_token->Text();
  }

  // Refer back to the source text to avoid needing to reconstruct the
  // spelling from the size.
  if (token_info.kind.IsSizedTypeLiteral()) {
    auto& line_info = GetLineInfo(token_info.token_line);
    int64_t token_start = line_info.start + token_info.column;
    llvm::StringRef suffix =
        source_->Text().substr(token_start + 1).take_while(IsDecimalDigit);
    return llvm::StringRef(suffix.data() - 1, suffix.size() + 1);
  }

  if (token_info.kind == TokenKind::EndOfFile()) {
    return llvm::StringRef();
  }

  CHECK(token_info.kind == TokenKind::Identifier())
      << "Only identifiers have stored text!";
  return GetIdentifierText(token_info.id);
}

auto TokenizedBuffer::GetIdentifier(Token token) const -> Identifier {
  auto& token_info = GetTokenInfo(token);
  CHECK(token_info.kind == TokenKind::Identifier())
      << "The token must be an identifier!";
  return token_info.id;
}

auto TokenizedBuffer::GetIntegerLiteral(Token token) const
    -> const llvm::APInt& {
  auto& token_info = GetTokenInfo(token);
  CHECK(token_info.kind == TokenKind::IntegerLiteral())
      << "The token must be an integer literal!";
  return literal_int_storage_[token_info.literal_index];
}

auto TokenizedBuffer::GetRealLiteral(Token token) const -> RealLiteralValue {
  auto& token_info = GetTokenInfo(token);
  CHECK(token_info.kind == TokenKind::RealLiteral())
      << "The token must be a real literal!";

  // Note that every real literal is at least three characters long, so we can
  // safely look at the second character to determine whether we have a decimal
  // or hexadecimal literal.
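  // For example, a hexadecimal literal beginning `0x...` has 'x' as its
  // second character, while a decimal literal like `12.5e3` has a digit
  // there.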
  auto& line_info = GetLineInfo(token_info.token_line);
  int64_t token_start = line_info.start + token_info.column;
  char second_char = source_->Text()[token_start + 1];
  bool is_decimal = second_char != 'x' && second_char != 'b';

  return RealLiteralValue(this, token_info.literal_index, is_decimal);
}

auto TokenizedBuffer::GetStringLiteral(Token token) const -> llvm::StringRef {
  auto& token_info = GetTokenInfo(token);
  CHECK(token_info.kind == TokenKind::StringLiteral())
      << "The token must be a string literal!";
  return literal_string_storage_[token_info.literal_index];
}

auto TokenizedBuffer::GetTypeLiteralSize(Token token) const
    -> const llvm::APInt& {
  auto& token_info = GetTokenInfo(token);
  CHECK(token_info.kind.IsSizedTypeLiteral())
      << "The token must be a sized type literal!";
  return literal_int_storage_[token_info.literal_index];
}

auto TokenizedBuffer::GetMatchedClosingToken(Token opening_token) const
    -> Token {
  auto& opening_token_info = GetTokenInfo(opening_token);
  CHECK(opening_token_info.kind.IsOpeningSymbol())
      << "The token must be an opening group symbol!";
  return opening_token_info.closing_token;
}

auto TokenizedBuffer::GetMatchedOpeningToken(Token closing_token) const
    -> Token {
  auto& closing_token_info = GetTokenInfo(closing_token);
  CHECK(closing_token_info.kind.IsClosingSymbol())
      << "The token must be a closing group symbol!";
  return closing_token_info.opening_token;
}

auto TokenizedBuffer::HasLeadingWhitespace(Token token) const -> bool {
  auto it = TokenIterator(token);
  return it == Tokens().begin() || GetTokenInfo(*(it - 1)).has_trailing_space;
}

auto TokenizedBuffer::HasTrailingWhitespace(Token token) const -> bool {
  return GetTokenInfo(token).has_trailing_space;
}

auto TokenizedBuffer::IsRecoveryToken(Token token) const -> bool {
  return GetTokenInfo(token).is_recovery;
}

auto TokenizedBuffer::GetLineNumber(Line line) const -> int {
  return line.index_ + 1;
}

auto TokenizedBuffer::GetIndentColumnNumber(Line line) const -> int {
  return GetLineInfo(line).indent + 1;
}

auto TokenizedBuffer::GetIdentifierText(Identifier identifier) const
    -> llvm::StringRef {
  return identifier_infos_[identifier.index_].text;
}

auto TokenizedBuffer::PrintWidths::Widen(const PrintWidths& widths) -> void {
  index = std::max(widths.index, index);
  kind = std::max(widths.kind, kind);
  column = std::max(widths.column, column);
  line = std::max(widths.line, line);
  indent = std::max(widths.indent, indent);
}

// Compute the printed width of a number. When numbers are printed in decimal,
// the number of digits needed is one more than the log-base-10 of the value.
// We handle a value of `zero` explicitly.
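// For example, values 0 through 9 print in one column and values 10 through
// 99 print in two.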
//
// This routine requires its argument to be *non-negative*.
static auto ComputeDecimalPrintedWidth(int number) -> int {
  CHECK(number >= 0) << "Negative numbers are not supported.";
  if (number == 0) {
    return 1;
  }

  return static_cast<int>(std::log10(number)) + 1;
}

auto TokenizedBuffer::GetTokenPrintWidths(Token token) const -> PrintWidths {
  PrintWidths widths = {};
  widths.index = ComputeDecimalPrintedWidth(token_infos_.size());
  widths.kind = GetKind(token).Name().size();
  widths.line = ComputeDecimalPrintedWidth(GetLineNumber(token));
  widths.column = ComputeDecimalPrintedWidth(GetColumnNumber(token));
  widths.indent =
      ComputeDecimalPrintedWidth(GetIndentColumnNumber(GetLine(token)));
  return widths;
}

auto TokenizedBuffer::Print(llvm::raw_ostream& output_stream) const -> void {
  if (Tokens().begin() == Tokens().end()) {
    return;
  }

  PrintWidths widths = {};
  widths.index = ComputeDecimalPrintedWidth(token_infos_.size());
  for (Token token : Tokens()) {
    widths.Widen(GetTokenPrintWidths(token));
  }

  for (Token token : Tokens()) {
    PrintToken(output_stream, token, widths);
    output_stream << "\n";
  }
}

auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream,
                                 Token token) const -> void {
  PrintToken(output_stream, token, {});
}

auto TokenizedBuffer::PrintToken(llvm::raw_ostream& output_stream, Token token,
                                 PrintWidths widths) const -> void {
  widths.Widen(GetTokenPrintWidths(token));
  int token_index = token.index_;
  auto& token_info = GetTokenInfo(token);
  llvm::StringRef token_text = GetTokenText(token);

  // Output the main chunk using one format string. We have to do the
  // justification manually in order to use the dynamically computed widths
  // and get the quotes included.
  output_stream << llvm::formatv(
      "token: { index: {0}, kind: {1}, line: {2}, column: {3}, indent: {4}, "
      "spelling: '{5}'",
      llvm::format_decimal(token_index, widths.index),
      llvm::right_justify(
          (llvm::Twine("'") + token_info.kind.Name() + "'").str(),
          widths.kind + 2),
      llvm::format_decimal(GetLineNumber(token_info.token_line), widths.line),
      llvm::format_decimal(GetColumnNumber(token), widths.column),
      llvm::format_decimal(GetIndentColumnNumber(token_info.token_line),
                           widths.indent),
      token_text);

  if (token_info.kind == TokenKind::Identifier()) {
    output_stream << ", identifier: " << GetIdentifier(token).index_;
  } else if (token_info.kind.IsOpeningSymbol()) {
    output_stream << ", closing_token: "
                  << GetMatchedClosingToken(token).index_;
  } else if (token_info.kind.IsClosingSymbol()) {
    output_stream << ", opening_token: "
                  << GetMatchedOpeningToken(token).index_;
  } else if (token_info.kind == TokenKind::StringLiteral()) {
    output_stream << ", value: `" << GetStringLiteral(token) << "`";
  }
  // TODO: Include value for numeric literals.

  if (token_info.has_trailing_space) {
    output_stream << ", has_trailing_space: true";
  }
  if (token_info.is_recovery) {
    output_stream << ", recovery: true";
  }

  output_stream << " }";
}

auto TokenizedBuffer::GetLineInfo(Line line) -> LineInfo& {
  return line_infos_[line.index_];
}

auto TokenizedBuffer::GetLineInfo(Line line) const -> const LineInfo& {
  return line_infos_[line.index_];
}

auto TokenizedBuffer::AddLine(LineInfo info) -> Line {
  line_infos_.push_back(info);
  return Line(static_cast<int>(line_infos_.size()) - 1);
}

auto TokenizedBuffer::GetTokenInfo(Token token) -> TokenInfo& {
  return token_infos_[token.index_];
}

auto TokenizedBuffer::GetTokenInfo(Token token) const -> const TokenInfo& {
  return token_infos_[token.index_];
}

auto TokenizedBuffer::AddToken(TokenInfo info) -> Token {
  token_infos_.push_back(info);
  return Token(static_cast<int>(token_infos_.size()) - 1);
}

auto TokenizedBuffer::TokenIterator::Print(llvm::raw_ostream& output) const
    -> void {
  output << token_.index_;
}

auto TokenizedBuffer::SourceBufferLocationTranslator::GetLocation(
    const char* loc) -> Diagnostic::Location {
  assert(llvm::is_sorted(std::array{buffer_->source_->Text().begin(), loc,
                                    buffer_->source_->Text().end()}) &&
         "location not within buffer");
  int64_t offset = loc - buffer_->source_->Text().begin();

  // Find the first line starting after the given location. Note that we can't
  // inspect `line.length` here because it is not necessarily correct for the
  // final line.
  auto line_it = std::partition_point(
      buffer_->line_infos_.begin(), buffer_->line_infos_.end(),
      [offset](const LineInfo& line) { return line.start <= offset; });
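  // `std::partition_point` binary searches here: lines are appended in order
  // of strictly increasing `start` offset, so the predicate is monotone over
  // `line_infos_`.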
  bool incomplete_line_info = line_it == buffer_->line_infos_.end();

  // Step back one line to find the line containing the given position.
  CHECK(line_it != buffer_->line_infos_.begin())
      << "location precedes the start of the first line";
  --line_it;
  int line_number = line_it - buffer_->line_infos_.begin();
  int column_number = offset - line_it->start;

  // We might still be lexing the last line. If so, check to see if there are
  // any newline characters between the start of this line and the given
  // location.
  if (incomplete_line_info) {
    column_number = 0;
    for (int64_t i = line_it->start; i != offset; ++i) {
      if (buffer_->source_->Text()[i] == '\n') {
        ++line_number;
        column_number = 0;
      } else {
        ++column_number;
      }
    }
  }

  return {.file_name = buffer_->source_->Filename().str(),
          .line_number = line_number + 1,
          .column_number = column_number + 1};
}

auto TokenizedBuffer::TokenLocationTranslator::GetLocation(Token token)
    -> Diagnostic::Location {
  // Map the token location into a position within the source buffer.
  auto& token_info = buffer_->GetTokenInfo(token);
  auto& line_info = buffer_->GetLineInfo(token_info.token_line);
  const char* token_start =
      buffer_->source_->Text().begin() + line_info.start + token_info.column;

  // Find the corresponding file location.
  // TODO: Should we somehow indicate in the diagnostic location if this token
  // is a recovery token that doesn't correspond to the original source?
  return SourceBufferLocationTranslator(*buffer_).GetLocation(token_start);
}

}  // namespace Carbon