Преглед изворног кода

Eliminate NodeLocConverter (#4870)

I'm looking at eliminating `DiagnosticConverter`. This change removes
`NodeLocConverter` (albeit adding `UnitAndImportsDiagnosticConverter`),
and in doing so, refactors lex conversion functions to extract them out
from the `DiagnosticConverter` functions.

I'll be following up with changes that collapse `DiagnosticConverter`
logic into `DiagnosticEmitter` locations. The intent is that we
shouldn't need separate ownership of both types.
Jon Ross-Perkins пре 1 године
родитељ
комит
133717cd7e

+ 1 - 3
toolchain/check/BUILD

@@ -89,7 +89,6 @@ cc_library(
         "//toolchain/lex:tokenized_buffer",
         "//toolchain/parse:node_kind",
         "//toolchain/parse:tree",
-        "//toolchain/parse:tree_node_diagnostic_converter",
         "//toolchain/sem_ir:file",
         "//toolchain/sem_ir:formatter",
         "//toolchain/sem_ir:inst",
@@ -158,7 +157,6 @@ cc_library(
         "//toolchain/lex:tokenized_buffer",
         "//toolchain/parse:node_kind",
         "//toolchain/parse:tree",
-        "//toolchain/parse:tree_node_diagnostic_converter",
         "//toolchain/sem_ir:entry_point",
         "//toolchain/sem_ir:file",
         "//toolchain/sem_ir:inst",
@@ -273,7 +271,7 @@ cc_library(
         "//common:raw_string_ostream",
         "//toolchain/diagnostics:diagnostic_emitter",
         "//toolchain/lex:token_index",
-        "//toolchain/parse:tree_node_diagnostic_converter",
+        "//toolchain/parse:tree",
         "//toolchain/sem_ir:file",
         "//toolchain/sem_ir:stringify_type",
         "@llvm-project//llvm:Support",

+ 0 - 1
toolchain/check/check.h

@@ -30,7 +30,6 @@ struct Unit {
   SemIR::File* sem_ir;
 
   // Diagnostic converters.
-  Parse::NodeLocConverter* node_converter;
   SemIRDiagnosticConverter* sem_ir_converter;
 };
 

+ 2 - 2
toolchain/check/check_unit.cpp

@@ -363,8 +363,8 @@ auto CheckUnit::ProcessNodeIds() -> bool {
 
   // On crash, report which token we were handling.
   PrettyStackTraceFunction node_dumper([&](llvm::raw_ostream& output) {
-    auto converted = unit_and_imports_->unit->node_converter->ConvertLoc(
-        node_id, [](DiagnosticLoc, const DiagnosticBase<>&) {});
+    const auto& tree = unit_and_imports_->unit->get_parse_tree_and_subtrees();
+    auto converted = tree.NodeToDiagnosticLoc(node_id, /*token_only=*/false);
     converted.loc.FormatLocation(output);
     output << "checking " << context_.parse_tree().node_kind(node_id) << "\n";
     // Crash output has a tab indent; try to indent slightly past that.

+ 24 - 3
toolchain/check/check_unit.h

@@ -9,7 +9,6 @@
 #include "llvm/ADT/SmallVector.h"
 #include "toolchain/check/check.h"
 #include "toolchain/check/context.h"
-#include "toolchain/parse/tree_node_diagnostic_converter.h"
 #include "toolchain/sem_ir/ids.h"
 
 namespace Carbon::Check {
@@ -44,6 +43,26 @@ struct PackageImports {
   llvm::SmallVector<Import> imports;
 };
 
+// Converts a `NodeId` to a diagnostic location for `UnitAndImports`.
+class UnitAndImportsDiagnosticConverter
+    : public DiagnosticConverter<Parse::NodeId> {
+ public:
+  explicit UnitAndImportsDiagnosticConverter(
+      llvm::function_ref<const Parse::TreeAndSubtrees&()>
+          get_parse_tree_and_subtrees)
+      : get_parse_tree_and_subtrees_(get_parse_tree_and_subtrees) {}
+
+  auto ConvertLoc(Parse::NodeId node_id, ContextFnT /*context_fn*/) const
+      -> ConvertedDiagnosticLoc override {
+    return get_parse_tree_and_subtrees_().NodeToDiagnosticLoc(
+        node_id, /*token_only=*/false);
+  }
+
+ private:
+  llvm::function_ref<const Parse::TreeAndSubtrees&()>
+      get_parse_tree_and_subtrees_;
+};
+
 // Contains information accumulated while checking a `Unit` (primarily import
 // information), in addition to the `Unit` itself.
 struct UnitAndImports {
@@ -51,7 +70,8 @@ struct UnitAndImports {
       : check_ir_id(check_ir_id),
         unit(&unit),
         err_tracker(*unit.consumer),
-        emitter(*unit.node_converter, err_tracker) {}
+        converter(unit.get_parse_tree_and_subtrees),
+        emitter(converter, err_tracker) {}
 
   auto parse_tree() -> const Parse::Tree& { return unit->sem_ir->parse_tree(); }
   auto source() -> const SourceBuffer& {
@@ -63,7 +83,8 @@ struct UnitAndImports {
 
   // Emitter information.
   ErrorTrackingDiagnosticConsumer err_tracker;
-  DiagnosticEmitter<Parse::NodeLoc> emitter;
+  UnitAndImportsDiagnosticConverter converter;
+  DiagnosticEmitter<Parse::NodeId> emitter;
 
   // List of the outgoing imports. If a package includes unavailable library
   // imports, it has an entry with has_load_error set. Invalid imports (for

+ 0 - 1
toolchain/check/diagnostic_helpers.h

@@ -7,7 +7,6 @@
 
 #include "llvm/ADT/APSInt.h"
 #include "toolchain/parse/node_ids.h"
-#include "toolchain/parse/tree_node_diagnostic_converter.h"
 #include "toolchain/sem_ir/ids.h"
 
 namespace Carbon::Check {

+ 4 - 3
toolchain/check/sem_ir_diagnostic_converter.cpp

@@ -198,10 +198,11 @@ auto SemIRDiagnosticConverter::ConvertArg(llvm::Any arg) const -> llvm::Any {
 auto SemIRDiagnosticConverter::ConvertLocInFile(const SemIR::File* sem_ir,
                                                 Parse::NodeId node_id,
                                                 bool token_only,
-                                                ContextFnT context_fn) const
+                                                ContextFnT /*context_fn*/) const
     -> ConvertedDiagnosticLoc {
-  return node_converters_[sem_ir->check_ir_id().index]->ConvertLoc(
-      Parse::NodeLoc(node_id, token_only), context_fn);
+  const auto& tree_and_subtrees =
+      imported_trees_and_subtrees_[sem_ir->check_ir_id().index]();
+  return tree_and_subtrees.NodeToDiagnosticLoc(node_id, token_only);
 }
 
 }  // namespace Carbon::Check

+ 7 - 4
toolchain/check/sem_ir_diagnostic_converter.h

@@ -9,7 +9,7 @@
 #include "toolchain/check/diagnostic_helpers.h"
 #include "toolchain/diagnostics/diagnostic_converter.h"
 #include "toolchain/lex/token_index.h"
-#include "toolchain/parse/tree_node_diagnostic_converter.h"
+#include "toolchain/parse/tree_and_subtrees.h"
 #include "toolchain/sem_ir/file.h"
 
 namespace Carbon::Check {
@@ -17,10 +17,13 @@ namespace Carbon::Check {
 // Handles the transformation of a SemIRLoc to a DiagnosticLoc.
 class SemIRDiagnosticConverter : public DiagnosticConverter<SemIRLoc> {
  public:
+  using TreeFnT = llvm::function_ref<const Parse::TreeAndSubtrees&()>;
+
   explicit SemIRDiagnosticConverter(
-      llvm::ArrayRef<Parse::NodeLocConverter*> node_converters,
+      llvm::ArrayRef<TreeFnT> imported_trees_and_subtrees,
       const SemIR::File* sem_ir)
-      : node_converters_(node_converters), sem_ir_(sem_ir) {}
+      : imported_trees_and_subtrees_(imported_trees_and_subtrees),
+        sem_ir_(sem_ir) {}
 
   // Implements `DiagnosticConverter::ConvertLoc`. Adds context for any imports
   // used in the current SemIR to get to the underlying code.
@@ -52,7 +55,7 @@ class SemIRDiagnosticConverter : public DiagnosticConverter<SemIRLoc> {
       -> ConvertedDiagnosticLoc;
 
   // Converters for each SemIR.
-  llvm::ArrayRef<Parse::NodeLocConverter*> node_converters_;
+  llvm::ArrayRef<TreeFnT> imported_trees_and_subtrees_;
 
   // The current SemIR being processed.
   const SemIR::File* sem_ir_;

+ 22 - 19
toolchain/driver/compile_subcommand.cpp

@@ -333,12 +333,15 @@ class CompilationUnit {
   // Parses tokens. Returns true on success.
   auto RunParse() -> void;
 
-  auto PreCheck() -> Parse::NodeLocConverter&;
+  // Prepares per-IR lazy fetch functions which may come up in cross-IR
+  // diagnostics.
+  auto PreCheck() -> llvm::function_ref<const Parse::TreeAndSubtrees&()>;
 
   // Returns information needed to check this unit.
-  auto GetCheckUnit(SemIR::CheckIRId check_ir_id,
-                    llvm::ArrayRef<Parse::NodeLocConverter*> node_converters)
-      -> Check::Unit;
+  auto GetCheckUnit(
+      SemIR::CheckIRId check_ir_id,
+      llvm::ArrayRef<llvm::function_ref<const Parse::TreeAndSubtrees&()>>
+          all_trees_and_subtrees) -> Check::Unit;
 
   // Runs post-check logic. Returns true if checking succeeded for the IR.
   auto PostCheck() -> void;
@@ -411,7 +414,6 @@ class CompilationUnit {
   std::optional<Parse::TreeAndSubtrees> parse_tree_and_subtrees_;
   std::optional<std::function<const Parse::TreeAndSubtrees&()>>
       get_parse_tree_and_subtrees_;
-  std::optional<Parse::NodeLocConverter> node_converter_;
   std::optional<Check::SemIRDiagnosticConverter> sem_ir_converter_;
   std::optional<SemIR::File> sem_ir_;
   std::unique_ptr<llvm::LLVMContext> llvm_context_;
@@ -497,33 +499,32 @@ auto CompilationUnit::RunParse() -> void {
   }
 }
 
-auto CompilationUnit::PreCheck() -> Parse::NodeLocConverter& {
+auto CompilationUnit::PreCheck()
+    -> llvm::function_ref<const Parse::TreeAndSubtrees&()> {
   CARBON_CHECK(parse_tree_, "Must call RunParse first");
-  CARBON_CHECK(!node_converter_, "Called PreCheck twice");
+  CARBON_CHECK(!get_parse_tree_and_subtrees_, "Called PreCheck twice");
 
   get_parse_tree_and_subtrees_ = [this]() -> const Parse::TreeAndSubtrees& {
     return this->GetParseTreeAndSubtrees();
   };
-  node_converter_.emplace(&*tokens_, source_->filename(),
-                          *get_parse_tree_and_subtrees_);
-  return *node_converter_;
+  return *get_parse_tree_and_subtrees_;
 }
 
 auto CompilationUnit::GetCheckUnit(
     SemIR::CheckIRId check_ir_id,
-    llvm::ArrayRef<Parse::NodeLocConverter*> node_converters) -> Check::Unit {
-  CARBON_CHECK(node_converter_, "Must call PreCheck first");
+    llvm::ArrayRef<llvm::function_ref<const Parse::TreeAndSubtrees&()>>
+        all_trees_and_subtrees) -> Check::Unit {
+  CARBON_CHECK(get_parse_tree_and_subtrees_, "Must call PreCheck first");
   CARBON_CHECK(!sem_ir_converter_, "Called GetCheckUnit twice");
 
   sem_ir_.emplace(&*parse_tree_, check_ir_id, parse_tree_->packaging_decl(),
                   value_stores_, input_filename_);
-  sem_ir_converter_.emplace(node_converters, &*sem_ir_);
+  sem_ir_converter_.emplace(all_trees_and_subtrees, &*sem_ir_);
   return {.consumer = consumer_,
           .value_stores = &value_stores_,
           .timings = timings_ ? &*timings_ : nullptr,
           .get_parse_tree_and_subtrees = *get_parse_tree_and_subtrees_,
           .sem_ir = &*sem_ir_,
-          .node_converter = &*node_converter_,
           .sem_ir_converter = &*sem_ir_converter_};
 }
 
@@ -841,23 +842,25 @@ auto CompileSubcommand::Run(DriverEnv& driver_env) -> DriverResult {
   }
 
   // Pre-check assigns IR IDs and constructs node converters.
-  llvm::SmallVector<Parse::NodeLocConverter*> node_converters;
+  llvm::SmallVector<llvm::function_ref<const Parse::TreeAndSubtrees&()>>
+      all_trees_and_subtrees;
   // This size may not match due to units that are missing source, but that's an
   // error case and not worth extra work.
-  node_converters.reserve(units.size());
+  all_trees_and_subtrees.reserve(units.size());
   for (auto& unit : units) {
     if (unit->has_source()) {
-      node_converters.push_back(&unit->PreCheck());
+      all_trees_and_subtrees.push_back(unit->PreCheck());
     }
   }
 
   // Gather Check::Units.
   llvm::SmallVector<Check::Unit> check_units;
-  check_units.reserve(node_converters.size());
+  check_units.reserve(all_trees_and_subtrees.size());
   for (auto& unit : units) {
     if (unit->has_source()) {
       SemIR::CheckIRId check_ir_id(check_units.size());
-      check_units.push_back(unit->GetCheckUnit(check_ir_id, node_converters));
+      check_units.push_back(
+          unit->GetCheckUnit(check_ir_id, all_trees_and_subtrees));
     }
   }
 

+ 25 - 18
toolchain/lex/tokenized_buffer.cpp

@@ -377,30 +377,29 @@ auto TokenizedBuffer::CollectMemUsage(MemUsage& mem_usage,
   mem_usage.Collect(MemUsage::ConcatLabel(label, "comments_"), comments_);
 }
 
-auto TokenizedBuffer::SourceBufferDiagnosticConverter::ConvertLoc(
-    const char* loc, ContextFnT /*context_fn*/) const
+auto TokenizedBuffer::SourcePointerToDiagnosticLoc(const char* loc) const
     -> ConvertedDiagnosticLoc {
-  CARBON_CHECK(StringRefContainsPointer(tokens_->source_->text(), loc),
+  CARBON_CHECK(StringRefContainsPointer(source_->text(), loc),
                "location not within buffer");
-  int32_t offset = loc - tokens_->source_->text().begin();
+  int32_t offset = loc - source_->text().begin();
 
   // Find the first line starting after the given location.
   const auto* next_line_it = llvm::partition_point(
-      tokens_->line_infos_,
+      line_infos_,
       [offset](const LineInfo& line) { return line.start <= offset; });
 
   // Step back one line to find the line containing the given position.
-  CARBON_CHECK(next_line_it != tokens_->line_infos_.begin(),
+  CARBON_CHECK(next_line_it != line_infos_.begin(),
                "location precedes the start of the first line");
   const auto* line_it = std::prev(next_line_it);
-  int line_number = line_it - tokens_->line_infos_.begin();
+  int line_number = line_it - line_infos_.begin();
   int column_number = offset - line_it->start;
 
   // Grab the line from the buffer by slicing from this line to the next
   // minus the newline. When on the last line, instead use the start to the end
   // of the buffer.
-  llvm::StringRef text = tokens_->source_->text();
-  llvm::StringRef line = next_line_it != tokens_->line_infos_.end()
+  llvm::StringRef text = source_->text();
+  llvm::StringRef line = next_line_it != line_infos_.end()
                              ? text.slice(line_it->start, next_line_it->start)
                              : text.substr(line_it->start);
 
@@ -409,29 +408,37 @@ auto TokenizedBuffer::SourceBufferDiagnosticConverter::ConvertLoc(
   // tail of the line such as CR+LF, etc.
   line.consume_back("\n");
 
-  return {.loc = {.filename = tokens_->source_->filename(),
+  return {.loc = {.filename = source_->filename(),
                   .line = line,
                   .line_number = line_number + 1,
                   .column_number = column_number + 1},
           .last_byte_offset = offset};
 }
 
-auto TokenDiagnosticConverter::ConvertLoc(TokenIndex token,
-                                          ContextFnT context_fn) const
+auto TokenizedBuffer::SourceBufferDiagnosticConverter::ConvertLoc(
+    const char* loc, ContextFnT /*context_fn*/) const
+    -> ConvertedDiagnosticLoc {
+  return tokens_->SourcePointerToDiagnosticLoc(loc);
+}
+
+auto TokenizedBuffer::TokenToDiagnosticLoc(TokenIndex token) const
     -> ConvertedDiagnosticLoc {
   // Map the token location into a position within the source buffer.
-  const auto& token_info = tokens_->GetTokenInfo(token);
   const char* token_start =
-      tokens_->source_->text().begin() + token_info.byte_offset();
+      source_->text().begin() + GetTokenInfo(token).byte_offset();
 
   // Find the corresponding file location.
   // TODO: Should we somehow indicate in the diagnostic location if this token
   // is a recovery token that doesn't correspond to the original source?
-  auto converted =
-      TokenizedBuffer::SourceBufferDiagnosticConverter(tokens_).ConvertLoc(
-          token_start, context_fn);
-  converted.loc.length = tokens_->GetTokenText(token).size();
+  auto converted = SourcePointerToDiagnosticLoc(token_start);
+  converted.loc.length = GetTokenText(token).size();
   return converted;
 }
 
+auto TokenDiagnosticConverter::ConvertLoc(TokenIndex token,
+                                          ContextFnT /*context_fn*/) const
+    -> ConvertedDiagnosticLoc {
+  return tokens_->TokenToDiagnosticLoc(token);
+}
+
 }  // namespace Carbon::Lex

+ 7 - 1
toolchain/lex/tokenized_buffer.h

@@ -194,6 +194,9 @@ class TokenizedBuffer : public Printable<TokenizedBuffer> {
   auto CollectMemUsage(MemUsage& mem_usage, llvm::StringRef label) const
       -> void;
 
+  // Converts a token to a diagnostic location.
+  auto TokenToDiagnosticLoc(TokenIndex token) const -> ConvertedDiagnosticLoc;
+
   // Returns true if the buffer has errors that were detected at lexing time.
   auto has_errors() const -> bool { return has_errors_; }
 
@@ -221,7 +224,6 @@ class TokenizedBuffer : public Printable<TokenizedBuffer> {
 
  private:
   friend class Lexer;
-  friend class TokenDiagnosticConverter;
 
   // A diagnostic location converter that maps token locations into source
   // buffer locations.
@@ -239,6 +241,10 @@ class TokenizedBuffer : public Printable<TokenizedBuffer> {
     const TokenizedBuffer* tokens_;
   };
 
+  // Converts a pointer into the source to a diagnostic location.
+  auto SourcePointerToDiagnosticLoc(const char* loc) const
+      -> ConvertedDiagnosticLoc;
+
   // Specifies minimum widths to use when printing a token's fields via
   // `printToken`.
   struct PrintWidths {

+ 0 - 10
toolchain/parse/BUILD

@@ -188,16 +188,6 @@ cc_fuzz_test(
     ],
 )
 
-cc_library(
-    name = "tree_node_diagnostic_converter",
-    hdrs = ["tree_node_diagnostic_converter.h"],
-    deps = [
-        ":tree",
-        "//toolchain/diagnostics:diagnostic_emitter",
-        "//toolchain/lex:tokenized_buffer",
-    ],
-)
-
 cc_library(
     name = "precedence",
     srcs = ["precedence.cpp"],

+ 9 - 7
toolchain/parse/context.h

@@ -403,24 +403,26 @@ class Context {
   }
 
  private:
-  // Applies the `position_` to the `last_byte_offset` returned by `ConvertLoc`.
+  // Applies the `position_` to the `last_byte_offset` returned by
+  // `TokenToDiagnosticLoc`.
   class TokenDiagnosticConverterForParse
-      : public Lex::TokenDiagnosticConverter {
+      : public DiagnosticConverter<Lex::TokenIndex> {
    public:
     explicit TokenDiagnosticConverterForParse(Lex::TokenizedBuffer* tokens,
                                               Lex::TokenIterator* position)
-        : Lex::TokenDiagnosticConverter(tokens), position_(position) {}
+        : tokens_(tokens), position_(position) {}
 
-    auto ConvertLoc(Lex::TokenIndex token, ContextFnT context_fn) const
+    auto ConvertLoc(Lex::TokenIndex token, ContextFnT /*context_fn*/) const
         -> ConvertedDiagnosticLoc override {
-      auto converted =
-          Lex::TokenDiagnosticConverter::ConvertLoc(token, context_fn);
+      auto converted = tokens_->TokenToDiagnosticLoc(token);
       converted.last_byte_offset = std::max(
-          converted.last_byte_offset, tokens().GetByteOffset(**position_));
+          converted.last_byte_offset, tokens_->GetByteOffset(**position_));
       return converted;
     }
 
    private:
+    Lex::TokenizedBuffer* tokens_;
+
     // The position in `Parse()`.
     Lex::TokenIterator* position_;
   };

+ 47 - 0
toolchain/parse/tree_and_subtrees.cpp

@@ -239,6 +239,53 @@ auto TreeAndSubtrees::CollectMemUsage(MemUsage& mem_usage,
                     subtree_sizes_);
 }
 
+auto TreeAndSubtrees::NodeToDiagnosticLoc(NodeId node_id, bool token_only) const
+    -> ConvertedDiagnosticLoc {
+  // Support the invalid token as a way to emit only the filename, when there
+  // is no line association.
+  if (!node_id.has_value()) {
+    return {{.filename = tree_->tokens().source().filename()}, -1};
+  }
+
+  if (token_only) {
+    return tree_->tokens().TokenToDiagnosticLoc(tree_->node_token(node_id));
+  }
+
+  // Construct a location that encompasses all tokens that descend from this
+  // node (including the root).
+  Lex::TokenIndex start_token = tree_->node_token(node_id);
+  Lex::TokenIndex end_token = start_token;
+  for (NodeId desc : postorder(node_id)) {
+    Lex::TokenIndex desc_token = tree_->node_token(desc);
+    if (!desc_token.has_value()) {
+      continue;
+    }
+    if (desc_token < start_token) {
+      start_token = desc_token;
+    } else if (desc_token > end_token) {
+      end_token = desc_token;
+    }
+  }
+  auto start_loc = tree_->tokens().TokenToDiagnosticLoc(start_token);
+  if (start_token == end_token) {
+    return start_loc;
+  }
+  auto end_loc = tree_->tokens().TokenToDiagnosticLoc(end_token);
+  start_loc.last_byte_offset = end_loc.last_byte_offset;
+  // For multiline locations we simply return the rest of the line for now
+  // since true multiline locations are not yet supported.
+  if (start_loc.loc.line_number != end_loc.loc.line_number) {
+    start_loc.loc.length =
+        start_loc.loc.line.size() - start_loc.loc.column_number + 1;
+  } else {
+    if (start_loc.loc.column_number != end_loc.loc.column_number) {
+      start_loc.loc.length = end_loc.loc.column_number + end_loc.loc.length -
+                             start_loc.loc.column_number;
+    }
+  }
+  return start_loc;
+}
+
 auto TreeAndSubtrees::SiblingIterator::Print(llvm::raw_ostream& output) const
     -> void {
   output << node_;

+ 5 - 0
toolchain/parse/tree_and_subtrees.h

@@ -107,6 +107,11 @@ class TreeAndSubtrees {
   auto CollectMemUsage(MemUsage& mem_usage, llvm::StringRef label) const
       -> void;
 
+  // Converts the node to a diagnostic location, covering either the full
+  // subtree or only the token.
+  auto NodeToDiagnosticLoc(NodeId node_id, bool token_only) const
+      -> ConvertedDiagnosticLoc;
+
   // Returns an iterable range over the parse tree node and all of its
   // descendants in depth-first postorder.
   auto postorder(NodeId n) const

+ 0 - 106
toolchain/parse/tree_node_diagnostic_converter.h

@@ -1,106 +0,0 @@
-// Part of the Carbon Language project, under the Apache License v2.0 with LLVM
-// Exceptions. See /LICENSE for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-
-#ifndef CARBON_TOOLCHAIN_PARSE_TREE_NODE_DIAGNOSTIC_CONVERTER_H_
-#define CARBON_TOOLCHAIN_PARSE_TREE_NODE_DIAGNOSTIC_CONVERTER_H_
-
-#include <utility>
-
-#include "toolchain/diagnostics/diagnostic_emitter.h"
-#include "toolchain/lex/tokenized_buffer.h"
-#include "toolchain/parse/tree.h"
-#include "toolchain/parse/tree_and_subtrees.h"
-
-namespace Carbon::Parse {
-
-class NodeLoc {
- public:
-  // NOLINTNEXTLINE(google-explicit-constructor)
-  NodeLoc(NodeId node_id) : NodeLoc(node_id, false) {}
-  NodeLoc(NodeId node_id, bool token_only)
-      : node_id_(node_id), token_only_(token_only) {}
-  // TODO: Have some other way of representing diagnostic that applies to a file
-  // as a whole.
-  // NOLINTNEXTLINE(google-explicit-constructor)
-  NodeLoc(NoneNodeId node_id) : NodeLoc(node_id, false) {}
-
-  auto node_id() const -> NodeId { return node_id_; }
-  auto token_only() const -> bool { return token_only_; }
-
- private:
-  NodeId node_id_;
-  bool token_only_;
-};
-
-class NodeLocConverter : public DiagnosticConverter<NodeLoc> {
- public:
-  explicit NodeLocConverter(
-      const Lex::TokenizedBuffer* tokens, llvm::StringRef filename,
-      llvm::function_ref<const Parse::TreeAndSubtrees&()> get_tree_and_subtrees)
-      : token_converter_(tokens),
-        filename_(filename),
-        get_tree_and_subtrees_(get_tree_and_subtrees) {}
-
-  // Implements `DiagnosticConverter::ConvertLoc`.
-  auto ConvertLoc(NodeLoc node_loc, ContextFnT context_fn) const
-      -> ConvertedDiagnosticLoc override {
-    // Support the invalid token as a way to emit only the filename, when there
-    // is no line association.
-    if (!node_loc.node_id().has_value()) {
-      return {{.filename = filename_}, -1};
-    }
-
-    const auto& tree = get_tree_and_subtrees_();
-
-    if (node_loc.token_only()) {
-      return token_converter_.ConvertLoc(
-          tree.tree().node_token(node_loc.node_id()), context_fn);
-    }
-
-    // Construct a location that encompasses all tokens that descend from this
-    // node (including the root).
-    Lex::TokenIndex start_token = tree.tree().node_token(node_loc.node_id());
-    Lex::TokenIndex end_token = start_token;
-    for (NodeId desc : tree.postorder(node_loc.node_id())) {
-      Lex::TokenIndex desc_token = tree.tree().node_token(desc);
-      if (!desc_token.has_value()) {
-        continue;
-      }
-      if (desc_token < start_token) {
-        start_token = desc_token;
-      } else if (desc_token > end_token) {
-        end_token = desc_token;
-      }
-    }
-    auto start_loc = token_converter_.ConvertLoc(start_token, context_fn);
-    if (start_token == end_token) {
-      return start_loc;
-    }
-    auto end_loc = token_converter_.ConvertLoc(end_token, context_fn);
-    start_loc.last_byte_offset = end_loc.last_byte_offset;
-    // For multiline locations we simply return the rest of the line for now
-    // since true multiline locations are not yet supported.
-    if (start_loc.loc.line_number != end_loc.loc.line_number) {
-      start_loc.loc.length =
-          start_loc.loc.line.size() - start_loc.loc.column_number + 1;
-    } else {
-      if (start_loc.loc.column_number != end_loc.loc.column_number) {
-        start_loc.loc.length = end_loc.loc.column_number + end_loc.loc.length -
-                               start_loc.loc.column_number;
-      }
-    }
-    return start_loc;
-  }
-
- private:
-  Lex::TokenDiagnosticConverter token_converter_;
-  llvm::StringRef filename_;
-
-  // Returns a lazily constructed TreeAndSubtrees.
-  llvm::function_ref<const Parse::TreeAndSubtrees&()> get_tree_and_subtrees_;
-};
-
-}  // namespace Carbon::Parse
-
-#endif  // CARBON_TOOLCHAIN_PARSE_TREE_NODE_DIAGNOSTIC_CONVERTER_H_