Просмотр исходного кода

Switch from assert to CHECK (#975)


Co-authored-by: Chandler Carruth <chandlerc@gmail.com>
Jon Meow 4 года назад
Родитель
Commit
a562f872e7

+ 9 - 1
toolchain/lexer/BUILD

@@ -11,7 +11,10 @@ cc_library(
     srcs = ["token_kind.cpp"],
     hdrs = ["token_kind.h"],
     textual_hdrs = ["token_registry.def"],
-    deps = ["@llvm-project//llvm:Support"],
+    deps = [
+        "//common:check",
+        "@llvm-project//llvm:Support",
+    ],
 )
 
 cc_test(
@@ -35,6 +38,7 @@ cc_library(
     testonly = 1,
     hdrs = ["test_helpers.h"],
     deps = [
+        "//common:check",
         "//toolchain/diagnostics:diagnostic_emitter",
         "@llvm-project//llvm:Support",
     ],
@@ -46,6 +50,7 @@ cc_library(
     hdrs = ["numeric_literal.h"],
     deps = [
         ":character_set",
+        "//common:check",
         "//toolchain/diagnostics:diagnostic_emitter",
         "@llvm-project//llvm:Support",
     ],
@@ -82,6 +87,7 @@ cc_library(
     hdrs = ["string_literal.h"],
     deps = [
         ":character_set",
+        "//common:check",
         "//toolchain/diagnostics:diagnostic_emitter",
         "@llvm-project//llvm:Support",
     ],
@@ -121,6 +127,7 @@ cc_library(
         ":numeric_literal",
         ":string_literal",
         ":token_kind",
+        "//common:check",
         "//common:ostream",
         "//toolchain/diagnostics:diagnostic_emitter",
         "//toolchain/source:source_buffer",
@@ -159,6 +166,7 @@ cc_fuzz_test(
     corpus = glob(["fuzzer_corpus/tokenized_buffer/*"]),
     deps = [
         ":tokenized_buffer",
+        "//common:check",
         "//toolchain/diagnostics:diagnostic_emitter",
         "//toolchain/diagnostics:null_diagnostics",
         "@llvm-project//llvm:Support",

+ 9 - 7
toolchain/lexer/numeric_literal.cpp

@@ -6,6 +6,7 @@
 
 #include <bitset>
 
+#include "common/check.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/Support/FormatVariadic.h"
 #include "toolchain/lexer/character_set.h"
@@ -45,7 +46,7 @@ struct IrregularDigitSeparators {
       "syntax-irregular-digit-separators";
 
   auto Format() -> std::string {
-    assert((radix == 10 || radix == 16) && "unexpected radix");
+    CHECK((radix == 10 || radix == 16)) << "unexpected radix: " << radix;
     return llvm::formatv(
                "Digit separators in {0} number should appear every {1} "
                "characters from the right.",
@@ -127,7 +128,7 @@ auto LexedNumericLiteral::Lex(llvm::StringRef source_text)
         IsAlnum(source_text[i + 1])) {
       // This is not possible because we don't update result.exponent after we
       // see a '+' or '-'.
-      assert(!seen_plus_minus && "should only consume one + or -");
+      CHECK(!seen_plus_minus) << "should only consume one + or -";
       seen_plus_minus = true;
       continue;
     }
@@ -313,7 +314,8 @@ auto LexedNumericLiteral::Parser::GetExponent() -> llvm::APInt {
 auto LexedNumericLiteral::Parser::CheckDigitSequence(
     llvm::StringRef text, int radix, bool allow_digit_separators)
     -> CheckDigitSequenceResult {
-  assert((radix == 2 || radix == 10 || radix == 16) && "unknown radix");
+  CHECK((radix == 2 || radix == 10 || radix == 16))
+      << "unknown radix: " << radix;
 
   std::bitset<256> valid_digits;
   if (radix == 2) {
@@ -371,8 +373,8 @@ auto LexedNumericLiteral::Parser::CheckDigitSequence(
 // correctly positioned.
 auto LexedNumericLiteral::Parser::CheckDigitSeparatorPlacement(
     llvm::StringRef text, int radix, int num_digit_separators) -> void {
-  assert(std::count(text.begin(), text.end(), '_') == num_digit_separators &&
-         "given wrong number of digit separators");
+  CHECK(std::count(text.begin(), text.end(), '_') == num_digit_separators)
+      << "given wrong number of digit separators: " << num_digit_separators;
 
   if (radix == 2) {
     // There are no restrictions on digit separator placement for binary
@@ -380,8 +382,8 @@ auto LexedNumericLiteral::Parser::CheckDigitSeparatorPlacement(
     return;
   }
 
-  assert((radix == 10 || radix == 16) &&
-         "unexpected radix for digit separator checks");
+  CHECK((radix == 10 || radix == 16))
+      << "unexpected radix " << radix << " for digit separator checks";
 
   auto diagnose_irregular_digit_separators = [&]() {
     emitter_.EmitError<IrregularDigitSeparators>(text.begin(),

+ 4 - 3
toolchain/lexer/string_literal.cpp

@@ -4,6 +4,7 @@
 
 #include "toolchain/lexer/string_literal.h"
 
+#include "common/check.h"
 #include "llvm/ADT/SmallString.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/Support/ConvertUTF.h"
@@ -232,7 +233,7 @@ static auto ExpandUnicodeEscapeSequence(LexerDiagnosticEmitter& emitter,
 static auto ExpandAndConsumeEscapeSequence(LexerDiagnosticEmitter& emitter,
                                            llvm::StringRef& content,
                                            std::string& result) -> void {
-  assert(!content.empty() && "should have escaped closing delimiter");
+  CHECK(!content.empty()) << "should have escaped closing delimiter";
   char first = content.front();
   content = content.drop_front(1);
 
@@ -351,8 +352,8 @@ static auto ExpandEscapeSequencesAndRemoveIndent(
       if (IsHorizontalWhitespace(contents.front())) {
         // Horizontal whitespace other than ` ` is valid only at the end of a
         // line.
-        assert(contents.front() != ' ' &&
-               "should not have stopped at a plain space");
+        CHECK(contents.front() != ' ')
+            << "should not have stopped at a plain space";
         auto after_space = contents.find_if_not(IsHorizontalWhitespace);
         if (after_space == llvm::StringRef::npos ||
             contents[after_space] != '\n') {

+ 3 - 2
toolchain/lexer/test_helpers.h

@@ -8,6 +8,7 @@
 #include <array>
 #include <string>
 
+#include "common/check.h"
 #include "gmock/gmock.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/Support/FormatVariadic.h"
@@ -26,8 +27,8 @@ class SingleTokenDiagnosticTranslator
       : token_(token) {}
 
   auto GetLocation(const char* pos) -> Diagnostic::Location override {
-    assert(llvm::is_sorted(std::array{token_.begin(), pos, token_.end()}) &&
-           "invalid diagnostic location");
+    CHECK(llvm::is_sorted(std::array{token_.begin(), pos, token_.end()}))
+        << "invalid diagnostic location";
     llvm::StringRef prefix = token_.take_front(pos - token_.begin());
     auto [before_last_newline, this_line] = prefix.rsplit('\n');
     if (before_last_newline.size() == prefix.size()) {

+ 3 - 2
toolchain/lexer/token_kind.cpp

@@ -4,6 +4,7 @@
 
 #include "toolchain/lexer/token_kind.h"
 
+#include "common/check.h"
 #include "llvm/ADT/StringRef.h"
 
 namespace Carbon {
@@ -55,7 +56,7 @@ auto TokenKind::GetClosingSymbol() const -> TokenKind {
 #include "toolchain/lexer/token_registry.def"
   };
   auto result = Table[static_cast<int>(kind_value_)];
-  assert(result != Error() && "Only opening symbols are valid!");
+  CHECK(result != Error()) << "Only opening symbols are valid!";
   return result;
 }
 
@@ -77,7 +78,7 @@ auto TokenKind::GetOpeningSymbol() const -> TokenKind {
 #include "toolchain/lexer/token_registry.def"
   };
   auto result = Table[static_cast<int>(kind_value_)];
-  assert(result != Error() && "Only closing symbols are valid!");
+  CHECK(result != Error()) << "Only closing symbols are valid!";
   return result;
 }
 

+ 26 - 25
toolchain/lexer/tokenized_buffer.cpp

@@ -10,6 +10,7 @@
 #include <iterator>
 #include <string>
 
+#include "common/check.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/ADT/StringSwitch.h"
@@ -214,7 +215,7 @@ class TokenizedBuffer::Lexer {
       }
     }
 
-    assert(source_text.empty() && "Cannot reach here w/o finishing the text!");
+    CHECK(source_text.empty()) << "Cannot reach here w/o finishing the text!";
     // Update the line length as this is also the end of a line.
     current_line_info->length = current_column;
     return false;
@@ -428,7 +429,7 @@ class TokenizedBuffer::Lexer {
       open_groups.pop_back();
       token_emitter.EmitError<MismatchedClosing>(opening_token);
 
-      assert(!buffer.Tokens().empty() && "Must have a prior opening token!");
+      CHECK(!buffer.Tokens().empty()) << "Must have a prior opening token!";
       Token prev_token = buffer.Tokens().end()[-1];
 
       // TODO: do a smarter backwards scan for where to put the closing
@@ -468,7 +469,7 @@ class TokenizedBuffer::Lexer {
     // Take the valid characters off the front of the source buffer.
     llvm::StringRef identifier_text =
         source_text.take_while([](char c) { return IsAlnum(c) || c == '_'; });
-    assert(!identifier_text.empty() && "Must have at least one character!");
+    CHECK(!identifier_text.empty()) << "Must have at least one character!";
     int identifier_column = current_column;
     current_column += identifier_text.size();
     source_text = source_text.drop_front(identifier_text.size());
@@ -563,7 +564,7 @@ auto TokenizedBuffer::Lex(SourceBuffer& source, DiagnosticConsumer& consumer)
     if (!result) {
       result = lexer.LexError(source_text);
     }
-    assert(result && "No token was lexed.");
+    CHECK(result) << "No token was lexed.";
   }
 
   // The end-of-file token is always considered to be whitespace.
@@ -616,7 +617,7 @@ auto TokenizedBuffer::GetTokenText(Token token) const -> llvm::StringRef {
     int64_t token_start = line_info.start + token_info.column;
     llvm::Optional<LexedNumericLiteral> relexed_token =
         LexedNumericLiteral::Lex(source_->Text().substr(token_start));
-    assert(relexed_token && "Could not reform numeric literal token.");
+    CHECK(relexed_token) << "Could not reform numeric literal token.";
     return relexed_token->Text();
   }
 
@@ -627,7 +628,7 @@ auto TokenizedBuffer::GetTokenText(Token token) const -> llvm::StringRef {
     int64_t token_start = line_info.start + token_info.column;
     llvm::Optional<LexedStringLiteral> relexed_token =
         LexedStringLiteral::Lex(source_->Text().substr(token_start));
-    assert(relexed_token && "Could not reform string literal token.");
+    CHECK(relexed_token) << "Could not reform string literal token.";
     return relexed_token->Text();
   }
 
@@ -645,30 +646,30 @@ auto TokenizedBuffer::GetTokenText(Token token) const -> llvm::StringRef {
     return llvm::StringRef();
   }
 
-  assert(token_info.kind == TokenKind::Identifier() &&
-         "Only identifiers have stored text!");
+  CHECK(token_info.kind == TokenKind::Identifier())
+      << "Only identifiers have stored text!";
   return GetIdentifierText(token_info.id);
 }
 
 auto TokenizedBuffer::GetIdentifier(Token token) const -> Identifier {
   auto& token_info = GetTokenInfo(token);
-  assert(token_info.kind == TokenKind::Identifier() &&
-         "The token must be an identifier!");
+  CHECK(token_info.kind == TokenKind::Identifier())
+      << "The token must be an identifier!";
   return token_info.id;
 }
 
 auto TokenizedBuffer::GetIntegerLiteral(Token token) const
     -> const llvm::APInt& {
   auto& token_info = GetTokenInfo(token);
-  assert(token_info.kind == TokenKind::IntegerLiteral() &&
-         "The token must be an integer literal!");
+  CHECK(token_info.kind == TokenKind::IntegerLiteral())
+      << "The token must be an integer literal!";
   return literal_int_storage_[token_info.literal_index];
 }
 
 auto TokenizedBuffer::GetRealLiteral(Token token) const -> RealLiteralValue {
   auto& token_info = GetTokenInfo(token);
-  assert(token_info.kind == TokenKind::RealLiteral() &&
-         "The token must be a real literal!");
+  CHECK(token_info.kind == TokenKind::RealLiteral())
+      << "The token must be a real literal!";
 
   // Note that every real literal is at least three characters long, so we can
   // safely look at the second character to determine whether we have a decimal
@@ -683,32 +684,32 @@ auto TokenizedBuffer::GetRealLiteral(Token token) const -> RealLiteralValue {
 
 auto TokenizedBuffer::GetStringLiteral(Token token) const -> llvm::StringRef {
   auto& token_info = GetTokenInfo(token);
-  assert(token_info.kind == TokenKind::StringLiteral() &&
-         "The token must be a string literal!");
+  CHECK(token_info.kind == TokenKind::StringLiteral())
+      << "The token must be a string literal!";
   return literal_string_storage_[token_info.literal_index];
 }
 
 auto TokenizedBuffer::GetTypeLiteralSize(Token token) const
     -> const llvm::APInt& {
   auto& token_info = GetTokenInfo(token);
-  assert(token_info.kind.IsSizedTypeLiteral() &&
-         "The token must be a sized type literal!");
+  CHECK(token_info.kind.IsSizedTypeLiteral())
+      << "The token must be a sized type literal!";
   return literal_int_storage_[token_info.literal_index];
 }
 
 auto TokenizedBuffer::GetMatchedClosingToken(Token opening_token) const
     -> Token {
   auto& opening_token_info = GetTokenInfo(opening_token);
-  assert(opening_token_info.kind.IsOpeningSymbol() &&
-         "The token must be an opening group symbol!");
+  CHECK(opening_token_info.kind.IsOpeningSymbol())
+      << "The token must be an opening group symbol!";
   return opening_token_info.closing_token;
 }
 
 auto TokenizedBuffer::GetMatchedOpeningToken(Token closing_token) const
     -> Token {
   auto& closing_token_info = GetTokenInfo(closing_token);
-  assert(closing_token_info.kind.IsClosingSymbol() &&
-         "The token must be an closing group symbol!");
+  CHECK(closing_token_info.kind.IsClosingSymbol())
+      << "The token must be an closing group symbol!";
   return closing_token_info.opening_token;
 }
 
@@ -752,7 +753,7 @@ auto TokenizedBuffer::PrintWidths::Widen(const PrintWidths& widths) -> void {
 //
 // This routine requires its argument to be *non-negative*.
 static auto ComputeDecimalPrintedWidth(int number) -> int {
-  assert(number >= 0 && "Negative numbers are not supported.");
+  CHECK(number >= 0) << "Negative numbers are not supported.";
   if (number == 0) {
     return 1;
   }
@@ -886,8 +887,8 @@ auto TokenizedBuffer::SourceBufferLocationTranslator::GetLocation(
   bool incomplete_line_info = line_it == buffer_->line_infos_.end();
 
   // Step back one line to find the line containing the given position.
-  assert(line_it != buffer_->line_infos_.begin() &&
-         "location precedes the start of the first line");
+  CHECK(line_it != buffer_->line_infos_.begin())
+      << "location precedes the start of the first line";
   --line_it;
   int line_number = line_it - buffer_->line_infos_.begin();
   int column_number = offset - line_it->start;

+ 5 - 4
toolchain/lexer/tokenized_buffer_fuzzer.cpp

@@ -5,6 +5,7 @@
 #include <cstdint>
 #include <cstring>
 
+#include "common/check.h"
 #include "llvm/ADT/StringRef.h"
 #include "toolchain/diagnostics/diagnostic_emitter.h"
 #include "toolchain/diagnostics/null_diagnostics.h"
@@ -50,12 +51,12 @@ extern "C" int LLVMFuzzerTestOneInput(const unsigned char* data,
   for (TokenizedBuffer::Token token : buffer.Tokens()) {
     int line_number = buffer.GetLineNumber(token);
     (void)line_number;
-    assert(line_number > 0 && "Invalid line number!");
-    assert(line_number < INT_MAX && "Invalid line number!");
+    CHECK(line_number > 0) << "Invalid line number!";
+    CHECK(line_number < INT_MAX) << "Invalid line number!";
     int column_number = buffer.GetColumnNumber(token);
     (void)column_number;
-    assert(column_number > 0 && "Invalid line number!");
-    assert(column_number < INT_MAX && "Invalid line number!");
+    CHECK(column_number > 0) << "Invalid line number!";
+    CHECK(column_number < INT_MAX) << "Invalid line number!";
   }
 
   return 0;

+ 2 - 0
toolchain/parser/BUILD

@@ -35,6 +35,7 @@ cc_library(
     deps = [
         ":parse_node_kind",
         ":precedence",
+        "//common:check",
         "//toolchain/diagnostics:diagnostic_emitter",
         "//toolchain/lexer:token_kind",
         "//toolchain/lexer:tokenized_buffer",
@@ -78,6 +79,7 @@ cc_fuzz_test(
     corpus = glob(["fuzzer_corpus/*"]),
     deps = [
         ":parse_tree",
+        "//common:check",
         "//toolchain/diagnostics:diagnostic_emitter",
         "//toolchain/diagnostics:null_diagnostics",
         "//toolchain/lexer:tokenized_buffer",

+ 11 - 10
toolchain/parser/parse_test_helpers.h

@@ -12,6 +12,7 @@
 #include <utility>
 #include <vector>
 
+#include "common/check.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/StringRef.h"
@@ -100,9 +101,9 @@ inline auto ExpectedNodesMatcher::MatchAndExplain(
     }
 
     if (expected_node.skip_subtree) {
-      assert(expected_node.children.empty() &&
-             "Must not skip an expected subtree while specifying expected "
-             "children!");
+      CHECK(expected_node.children.empty())
+          << "Must not skip an expected subtree while specifying expected "
+             "children!";
       nodes_it = llvm::reverse(tree.Postorder(n)).end();
       continue;
     }
@@ -137,9 +138,9 @@ inline auto ExpectedNodesMatcher::MatchAndExplain(
   // subtrees. Instead, we need to check that we successfully processed all of
   // the actual tree and consumed all of the expected tree.
   if (nodes_it != nodes_end) {
-    assert(expected_node_stack.empty() &&
-           "If we have unmatched nodes in the input tree, should only finish "
-           "having fully processed expected tree.");
+    CHECK(expected_node_stack.empty())
+        << "If we have unmatched nodes in the input tree, should only finish "
+           "having fully processed expected tree.";
     output << "\nFinished processing expected nodes and there are still "
            << (nodes_end - nodes_it) << " unexpected nodes.";
     matches = false;
@@ -192,8 +193,8 @@ inline auto ExpectedNodesMatcher::DescribeTo(std::ostream* output_ptr) const
     }
 
     if (!expected_node.children.empty()) {
-      assert(!expected_node.skip_subtree &&
-             "Must not have children and skip a subtree!");
+      CHECK(!expected_node.skip_subtree)
+          << "Must not have children and skip a subtree!";
       output << ", children: [\n";
       for (const ExpectedNode& child_expected_node :
            llvm::reverse(expected_node.children)) {
@@ -208,8 +209,8 @@ inline auto ExpectedNodesMatcher::DescribeTo(std::ostream* output_ptr) const
     // we pop up.
     output << "}";
     if (!expected_node_stack.empty()) {
-      assert(depth >= expected_node_stack.back().second &&
-             "Cannot have an increase in depth on a leaf node!");
+      CHECK(depth >= expected_node_stack.back().second)
+          << "Cannot have an increase in depth on a leaf node!";
       // The distance we need to pop is the difference in depth.
       int pop_depth = depth - expected_node_stack.back().second;
       for (int pop_count = 0; pop_count < pop_depth; ++pop_count) {

+ 4 - 3
toolchain/parser/parse_tree.cpp

@@ -6,6 +6,7 @@
 
 #include <cstdlib>
 
+#include "common/check.h"
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/Optional.h"
 #include "llvm/ADT/Sequence.h"
@@ -119,12 +120,12 @@ auto ParseTree::Print(llvm::raw_ostream& output) const -> void {
     }
 
     // This node is finished, so close it up.
-    assert(n_impl.subtree_size == 1 &&
-           "Subtree size must always be a positive integer!");
+    CHECK(n_impl.subtree_size == 1)
+        << "Subtree size must always be a positive integer!";
     output << "}";
 
     int next_depth = node_stack.empty() ? 0 : node_stack.back().second;
-    assert(next_depth <= depth && "Cannot have the next depth increase!");
+    CHECK(next_depth <= depth) << "Cannot have the next depth increase!";
     for (int close_children_count : llvm::seq(0, depth - next_depth)) {
       (void)close_children_count;
       output << "]}";

+ 2 - 1
toolchain/parser/parse_tree_fuzzer.cpp

@@ -6,6 +6,7 @@
 #include <cstdint>
 #include <cstring>
 
+#include "common/check.h"
 #include "llvm/ADT/StringRef.h"
 #include "toolchain/diagnostics/diagnostic_emitter.h"
 #include "toolchain/diagnostics/null_diagnostics.h"
@@ -55,7 +56,7 @@ extern "C" int LLVMFuzzerTestOneInput(const unsigned char* data,
 
   // In the absence of parse errors, we should have exactly as many nodes as
   // tokens.
-  assert(tree.Size() == tokens.Size() && "Unexpected number of tree nodes!");
+  CHECK(tree.Size() == tokens.Size()) << "Unexpected number of tree nodes!";
 
   return 0;
 }

+ 15 - 14
toolchain/parser/parser_impl.cpp

@@ -6,6 +6,7 @@
 
 #include <cstdlib>
 
+#include "common/check.h"
 #include "llvm/ADT/Optional.h"
 #include "llvm/Support/FormatVariadic.h"
 #include "llvm/Support/raw_ostream.h"
@@ -210,11 +211,11 @@ ParseTree::Parser::Parser(ParseTree& tree_arg, TokenizedBuffer& tokens_arg,
       emitter_(emitter),
       position_(tokens_.Tokens().begin()),
       end_(tokens_.Tokens().end()) {
-  assert(std::find_if(position_, end_,
-                      [&](TokenizedBuffer::Token t) {
-                        return tokens_.GetKind(t) == TokenKind::EndOfFile();
-                      }) != end_ &&
-         "No EndOfFileToken in token buffer.");
+  CHECK(std::find_if(position_, end_,
+                     [&](TokenizedBuffer::Token t) {
+                       return tokens_.GetKind(t) == TokenKind::EndOfFile();
+                     }) != end_)
+      << "No EndOfFileToken in token buffer.";
 }
 
 auto ParseTree::Parser::Parse(TokenizedBuffer& tokens,
@@ -237,17 +238,17 @@ auto ParseTree::Parser::Parse(TokenizedBuffer& tokens,
 
   parser.AddLeafNode(ParseNodeKind::FileEnd(), *parser.position_);
 
-  assert(tree.Verify() && "Parse tree built but does not verify!");
+  CHECK(tree.Verify()) << "Parse tree built but does not verify!";
   return tree;
 }
 
 auto ParseTree::Parser::Consume(TokenKind kind) -> TokenizedBuffer::Token {
-  assert(kind != TokenKind::EndOfFile() && "Cannot consume the EOF token!");
-  assert(NextTokenIs(kind) && "The current token is the wrong kind!");
+  CHECK(kind != TokenKind::EndOfFile()) << "Cannot consume the EOF token!";
+  CHECK(NextTokenIs(kind)) << "The current token is the wrong kind!";
   TokenizedBuffer::Token t = *position_;
   ++position_;
-  assert(position_ != end_ &&
-         "Reached end of tokens without finding EOF token.");
+  CHECK(position_ != end_)
+      << "Reached end of tokens without finding EOF token.";
   return t;
 }
 
@@ -323,9 +324,9 @@ auto ParseTree::Parser::SkipMatchingGroup() -> bool {
 }
 
 auto ParseTree::Parser::SkipTo(TokenizedBuffer::Token t) -> void {
-  assert(t >= *position_ && "Tried to skip backwards.");
+  CHECK(t >= *position_) << "Tried to skip backwards.";
   position_ = TokenizedBuffer::TokenIterator(t);
-  assert(position_ != end_ && "Skipped past EOF.");
+  CHECK(position_ != end_) << "Skipped past EOF.";
 }
 
 auto ParseTree::Parser::FindNextOf(
@@ -448,7 +449,7 @@ auto ParseTree::Parser::ParseList(TokenKind open, TokenKind close,
 
         auto end_of_element = FindNextOf({TokenKind::Comma(), close});
         // The lexer guarantees that parentheses are balanced.
-        assert(end_of_element && "missing matching `)` for `(`");
+        CHECK(end_of_element) << "missing matching `)` for `(`";
         SkipTo(*end_of_element);
       }
 
@@ -907,7 +908,7 @@ static auto IsPossibleStartOfOperand(TokenKind kind) -> bool {
 }
 
 auto ParseTree::Parser::IsLexicallyValidInfixOperator() -> bool {
-  assert(!AtEndOfFile() && "Expected an operator token.");
+  CHECK(!AtEndOfFile()) << "Expected an operator token.";
 
   bool leading_space = tokens_.HasLeadingWhitespace(*position_);
   bool trailing_space = tokens_.HasTrailingWhitespace(*position_);

+ 4 - 1
toolchain/source/BUILD

@@ -8,7 +8,10 @@ cc_library(
     name = "source_buffer",
     srcs = ["source_buffer.cpp"],
     hdrs = ["source_buffer.h"],
-    deps = ["@llvm-project//llvm:Support"],
+    deps = [
+        "//common:check",
+        "@llvm-project//llvm:Support",
+    ],
 )
 
 cc_test(

+ 4 - 3
toolchain/source/source_buffer.cpp

@@ -13,6 +13,7 @@
 #include <cstdint>
 #include <system_error>
 
+#include "common/check.h"
 #include "llvm/ADT/ScopeExit.h"
 
 namespace Carbon {
@@ -78,8 +79,8 @@ auto SourceBuffer::CreateFromFile(llvm::StringRef filename)
   }
 
   buffer.text_ = llvm::StringRef(static_cast<const char*>(mapped_text), size);
-  assert(!buffer.text_.empty() &&
-         "Must not have an empty text when we have mapped data from a file!");
+  CHECK(!buffer.text_.empty())
+      << "Must not have an empty text when we have mapped data from a file!";
   return {std::move(buffer)};
 }
 
@@ -95,7 +96,7 @@ SourceBuffer::~SourceBuffer() {
         munmap(const_cast<void*>(static_cast<const void*>(text_.data())),
                text_.size());
     (void)result;
-    assert(result != -1 && "Unmapping text failed!");
+    CHECK(result != -1) << "Unmapping text failed!";
   }
 }