Răsfoiți Sursa

Sync the keyword list (#1097)

I was mainly looking at keywords trying to figure out what needs work and the amount to which it doesn't reflect the design confused me (including some things that we've decided not to include, and some things I'm not aware of discussion about). I figured this cleanup would at least make it somewhat clearer why things are in there.

I'm treating https://github.com/carbon-language/carbon-lang/blob/trunk/docs/design/lexical_conventions/words.md as canonical, with `_` and `xor` as presumably deliberate exceptions. Similarly avoiding symbol tokens because I assume you'll push proposals for the difference.

I dropped the `Keyword` qualifier because `is` makes `IsKeyword` a name conflict, and dropping the qualifier seemed like the more consistent solution (it doesn't do `AmpSymbol`, after all). If we need clarity I might lean towards a separate namespace to avoid naming conflicts.
Jon Meow 4 ani în urmă
părinte
comite
fa07a016b8

+ 2 - 2
toolchain/driver/testdata/carbon_test.carbon

@@ -13,14 +13,14 @@ fn run(String program) {
   return True;
 }
 
-// TOKENS: token: { index:  0, kind:       'FnKeyword', line: 12, column:   1, indent: 1, spelling: 'fn', has_trailing_space: true }
+// TOKENS: token: { index:  0, kind:              'Fn', line: 12, column:   1, indent: 1, spelling: 'fn', has_trailing_space: true }
 // TOKENS: token: { index:  1, kind:      'Identifier', line: 12, column:   4, indent: 1, spelling: 'run', identifier: 0 }
 // TOKENS: token: { index:  2, kind:       'OpenParen', line: 12, column:   7, indent: 1, spelling: '(', closing_token: 5 }
 // TOKENS: token: { index:  3, kind:      'Identifier', line: 12, column:   8, indent: 1, spelling: 'String', identifier: 1, has_trailing_space: true }
 // TOKENS: token: { index:  4, kind:      'Identifier', line: 12, column:  15, indent: 1, spelling: 'program', identifier: 2 }
 // TOKENS: token: { index:  5, kind:      'CloseParen', line: 12, column:  22, indent: 1, spelling: ')', opening_token: 2, has_trailing_space: true }
 // TOKENS: token: { index:  6, kind:  'OpenCurlyBrace', line: 12, column:  24, indent: 1, spelling: '{', closing_token: 10, has_trailing_space: true }
-// TOKENS: token: { index:  7, kind:   'ReturnKeyword', line: 13, column:   3, indent: 3, spelling: 'return', has_trailing_space: true }
+// TOKENS: token: { index:  7, kind:          'Return', line: 13, column:   3, indent: 3, spelling: 'return', has_trailing_space: true }
 // TOKENS: token: { index:  8, kind:      'Identifier', line: 13, column:  10, indent: 3, spelling: 'True', identifier: 3 }
 // TOKENS: token: { index:  9, kind:            'Semi', line: 13, column:  14, indent: 3, spelling: ';', has_trailing_space: true }
 // TOKENS: token: { index: 10, kind: 'CloseCurlyBrace', line: 14, column:   1, indent: 1, spelling: '}', opening_token: 6, has_trailing_space: true }

+ 49 - 39
toolchain/lexer/token_registry.def

@@ -113,45 +113,55 @@ CARBON_CLOSING_GROUP_SYMBOL_TOKEN(CloseSquareBracket, "]", OpenSquareBracket)
 #define CARBON_KEYWORD_TOKEN(Name, Spelling) CARBON_TOKEN(Name)
 #endif
 // clang-format off
-CARBON_KEYWORD_TOKEN(AndKeyword,        "and")
-CARBON_KEYWORD_TOKEN(AsmKeyword,        "asm")
-CARBON_KEYWORD_TOKEN(BreakKeyword,      "break")
-CARBON_KEYWORD_TOKEN(CatchKeyword,      "catch")
-CARBON_KEYWORD_TOKEN(ClassKeyword,      "class")
-CARBON_KEYWORD_TOKEN(ContinueKeyword,   "continue")
-CARBON_KEYWORD_TOKEN(ElseKeyword,       "else")
-CARBON_KEYWORD_TOKEN(EnumKeyword,       "enum")
-CARBON_KEYWORD_TOKEN(ExternKeyword,     "extern")
-CARBON_KEYWORD_TOKEN(FinalKeyword,      "final")
-CARBON_KEYWORD_TOKEN(FixKeyword,        "fix")
-CARBON_KEYWORD_TOKEN(FnKeyword,         "fn")
-CARBON_KEYWORD_TOKEN(ForKeyword,        "for")
-CARBON_KEYWORD_TOKEN(GuardKeyword,      "guard")
-CARBON_KEYWORD_TOKEN(IfKeyword,         "if")
-CARBON_KEYWORD_TOKEN(InlineKeyword,     "inline")
-CARBON_KEYWORD_TOKEN(InoutKeyword,      "inout")
-CARBON_KEYWORD_TOKEN(InterfaceKeyword,  "interface")
-CARBON_KEYWORD_TOKEN(InternalKeyword,   "internal")
-CARBON_KEYWORD_TOKEN(LetKeyword,        "let")
-CARBON_KEYWORD_TOKEN(LoopKeyword,       "loop")
-CARBON_KEYWORD_TOKEN(MatchKeyword,      "match")
-CARBON_KEYWORD_TOKEN(NotKeyword,        "not")
-CARBON_KEYWORD_TOKEN(OrKeyword,         "or")
-CARBON_KEYWORD_TOKEN(OutKeyword,        "out")
-CARBON_KEYWORD_TOKEN(PrivateKeyword,    "private")
-CARBON_KEYWORD_TOKEN(ProtectedKeyword,  "protected")
-CARBON_KEYWORD_TOKEN(PublicKeyword,     "public")
-CARBON_KEYWORD_TOKEN(RefKeyword,        "ref")
-CARBON_KEYWORD_TOKEN(ReturnKeyword,     "return")
-CARBON_KEYWORD_TOKEN(StaticKeyword,     "static")
-CARBON_KEYWORD_TOKEN(StructKeyword,     "struct")
-CARBON_KEYWORD_TOKEN(ThrowKeyword,      "throw")
-CARBON_KEYWORD_TOKEN(TryKeyword,        "try")
-CARBON_KEYWORD_TOKEN(UnderscoreKeyword, "_")
-CARBON_KEYWORD_TOKEN(VarKeyword,        "var")
-CARBON_KEYWORD_TOKEN(VirtualKeyword,    "virtual")
-CARBON_KEYWORD_TOKEN(WhileKeyword,      "while")
-CARBON_KEYWORD_TOKEN(XorKeyword,        "xor")
+CARBON_KEYWORD_TOKEN(Abstract,   "abstract")
+CARBON_KEYWORD_TOKEN(Addr,       "addr")
+CARBON_KEYWORD_TOKEN(Alias,      "alias")
+CARBON_KEYWORD_TOKEN(And,        "and")
+CARBON_KEYWORD_TOKEN(Api,        "api")
+CARBON_KEYWORD_TOKEN(As,         "as")
+CARBON_KEYWORD_TOKEN(Auto,       "auto")
+CARBON_KEYWORD_TOKEN(Base,       "base")
+CARBON_KEYWORD_TOKEN(Break,      "break")
+CARBON_KEYWORD_TOKEN(Case,       "case")
+CARBON_KEYWORD_TOKEN(Class,      "class")
+CARBON_KEYWORD_TOKEN(Constraint, "constraint")
+CARBON_KEYWORD_TOKEN(Continue,   "continue")
+CARBON_KEYWORD_TOKEN(Default,    "default")
+CARBON_KEYWORD_TOKEN(Else,       "else")
+CARBON_KEYWORD_TOKEN(Extends,    "extends")
+CARBON_KEYWORD_TOKEN(External,   "external")
+CARBON_KEYWORD_TOKEN(Final,      "final")
+CARBON_KEYWORD_TOKEN(Fn,         "fn")
+CARBON_KEYWORD_TOKEN(For,        "for")
+CARBON_KEYWORD_TOKEN(Friend,     "friend")
+CARBON_KEYWORD_TOKEN(If,         "if")
+CARBON_KEYWORD_TOKEN(Impl,       "impl")
+CARBON_KEYWORD_TOKEN(Import,     "import")
+CARBON_KEYWORD_TOKEN(Interface,  "interface")
+CARBON_KEYWORD_TOKEN(Is,         "is")
+CARBON_KEYWORD_TOKEN(Let,        "let")
+CARBON_KEYWORD_TOKEN(Library,    "library")
+CARBON_KEYWORD_TOKEN(Match,      "match")
+CARBON_KEYWORD_TOKEN(Namespace,  "namespace")
+CARBON_KEYWORD_TOKEN(Not,        "not")
+CARBON_KEYWORD_TOKEN(Observe,    "observe")
+CARBON_KEYWORD_TOKEN(Or,         "or")
+CARBON_KEYWORD_TOKEN(Override,   "override")
+CARBON_KEYWORD_TOKEN(Package,    "package")
+CARBON_KEYWORD_TOKEN(Partial,    "partial")
+CARBON_KEYWORD_TOKEN(Private,    "private")
+CARBON_KEYWORD_TOKEN(Protected,  "protected")
+CARBON_KEYWORD_TOKEN(Return,     "return")
+CARBON_KEYWORD_TOKEN(Returned,   "returned")
+CARBON_KEYWORD_TOKEN(Then,       "then")
+// Underscore is tokenized as a keyword because it's part of identifiers.
+CARBON_KEYWORD_TOKEN(Underscore, "_")
+CARBON_KEYWORD_TOKEN(Var,        "var")
+CARBON_KEYWORD_TOKEN(Virtual,    "virtual")
+CARBON_KEYWORD_TOKEN(Where,      "where")
+CARBON_KEYWORD_TOKEN(While,      "while")
+// TODO: In use by precedence.cpp, but not standardized.
+CARBON_KEYWORD_TOKEN(Xor,        "xor")
 // clang-format on
 #undef CARBON_KEYWORD_TOKEN
 

+ 18 - 20
toolchain/lexer/tokenized_buffer_test.cpp

@@ -272,7 +272,7 @@ TEST_F(LexerTest, SplitsNumericLiteralsProperly) {
                   // newline
                   {.kind = TokenKind::IntegerLiteral(), .text = "13"},
                   {.kind = TokenKind::Period()},
-                  {.kind = TokenKind::UnderscoreKeyword()},
+                  {.kind = TokenKind::Underscore()},
                   // newline
                   {.kind = TokenKind::EndOfFile()},
               }));
@@ -577,28 +577,26 @@ TEST_F(LexerTest, Whitespace) {
 TEST_F(LexerTest, Keywords) {
   auto buffer = Lex("   fn");
   EXPECT_FALSE(buffer.HasErrors());
-  EXPECT_THAT(
-      buffer,
-      HasTokens(llvm::ArrayRef<ExpectedToken>{
-          {.kind = TokenKind::FnKeyword(), .column = 4, .indent_column = 4},
-          {TokenKind::EndOfFile()},
-      }));
+  EXPECT_THAT(buffer,
+              HasTokens(llvm::ArrayRef<ExpectedToken>{
+                  {.kind = TokenKind::Fn(), .column = 4, .indent_column = 4},
+                  {TokenKind::EndOfFile()},
+              }));
 
-  buffer = Lex("and or not if else for loop return var break continue _");
+  buffer = Lex("and or not if else for return var break continue _");
   EXPECT_FALSE(buffer.HasErrors());
   EXPECT_THAT(buffer, HasTokens(llvm::ArrayRef<ExpectedToken>{
-                          {TokenKind::AndKeyword()},
-                          {TokenKind::OrKeyword()},
-                          {TokenKind::NotKeyword()},
-                          {TokenKind::IfKeyword()},
-                          {TokenKind::ElseKeyword()},
-                          {TokenKind::ForKeyword()},
-                          {TokenKind::LoopKeyword()},
-                          {TokenKind::ReturnKeyword()},
-                          {TokenKind::VarKeyword()},
-                          {TokenKind::BreakKeyword()},
-                          {TokenKind::ContinueKeyword()},
-                          {TokenKind::UnderscoreKeyword()},
+                          {TokenKind::And()},
+                          {TokenKind::Or()},
+                          {TokenKind::Not()},
+                          {TokenKind::If()},
+                          {TokenKind::Else()},
+                          {TokenKind::For()},
+                          {TokenKind::Return()},
+                          {TokenKind::Var()},
+                          {TokenKind::Break()},
+                          {TokenKind::Continue()},
+                          {TokenKind::Underscore()},
                           {TokenKind::EndOfFile()},
                       }));
 }

+ 16 - 16
toolchain/parser/parser_impl.cpp

@@ -601,7 +601,7 @@ auto ParseTree::Parser::ParseCodeBlock() -> llvm::Optional<Node> {
 }
 
 auto ParseTree::Parser::ParseFunctionDeclaration() -> Node {
-  TokenizedBuffer::Token function_intro_token = Consume(TokenKind::FnKeyword());
+  TokenizedBuffer::Token function_intro_token = Consume(TokenKind::Fn());
   auto start = GetSubtreeStartPosition();
 
   auto add_error_function_node = [&] {
@@ -663,7 +663,7 @@ auto ParseTree::Parser::ParseFunctionDeclaration() -> Node {
 
 auto ParseTree::Parser::ParseVariableDeclaration() -> Node {
   // `var` pattern [= expression] `;`
-  TokenizedBuffer::Token var_token = Consume(TokenKind::VarKeyword());
+  TokenizedBuffer::Token var_token = Consume(TokenKind::Var());
   auto start = GetSubtreeStartPosition();
 
   RETURN_IF_STACK_LIMITED(AddNode(ParseNodeKind::VariableDeclaration(),
@@ -706,9 +706,9 @@ auto ParseTree::Parser::ParseEmptyDeclaration() -> Node {
 auto ParseTree::Parser::ParseDeclaration() -> llvm::Optional<Node> {
   RETURN_IF_STACK_LIMITED(llvm::None);
   switch (NextTokenKind()) {
-    case TokenKind::FnKeyword():
+    case TokenKind::Fn():
       return ParseFunctionDeclaration();
-    case TokenKind::VarKeyword():
+    case TokenKind::Var():
       return ParseVariableDeclaration();
     case TokenKind::Semi():
       return ParseEmptyDeclaration();
@@ -1184,14 +1184,14 @@ auto ParseTree::Parser::ParseParenCondition(TokenKind introducer)
 
 auto ParseTree::Parser::ParseIfStatement() -> llvm::Optional<Node> {
   auto start = GetSubtreeStartPosition();
-  auto if_token = Consume(TokenKind::IfKeyword());
-  auto cond = ParseParenCondition(TokenKind::IfKeyword());
+  auto if_token = Consume(TokenKind::If());
+  auto cond = ParseParenCondition(TokenKind::If());
   auto then_case = ParseCodeBlock();
   bool else_has_errors = false;
-  if (ConsumeAndAddLeafNodeIf(TokenKind::ElseKeyword(),
+  if (ConsumeAndAddLeafNodeIf(TokenKind::Else(),
                               ParseNodeKind::IfStatementElse())) {
     // 'else if' is permitted as a special case.
-    if (NextTokenIs(TokenKind::IfKeyword())) {
+    if (NextTokenIs(TokenKind::If())) {
       else_has_errors = !ParseIfStatement();
     } else {
       else_has_errors = !ParseCodeBlock();
@@ -1204,8 +1204,8 @@ auto ParseTree::Parser::ParseIfStatement() -> llvm::Optional<Node> {
 auto ParseTree::Parser::ParseWhileStatement() -> llvm::Optional<Node> {
   RETURN_IF_STACK_LIMITED(llvm::None);
   auto start = GetSubtreeStartPosition();
-  auto while_token = Consume(TokenKind::WhileKeyword());
-  auto cond = ParseParenCondition(TokenKind::WhileKeyword());
+  auto while_token = Consume(TokenKind::While());
+  auto cond = ParseParenCondition(TokenKind::While());
   auto body = ParseCodeBlock();
   return AddNode(ParseNodeKind::WhileStatement(), while_token, start,
                  /*has_error=*/!cond || !body);
@@ -1241,24 +1241,24 @@ auto ParseTree::Parser::ParseKeywordStatement(ParseNodeKind kind,
 auto ParseTree::Parser::ParseStatement() -> llvm::Optional<Node> {
   RETURN_IF_STACK_LIMITED(llvm::None);
   switch (NextTokenKind()) {
-    case TokenKind::VarKeyword():
+    case TokenKind::Var():
      return ParseVariableDeclaration();
 
-    case TokenKind::IfKeyword():
+    case TokenKind::If():
      return ParseIfStatement();
 
-    case TokenKind::WhileKeyword():
+    case TokenKind::While():
      return ParseWhileStatement();
 
-    case TokenKind::ContinueKeyword():
+    case TokenKind::Continue():
       return ParseKeywordStatement(ParseNodeKind::ContinueStatement(),
                                    KeywordStatementArgument::None);
 
-    case TokenKind::BreakKeyword():
+    case TokenKind::Break():
       return ParseKeywordStatement(ParseNodeKind::BreakStatement(),
                                    KeywordStatementArgument::None);
 
-    case TokenKind::ReturnKeyword():
+    case TokenKind::Return():
       return ParseKeywordStatement(ParseNodeKind::ReturnStatement(),
                                    KeywordStatementArgument::Optional);
 

+ 5 - 5
toolchain/parser/precedence.cpp

@@ -199,7 +199,7 @@ auto PrecedenceGroup::ForLeading(TokenKind kind)
     case TokenKind::Star():
       return PrecedenceGroup(TermPrefix);
 
-    case TokenKind::NotKeyword():
+    case TokenKind::Not():
       return PrecedenceGroup(LogicalPrefix);
 
     case TokenKind::Minus():
@@ -233,9 +233,9 @@ auto PrecedenceGroup::ForTrailing(TokenKind kind, bool infix)
       return Trailing{.level = CompoundAssignment, .is_binary = true};
 
     // Logical operators.
-    case TokenKind::AndKeyword():
+    case TokenKind::And():
       return Trailing{.level = LogicalAnd, .is_binary = true};
-    case TokenKind::OrKeyword():
+    case TokenKind::Or():
       return Trailing{.level = LogicalOr, .is_binary = true};
 
     // Bitwise operators.
@@ -243,7 +243,7 @@ auto PrecedenceGroup::ForTrailing(TokenKind kind, bool infix)
       return Trailing{.level = BitwiseAnd, .is_binary = true};
     case TokenKind::Pipe():
       return Trailing{.level = BitwiseOr, .is_binary = true};
-    case TokenKind::XorKeyword():
+    case TokenKind::Xor():
       return Trailing{.level = BitwiseXor, .is_binary = true};
     case TokenKind::GreaterGreater():
     case TokenKind::LessLess():
@@ -282,7 +282,7 @@ auto PrecedenceGroup::ForTrailing(TokenKind kind, bool infix)
 
     // Prefix-only operators.
     case TokenKind::Tilde():
-    case TokenKind::NotKeyword():
+    case TokenKind::Not():
       break;
 
     // Symbolic tokens that might be operators eventually.

+ 9 - 11
toolchain/parser/precedence_test.cpp

@@ -107,16 +107,14 @@ TEST(PrecedenceTest, DirectRelations) {
 }
 
 TEST(PrecedenceTest, IndirectRelations) {
-  EXPECT_THAT(
-      PrecedenceGroup::GetPriority(
-          PrecedenceGroup::ForTrailing(TokenKind::Star(), true)->level,
-          PrecedenceGroup::ForTrailing(TokenKind::OrKeyword(), true)->level),
-      Eq(OperatorPriority::LeftFirst));
-  EXPECT_THAT(
-      PrecedenceGroup::GetPriority(
-          PrecedenceGroup::ForTrailing(TokenKind::OrKeyword(), true)->level,
-          PrecedenceGroup::ForTrailing(TokenKind::Star(), true)->level),
-      Eq(OperatorPriority::RightFirst));
+  EXPECT_THAT(PrecedenceGroup::GetPriority(
+                  PrecedenceGroup::ForTrailing(TokenKind::Star(), true)->level,
+                  PrecedenceGroup::ForTrailing(TokenKind::Or(), true)->level),
+              Eq(OperatorPriority::LeftFirst));
+  EXPECT_THAT(PrecedenceGroup::GetPriority(
+                  PrecedenceGroup::ForTrailing(TokenKind::Or(), true)->level,
+                  PrecedenceGroup::ForTrailing(TokenKind::Star(), true)->level),
+              Eq(OperatorPriority::RightFirst));
 
   EXPECT_THAT(
       PrecedenceGroup::GetPriority(
@@ -132,7 +130,7 @@ TEST(PrecedenceTest, IndirectRelations) {
 TEST(PrecedenceTest, IncomparableOperators) {
   EXPECT_THAT(PrecedenceGroup::GetPriority(
                   *PrecedenceGroup::ForLeading(TokenKind::Tilde()),
-                  *PrecedenceGroup::ForLeading(TokenKind::NotKeyword())),
+                  *PrecedenceGroup::ForLeading(TokenKind::Not())),
               Eq(OperatorPriority::Ambiguous));
   EXPECT_THAT(PrecedenceGroup::GetPriority(
                   *PrecedenceGroup::ForLeading(TokenKind::Tilde()),