Browse Source

Correct consume comment groups in both parsers

gingerBill 1 month ago
parent
commit
38faec757d
2 changed files with 40 additions and 34 deletions
  1. core/odin/parser/parser.odin — 20 additions, 17 deletions
  2. src/parser.cpp — 20 additions, 17 deletions

+ 20 - 17
core/odin/parser/parser.odin

@@ -348,27 +348,30 @@ consume_comment_group :: proc(p: ^Parser, n: int) -> (comments: ^ast.Comment_Gro
 }
 
 consume_comment_groups :: proc(p: ^Parser, prev: tokenizer.Token) {
-	if p.curr_tok.kind == .Comment {
-		comment: ^ast.Comment_Group
-		end_line := 0
-
-		if p.curr_tok.pos.line == prev.pos.line {
-			comment, end_line = consume_comment_group(p, 0)
-			if p.curr_tok.pos.line != end_line || p.curr_tok.kind == .EOF {
-				p.line_comment = comment
-			}
-		}
+	if p.curr_tok.kind != .Comment {
+		return
+	}
+	comment: ^ast.Comment_Group
+	end_line := 0
 
-		end_line = -1
-		for p.curr_tok.kind == .Comment {
-			comment, end_line = consume_comment_group(p, 1)
-		}
-		if end_line+1 >= p.curr_tok.pos.line || end_line < 0 {
-			p.lead_comment = comment
+	if p.curr_tok.pos.line == prev.pos.line {
+		comment, end_line = consume_comment_group(p, 0)
+		if p.curr_tok.pos.line != end_line ||
+		   p.curr_tok.pos.line == prev.pos.line+1 ||
+		   p.curr_tok.kind == .EOF {
+			p.line_comment = comment
 		}
+	}
 
-		assert(p.curr_tok.kind != .Comment)
+	end_line = -1
+	for p.curr_tok.kind == .Comment {
+		comment, end_line = consume_comment_group(p, 1)
 	}
+	if end_line+1 >= p.curr_tok.pos.line || end_line < 0 {
+		p.lead_comment = comment
+	}
+
+	assert(p.curr_tok.kind != .Comment)
 }
 
 advance_token :: proc(p: ^Parser) -> tokenizer.Token {

+ 20 - 17
src/parser.cpp

@@ -1436,27 +1436,30 @@ gb_internal CommentGroup *consume_comment_group(AstFile *f, isize n, isize *end_
 }
 
 gb_internal void consume_comment_groups(AstFile *f, Token prev) {
-	if (f->curr_token.kind == Token_Comment) {
-		CommentGroup *comment = nullptr;
-		isize end_line = 0;
-
-		if (f->curr_token.pos.line == prev.pos.line) {
-			comment = consume_comment_group(f, 0, &end_line);
-			if (f->curr_token.pos.line != end_line || f->curr_token.kind == Token_EOF) {
-				f->line_comment = comment;
-			}
-		}
+	if (f->curr_token.kind != Token_Comment) {
+		return;
+	}
+	CommentGroup *comment = nullptr;
+	isize end_line = 0;
 
-		end_line = -1;
-		while (f->curr_token.kind == Token_Comment) {
-			comment = consume_comment_group(f, 1, &end_line);
-		}
-		if (end_line+1 == f->curr_token.pos.line || end_line < 0) {
-			f->lead_comment = comment;
+	if (f->curr_token.pos.line == prev.pos.line) {
+		comment = consume_comment_group(f, 0, &end_line);
+		if (f->curr_token.pos.line != end_line ||
+		    f->curr_token.pos.line == prev.pos.line+1 ||
+		    f->curr_token.kind == Token_EOF) {
+			f->line_comment = comment;
 		}
+	}
 
-		GB_ASSERT(f->curr_token.kind != Token_Comment);
+	end_line = -1;
+	while (f->curr_token.kind == Token_Comment) {
+		comment = consume_comment_group(f, 1, &end_line);
+	}
+	if (end_line+1 == f->curr_token.pos.line || end_line < 0) {
+		f->lead_comment = comment;
 	}
+
+	GB_ASSERT(f->curr_token.kind != Token_Comment);
 }
 
 gb_internal gb_inline bool ignore_newlines(AstFile *f) {