diff --git a/examples/06_comments.peb b/examples/06_comments.peb
new file mode 100644
index 0000000..30165e9
--- /dev/null
+++ b/examples/06_comments.peb
@@ -0,0 +1,7 @@
+let var1 = 3 // A comment
+let var2 // A second comment *{}
+12 /*
+var1 += var2
+*/ 34
+
+56 /* // drgdr */ 78
diff --git a/main.py b/main.py
index c77e80b..205570f 100644
--- a/main.py
+++ b/main.py
@@ -10,7 +10,7 @@ def main():
     123
     "This is another string"
     """
-    path: str = "examples/05_loop.peb"
+    path: str = "examples/06_comments.peb"
     with open(path, "r") as f:
         source = f.read()
     lexer: Lexer = Lexer()
diff --git a/src/lexer.py b/src/lexer.py
index 746679b..852623c 100644
--- a/src/lexer.py
+++ b/src/lexer.py
@@ -119,9 +119,9 @@ class Lexer:
                 self.add_token(TokenType.LESS_EQUAL if self.match("=") else TokenType.LESS)
             case "/":
                 if self.match("/"):
-                    while self.peek() != "\n" and not self.is_at_end():
-                        self.advance()
-                    self.add_token(TokenType.COMMENT)
+                    self.scan_comment()
+                elif self.match("*"):
+                    self.scan_comment_multiline()
                 else:
                     self.add_token(TokenType.SLASH_EQUAL if self.match("=") else TokenType.SLASH)
             case " " | "\r" | "\t" | "\n":
@@ -169,3 +169,17 @@
         lexeme: str = self.source[self.start:self.idx]
         token_type: TokenType = KEYWORDS.get(lexeme, TokenType.IDENTIFIER)
         self.add_token(token_type)
+
+    def scan_comment(self):
+        while self.peek() != "\n" and not self.is_at_end():
+            self.advance()
+        self.add_token(TokenType.COMMENT)
+
+    def scan_comment_multiline(self):
+        while not (self.peek() == "*" and self.peek_next() == "/") and not self.is_at_end():
+            self.advance()
+        if not self.is_at_end():
+            self.advance()
+        if not self.is_at_end():
+            self.advance()
+        self.add_token(TokenType.COMMENT)