@@ -637,6 +637,7 @@ def tokeniter(self, source, name, filename=None, state=None):
         balancing_stack = []
         lstrip_unless_re = self.lstrip_unless_re
         newlines_stripped = 0
+        line_starting = True

         while 1:
             # tokenizer loop
@@ -686,11 +687,11 @@ def tokeniter(self, source, name, filename=None, state=None):
                         ):
                             # The start of text between the last newline and the tag.
                             l_pos = text.rfind("\n") + 1
-
-                            # If there's only whitespace between the newline and the
-                            # tag, strip it.
-                            if not lstrip_unless_re.search(text, l_pos):
-                                groups = (text[:l_pos],) + groups[1:]
+                            if l_pos > 0 or line_starting:
+                                # If there's only whitespace between the newline and the
+                                # tag, strip it.
+                                if not lstrip_unless_re.search(text, l_pos):
+                                    groups = (text[:l_pos],) + groups[1:]

                     for idx, token in enumerate(tokens):
                         # failure group
@@ -747,6 +748,8 @@ def tokeniter(self, source, name, filename=None, state=None):
                         yield lineno, tokens, data
                     lineno += data.count("\n")

+                line_starting = m.group()[-1:] == "\n"
+
                 # fetch new position into new variable so that we can check
                 # if there is a internal parsing error which would result
                 # in an infinite loop
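Taken together, the patch makes lstrip_blocks respect line boundaries: line_starting is True at the start of input and is refreshed after every match (third hunk), so the stripping branch (second hunk) only fires when the tag actually begins a line, either because the preceding text contains a newline (l_pos > 0) or because the previous match ended with one. A minimal sketch of the resulting behavior, assuming a Jinja2 build that includes this change; the template strings and expected outputs below are illustrative, not taken from the commit:

from jinja2 import Environment

env = Environment(lstrip_blocks=True)

# Tag at the very start of the input: line_starting is True, so the
# whitespace-only prefix is stripped even though it contains no newline.
print(env.from_string("  {% if True %}x{% endif %}").render())
# expected: x

# Tag preceded by a newline: l_pos > 0, so the indent before it is stripped.
print(env.from_string("a\n  {% if True %}x{% endif %}").render())
# expected: a\nx

# Tag in the middle of a line: l_pos == 0 and line_starting is False, so the
# space after the variable is kept (before this change it was stripped too).
print(env.from_string("{{ a }} {% if True %}x{% endif %}").render(a="y"))
# expected: y x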