Skip to content

Commit

Permalink
Merge pull request #25 from akx/bytestrings
Browse files Browse the repository at this point in the history
Don't require f-strings when the formatee is a bytestring
  • Loading branch information
MichaelKim0407 authored Jul 27, 2022
2 parents d8db1e6 + cff4cce commit c10c92e
Show file tree
Hide file tree
Showing 5 changed files with 37 additions and 4 deletions.
10 changes: 7 additions & 3 deletions flake8_use_fstring/base.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import token as _token
import typing as _typing

from tokenize import (
TokenInfo as _TokenInfo,
)
Expand All @@ -8,6 +8,10 @@
OptionManager as _OptionManager,
)

from flake8_use_fstring.utils import (
is_text_string_token as _is_text_string_token,
)

Flake8Output = _typing.Tuple[_typing.Tuple[int, int], str]


Expand Down Expand Up @@ -38,7 +42,7 @@ def __iter__(self) -> _typing.Iterator[Flake8Output]:
met_string = False

for i in range(len(self.tokens)):
if self.tokens[i].exact_type == _token.STRING:
if _is_text_string_token(self.tokens[i]):
met_string = True

if not self[i]:
Expand All @@ -48,7 +52,7 @@ def __iter__(self) -> _typing.Iterator[Flake8Output]:
# only if last token is string
if i == 0: # cannot use IndexError because -1 is a valid index
continue # pragma: no cover (syntax error)
if self.tokens[i - 1].exact_type != _token.STRING:
if not _is_text_string_token(self.tokens[i - 1]):
continue

elif self.greedy == self.GREEDY_MET_STRING:
Expand Down
6 changes: 5 additions & 1 deletion flake8_use_fstring/prefix.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,10 @@
BaseLogicalLineChecker as _Base,
)

from .utils import (
is_text_string_token as _is_text_string_token,
)

from . import __version__

FSTRING_REGEX = _re.compile(r'^([a-zA-Z]*?[fF][a-zA-Z]*?){1}["\']')
Expand All @@ -26,7 +30,7 @@ class MissingPrefixDetector(_Base):

def __getitem__(self, i: int) -> bool:
token = self.tokens[i]
if token.exact_type != _token.STRING:
if not _is_text_string_token(token):
return False

if FSTRING_REGEX.search(token.string): # already is an f-string
Expand Down
15 changes: 15 additions & 0 deletions flake8_use_fstring/utils.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
import re as _re
import token as _token
from tokenize import TokenInfo as _TokenInfo

# Everything before the first quote character is the literal's prefix
# (e.g. the "rb" in rb'...').  The pattern also matches the empty string,
# so .match() below can never return None.
PREFIX_RE = _re.compile(r"^[^'\"]*")


def is_text_string_token(token: _TokenInfo) -> bool:
    """Return True if *token* is a textual (non-bytes) string literal.

    Non-STRING tokens return False.  Bytestring literals — any string
    whose prefix contains ``b`` or ``B`` (``b'..'``, ``rb'..'``, …) —
    also return False, since a bytestring can never be an f-string.
    """
    if token.type != _token.STRING:
        return False  # Not a string at all? Ignore.
    # Get the prefix of the string (anything before the first quote).
    prefix = PREFIX_RE.match(token.string).group(0).lower()
    # A 'b' in the lower-cased prefix marks a bytestring. Ignore it.
    return 'b' not in prefix
8 changes: 8 additions & 0 deletions tests/example.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,5 +50,13 @@ def format(self): # noqa: A003
'{{m}} {n}'
r'[a-z]{1,3}') # Should not be matched.

# should not be matched on any greedy level < 2
# (bytestrings can't be f-strings)
p = b'%d' % f

# should not be matched for missing prefix
# (bytestrings can't be f-strings)
q = b'{n}'

# no errors below; coverage
''.strip()
2 changes: 2 additions & 0 deletions tests/test_00.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ def test_greedy_2(test_flake8_cmd):
tests/example.py:25:6: FS002 '.format' used
tests/example.py:34:13: FS002 '.format' used
tests/example.py:37:8: FS002 '.format' used
tests/example.py:55:11: FS001 '%' operator used
"""
test_flake8_cmd.test()

Expand All @@ -47,6 +48,7 @@ def test_greedy_different(test_flake8_cmd):
tests/example.py:12:16: FS001 '%' operator used
tests/example.py:15:7: FS001 '%' operator used
tests/example.py:18:12: FS002 '.format' used
tests/example.py:55:11: FS001 '%' operator used
"""
test_flake8_cmd.test()

Expand Down

0 comments on commit c10c92e

Please sign in to comment.