diff --git a/CHANGES.md b/CHANGES.md
index d2955d2df0a..77fe17c03d3 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -43,6 +43,7 @@
 
 ### Performance
 
+- Speed up the `is_fstring_start` function in Black's tokenizer (#4541)
 
 ### Output
 
diff --git a/src/blib2to3/pgen2/tokenize.py b/src/blib2to3/pgen2/tokenize.py
index 97dd92b06f0..407c184dd74 100644
--- a/src/blib2to3/pgen2/tokenize.py
+++ b/src/blib2to3/pgen2/tokenize.py
@@ -221,7 +221,7 @@ def _combinations(*l: str) -> set[str]:
     | {f"{prefix}'" for prefix in _strprefixes | _fstring_prefixes}
     | {f'{prefix}"' for prefix in _strprefixes | _fstring_prefixes}
 )
-fstring_prefix: Final = (
+fstring_prefix: Final = tuple(
     {f"{prefix}'" for prefix in _fstring_prefixes}
     | {f'{prefix}"' for prefix in _fstring_prefixes}
     | {f"{prefix}'''" for prefix in _fstring_prefixes}
@@ -459,7 +459,7 @@ def untokenize(iterable: Iterable[TokenInfo]) -> str:
 
 
 def is_fstring_start(token: str) -> bool:
-    return builtins.any(token.startswith(prefix) for prefix in fstring_prefix)
+    return token.startswith(fstring_prefix)
 
 
 def _split_fstring_start_and_middle(token: str) -> tuple[str, str]:
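
Note (reviewer sketch, not part of the patch): the speedup relies on str.startswith
accepting a tuple of prefixes and doing the matching in C, so a single call replaces a
Python-level generator plus one startswith call per prefix. That is also why
fstring_prefix is wrapped in tuple(...); startswith requires a str or a tuple of str and
rejects sets. Below is a minimal, self-contained illustration with an assumed subset of
prefix values (the real set built in tokenize.py is larger); the helper names and the
sample token are hypothetical.

import timeit

# Illustrative subset of f-string start prefixes; the real tokenizer builds
# the full product of prefix spellings and quote styles.
fstring_prefix = ("f'", 'f"', "rf'", 'rf"', "f'''", 'f"""')

def is_fstring_start_old(token: str) -> bool:
    # Former form: a Python-level loop, one startswith call per prefix.
    return any(token.startswith(prefix) for prefix in fstring_prefix)

def is_fstring_start_new(token: str) -> bool:
    # New form: one call; CPython scans the tuple of prefixes in C.
    return token.startswith(fstring_prefix)

token = 'f"hello"'
assert is_fstring_start_old(token) == is_fstring_start_new(token)

print("old:", timeit.timeit(lambda: is_fstring_start_old(token)))
print("new:", timeit.timeit(lambda: is_fstring_start_new(token)))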