[PATCH 1 of 5 BLACKGNAROK] formatting: run black on all files again

Augie Fackler raf at durin42.com
Mon Oct 7 12:17:20 EDT 2019



> On Oct 7, 2019, at 12:15, Pierre-Yves David <pierre-yves.david at ens-lyon.org> wrote:
> 
> # HG changeset patch
> # User Pierre-Yves David <pierre-yves.david at octobus.net>
> # Date 1570460331 14400
> #      Mon Oct 07 10:58:51 2019 -0400
> # Node ID f243703025e2ba5a53feb6f8ca8f029b6e186194
> # Parent  7054fd370430ae76b07ff5f68d0ab8df9de70fc5
> # EXP-Topic blackgnarok-bytemageddon
> # Available At https://bitbucket.org/octobus/mercurial-devel/
> #              hg pull https://bitbucket.org/octobus/mercurial-devel/ -r f243703025e2
> formatting: run black on all files again
> 
> Apparently, since the blackgnarok, we have diverged from the expected formatting.
> 
> Formatted using::
> 
>  grey.py -l 80 -t py33 -S $(hg files 'set:**.py - mercurial/thirdparty/** - "contrib/python-zstandard/**" - hgext/fsmonitor/pywatchman/**')

We should also exclude grey.py, so that it's easy to pick up updates from black if we have to rebase the patch...

> 
> # skip-blame mass-reformatting only
> 
> # no-check-commit reformats foo_bar functions
> 
> diff --git a/contrib/grey.py b/contrib/grey.py
> --- a/contrib/grey.py
> +++ b/contrib/grey.py
> @@ -55,9 +55,7 @@ from blib2to3.pgen2.parse import ParseEr
> __version__ = '19.3b1.dev95+gdc1add6.d20191005'
> 
> DEFAULT_LINE_LENGTH = 88
> -DEFAULT_EXCLUDES = (
> -    r"/(\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|_build|buck-out|build|dist)/"
> -)
> +DEFAULT_EXCLUDES = r"/(\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|_build|buck-out|build|dist)/"
> DEFAULT_INCLUDES = r"\.pyi?$"
> CACHE_DIR = Path(user_cache_dir("black", version=__version__))
> 
> @@ -196,7 +194,9 @@ class FileMode:
>         if self.target_versions:
>             version_str = ",".join(
>                 str(version.value)
> -                for version in sorted(self.target_versions, key=lambda v: v.value)
> +                for version in sorted(
> +                    self.target_versions, key=lambda v: v.value
> +                )
>             )
>         else:
>             version_str = "-"
> @@ -209,12 +209,18 @@ class FileMode:
>         return ".".join(parts)
> 
> 
> -def supports_feature(target_versions: Set[TargetVersion], feature: Feature) -> bool:
> -    return all(feature in VERSION_TO_FEATURES[version] for version in target_versions)
> +def supports_feature(
> +    target_versions: Set[TargetVersion], feature: Feature
> +) -> bool:
> +    return all(
> +        feature in VERSION_TO_FEATURES[version] for version in target_versions
> +    )
> 
> 
> def read_pyproject_toml(
> -    ctx: click.Context, param: click.Parameter, value: Union[str, int, bool, None]
> +    ctx: click.Context,
> +    param: click.Parameter,
> +    value: Union[str, int, bool, None],
> ) -> Optional[str]:
>     """Inject Black configuration from "pyproject.toml" into defaults in `ctx`.
> 
> @@ -250,7 +256,9 @@ def read_pyproject_toml(
> 
> 
> @click.command(context_settings=dict(help_option_names=["-h", "--help"]))
> - at click.option("-c", "--code", type=str, help="Format the code passed in as a string.")
> + at click.option(
> +    "-c", "--code", type=str, help="Format the code passed in as a string."
> +)
> @click.option(
>     "-l",
>     "--line-length",
> @@ -361,14 +369,22 @@ def read_pyproject_toml(
>     "src",
>     nargs=-1,
>     type=click.Path(
> -        exists=True, file_okay=True, dir_okay=True, readable=True, allow_dash=True
> +        exists=True,
> +        file_okay=True,
> +        dir_okay=True,
> +        readable=True,
> +        allow_dash=True,
>     ),
>     is_eager=True,
> )
> @click.option(
>     "--config",
>     type=click.Path(
> -        exists=False, file_okay=True, dir_okay=False, readable=True, allow_dash=False
> +        exists=False,
> +        file_okay=True,
> +        dir_okay=False,
> +        readable=True,
> +        allow_dash=False,
>     ),
>     is_eager=True,
>     callback=read_pyproject_toml,
> @@ -439,7 +455,9 @@ def main(
>         p = Path(s)
>         if p.is_dir():
>             sources.update(
> -                gen_python_files_in_dir(p, root, include_regex, exclude_regex, report)
> +                gen_python_files_in_dir(
> +                    p, root, include_regex, exclude_regex, report
> +                )
>             )
>         elif p.is_file() or s == "-":
>             # if a file was explicitly given, we don't care about its extension
> @@ -461,7 +479,11 @@ def main(
>         )
>     else:
>         reformat_many(
> -            sources=sources, fast=fast, write_back=write_back, mode=mode, report=report
> +            sources=sources,
> +            fast=fast,
> +            write_back=write_back,
> +            mode=mode,
> +            report=report,
>         )
> 
>     if verbose or not quiet:
> @@ -470,7 +492,9 @@ def main(
>     ctx.exit(report.return_code)
> 
> 
> -def path_empty(src: Tuple[str], quiet: bool, verbose: bool, ctx: click.Context) -> None:
> +def path_empty(
> +    src: Tuple[str], quiet: bool, verbose: bool, ctx: click.Context
> +) -> None:
>     """
>     Exit if there is no `src` provided for formatting
>     """
> @@ -481,7 +505,11 @@ def path_empty(src: Tuple[str], quiet: b
> 
> 
> def reformat_one(
> -    src: Path, fast: bool, write_back: WriteBack, mode: FileMode, report: "Report"
> +    src: Path,
> +    fast: bool,
> +    write_back: WriteBack,
> +    mode: FileMode,
> +    report: "Report",
> ) -> None:
>     """Reformat a single file under `src` without spawning child processes.
> 
> @@ -491,22 +519,26 @@ def reformat_one(
>     try:
>         changed = Changed.NO
>         if not src.is_file() and str(src) == "-":
> -            if format_stdin_to_stdout(fast=fast, write_back=write_back, mode=mode):
> +            if format_stdin_to_stdout(
> +                fast=fast, write_back=write_back, mode=mode
> +            ):
>                 changed = Changed.YES
>         else:
>             cache: Cache = {}
>             if write_back != WriteBack.DIFF:
>                 cache = read_cache(mode)
>                 res_src = src.resolve()
> -                if res_src in cache and cache[res_src] == get_cache_info(res_src):
> +                if res_src in cache and cache[res_src] == get_cache_info(
> +                    res_src
> +                ):
>                     changed = Changed.CACHED
>             if changed is not Changed.CACHED and format_file_in_place(
>                 src, fast=fast, write_back=write_back, mode=mode
>             ):
>                 changed = Changed.YES
> -            if (write_back is WriteBack.YES and changed is not Changed.CACHED) or (
> -                write_back is WriteBack.CHECK and changed is Changed.NO
> -            ):
> +            if (
> +                write_back is WriteBack.YES and changed is not Changed.CACHED
> +            ) or (write_back is WriteBack.CHECK and changed is Changed.NO):
>                 write_cache(cache, [src], mode)
>         report.done(src, changed)
>     except Exception as exc:
> @@ -580,7 +612,13 @@ async def schedule_formatting(
>     tasks = {
>         asyncio.ensure_future(
>             loop.run_in_executor(
> -                executor, format_file_in_place, src, fast, mode, write_back, lock
> +                executor,
> +                format_file_in_place,
> +                src,
> +                fast,
> +                mode,
> +                write_back,
> +                lock,
>             )
>         ): src
>         for src in sorted(sources)
> @@ -593,7 +631,9 @@ async def schedule_formatting(
>         # There are no good alternatives for these on Windows.
>         pass
>     while pending:
> -        done, _ = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
> +        done, _ = await asyncio.wait(
> +            pending, return_when=asyncio.FIRST_COMPLETED
> +        )
>         for task in done:
>             src = tasks.pop(task)
>             if task.cancelled():
> @@ -682,7 +722,10 @@ def format_stdin_to_stdout(
> 
>     finally:
>         f = io.TextIOWrapper(
> -            sys.stdout.buffer, encoding=encoding, newline=newline, write_through=True
> +            sys.stdout.buffer,
> +            encoding=encoding,
> +            newline=newline,
> +            write_through=True,
>         )
>         if write_back == WriteBack.YES:
>             f.write(dst)
> @@ -741,7 +784,10 @@ def format_str(src_contents: str, *, mod
>     after = 0
>     split_line_features = {
>         feature
> -        for feature in {Feature.TRAILING_COMMA_IN_CALL, Feature.TRAILING_COMMA_IN_DEF}
> +        for feature in {
> +            Feature.TRAILING_COMMA_IN_CALL,
> +            Feature.TRAILING_COMMA_IN_DEF,
> +        }
>         if supports_feature(versions, feature)
>     }
>     for current_line in lines.visit(src_node):
> @@ -751,7 +797,9 @@ def format_str(src_contents: str, *, mod
>         for _ in range(before):
>             dst_contents.append(str(empty_line))
>         for line in split_line(
> -            current_line, line_length=mode.line_length, features=split_line_features
> +            current_line,
> +            line_length=mode.line_length,
> +            features=split_line_features,
>         ):
>             dst_contents.append(str(line))
>     return "".join(dst_contents)
> @@ -806,13 +854,17 @@ def get_grammars(target_versions: Set[Ta
>             )
>         if not supports_feature(target_versions, Feature.ASYNC_KEYWORDS):
>             # Python 3.0-3.6
> -            grammars.append(pygram.python_grammar_no_print_statement_no_exec_statement)
> +            grammars.append(
> +                pygram.python_grammar_no_print_statement_no_exec_statement
> +            )
>         # At least one of the above branches must have been taken, because every Python
>         # version has exactly one of the two 'ASYNC_*' flags
>         return grammars
> 
> 
> -def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -> Node:
> +def lib2to3_parse(
> +    src_txt: str, target_versions: Iterable[TargetVersion] = ()
> +) -> Node:
>     """Given a string with source, return the lib2to3 Node."""
>     if src_txt[-1:] != "\n":
>         src_txt += "\n"
> @@ -830,7 +882,9 @@ def lib2to3_parse(src_txt: str, target_v
>                 faulty_line = lines[lineno - 1]
>             except IndexError:
>                 faulty_line = "<line number missing in source>"
> -            exc = InvalidInput(f"Cannot parse: {lineno}:{column}: {faulty_line}")
> +            exc = InvalidInput(
> +                f"Cannot parse: {lineno}:{column}: {faulty_line}"
> +            )
>     else:
>         raise exc from None
> 
> @@ -1077,7 +1131,9 @@ class BracketTracker:
>         """Return True if there is an yet unmatched open bracket on the line."""
>         return bool(self.bracket_match)
> 
> -    def max_delimiter_priority(self, exclude: Iterable[LeafID] = ()) -> Priority:
> +    def max_delimiter_priority(
> +        self, exclude: Iterable[LeafID] = ()
> +    ) -> Priority:
>         """Return the highest priority of a delimiter found on the line.
> 
>         Values are consistent with what `is_split_*_delimiter()` return.
> @@ -1160,7 +1216,9 @@ class Line:
> 
>     depth: int = 0
>     leaves: List[Leaf] = Factory(list)
> -    comments: Dict[LeafID, List[Leaf]] = Factory(dict)  # keys ordered like `leaves`
> +    comments: Dict[LeafID, List[Leaf]] = Factory(
> +        dict
> +    )  # keys ordered like `leaves`
>     bracket_tracker: BracketTracker = Factory(BracketTracker)
>     inside_brackets: bool = False
>     should_explode: bool = False
> @@ -1213,7 +1271,9 @@ class Line:
>     @property
>     def is_comment(self) -> bool:
>         """Is this line a standalone comment?"""
> -        return len(self.leaves) == 1 and self.leaves[0].type == STANDALONE_COMMENT
> +        return (
> +            len(self.leaves) == 1 and self.leaves[0].type == STANDALONE_COMMENT
> +        )
> 
>     @property
>     def is_decorator(self) -> bool:
> @@ -1278,7 +1338,10 @@ class Line:
>             commas = 0
>             comma_depth = self.leaves[close_index - 1].bracket_depth
>             for leaf in self.leaves[_open_index + 1 : close_index]:
> -                if leaf.bracket_depth == comma_depth and leaf.type == token.COMMA:
> +                if (
> +                    leaf.bracket_depth == comma_depth
> +                    and leaf.type == token.COMMA
> +                ):
>                     commas += 1
>             if commas > 1:
>                 # We haven't looked yet for the trailing comma because
> @@ -1308,7 +1371,9 @@ class Line:
>             second_leaf: Optional[Leaf] = self.leaves[1]
>         except IndexError:
>             second_leaf = None
> -        return (first_leaf.type == token.NAME and first_leaf.value == "def") or (
> +        return (
> +            first_leaf.type == token.NAME and first_leaf.value == "def"
> +        ) or (
>             first_leaf.type == token.ASYNC
>             and second_leaf is not None
>             and second_leaf.type == token.NAME
> @@ -1340,7 +1405,9 @@ class Line:
>             and self.leaves[0].value.startswith(('"""', "'''"))
>         )
> 
> -    def contains_standalone_comments(self, depth_limit: int = sys.maxsize) -> bool:
> +    def contains_standalone_comments(
> +        self, depth_limit: int = sys.maxsize
> +    ) -> bool:
>         """If so, needs to be split before emitting."""
>         for leaf in self.leaves:
>             if leaf.type == STANDALONE_COMMENT:
> @@ -1584,8 +1651,14 @@ class EmptyLineTracker:
>                 before = 0 if depth else 1
>             else:
>                 before = 1 if depth else 2
> -        if current_line.is_decorator or current_line.is_def or current_line.is_class:
> -            return self._maybe_empty_lines_for_class_or_def(current_line, before)
> +        if (
> +            current_line.is_decorator
> +            or current_line.is_def
> +            or current_line.is_class
> +        ):
> +            return self._maybe_empty_lines_for_class_or_def(
> +                current_line, before
> +            )
> 
>         if (
>             self.previous_line
> @@ -1632,7 +1705,10 @@ class EmptyLineTracker:
>             if self.previous_line.depth > current_line.depth:
>                 newlines = 1
>             elif current_line.is_class or self.previous_line.is_class:
> -                if current_line.is_stub_class and self.previous_line.is_stub_class:
> +                if (
> +                    current_line.is_stub_class
> +                    and self.previous_line.is_stub_class
> +                ):
>                     # No blank line between classes with an empty body
>                     newlines = 0
>                 else:
> @@ -1681,7 +1757,9 @@ class LineGenerator(Visitor[Line]):
>     def visit_default(self, node: LN) -> Iterator[Line]:
>         """Default `visit_*()` implementation. Recurses to children of `node`."""
>         if isinstance(node, Leaf):
> -            any_open_brackets = self.current_line.bracket_tracker.any_open_brackets()
> +            any_open_brackets = (
> +                self.current_line.bracket_tracker.any_open_brackets()
> +            )
>             for comment in generate_comments(node):
>                 if any_open_brackets:
>                     # any comment within brackets is subject to splitting
> @@ -1700,7 +1778,9 @@ class LineGenerator(Visitor[Line]):
> 
>             normalize_prefix(node, inside_brackets=any_open_brackets)
>             if self.normalize_strings and node.type == token.STRING:
> -                normalize_string_prefix(node, remove_u_prefix=self.remove_u_prefix)
> +                normalize_string_prefix(
> +                    node, remove_u_prefix=self.remove_u_prefix
> +                )
>                 normalize_string_quotes(node)
>             if node.type == token.NUMBER:
>                 normalize_numeric_literal(node)
> @@ -1801,7 +1881,11 @@ class LineGenerator(Visitor[Line]):
>                 yield from self.line(-1)
> 
>         else:
> -            if not self.is_pyi or not node.parent or not is_stub_suite(node.parent):
> +            if (
> +                not self.is_pyi
> +                or not node.parent
> +                or not is_stub_suite(node.parent)
> +            ):
>                 yield from self.line()
>             yield from self.visit_default(node)
> 
> @@ -1844,12 +1928,18 @@ class LineGenerator(Visitor[Line]):
>         """You are in a twisty little maze of passages."""
>         v = self.visit_stmt
>         Ø: Set[str] = set()
> -        self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","})
> +        self.visit_assert_stmt = partial(
> +            v, keywords={"assert"}, parens={"assert", ","}
> +        )
>         self.visit_if_stmt = partial(
>             v, keywords={"if", "else", "elif"}, parens={"if", "elif"}
>         )
> -        self.visit_while_stmt = partial(v, keywords={"while", "else"}, parens={"while"})
> -        self.visit_for_stmt = partial(v, keywords={"for", "else"}, parens={"for", "in"})
> +        self.visit_while_stmt = partial(
> +            v, keywords={"while", "else"}, parens={"while"}
> +        )
> +        self.visit_for_stmt = partial(
> +            v, keywords={"for", "else"}, parens={"for", "in"}
> +        )
>         self.visit_try_stmt = partial(
>             v, keywords={"try", "except", "else", "finally"}, parens=Ø
>         )
> @@ -1858,7 +1948,9 @@ class LineGenerator(Visitor[Line]):
>         self.visit_funcdef = partial(v, keywords={"def"}, parens=Ø)
>         self.visit_classdef = partial(v, keywords={"class"}, parens=Ø)
>         self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS)
> -        self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"})
> +        self.visit_return_stmt = partial(
> +            v, keywords={"return"}, parens={"return"}
> +        )
>         self.visit_import_from = partial(v, keywords=Ø, parens={"import"})
>         self.visit_del_stmt = partial(v, keywords=Ø, parens={"del"})
>         self.visit_async_funcdef = self.visit_async_stmt
> @@ -1866,7 +1958,11 @@ class LineGenerator(Visitor[Line]):
> 
> 
> IMPLICIT_TUPLE = {syms.testlist, syms.testlist_star_expr, syms.exprlist}
> -BRACKET = {token.LPAR: token.RPAR, token.LSQB: token.RSQB, token.LBRACE: token.RBRACE}
> +BRACKET = {
> +    token.LPAR: token.RPAR,
> +    token.LSQB: token.RSQB,
> +    token.LBRACE: token.RBRACE,
> +}
> OPENING_BRACKETS = set(BRACKET.keys())
> CLOSING_BRACKETS = set(BRACKET.values())
> BRACKETS = OPENING_BRACKETS | CLOSING_BRACKETS
> @@ -1891,7 +1987,9 @@ def whitespace(leaf: Leaf, *, complex_su
>     if t == token.COMMENT:
>         return DOUBLESPACE
> 
> -    assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}"
> +    assert (
> +        p is not None
> +    ), f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}"
>     if t == token.COLON and p.type not in {
>         syms.subscript,
>         syms.subscriptlist,
> @@ -1935,7 +2033,10 @@ def whitespace(leaf: Leaf, *, complex_su
>                 return NO
> 
>         elif prevp.type == token.COLON:
> -            if prevp.parent and prevp.parent.type in {syms.subscript, syms.sliceop}:
> +            if prevp.parent and prevp.parent.type in {
> +                syms.subscript,
> +                syms.sliceop,
> +            }:
>                 return SPACE if complex_subscript else NO
> 
>         elif (
> @@ -2079,7 +2180,9 @@ def whitespace(leaf: Leaf, *, complex_su
>             }:
>                 return NO
> 
> -            elif prevp.type == token.EQUAL and prevp_parent.type == syms.argument:
> +            elif (
> +                prevp.type == token.EQUAL and prevp_parent.type == syms.argument
> +            ):
>                 return NO
> 
>         elif t in {token.NAME, token.NUMBER, token.STRING}:
> @@ -2147,14 +2250,19 @@ def container_of(leaf: Leaf) -> LN:
>         if parent.type == syms.file_input:
>             break
> 
> -        if parent.prev_sibling is not None and parent.prev_sibling.type in BRACKETS:
> +        if (
> +            parent.prev_sibling is not None
> +            and parent.prev_sibling.type in BRACKETS
> +        ):
>             break
> 
>         container = parent
>     return container
> 
> 
> -def is_split_after_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority:
> +def is_split_after_delimiter(
> +    leaf: Leaf, previous: Optional[Leaf] = None
> +) -> Priority:
>     """Return the priority of the `leaf` delimiter, given a line break after it.
> 
>     The delimiter priorities returned here are from those delimiters that would
> @@ -2168,7 +2276,9 @@ def is_split_after_delimiter(leaf: Leaf,
>     return 0
> 
> 
> -def is_split_before_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority:
> +def is_split_before_delimiter(
> +    leaf: Leaf, previous: Optional[Leaf] = None
> +) -> Priority:
>     """Return the priority of the `leaf` delimiter, given a line break before it.
> 
>     The delimiter priorities returned here are from those delimiters that would
> @@ -2228,7 +2338,11 @@ def is_split_before_delimiter(leaf: Leaf
>     ):
>         return COMPREHENSION_PRIORITY
> 
> -    if leaf.value in {"if", "else"} and leaf.parent and leaf.parent.type == syms.test:
> +    if (
> +        leaf.value in {"if", "else"}
> +        and leaf.parent
> +        and leaf.parent.type == syms.test
> +    ):
>         return TERNARY_PRIORITY
> 
>     if leaf.value == "is":
> @@ -2287,7 +2401,9 @@ def generate_comments(leaf: LN) -> Itera
>     Inline comments are emitted as regular token.COMMENT leaves.  Standalone
>     are emitted with a fake STANDALONE_COMMENT token identifier.
>     """
> -    for pc in list_comments(leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER):
> +    for pc in list_comments(
> +        leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER
> +    ):
>         yield Leaf(pc.type, pc.value, prefix="\n" * pc.newlines)
> 
> 
> @@ -2339,7 +2455,10 @@ def list_comments(prefix: str, *, is_end
>         comment = make_comment(line)
>         result.append(
>             ProtoComment(
> -                type=comment_type, value=comment, newlines=nlines, consumed=consumed
> +                type=comment_type,
> +                value=comment,
> +                newlines=nlines,
> +                consumed=consumed,
>             )
>         )
>         nlines = 0
> @@ -2391,7 +2510,9 @@ def split_line(
>         and not line.should_explode
>         and not line.is_collection_with_optional_trailing_comma
>         and (
> -            is_line_short_enough(line, line_length=line_length, line_str=line_str)
> +            is_line_short_enough(
> +                line, line_length=line_length, line_str=line_str
> +            )
>             or line.contains_unsplittable_type_ignore()
>         )
>     ):
> @@ -2405,7 +2526,9 @@ def split_line(
> 
>         def rhs(line: Line, features: Collection[Feature]) -> Iterator[Line]:
>             for omit in generate_trailers_to_omit(line, line_length):
> -                lines = list(right_hand_split(line, line_length, features, omit=omit))
> +                lines = list(
> +                    right_hand_split(line, line_length, features, omit=omit)
> +                )
>                 if is_line_short_enough(lines[0], line_length=line_length):
>                     yield from lines
>                     return
> @@ -2427,11 +2550,16 @@ def split_line(
>         try:
>             for l in split_func(line, features):
>                 if str(l).strip("\n") == line_str:
> -                    raise CannotSplit("Split function returned an unchanged result")
> +                    raise CannotSplit(
> +                        "Split function returned an unchanged result"
> +                    )
> 
>                 result.extend(
>                     split_line(
> -                        l, line_length=line_length, inner=True, features=features
> +                        l,
> +                        line_length=line_length,
> +                        inner=True,
> +                        features=features,
>                     )
>                 )
>         except CannotSplit:
> @@ -2445,7 +2573,9 @@ def split_line(
>         yield line
> 
> 
> -def left_hand_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
> +def left_hand_split(
> +    line: Line, features: Collection[Feature] = ()
> +) -> Iterator[Line]:
>     """Split line into many lines, starting with the first matching bracket pair.
> 
>     Note: this usually looks weird, only use this for function definitions.
> @@ -2473,7 +2603,9 @@ def left_hand_split(line: Line, features
>         raise CannotSplit("No brackets found")
> 
>     head = bracket_split_build_line(head_leaves, line, matching_bracket)
> -    body = bracket_split_build_line(body_leaves, line, matching_bracket, is_body=True)
> +    body = bracket_split_build_line(
> +        body_leaves, line, matching_bracket, is_body=True
> +    )
>     tail = bracket_split_build_line(tail_leaves, line, matching_bracket)
>     bracket_split_succeeded_or_raise(head, body, tail)
>     for result in (head, body, tail):
> @@ -2521,7 +2653,9 @@ def right_hand_split(
>     body_leaves.reverse()
>     head_leaves.reverse()
>     head = bracket_split_build_line(head_leaves, line, opening_bracket)
> -    body = bracket_split_build_line(body_leaves, line, opening_bracket, is_body=True)
> +    body = bracket_split_build_line(
> +        body_leaves, line, opening_bracket, is_body=True
> +    )
>     tail = bracket_split_build_line(tail_leaves, line, opening_bracket)
>     bracket_split_succeeded_or_raise(head, body, tail)
>     if (
> @@ -2543,7 +2677,9 @@ def right_hand_split(
>     ):
>         omit = {id(closing_bracket), *omit}
>         try:
> -            yield from right_hand_split(line, line_length, features=features, omit=omit)
> +            yield from right_hand_split(
> +                line, line_length, features=features, omit=omit
> +            )
>             return
> 
>         except CannotSplit:
> @@ -2555,7 +2691,10 @@ def right_hand_split(
>                     "Splitting failed, body is still too long and can't be split."
>                 )
> 
> -            elif head.contains_multiline_strings() or tail.contains_multiline_strings():
> +            elif (
> +                head.contains_multiline_strings()
> +                or tail.contains_multiline_strings()
> +            ):
>                 raise CannotSplit(
>                     "The current optional pair of parentheses is bound to fail to "
>                     "satisfy the splitting algorithm because the head or the tail "
> @@ -2570,7 +2709,9 @@ def right_hand_split(
>             yield result
> 
> 
> -def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None:
> +def bracket_split_succeeded_or_raise(
> +    head: Line, body: Line, tail: Line
> +) -> None:
>     """Raise :exc:`CannotSplit` if the last left- or right-hand split failed.
> 
>     Do nothing otherwise.
> @@ -2597,7 +2738,11 @@ def bracket_split_succeeded_or_raise(hea
> 
> 
> def bracket_split_build_line(
> -    leaves: List[Leaf], original: Line, opening_bracket: Leaf, *, is_body: bool = False
> +    leaves: List[Leaf],
> +    original: Line,
> +    opening_bracket: Leaf,
> +    *,
> +    is_body: bool = False,
> ) -> Line:
>     """Return a new line with given `leaves` and respective comments from `original`.
> 
> @@ -2643,7 +2788,9 @@ def dont_increase_indentation(split_func
>     """
> 
>     @wraps(split_func)
> -    def split_wrapper(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
> +    def split_wrapper(
> +        line: Line, features: Collection[Feature] = ()
> +    ) -> Iterator[Line]:
>         for l in split_func(line, features):
>             normalize_prefix(l.leaves[0], inside_brackets=True)
>             yield l
> @@ -2652,7 +2799,9 @@ def dont_increase_indentation(split_func
> 
> 
> @dont_increase_indentation
> -def delimiter_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]:
> +def delimiter_split(
> +    line: Line, features: Collection[Feature] = ()
> +) -> Iterator[Line]:
>     """Split according to delimiters of the highest priority.
> 
>     If the appropriate Features are given, the split will add trailing commas
> @@ -2671,7 +2820,9 @@ def delimiter_split(line: Line, features
> 
>     if delimiter_priority == DOT_PRIORITY:
>         if bt.delimiter_count_with_priority(delimiter_priority) == 1:
> -            raise CannotSplit("Splitting a single attribute from its owner looks wrong")
> +            raise CannotSplit(
> +                "Splitting a single attribute from its owner looks wrong"
> +            )
> 
>     current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
>     lowest_depth = sys.maxsize
> @@ -2685,7 +2836,9 @@ def delimiter_split(line: Line, features
>         except ValueError:
>             yield current_line
> 
> -            current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
> +            current_line = Line(
> +                depth=line.depth, inside_brackets=line.inside_brackets
> +            )
>             current_line.append(leaf)
> 
>     for leaf in line.leaves:
> @@ -2698,18 +2851,22 @@ def delimiter_split(line: Line, features
>         if leaf.bracket_depth == lowest_depth:
>             if is_vararg(leaf, within={syms.typedargslist}):
>                 trailing_comma_safe = (
> -                    trailing_comma_safe and Feature.TRAILING_COMMA_IN_DEF in features
> +                    trailing_comma_safe
> +                    and Feature.TRAILING_COMMA_IN_DEF in features
>                 )
>             elif is_vararg(leaf, within={syms.arglist, syms.argument}):
>                 trailing_comma_safe = (
> -                    trailing_comma_safe and Feature.TRAILING_COMMA_IN_CALL in features
> +                    trailing_comma_safe
> +                    and Feature.TRAILING_COMMA_IN_CALL in features
>                 )
> 
>         leaf_priority = bt.delimiters.get(id(leaf))
>         if leaf_priority == delimiter_priority:
>             yield current_line
> 
> -            current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
> +            current_line = Line(
> +                depth=line.depth, inside_brackets=line.inside_brackets
> +            )
>     if current_line:
>         if (
>             trailing_comma_safe
> @@ -2739,7 +2896,9 @@ def standalone_comment_split(
>         except ValueError:
>             yield current_line
> 
> -            current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
> +            current_line = Line(
> +                depth=line.depth, inside_brackets=line.inside_brackets
> +            )
>             current_line.append(leaf)
> 
>     for leaf in line.leaves:
> @@ -2970,7 +3129,9 @@ def normalize_invisible_parens(node: Nod
>                     lpar = Leaf(token.LPAR, "")
>                     rpar = Leaf(token.RPAR, "")
>                     index = child.remove() or 0
> -                    node.insert_child(index, Node(syms.atom, [lpar, child, rpar]))
> +                    node.insert_child(
> +                        index, Node(syms.atom, [lpar, child, rpar])
> +                    )
>             elif is_one_tuple(child):
>                 # wrap child in visible parentheses
>                 lpar = Leaf(token.LPAR, "(")
> @@ -3032,12 +3193,16 @@ def convert_one_fmt_off_pair(node: Node)
>                 if not ignored_nodes:
>                     continue
> 
> -                first = ignored_nodes[0]  # Can be a container node with the `leaf`.
> +                first = ignored_nodes[
> +                    0
> +                ]  # Can be a container node with the `leaf`.
>                 parent = first.parent
>                 prefix = first.prefix
>                 first.prefix = prefix[comment.consumed :]
>                 hidden_value = (
> -                    comment.value + "\n" + "".join(str(n) for n in ignored_nodes)
> +                    comment.value
> +                    + "\n"
> +                    + "".join(str(n) for n in ignored_nodes)
>                 )
>                 if hidden_value.endswith("\n"):
>                     # That happens when one of the `ignored_nodes` ended with a NEWLINE
> @@ -3048,14 +3213,19 @@ def convert_one_fmt_off_pair(node: Node)
>                     index = ignored.remove()
>                     if first_idx is None:
>                         first_idx = index
> -                assert parent is not None, "INTERNAL ERROR: fmt: on/off handling (1)"
> -                assert first_idx is not None, "INTERNAL ERROR: fmt: on/off handling (2)"
> +                assert (
> +                    parent is not None
> +                ), "INTERNAL ERROR: fmt: on/off handling (1)"
> +                assert (
> +                    first_idx is not None
> +                ), "INTERNAL ERROR: fmt: on/off handling (2)"
>                 parent.insert_child(
>                     first_idx,
>                     Leaf(
>                         STANDALONE_COMMENT,
>                         hidden_value,
> -                        prefix=prefix[:previous_consumed] + "\n" * comment.newlines,
> +                        prefix=prefix[:previous_consumed]
> +                        + "\n" * comment.newlines,
>                     ),
>                 )
>                 return True
> @@ -3312,7 +3482,9 @@ def should_explode(line: Line, opening_b
>     try:
>         last_leaf = line.leaves[-1]
>         exclude = {id(last_leaf)} if last_leaf.type == token.COMMA else set()
> -        max_priority = line.bracket_tracker.max_delimiter_priority(exclude=exclude)
> +        max_priority = line.bracket_tracker.max_delimiter_priority(
> +            exclude=exclude
> +        )
>     except (IndexError, ValueError):
>         return False
> 
> @@ -3372,11 +3544,15 @@ def detect_target_versions(node: Node) -
>     """Detect the version to target based on the nodes used."""
>     features = get_features_used(node)
>     return {
> -        version for version in TargetVersion if features <= VERSION_TO_FEATURES[version]
> +        version
> +        for version in TargetVersion
> +        if features <= VERSION_TO_FEATURES[version]
>     }
> 
> 
> -def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[LeafID]]:
> +def generate_trailers_to_omit(
> +    line: Line, line_length: int
> +) -> Iterator[Set[LeafID]]:
>     """Generate sets of closing bracket IDs that should be omitted in a RHS.
> 
>     Brackets can be omitted if the entire trailer up to and including
> @@ -3430,15 +3606,21 @@ def get_future_imports(node: Node) -> Se
>     """Return a set of __future__ imports in the file."""
>     imports: Set[str] = set()
> 
> -    def get_imports_from_children(children: List[LN]) -> Generator[str, None, None]:
> +    def get_imports_from_children(
> +        children: List[LN],
> +    ) -> Generator[str, None, None]:
>         for child in children:
>             if isinstance(child, Leaf):
>                 if child.type == token.NAME:
>                     yield child.value
>             elif child.type == syms.import_as_name:
>                 orig_name = child.children[0]
> -                assert isinstance(orig_name, Leaf), "Invalid syntax parsing imports"
> -                assert orig_name.type == token.NAME, "Invalid syntax parsing imports"
> +                assert isinstance(
> +                    orig_name, Leaf
> +                ), "Invalid syntax parsing imports"
> +                assert (
> +                    orig_name.type == token.NAME
> +                ), "Invalid syntax parsing imports"
>                 yield orig_name.value
>             elif child.type == syms.import_as_names:
>                 yield from get_imports_from_children(child.children)
> @@ -3461,7 +3643,10 @@ def get_future_imports(node: Node) -> Se
>                 break
>         elif first_child.type == syms.import_from:
>             module_name = first_child.children[1]
> -            if not isinstance(module_name, Leaf) or module_name.value != "__future__":
> +            if (
> +                not isinstance(module_name, Leaf)
> +                or module_name.value != "__future__"
> +            ):
>                 break
>             imports |= set(get_imports_from_children(first_child.children[3:]))
>         else:
> @@ -3483,7 +3668,9 @@ def gen_python_files_in_dir(
> 
>     `report` is where output about exclusions goes.
>     """
> -    assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}"
> +    assert (
> +        root.is_absolute()
> +    ), f"INTERNAL ERROR: `root` must be absolute but is {root}"
>     for child in path.iterdir():
>         try:
>             normalized_path = "/" + child.resolve().relative_to(root).as_posix()
> @@ -3500,11 +3687,15 @@ def gen_python_files_in_dir(
>             normalized_path += "/"
>         exclude_match = exclude.search(normalized_path)
>         if exclude_match and exclude_match.group(0):
> -            report.path_ignored(child, f"matches the --exclude regular expression")
> +            report.path_ignored(
> +                child, f"matches the --exclude regular expression"
> +            )
>             continue
> 
>         if child.is_dir():
> -            yield from gen_python_files_in_dir(child, root, include, exclude, report)
> +            yield from gen_python_files_in_dir(
> +                child, root, include, exclude, report
> +            )
> 
>         elif child.is_file():
>             include_match = include.search(normalized_path)
> @@ -3614,7 +3805,9 @@ class Report:
>         if self.change_count:
>             s = "s" if self.change_count > 1 else ""
>             report.append(
> -                click.style(f"{self.change_count} file{s} {reformatted}", bold=True)
> +                click.style(
> +                    f"{self.change_count} file{s} {reformatted}", bold=True
> +                )
>             )
>         if self.same_count:
>             s = "s" if self.same_count > 1 else ""
> @@ -3633,13 +3826,17 @@ def parse_ast(src: str) -> Union[ast.AST
>         # TODO: support Python 4+ ;)
>         for minor_version in range(sys.version_info[1], 4, -1):
>             try:
> -                return ast.parse(src, filename, feature_version=(3, minor_version))
> +                return ast.parse(
> +                    src, filename, feature_version=(3, minor_version)
> +                )
>             except SyntaxError:
>                 continue
>     else:
>         for feature_version in (7, 6):
>             try:
> -                return ast3.parse(src, filename, feature_version=feature_version)
> +                return ast3.parse(
> +                    src, filename, feature_version=feature_version
> +                )
>             except SyntaxError:
>                 continue
> 
> @@ -3664,7 +3861,9 @@ def _fixup_ast_constants(
> def assert_equivalent(src: str, dst: str) -> None:
>     """Raise AssertionError if `src` and `dst` aren't equivalent."""
> 
> -    def _v(node: Union[ast.AST, ast3.AST, ast27.AST], depth: int = 0) -> Iterator[str]:
> +    def _v(
> +        node: Union[ast.AST, ast3.AST, ast27.AST], depth: int = 0
> +    ) -> Iterator[str]:
>         """Simple visitor generating strings to compare ASTs by content."""
> 
>         node = _fixup_ast_constants(node)
> @@ -3692,8 +3891,12 @@ def assert_equivalent(src: str, dst: str
>                     # parentheses and they change the AST.
>                     if (
>                         field == "targets"
> -                        and isinstance(node, (ast.Delete, ast3.Delete, ast27.Delete))
> -                        and isinstance(item, (ast.Tuple, ast3.Tuple, ast27.Tuple))
> +                        and isinstance(
> +                            node, (ast.Delete, ast3.Delete, ast27.Delete)
> +                        )
> +                        and isinstance(
> +                            item, (ast.Tuple, ast3.Tuple, ast27.Tuple)
> +                        )
>                     ):
>                         for item in item.elts:
>                             yield from _v(item, depth + 2)
> @@ -3780,7 +3983,9 @@ def diff(a: str, b: str, a_name: str, b_
>     a_lines = [line + "\n" for line in a.split("\n")]
>     b_lines = [line + "\n" for line in b.split("\n")]
>     return "".join(
> -        difflib.unified_diff(a_lines, b_lines, fromfile=a_name, tofile=b_name, n=5)
> +        difflib.unified_diff(
> +            a_lines, b_lines, fromfile=a_name, tofile=b_name, n=5
> +        )
>     )
> 
> 
> @@ -3866,7 +4071,9 @@ def enumerate_with_length(
>         yield index, leaf, length
> 
> 
> -def is_line_short_enough(line: Line, *, line_length: int, line_str: str = "") -> bool:
> +def is_line_short_enough(
> +    line: Line, *, line_length: int, line_str: str = ""
> +) -> bool:
>     """Return True if `line` is no longer than `line_length`.
> 
>     Uses the provided `line_str` rendering, if any, otherwise computes a new one.
> @@ -3904,7 +4111,9 @@ def can_be_split(line: Line) -> bool:
>             elif leaf.type == token.DOT:
>                 dot_count += 1
>             elif leaf.type == token.NAME:
> -                if not (next.type == token.DOT or next.type in OPENING_BRACKETS):
> +                if not (
> +                    next.type == token.DOT or next.type in OPENING_BRACKETS
> +                ):
>                     return False
> 
>             elif leaf.type not in CLOSING_BRACKETS:
> @@ -4033,7 +4242,9 @@ def get_cache_info(path: Path) -> CacheI
>     return stat.st_mtime, stat.st_size
> 
> 
> -def filter_cached(cache: Cache, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]:
> +def filter_cached(
> +    cache: Cache, sources: Iterable[Path]
> +) -> Tuple[Set[Path], Set[Path]]:
>     """Split an iterable of paths in `sources` into two sets.
> 
>     The first contains paths of files that modified on disk or are not in the
> @@ -4054,8 +4265,13 @@ def write_cache(cache: Cache, sources: I
>     cache_file = get_cache_file(mode)
>     try:
>         CACHE_DIR.mkdir(parents=True, exist_ok=True)
> -        new_cache = {**cache, **{src.resolve(): get_cache_info(src) for src in sources}}
> -        with tempfile.NamedTemporaryFile(dir=str(cache_file.parent), delete=False) as f:
> +        new_cache = {
> +            **cache,
> +            **{src.resolve(): get_cache_info(src) for src in sources},
> +        }
> +        with tempfile.NamedTemporaryFile(
> +            dir=str(cache_file.parent), delete=False
> +        ) as f:
>             pickle.dump(new_cache, f, protocol=pickle.HIGHEST_PROTOCOL)
>         os.replace(f.name, cache_file)
>     except OSError:
> diff --git a/doc/check-seclevel.py b/doc/check-seclevel.py
> --- a/doc/check-seclevel.py
> +++ b/doc/check-seclevel.py
> @@ -70,7 +70,9 @@ def checkseclevel(ui, doc, name, initlev
>             continue
>         nextlevel = mark2level[mark]
>         if curlevel < nextlevel and curlevel + 1 != nextlevel:
> -            ui.warnnoi18n('gap of section level at "%s" of %s\n' % (title, name))
> +            ui.warnnoi18n(
> +                'gap of section level at "%s" of %s\n' % (title, name)
> +            )
>             showavailables(ui, initlevel)
>             errorcnt += 1
>             continue
> @@ -88,7 +90,9 @@ def checkcmdtable(ui, cmdtable, namefmt,
>     for k, entry in cmdtable.items():
>         name = k.split(b"|")[0].lstrip(b"^")
>         if not entry[0].__doc__:
> -            ui.notenoi18n('skip checking %s: no help document\n' % (namefmt % name))
> +            ui.notenoi18n(
> +                'skip checking %s: no help document\n' % (namefmt % name)
> +            )
>             continue
>         errorcnt += checkseclevel(
>             ui, entry[0].__doc__, namefmt % name, initlevel
> @@ -113,7 +117,9 @@ def checkhghelps(ui):
>     ):
>         mod = extensions.load(ui, name, None)
>         if not mod.__doc__:
> -            ui.notenoi18n('skip checking %s extension: no help document\n' % name)
> +            ui.notenoi18n(
> +                'skip checking %s extension: no help document\n' % name
> +            )
>             continue
>         errorcnt += checkseclevel(
>             ui, mod.__doc__, '%s extension' % name, initlevel_ext
> diff --git a/hgext/phabricator.py b/hgext/phabricator.py
> --- a/hgext/phabricator.py
> +++ b/hgext/phabricator.py
> @@ -774,7 +774,9 @@ def phabsend(ui, repo, *revs, **opts):
>                     try:
>                         writediffproperties(unfi[newnode], diffmap[old.node()])
>                     except util.urlerr.urlerror:
> -                        ui.warnnoi18n(b'Failed to update metadata for D%s\n' % drevid)
> +                        ui.warnnoi18n(
> +                            b'Failed to update metadata for D%s\n' % drevid
> +                        )
>                 # Remove local tags since it's no longer necessary
>                 tagname = b'D%d' % drevid
>                 if tagname in repo.tags():
> diff --git a/hgext/win32mbcs.py b/hgext/win32mbcs.py
> --- a/hgext/win32mbcs.py
> +++ b/hgext/win32mbcs.py
> @@ -215,4 +215,6 @@ def extsetup(ui):
>         # command line options is not yet applied when
>         # extensions.loadall() is called.
>         if b'--debug' in sys.argv:
> -            ui.writenoi18n(b"[win32mbcs] activated with encoding: %s\n" % _encoding)
> +            ui.writenoi18n(
> +                b"[win32mbcs] activated with encoding: %s\n" % _encoding
> +            )
> diff --git a/mercurial/debugcommands.py b/mercurial/debugcommands.py
> --- a/mercurial/debugcommands.py
> +++ b/mercurial/debugcommands.py
> @@ -1246,11 +1246,17 @@ def debugformat(ui, repo, **opts):
> def debugfsinfo(ui, path=b"."):
>     """show information detected about current filesystem"""
>     ui.writenoi18n(b'path: %s\n' % path)
> -    ui.writenoi18n(b'mounted on: %s\n' % (util.getfsmountpoint(path) or b'(unknown)'))
> +    ui.writenoi18n(
> +        b'mounted on: %s\n' % (util.getfsmountpoint(path) or b'(unknown)')
> +    )
>     ui.writenoi18n(b'exec: %s\n' % (util.checkexec(path) and b'yes' or b'no'))
>     ui.writenoi18n(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)'))
> -    ui.writenoi18n(b'symlink: %s\n' % (util.checklink(path) and b'yes' or b'no'))
> -    ui.writenoi18n(b'hardlink: %s\n' % (util.checknlink(path) and b'yes' or b'no'))
> +    ui.writenoi18n(
> +        b'symlink: %s\n' % (util.checklink(path) and b'yes' or b'no')
> +    )
> +    ui.writenoi18n(
> +        b'hardlink: %s\n' % (util.checknlink(path) and b'yes' or b'no')
> +    )
>     casesensitive = b'(unknown)'
>     try:
>         with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as f:
> @@ -1934,7 +1940,9 @@ def debugmergestate(ui, repo, *args):
>                 ui.writenoi18n(b'other: %s\n' % record)
>             elif rtype == b'm':
>                 driver, mdstate = record.split(b'\0', 1)
> -                ui.writenoi18n(b'merge driver: %s (state "%s")\n' % (driver, mdstate))
> +                ui.writenoi18n(
> +                    b'merge driver: %s (state "%s")\n' % (driver, mdstate)
> +                )
>             elif rtype in b'FDC':
>                 r = record.split(b'\0')
>                 f, state, hash, lfile, afile, anode, ofile = r[0:7]
> @@ -1947,7 +1955,9 @@ def debugmergestate(ui, repo, *args):
>                     b'file: %s (record type "%s", state "%s", hash %s)\n'
>                     % (f, rtype, state, _hashornull(hash))
>                 )
> -                ui.writenoi18n(b'  local path: %s (flags "%s")\n' % (lfile, flags))
> +                ui.writenoi18n(
> +                    b'  local path: %s (flags "%s")\n' % (lfile, flags)
> +                )
>                 ui.writenoi18n(
>                     b'  ancestor path: %s (node %s)\n'
>                     % (afile, _hashornull(anode))
> @@ -2793,7 +2803,9 @@ def debugrevlog(ui, repo, file_=None, **
>     fmt2 = dfmtstr(totalsize)
>     ui.writenoi18n(b'revisions     : ' + fmt2 % numrevs)
>     ui.writenoi18n(b'    merges    : ' + fmt % pcfmt(nummerges, numrevs))
> -    ui.writenoi18n(b'    normal    : ' + fmt % pcfmt(numrevs - nummerges, numrevs))
> +    ui.writenoi18n(
> +        b'    normal    : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
> +    )
>     ui.writenoi18n(b'revisions     : ' + fmt2 % numrevs)
>     ui.writenoi18n(b'    empty     : ' + fmt % pcfmt(numempty, numrevs))
>     ui.writenoi18n(
> @@ -2804,7 +2816,9 @@ def debugrevlog(ui, repo, file_=None, **
>         b'                   delta : '
>         + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
>     )
> -    ui.writenoi18n(b'    snapshot  : ' + fmt % pcfmt(numfull + numsemi, numrevs))
> +    ui.writenoi18n(
> +        b'    snapshot  : ' + fmt % pcfmt(numfull + numsemi, numrevs)
> +    )
>     for depth in sorted(numsnapdepth):
>         ui.write(
>             (b'      lvl-%-3d :       ' % depth)
> @@ -2877,7 +2891,9 @@ def debugrevlog(ui, repo, file_=None, **
>         ui.write(b'\n')
>         fmt = pcfmtstr(numdeltas)
>         fmt2 = pcfmtstr(numdeltas, 4)
> -        ui.writenoi18n(b'deltas against prev  : ' + fmt % pcfmt(numprev, numdeltas))
> +        ui.writenoi18n(
> +            b'deltas against prev  : ' + fmt % pcfmt(numprev, numdeltas)
> +        )
>         if numprev > 0:
>             ui.writenoi18n(
>                 b'    where prev = p1  : ' + fmt2 % pcfmt(nump1prev, numprev)
> @@ -2889,8 +2905,12 @@ def debugrevlog(ui, repo, file_=None, **
>                 b'    other            : ' + fmt2 % pcfmt(numoprev, numprev)
>             )
>         if gdelta:
> -            ui.writenoi18n(b'deltas against p1    : ' + fmt % pcfmt(nump1, numdeltas))
> -            ui.writenoi18n(b'deltas against p2    : ' + fmt % pcfmt(nump2, numdeltas))
> +            ui.writenoi18n(
> +                b'deltas against p1    : ' + fmt % pcfmt(nump1, numdeltas)
> +            )
> +            ui.writenoi18n(
> +                b'deltas against p2    : ' + fmt % pcfmt(nump2, numdeltas)
> +            )
>             ui.writenoi18n(
>                 b'deltas against other : ' + fmt % pcfmt(numother, numdeltas)
>             )
> @@ -3109,8 +3129,12 @@ def debugrevspec(ui, repo, expr, **opts)
>         arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
>         brevs = revset.makematcher(treebystage[b'optimized'])(repo)
>         if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
> -            ui.writenoi18n(b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n")
> -            ui.writenoi18n(b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n")
> +            ui.writenoi18n(
> +                b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
> +            )
> +            ui.writenoi18n(
> +                b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
> +            )
>         arevs = list(arevs)
>         brevs = list(brevs)
>         if arevs == brevs:
> @@ -3414,7 +3438,9 @@ def debugtemplate(ui, repo, tmpl, **opts
>         ui.note(templater.prettyformat(tree), b'\n')
>         newtree = templater.expandaliases(tree, aliases)
>         if newtree != tree:
> -            ui.notenoi18n(b"* expanded:\n", templater.prettyformat(newtree), b'\n')
> +            ui.notenoi18n(
> +                b"* expanded:\n", templater.prettyformat(newtree), b'\n'
> +            )
> 
>     if revs is None:
>         tres = formatter.templateresources(ui, repo)



More information about the Mercurial-devel mailing list