[PATCH 2 of 2 V3] py3: use namedtuple._replace to produce new tokens

Pierre-Yves David pierre-yves.david at ens-lyon.org
Sun Oct 16 10:30:04 EDT 2016



On 10/14/2016 06:55 PM, Martijn Pieters wrote:
> # HG changeset patch
> # User Martijn Pieters <mjpieters at fb.com>
> # Date 1476347257 -3600
> #      Thu Oct 13 09:27:37 2016 +0100
> # Node ID d20dd7db86044bdca79825499b913840d726d841
> # Parent  9031460519503abe5dc430c8ece29d198121cd65
> py3: use namedtuple._replace to produce new tokens

We seem to be using a private method of some stdlib type here?
Can you elaborate on why this is a good move?
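
For reference, here is a minimal sketch of what the call does (plain
collections.namedtuple; the Token fields mirror tokenize.TokenInfo and
the values are made up):

    >>> from collections import namedtuple
    >>> Token = namedtuple('Token', 'type string start end line')
    >>> t = Token(1, "'x'", (1, 0), (1, 3), "'x'\n")
    >>> t._replace(string="b'x'")
    Token(type=1, string="b'x'", start=(1, 0), end=(1, 3), line="'x'\n")
    >>> t.string  # the original token is unchanged
    "'x'"

tokenize.TokenInfo is built on such a namedtuple, so _replace returns a
fresh token with only the named fields swapped out.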

> diff --git a/mercurial/__init__.py b/mercurial/__init__.py
> --- a/mercurial/__init__.py
> +++ b/mercurial/__init__.py
> @@ -233,9 +233,7 @@
>              """
>              st = tokens[j]
>              if st.type == token.STRING and st.string.startswith(("'", '"')):
> -                rt = tokenize.TokenInfo(st.type, 'u%s' % st.string,
> -                                        st.start, st.end, st.line)
> -                tokens[j] = rt
> +                tokens[j] = st._replace(string='u%s' % st.string)
>
>          for i, t in enumerate(tokens):
>              # Convert most string literals to byte literals. String literals
> @@ -266,8 +264,7 @@
>                      continue
>
>                  # String literal. Prefix to make a b'' string.
> -                yield tokenize.TokenInfo(t.type, 'b%s' % s, t.start, t.end,
> -                                          t.line)
> +                yield t._replace(string='b%s' % t.string)
>                  continue
>
>              # Insert compatibility imports at "from __future__ import" line.
> @@ -287,10 +284,8 @@
>                  for u in tokenize.tokenize(io.BytesIO(l).readline):
>                      if u.type in (tokenize.ENCODING, token.ENDMARKER):
>                          continue
> -                    yield tokenize.TokenInfo(u.type, u.string,
> -                                             (r, c + u.start[1]),
> -                                             (r, c + u.end[1]),
> -                                             '')
> +                    yield u._replace(
> +                        start=(r, c + u.start[1]), end=(r, c + u.end[1]))
>                  continue
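
A side note on this hunk: the old code forced the last field to '',
while _replace keeps u.line, so this one is not a pure rewrite:

    # before: the rebased token carried an empty line field
    tokenize.TokenInfo(u.type, u.string, new_start, new_end, '')
    # after: u.line is now carried over
    u._replace(start=new_start, end=new_end)

(new_start/new_end stand in for the (r, c + ...) tuples above.) I have
not checked whether anything downstream reads .line, so this is
probably harmless, but it deserves a word in the description.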
>
>              # This looks like a function call.
> @@ -322,8 +317,7 @@
>                  # It changes iteritems to items as iteritems is not
>                  # present in Python 3 world.
>                  elif fn == 'iteritems':
> -                    yield tokenize.TokenInfo(t.type, 'items',
> -                                             t.start, t.end, t.line)
> +                    yield t._replace(string='items')
>                      continue
>
>              # Emit unmodified token.
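
FWIW, the string-prefix hunks themselves look behavior-preserving; a
quick sanity check along these lines (a sketch, not something I ran
against this series) compares the two spellings on a real token:

    import io
    import tokenize

    source = b"x = 'hi'\n"
    tokens = list(tokenize.tokenize(io.BytesIO(source).readline))
    # grab the string literal token
    t = next(tok for tok in tokens if tok.type == tokenize.STRING)

    old = tokenize.TokenInfo(t.type, 'b%s' % t.string, t.start, t.end,
                             t.line)
    new = t._replace(string='b%s' % t.string)
    assert old == new  # namedtuples compare field by field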

-- 
Pierre-Yves David

