[PATCH] Add built-in cvsps equivalent

Frank Kingswood frank at kingswood-consulting.co.uk
Wed Mar 26 16:26:18 CDT 2008


# HG changeset patch
# User Frank Kingswood <frank at kingswood-consulting.co.uk>
# Date 1206566746 0
# Node ID 9c0aac69922ad807a503ff80b489a27d7fb8e2b0
# Parent  e1fd124dd3473e4fde6ef62b3a7cb04090feddc2
Add built-in cvsps equivalent.

Built-in cvsps uses cvs rlog to determine the complete project history,
merges similar commits into changesets and builds an ancestry graph.

Also adds cvsps.py commandline tool, in case that is needed for debug.
The cvsps.py output is mostly compatible with that of cvsps but
in my experience sometimes generates correct output in situations where
the 'real' cvsps fails.

diff -r e1fd124dd347 -r 9c0aac69922a hgext/convert/cvs.py
--- a/hgext/convert/cvs.py	Wed Mar 26 21:13:07 2008 +0000
+++ b/hgext/convert/cvs.py	Wed Mar 26 21:25:46 2008 +0000
@@ -3,8 +3,10 @@ import os, locale, re, socket
 import os, locale, re, socket
 from cStringIO import StringIO
 from mercurial import util
+from mercurial.i18n import _
 
 from common import NoRepo, commit, converter_source, checktool
+from cvsps import cvs_create_log,cvs_create_changeset
 
 class convert_cvs(converter_source):
     def __init__(self, ui, path, rev=None):
@@ -14,10 +16,12 @@ class convert_cvs(converter_source):
         if not os.path.exists(cvs):
             raise NoRepo("%s does not look like a CVS checkout" % path)
 
+        checktool('cvs')
         self.cmd = ui.config('convert', 'cvsps', 'cvsps -A -u --cvs-direct -q')
         cvspsexe = self.cmd.split(None, 1)[0]
-        for tool in (cvspsexe, 'cvs'):
-            checktool(tool)
+        self.builtin = cvspsexe=='builtin'
+        if not self.builtin:
+            checktool(cvspsexe)
 
         self.changeset = {}
         self.files = {}
@@ -28,10 +32,11 @@ class convert_cvs(converter_source):
         self.cvsroot = file(os.path.join(cvs, "Root")).read()[:-1]
         self.cvsrepo = file(os.path.join(cvs, "Repository")).read()[:-1]
         self.encoding = locale.getpreferredencoding()
-        self._parse()
+
+        self._parse(ui)
         self._connect()
 
-    def _parse(self):
+    def _parse(self,ui):
         if self.changeset:
             return
 
@@ -56,80 +61,129 @@ class convert_cvs(converter_source):
             id = None
             state = 0
             filerevids = {}
-            for l in util.popen(cmd):
-                if state == 0: # header
-                    if l.startswith("PatchSet"):
-                        id = l[9:-2]
-                        if maxrev and int(id) > maxrev:
-                            # ignore everything
-                            state = 3
-                    elif l.startswith("Date"):
-                        date = util.parsedate(l[6:-1], ["%Y/%m/%d %H:%M:%S"])
-                        date = util.datestr(date)
-                    elif l.startswith("Branch"):
-                        branch = l[8:-1]
-                        self.parent[id] = self.lastbranch.get(branch, 'bad')
-                        self.lastbranch[branch] = id
-                    elif l.startswith("Ancestor branch"):
-                        ancestor = l[17:-1]
-                        # figure out the parent later
-                        self.parent[id] = self.lastbranch[ancestor]
-                    elif l.startswith("Author"):
-                        author = self.recode(l[8:-1])
-                    elif l.startswith("Tag:") or l.startswith("Tags:"):
-                        t = l[l.index(':')+1:]
-                        t = [ut.strip() for ut in t.split(',')]
-                        if (len(t) > 1) or (t[0] and (t[0] != "(none)")):
-                            self.tags.update(dict.fromkeys(t, id))
-                    elif l.startswith("Log:"):
-                        # switch to gathering log
-                        state = 1
-                        log = ""
-                elif state == 1: # log
-                    if l == "Members: \n":
-                        # switch to gathering members
-                        files = {}
-                        oldrevs = []
-                        log = self.recode(log[:-1])
-                        state = 2
+
+            if self.builtin:
+                # builtin cvsps code
+                ui.status(_('using builtin cvsps\n'))
+                verbose=self.ui.configbool('convert','cvs.verbose',default=False)
+
+                i=0
+                for cs in cvs_create_changeset(
+                            cvs_create_log(['.'],verbose=verbose),
+                            verbose=verbose):
+                    if maxrev and cs.Id>maxrev:
+                        break
+                    i += 1
+                    cs.Id = id = str(i)
+
+                    # .Author    - author name as CVS knows it
+                    cs.Author=self.recode(cs.Author)
+
+                    # .Branch    - name of branch this changeset is on, or None
+                    self.lastbranch[cs.Branch] = id
+                    # not dealing with ancestor branch here
+
+                    # .Comment   - commit message
+                    cs.Comment=self.recode(cs.Comment)
+
+                    # .Date      - the commit date as a (time,tz) tuple
+                    date=util.datestr(cs.Date)
+
+                    # .Tags      - list of tags on this changeset
+                    self.tags.update(dict.fromkeys(cs.Tags,id))
+
+                    # .Entries   - list of cvs_log_entry objects in this changeset
+
+                    files={}
+                    for f in cs.Entries:
+                        files[f.File]="%s%s"%('.'.join([str(x) for x in f.Revision]),['','(DEAD)'][f.Dead])
+
+                    if cs.Parent:
+                        p = [cs.Parent.Id]
                     else:
-                        # gather log
-                        log += l
-                elif state == 2: # members
-                    if l == "\n": # start of next entry
-                        state = 0
-                        p = [self.parent[id]]
-                        if id == "1":
-                            p = []
-                        if branch == "HEAD":
-                            branch = ""
-                        if branch:
-                            latest = None
-                            # the last changeset that contains a base
-                            # file is our parent
-                            for r in oldrevs:
-                                latest = max(filerevids.get(r, None), latest)
-                            if latest:
-                                p = [latest]
+                        p = []
 
-                        # add current commit to set
-                        c = commit(author=author, date=date, parents=p,
-                                   desc=log, branch=branch)
-                        self.changeset[id] = c
-                        self.files[id] = files
-                    else:
-                        colon = l.rfind(':')
-                        file = l[1:colon]
-                        rev = l[colon+1:-2]
-                        oldrev, rev = rev.split("->")
-                        files[file] = rev
+                    # add current commit to set
+                    c=commit(author=cs.Author,date=date,parents=p,
+                             desc=cs.Comment,branch=cs.Branch or '')
+                    self.changeset[id]=c
+                    self.files[id]=files
+            else:
+                # external cvsps
+                for l in util.popen(cmd):
+                    if state == 0: # header
+                        if l.startswith("PatchSet"):
+                            id = l[9:-2]
+                            if maxrev and int(id) > maxrev:
+                                # ignore everything
+                                state = 3
+                        elif l.startswith("Date"):
+                            date = util.parsedate(l[6:-1], ["%Y/%m/%d %H:%M:%S"])
+                            date = util.datestr(date)
+                        elif l.startswith("Branch"):
+                            branch = l[8:-1]
+                            self.parent[id] = self.lastbranch.get(branch, 'bad')
+                            self.lastbranch[branch] = id
+                        elif l.startswith("Ancestor branch"):
+                            ancestor = l[17:-1]
+                            # figure out the parent later
+                            self.parent[id] = self.lastbranch[ancestor]
+                        elif l.startswith("Author"):
+                            author = self.recode(l[8:-1])
+                        elif l.startswith("Tag:") or l.startswith("Tags:"):
+                            t = l[l.index(':')+1:]
+                            t = [ut.strip() for ut in t.split(',')]
+                            if (len(t) > 1) or (t[0] and (t[0] != "(none)")):
+                                self.tags.update(dict.fromkeys(t, id))
+                        elif l.startswith("Log:"):
+                            # switch to gathering log
+                            state = 1
+                            log = ""
+                    elif state == 1: # log
+                        if l == "Members: \n":
+                            # switch to gathering members
+                            files = {}
+                            oldrevs = []
+                            log = self.recode(log[:-1])
+                            state = 2
+                        else:
+                            # gather log
+                            log += l
+                    elif state == 2: # members
+                        if l == "\n": # start of next entry
+                            state = 0
+                            p = [self.parent[id]]
+                            if id == "1":
+                                p = []
+                            if branch == "HEAD":
+                                branch = ""
+                            if branch:
+                                latest = None
+                                # the last changeset that contains a base
+                                # file is our parent
+                                for r in oldrevs:
+                                    latest = max(filerevids.get(r, None), latest)
+                                if latest:
+                                    p = [latest]
 
-                        # save some information for identifying branch points
-                        oldrevs.append("%s:%s" % (oldrev, file))
-                        filerevids["%s:%s" % (rev, file)] = id
-                elif state == 3:
-                    # swallow all input
-                    continue
+                            # add current commit to set
+                            c = commit(author=author, date=date, parents=p,
+                                       desc=log, branch=branch)
+                            self.changeset[id] = c
+                            self.files[id] = files
+                        else:
+                            colon = l.rfind(':')
+                            file = l[1:colon]
+                            rev = l[colon+1:-2]
+                            oldrev, rev = rev.split("->")
+                            files[file] = rev
+
+                            # save some information for identifying branch points
+                            oldrevs.append("%s:%s" % (oldrev, file))
+                            filerevids["%s:%s" % (rev, file)] = id
+                    elif state == 3:
+                        # swallow all input
+                        continue
 
             self.heads = self.lastbranch.values()
         finally:
diff -r e1fd124dd347 -r 9c0aac69922a hgext/convert/cvsps.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/convert/cvsps.py	Wed Mar 26 21:25:46 2008 +0000
@@ -0,0 +1,534 @@
+#!/usr/bin/env python
+#
+# Mercurial built-in replacement for cvsps.
+#
+# Copyright 2008, Frank Kingswood <frank at kingswood-consulting.co.uk>
+#
+# This software may be used and distributed according to the terms
+# of the GNU General Public License, incorporated herein by reference.
+
+import os
+import re
+import sys
+from mercurial import util
+from mercurial.i18n import _
+
+
+class cvs_log_entry:
+   '''Class cvs_log_entry has the following attributes:
+      .Author    - author name as CVS knows it
+      .Branch    - name of branch this revision is on
+      .Branches  - revision tuple of branches starting at this revision
+      .Comment   - commit message
+      .Date      - the commit date as a (time,tz) tuple
+      .Dead      - true if file revision is dead
+      .File      - Name of file
+      .Lines     - a tuple (+lines,-lines) or None
+      .Parent    - Previous revision of this entry
+      .RCS       - name of file as returned from CVS
+      .Revision  - revision number as tuple
+      .Tags      - list of tags on the file
+   '''
+   def __init__(self,**entries):
+      self.__dict__.update(entries)
+
+def cvs_create_log(dirs,root=None,rev=None,date=None,verbose=False,rlog=True):
+   '''Collect the CVS rlog'''
+
+   if verbose:
+      print >>sys.stderr,_('Collecting CVS rlog')
+
+   log=[]      # list of cvs_log_entry objects containing the CVS state
+   
+   # patterns to match in CVS (r)log output, by state of use
+   re_00=re.compile('RCS file: (.+)$')
+   re_01=re.compile('cvs \\[r?log aborted\\]: (.+)$')
+   re_02=re.compile('cvs (r?log|server): (.+)\n$')
+   re_03=re.compile("(Cannot access.+CVSROOT)|(can't create temporary directory.+)$")
+   re_10=re.compile('Working file: (.+)$')
+   re_20=re.compile('symbolic names:')
+   re_30=re.compile('\t(.+): ([\\d.]+)$')
+   re_31=re.compile('----------------------------$')
+   re_32=re.compile('=============================================================================$')
+   re_50=re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
+   re_60=re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?')
+   re_70=re.compile('branches: (.+);$')
+
+   # reusing strings typically saves about 40% of memory
+   _cache={}
+   def cache(s):
+      try:
+         return _cache[s]
+      except:
+         _cache[s]=s
+      return s
+
+   for d in dirs or ["."]:
+      cmd=['cvs','-q']
+      
+      if root:
+         r=root
+      else:
+         # else try the Root file in the sandbox
+         try:
+            r=file('%s/CVS/Root'%d).read().strip()
+         except IOError:
+            r=None
+
+      if r:
+         cmd.append('-d%s'%r)
+         prefix=r.split(':')[-1]
+         if not prefix.endswith('/'):
+            prefix+='/'
+      else:
+         prefix=''
+
+      cmd.append(['log','rlog'][rlog])
+      if rev:cmd.append('-r%s'%rev)
+      if isinstance(date,tuple):
+         cmd.append('-d%s'%date[0])
+         cmd.append('-d%s'%date[1])
+      elif date:
+         cmd.append('-d%s'%date)
+      
+      # for sandboxes get the real directory in the repository
+      try:
+         d=file('%s/CVS/Repository'%d).read().strip()
+         prefix=prefix+d
+         if not prefix.endswith('/'):
+            prefix+='/'
+      except IOError:
+         pass
+      cmd.append(d)
+
+      # state machine begins here
+      tags={}     # dictionary of revisions on current file with their tags
+      state=0
+      store=False # set when a new record can be appended
+
+      cmd=[util.shellquote(arg) for arg in cmd]
+      cmd=util.quotecommand(' '.join(cmd))
+      
+      for line in os.popen(cmd):
+         if line.endswith('\n'):
+            line=line[:-1]
+         #print >>sys.stderr,'state=%d line=%r'%(state,line)
+
+         if state==0:
+            match=re_00.match(line)
+            if match:
+               rcs=match.group(1)
+               tags={}
+               if rlog:
+                  filename=rcs[:-2]
+                  if filename.startswith(prefix):
+                     filename=filename[len(prefix):]
+                  if filename.startswith('/'):
+                     filename=filename[1:]
+                  filename=filename.replace('/Attic/','/')
+                  state=2
+                  continue
+               state=1
+               continue
+            match=re_01.match(line)
+            if match:
+               raise Exception(match.group(1))
+            match=re_02.match(line)
+            if match:
+               raise Exception(match.group(2))
+            if re_03.match(line):
+               raise Exception(line)
+
+         elif state==1:
+            match=re_10.match(line)
+            assert match,_('RCS file must always be followed by Working file')
+            filename=match.group(1)
+            state=2
+
+         elif state==2:
+            if re_20.match(line):
+               state=3
+
+         elif state==3:
+            match=re_30.match(line)
+            if match:
+               rev=[int(x) for x in match.group(2).split('.')]
+
+               # Convert magic branch number to an odd-numbered one
+               revn=len(rev)
+               if revn>3 and (revn%2)==0 and rev[-2]==0:
+                  rev=rev[:-2]+rev[-1:]
+               rev=tuple(rev)
+
+               if rev not in tags:
+                  tags[rev]=[]
+               tags[rev].append(match.group(1))
+
+            elif re_31.match(line):
+               state=5
+            elif re_32.match(line):
+               state=0
+
+         elif state==4:
+            if re_31.match(line):
+               state=5
+            else:
+               assert not re_32.match(line),_('Must have at least some revisions')
+
+         elif state==5:
+            match=re_50.match(line)
+            assert match,_('expected revision number')
+            e=cvs_log_entry(RCS=cache(rcs),File=cache(filename),Revision=tuple([int(x) for x in match.group(1).split('.')]),Branches=[],Parent=None)
+            state=6
+
+         elif state==6:
+            match=re_60.match(line)
+            assert match,_('revision must be followed by date line')
+            d=match.group(1)
+            if d[2]=='/':
+               # Y2K
+               d='19'+d
+
+            if len(d.split())!=3:
+               d=d+" UTC"
+            e.Date=util.parsedate(d,['%y/%m/%d %H:%M:%S','%Y/%m/%d %H:%M:%S','%Y-%m-%d %H:%M:%S'])
+            e.Author=cache(match.group(2))
+            e.Dead=match.group(3).lower()=='dead'
+
+            if match.group(5):
+               if match.group(6):
+                  e.Lines=(int(match.group(5)),int(match.group(6)))
+               else:
+                  e.Lines=(int(match.group(5)),0)
+            elif match.group(6):
+               e.Lines=(0,int(match.group(6)))
+            else:
+               e.Lines=None
+            e.Comment=[]
+            state=7
+
+         elif state==7:
+            m=re_70.match(line)
+            if m:
+               e.Branches=[tuple([int(y) for y in x.strip().split('.')]) for x in m.group(1).split(';')]
+               state=8
+            elif re_31.match(line):
+               state=5
+               store=True
+            elif re_32.match(line):
+               state=0
+               store=True
+            else:
+               e.Comment.append(line)
+
+         elif state==8:
+            if re_31.match(line):
+               state=5
+               store=True
+            elif re_32.match(line):
+               state=0
+               store=True
+            else:
+               e.Comment.append(line)
+
+         if store:
+            store=False
+            e.Tags=[cache(x) for x in tags.get(e.Revision,[])]
+            e.Tags.sort()
+            e.Comment=cache('\n'.join(e.Comment))
+            
+            revn=len(e.Revision)
+            if revn>3 and (revn%2)==0:
+               e.Branch=tags.get(e.Revision[:-1],[None])[0]
+            else:
+               e.Branch=None
+
+            log.append(e)
+
+            if verbose and len(log)%100==0:
+               print >>sys.stderr,util.ellipsis('%d %s'%(len(log),e.File),80)
+
+   log.sort(key=lambda x:(x.RCS,x.Revision))
+   
+   # find parent revisions
+   versions={}
+   for e in log:
+      branch=e.Revision[:-1]
+
+      p=versions.get((e.RCS,branch),None)
+      if p is None:
+         p=e.Revision[:-2]
+      e.Parent=p
+      versions[(e.RCS,branch)]=e.Revision
+
+   if verbose:
+      print >>sys.stderr,_('%d log entries')%len(log)
+      
+   return log
+
+
+class cvs_changeset:
+   '''Class cvs_changeset has the following attributes:
+      .Author    - author name as CVS knows it
+      .Branch    - name of branch this changeset is on, or None
+      .Comment   - commit message
+      .Date      - the commit date as a (time,tz) tuple
+      .Entries   - list of cvs_log_entry objects in this changeset
+      .Tags      - list of tags on this changeset
+   '''
+   def __init__(self,**entries):
+      self.__dict__.update(entries)
+
+
+def cvs_create_changeset(log,fuzz=60,verbose=False):
+   '''Convert log into changesets.'''
+
+   if verbose:
+      print >>sys.stderr,_('Creating changesets')
+
+   # Merge changesets
+
+   log.sort(key=lambda x:(x.Comment,x.Author,x.Branch,x.Date))
+
+   changeset=[]
+   files={}
+   c=None
+   for i,e in enumerate(log):
+
+      if i%1000==0 and verbose:
+         print >>sys.stderr,util.ellipsis('%d %d %s'%(i,len(changeset),repr(e.Comment)[1:-1]),80)
+
+      # Check if log entry belongs to the current changeset or not.
+      if not (c and
+              e.Comment==c.Comment and
+              e.Author==c.Author and 
+              e.Branch==c.Branch and
+              (c.Date[0]+c.Date[1])<=(e.Date[0]+e.Date[1])<=(c.Date[0]+c.Date[1])+fuzz and
+              e.File not in files):
+         c=cvs_changeset(Comment=e.Comment,Author=e.Author,
+                          Branch=e.Branch,Date=e.Date)
+         c.Entries=[]
+         changeset.append(c)
+         files={}
+      
+      e.Changeset=c
+      c.Entries.append(e)
+      files[e.File]=True
+      c.Date=e.Date       # changeset date is date of latest commit in it
+
+
+   # Sort files in each changeset
+
+   for c in changeset:
+      def pathcompare(l,r):
+         'Mimic cvsps sorting order'
+         l=l.split('/')
+         r=r.split('/')
+         nl=len(l)
+         nr=len(r)
+         n=min(nl,nr)
+         for i in range(n):
+            if i+1==nl and nl<nr:
+               return -1
+            elif i+1==nr and nl>nr:
+               return +1
+            elif l[i]<r[i]:
+               return -1
+            elif l[i]>r[i]:
+               return +1
+         return 0
+      def entitycompare(l,r):
+         return pathcompare(l.File,r.File)
+
+      c.Entries.sort(cmp=entitycompare)
+
+
+   # Sort changesets by date
+
+   def cscmp(l,r):
+      d=sum(l.Date)-sum(r.Date)
+      if d:
+         return d
+
+      # detect vendor branches and initial commits on a branch
+      le={}
+      for e in l.Entries:
+         le[e.RCS]=e.Revision
+      re={}
+      for e in r.Entries:
+         re[e.RCS]=e.Revision
+         
+      d=0
+      for e in l.Entries:
+         if re.get(e.RCS,None)==e.Parent:
+            assert not d
+            d=1
+            break
+
+      for e in r.Entries:
+         if le.get(e.RCS,None)==e.Parent:
+            assert not d
+            d=-1
+            break
+
+      return d
+
+   changeset.sort(cmp=cscmp)
+
+
+   # Collect tags
+
+   globaltags={}
+   for c in changeset:
+      tags={}
+      for e in c.Entries:
+         for tag in e.Tags:
+            # remember which is the latest changeset to have this tag
+            globaltags[tag]=c
+
+   for c in changeset:
+      tags={}
+      for e in c.Entries:
+         for tag in e.Tags:
+            tags[tag]=True
+      # remember tags only if this is the latest changeset to have it
+      tagnames=[tag for tag in tags if globaltags[tag] is c]
+      tagnames.sort()
+      c.Tags=tagnames
+
+
+   # Find parent changesets
+
+   versions={}
+   for i,c in enumerate(changeset):
+      for f in c.Entries:
+         versions[(f.RCS,f.Revision)]=i
+
+   branches={}
+   for i,c in enumerate(changeset):
+      p=None
+      if c.Branch in branches:
+         p=branches[c.Branch]
+      else:
+         for f in c.Entries:
+            p=max(p,versions.get((f.RCS,f.Parent),None))
+      branches[c.Branch]=i
+      if p is not None:
+         p=changeset[p]
+      c.Parent=p
+
+   if verbose:
+      print >>sys.stderr,_('%d changeset entries')%len(changeset)
+
+   return changeset
+
+
+def main():
+   '''Main program to mimic cvsps.'''
+   from optparse import OptionParser,SUPPRESS_HELP
+   import cPickle as pickle
+   import os
+
+   op=OptionParser(usage='%prog [-cl] files...')
+
+   # Options that are ignored for compatibility with cvsps
+   op.add_option('-A',dest='Ignore',action='store_true',help=SUPPRESS_HELP)
+   op.add_option('-u',dest='Ignore',action='store_true',help=SUPPRESS_HELP)
+   op.add_option('--cvs-direct',dest='Ignore',action='store_true',help=SUPPRESS_HELP)
+   op.add_option('-q',dest='Ignore',action='store_true',help=SUPPRESS_HELP)
+
+   # Main options
+   op.add_option('-p',dest='Prefix',action='store',default='',
+                      help='Prefix to remove from file names')
+   op.add_option('-v',dest='Verbose',action='count',default=0,
+                      help='Be verbose')
+   op.add_option('-z',dest='Fuzz',action='store',type='int',default=60,
+                      help='Set commit time fuzz',metavar='seconds')
+   op.add_option('--root',dest='Root',action='store',
+                          help='Specify cvsroot',metavar='cvsroot')
+
+   # Debugging options
+   op.add_option('-c',dest='ReadChangeset',action='store',default='',
+                      help='Read changeset database',metavar='file')
+   op.add_option('-C',dest='WriteChangeset',action='store',default='',
+                      help='Write changeset database',metavar='file')
+   op.add_option('-l',dest='ReadLog',action='store',default='',
+                      help='Read log database',metavar='file')
+   op.add_option('-L',dest='WriteLog',action='store',default='',
+                      help='Write log database',metavar='file')
+   op.add_option('--debug',dest='Debug',action='store_true',help=SUPPRESS_HELP)
+
+   options,args=op.parse_args()
+
+   # Create log or read from pickle file
+
+   if options.ReadLog:
+      log=pickle.load(file(options.ReadLog))
+   elif options.ReadChangeset:
+      log=None
+   else:
+      log=cvs_create_log(dirs=args,root=options.Root,verbose=options.Verbose)
+      if options.WriteLog:
+         pickle.dump(log,file(options.WriteLog,'w'))
+
+   if options.Debug:
+      print '----- log -----'
+      for e in log:
+         print e
+         print
+      print '---------------'
+         
+   # Create changesets or read from pickle file
+
+   if options.ReadChangeset:
+      changeset=pickle.load(file(options.ReadChangeset))
+   else:
+      changeset=cvs_create_changeset(log,options.Fuzz,options.Verbose)
+      if options.WriteChangeset:
+         pickle.dump(changeset,file(options.WriteChangeset,'w'))
+
+   del log
+
+   if options.Debug:
+      print '----- changesets -----'
+      for cs in changeset:
+         print cs
+         print
+      print '---------------'
+
+   # Print changesets
+
+   for i,cs in enumerate(changeset):
+      cs.Id=i+1
+      # Note: trailing spaces on several lines here are needed to have
+      #       bug-for-bug compatibility with cvsps.
+      print '---------------------'
+      print 'PatchSet %d '%cs.Id
+      print 'Date: %s'%util.datestr(cs.Date,'%Y/%m/%d %H:%M:%S')
+      print 'Author: %s'%cs.Author
+      print 'Branch: %s'%(cs.Branch or 'HEAD')
+      print 'Tag%s: %s '%(['','s'][len(cs.Tags)>1],
+                          ','.join(cs.Tags) or '(none)')
+      if cs.Parent:
+         try:
+            p=str(cs.Parent.Id)
+         except:
+            p="(future)"
+         print 'Parent: %s'%p
+
+      print 'Log:'
+      print cs.Comment
+      print
+      print 'Members: '
+      for f in cs.Entries:
+         fn=f.File
+         if fn.startswith(options.Prefix):
+            fn=fn[len(options.Prefix):]
+         print '\t%s:%s->%s%s '%(fn,'.'.join([str(x) for x in f.Parent]) or 'INITIAL',
+                                 '.'.join([str(x) for x in f.Revision]),['','(DEAD)'][f.Dead])
+      print
+
+if __name__=='__main__':
+   main()
+
+# EOF cvsps.py


More information about the Mercurial-devel mailing list