hgext/evolve.py
branch stable
changeset 587 8152fedbac65
parent 586 f01721161532
child 588 89c8550019d0
--- a/hgext/evolve.py	Tue Oct 23 15:44:24 2012 +0200
+++ b/hgext/evolve.py	Tue Oct 23 16:36:29 2012 +0200
@@ -537,17 +537,17 @@
     obsstore = repo.obsstore
     newermap = {}
     for ctx in repo.set('(not public()) - obsolete()'):
-        prec = obsstore.successors.get(ctx.node(), ())
-        toprocess = set(prec)
+        mark = obsstore.successors.get(ctx.node(), ())
+        toprocess = set(mark)
         while toprocess:
             prec = toprocess.pop()[0]
             if prec not in newermap:
-                newermap[prec] = newerversion(repo, prec)
-            newer = [n for n in newermap[prec] if n] # filter kill
+                successorssets(repo, prec, newermap)
+            newer = [n for n in newermap[prec] if n]
             if len(newer) > 1:
                 divergent.add(ctx.rev())
                 break
-        toprocess.update(obsstore.successors.get(prec, ()))
+            toprocess.update(obsstore.successors.get(prec, ()))
     return divergent
 
 ### changectx method
@@ -815,6 +815,122 @@
             cs.add(sr)
     return cs
 
+nodemod = node
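+# keep an alias to the node module: the local variable `node` inside
+# successorssets() below shadows that name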
+def successorssets(repo, initialnode, cache=None):
+    """Return the newer version of an obsolete changeset"""
+
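+    # Example (hypothetical nodes): if A was rewritten into A' by one marker
+    # and also split into (B, C) by another marker, successorssets(repo, A)
+    # returns two divergent last successors sets: one holding A' and one
+    # holding B and C.
+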
+    # prec -> markers mapping
+    markersfor = repo.obsstore.precursors
+
+    # Stack of nodes whose last successors sets we need to compute
+    toproceed = [initialnode]
+    # set version of toproceed for fast loop detection
+    stackedset = set(toproceed)
+    if cache is None:
+        cache = {}
+    while toproceed:
+        # work on the last node of the stack
+        node = toproceed[-1]
+        if node in cache:
+            # We already have a value for it.
+            # Keep working on something else.
+            stackedset.remove(toproceed.pop())
+        elif node not in markersfor:
+            # The node is not obsolete.
+            # This means it is its own last successors set.
+            if node in repo:
+                # We have a valid last successors set.
+                cache[node] = [(node,)]
+            else:
+                # The final version of this changeset is unknown locally.
+                # Do not count it as a valid successors set.
+                cache[node] = []
+        else:
+            # <lss> stands for Last Successors Sets;
+            # it contains the list of all last successors sets for the current node.
+            lss = []
+            for mark in markersfor[node]:
+                # <mlss> stands for Marker Last Successors Sets;
+                # it contains the list of last successors sets introduced by
+                # this marker.
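+                # Start with a single empty candidate set; the cartesian
+                # product below extends it with each successor's own sets.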
+                mlss = [[]]
+                # iterate over the (possibly multiple) successors of this marker
+                for suc in mark[1]:
+                    if suc not in cache:
+                        # We do not know its last successors sets yet.
+                        if suc in stackedset:
+                            # Loop detected!
+                            #
+                            # we will never be able to compute a proper last
+                            # successors set; the naive and simple approach is
+                            # to consider it killed
+                            cache[suc] = []
+                        else:
+                            # Add the successor to the stack and break; the next
+                            # iteration will work on this successor and the
+                            # algorithm will eventually process the current
+                            # node again.
+                            toproceed.append(suc)
+                            stackedset.add(suc)
+                            break
+                    # If we did not break, we can extend the possible sets of
+                    # last successors.
+                    #
+                    # "Extend" because when the marker has multiple successors
+                    # we have to generate every combination of them.
+                    #
+                    # If a successor has multiple successors sets itself (when
+                    # it is divergent), we take the cartesian product of the
+                    # sets built from the already processed successors and the
+                    # newly obtained successors sets.
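+                    #
+                    # For example (hypothetical nodes), if mlss is [[X]] and
+                    # cache[suc] is [(Y,), (Z,)], the loop below produces
+                    # [[X, Y], [X, Z]].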
+                    newmlss = []
+                    for prefix in mlss:
+                        for suffix in cache[suc]:
+                            newss = list(prefix)
+                            for part in suffix:
+                                # do not duplicate entries in a successors set;
+                                # the first entry wins.
+                                if part not in newss:
+                                    newss.append(part)
+                            newmlss.append(newss)
+                    mlss = newmlss
+                else:
+                    # note: if the marker was a bare killing of this changeset,
+                    # mlss still holds a single, empty successors set
+                    #
+                    # We extend the list of all possible successors sets with
+                    # the successors sets contributed by this marker.
+                    lss.extend(mlss)
+                    # we use continue here to skip the break right below
+                    continue
+                # propagate "nested for" break.
+                # if the nested for exited on break, it did not run the else
+                # clause and did not "continue".
+                break
+            else:
+                # computation was successful for *all* markers.
+                # Add the computed successors sets to the cache
+                # (the node will be popped from toproceed on the next iteration)
+                #
+                # We remove successors sets that are subsets of another one;
+                # this filters out redundant sets.
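+                # Sorting by decreasing length ensures that a potential
+                # superset is always examined before any of its subsets.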
+                candsucset = sorted(((len(ss), set(ss), ss) for ss in lss),
+                                    reverse=True)
+                finalsucset = []
+                for cl, cs, css in candsucset:
+                    if not css:
+                        # remove empty successors set
+                        continue
+                    for fs, fss in finalsucset:
+                        if cs.issubset(fs):
+                            break
+                    else:
+                        finalsucset.append((cs, css))
+                finalsucset = [s[1] for s in finalsucset]
+                finalsucset.reverse()
+                cache[node] = finalsucset
+    return cache[initialnode]
+
 
 
 def newerversion(repo, obs):
@@ -1681,7 +1797,7 @@
     XXX this woobly function won't survive XXX
     """
     for base in ctx._repo.set('reverse(precursors(%d))', ctx):
-        newer = newerversion(ctx._repo, base.node())
+        newer = successorssets(ctx._repo, base.node())
         # drop filter and solution including the original ctx
         newer = [n for n in newer if n and ctx.node() not in n]
         if newer:
@@ -2141,6 +2257,60 @@
     finally:
         lockmod.release(lock, wlock)
 
+if 'debugsuccessorssets' not in commands.table:
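+    # register the command only if nothing else (e.g. a newer Mercurial
+    # core) already provides it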
+
+    @command('debugsuccessorssets',
+        [],
+        _('[REV]'))
+    def debugsuccessorssets(ui, repo, *revs):
+        """show set of successors for revision
+
+        The successors set of a changeset A is a consistent group of revisions
+        that succeed A. Successors sets contain non-obsolete changesets only.
+
+        In most cases a changeset A has either zero successors sets (the
+        changeset was pruned) or a single successors set containing a single
+        successor (the changeset was replaced by A').
+
+        A changeset that has been split will result in a successors set
+        containing more than a single element. Divergent rewriting will result
+        in multiple successors sets.
+
+        The result is displayed as follows::
+
+            <rev1>
+                <successors-1A>
+            <rev2>
+                <successors-2A>
+                <successors-2B1> <successors-2B2> <successors-2B3>
+
+        Here rev2 has two possible successors sets; one holds three elements.
+
+        Add --debug if you want full-size node ids.
+        """
+        cache = {}
+        for rev in scmutil.revrange(repo, revs):
+            ctx = repo[rev]
+            if ui.debug():
+                ui.write('%s\n'% ctx.hex())
+                s = node.hex
+            else:
+                ui.write('%s\n'% ctx)
+                s = node.short
+            for ss in successorssets(repo, ctx.node(), cache):
+                if ss:
+                    ui.write('    ')
+                    ui.write(s(ss[0]))
+                    for n in ss[1:]:
+                        ui.write(' ')
+                        ui.write(s(n))
+                ui.write('\n')
+
 
 @eh.wrapcommand('graft')
 def graftwrapper(orig, ui, repo, *revs, **kwargs):