--- a/hgext/obsolete.py Tue Aug 21 02:36:33 2012 +0200
+++ b/hgext/obsolete.py Tue Aug 21 02:37:53 2012 +0200
@@ -10,19 +10,18 @@
General concept
===============
-This extension introduces the *obsolete* concept. It adds a new *obsolete*
-relation between two changesets. A relation ``<changeset B> obsolete <changeset
-A>`` is set to denote that ``<changeset B>`` is new version of ``<changeset
-A>``.
+This extension introduces the *obsolete* concept. The relation
+``<changeset B> obsoletes <changeset A>`` denotes that ``<changeset B>``
+is a new version of ``<changeset A>``.
-The *obsolete* relation act as a **perpendicular history** to the standard
-changeset history. Standard changeset history versions files. The *obsolete*
-relation versions changesets.
+The *obsolete* relations act as an history **orthogonal** to the regular
+changesets history. Regular changesets history versions files. *Obsolete*
+relations version changesets.
:obsolete: a changeset that has been replaced by another one.
:unstable: a changeset that is not obsolete but has an obsolete ancestor.
-:suspended: an obsolete changeset with unstable descendant.
-:extinct: an obsolete changeset without unstable descendant.
+:suspended: an obsolete changeset with unstable descendants.
+:extinct: an obsolete changeset without unstable descendants.
(subject to garbage collection)
Another name for unstable could be out of sync.
@@ -31,176 +30,628 @@
Usage and Feature
=================
-Display and Exchange
---------------------
-
-obsolete changesets are hidden. (except if they have non obsolete changeset)
-
-obsolete changesets are not exchanged. This will probably change later but it
-was the simpler solution for now.
New commands
------------
Note that rebased changesets are now marked obsolete instead of being stripped.
-Context object
---------------
-
-Context gains a ``obsolete`` method that will return True if a changeset is
-obsolete False otherwise.
-
-revset
-------
-
-Add an ``obsolete()`` entry.
-
-repo extension
---------------
-
-To Do
-~~~~~
-
-- refuse to obsolete published changesets
-
-- handle split
-
-- handle conflict
-
-- handle unstable // out of sync
-
"""
-import os
-try:
- from cStringIO import StringIO
-except ImportError:
- from StringIO import StringIO
-from mercurial.i18n import _
-
-import base64
-import json
-
-import struct
-from mercurial import util, base85
-
-_pack = struct.pack
-_unpack = struct.unpack
from mercurial import util
+
+try:
+ from mercurial import obsolete
+ obsolete._enabled = True
+except ImportError:
+ raise util.Abort('Obsolete extension requires Mercurial 2.3 (or later)')
+
+import sys
+from mercurial.i18n import _
+from mercurial import cmdutil
+from mercurial import commands
from mercurial import context
-from mercurial import revset
-from mercurial import scmutil
-from mercurial import extensions
-from mercurial import pushkey
from mercurial import discovery
from mercurial import error
-from mercurial import commands
-from mercurial import changelog
+from mercurial import extensions
+from mercurial import localrepo
from mercurial import phases
-from mercurial.node import hex, bin, short, nullid
-from mercurial.lock import release
-from mercurial import localrepo
-from mercurial import cmdutil
+from mercurial import revset
+from mercurial import scmutil
from mercurial import templatekw
+from mercurial.node import bin, short, nullid
-try:
- from mercurial.localrepo import storecache
- storecache('babar') # to trigger import
-except (TypeError, ImportError):
- def storecache(*args):
- return scmutil.filecache(*args, instore=True)
+# This extension contains the following code
+#
+# - Extension Helper code
+# - Obsolescence cache
+# - ...
+# - Older format compat
+
+
+
+#####################################################################
+### Extension helper ###
+#####################################################################
+
+class exthelper(object):
+ """Helper for modular extension setup
+
+    A single helper should be instantiated for each extension. Helper
+ methods are then used as decorator for various purpose.
+
+ All decorators return the original function and may be chained.
+ """
+
+ def __init__(self):
+ self._uicallables = []
+ self._extcallables = []
+ self._repocallables = []
+ self._revsetsymbols = []
+ self._templatekws = []
+ self._commandwrappers = []
+ self._extcommandwrappers = []
+ self._functionwrappers = []
+ self._duckpunchers = []
+
+ def final_uisetup(self, ui):
+ """Method to be used as the extension uisetup
+
+ The following operations belong here:
+
+ - Changes to ui.__class__ . The ui object that will be used to run the
+ command has not yet been created. Changes made here will affect ui
+ objects created after this, and in particular the ui that will be
+ passed to runcommand
+ - Command wraps (extensions.wrapcommand)
+ - Changes that need to be visible to other extensions: because
+ initialization occurs in phases (all extensions run uisetup, then all
+ run extsetup), a change made here will be visible to other extensions
+ during extsetup
+ - Monkeypatch or wrap function (extensions.wrapfunction) of dispatch
+ module members
+ - Setup of pre-* and post-* hooks
+ - pushkey setup
+ """
+ for cont, funcname, func in self._duckpunchers:
+ setattr(cont, funcname, func)
+ for command, wrapper in self._commandwrappers:
+ extensions.wrapcommand(commands.table, command, wrapper)
+ for cont, funcname, wrapper in self._functionwrappers:
+ extensions.wrapfunction(cont, funcname, wrapper)
+ for c in self._uicallables:
+ c(ui)
+
+ def final_extsetup(self, ui):
+        """Method to be used as the extension extsetup
+
+ The following operations belong here:
+
+ - Changes depending on the status of other extensions. (if
+ extensions.find('mq'))
+ - Add a global option to all commands
+ - Register revset functions
+ """
+ knownexts = {}
+ for name, symbol in self._revsetsymbols:
+ revset.symbols[name] = symbol
+ for name, kw in self._templatekws:
+ templatekw.keywords[name] = kw
+ for ext, command, wrapper in self._extcommandwrappers:
+ if ext not in knownexts:
+ e = extensions.find('rebase')
+ if e is None:
+ raise util.Abort('extension %s not found' %e)
+ knownexts[ext] = e.cmdtable
+ extensions.wrapcommand(knownexts[ext], commands, wrapper)
+ for c in self._extcallables:
+ c(ui)
+
+ def final_reposetup(self, ui, repo):
+        """Method to be used as the extension reposetup
+
+ The following operations belong here:
+
+ - All hooks but pre-* and post-*
+ - Modify configuration variables
+ - Changes to repo.__class__, repo.dirstate.__class__
+ """
+ for c in self._repocallables:
+ c(ui, repo)
+
+ def uisetup(self, call):
+ """Decorated function will be executed during uisetup
+
+ example::
+
+ @eh.uisetup
+ def setupbabar(ui):
+ print 'this is uisetup!'
+ """
+ self._uicallables.append(call)
+ return call
+
+ def extsetup(self, call):
+ """Decorated function will be executed during extsetup
+
+ example::
+
+ @eh.extsetup
+ def setupcelestine(ui):
+ print 'this is extsetup!'
+ """
+ self._uicallables.append(call)
+ return call
+
+ def reposetup(self, call):
+ """Decorated function will be executed during reposetup
+
+ example::
+
+ @eh.reposetup
+ def setupzephir(ui, repo):
+ print 'this is reposetup!'
+ """
+ self._repocallables.append(call)
+ return call
+
+ def revset(self, symbolname):
+ """Decorated function is a revset symbol
+
+ The name of the symbol must be given as the decorator argument.
+ The symbol is added during `extsetup`.
+
+ example::
+
+ @eh.revset('hidden')
+ def revsetbabar(repo, subset, x):
+ args = revset.getargs(x, 0, 0, 'babar accept no argument')
+ return [r for r in subset if 'babar' in repo[r].description()]
+ """
+ def dec(symbol):
+ self._revsetsymbols.append((symbolname, symbol))
+ return symbol
+ return dec
-### Patch changectx
-#############################
+ def templatekw(self, keywordname):
+        """Decorated function is a template keyword
+
+ The name of the keyword must be given as the decorator argument.
+ The symbol is added during `extsetup`.
+
+ example::
+
+ @eh.templatekw('babar')
+ def kwbabar(ctx):
+ return 'babar'
+ """
+ def dec(keyword):
+ self._templatekws.append((keywordname, keyword))
+ return keyword
+ return dec
+
+ def wrapcommand(self, command, extension=None):
+ """Decorated function is a command wrapper
+
+ The name of the command must be given as the decorator argument.
+ The wrapping is installed during `uisetup`.
+
+ If the second option `extension` argument is provided, the wrapping
+ will be applied in the extension commandtable. This argument must be a
+ string that will be searched using `extension.find` if not found and
+ Abort error is raised. If the wrapping applies to an extension, it is
+ installed during `extsetup`
+
+ example::
+
+ @eh.wrapcommand('summary')
+ def wrapsummary(orig, ui, repo, *args, **kwargs):
+ ui.note('Barry!')
+ return orig(ui, repo, *args, **kwargs)
+
+ """
+ def dec(wrapper):
+ if extension is None:
+ self._commandwrappers.append((command, wrapper))
+ else:
+ self._extcommandwrappers.append((extension, command, wrapper))
+ return wrapper
+ return dec
+
+ def wrapfunction(self, container, funcname):
+ """Decorated function is a function wrapper
+
+ This function takes two arguments, the container and the name of the
+ function to wrap. The wrapping is performed during `uisetup`.
+ (there is no extension support)
+
+ example::
+
+            @eh.wrapfunction(discovery, 'checkheads')
+ def wrapfunction(orig, *args, **kwargs):
+ ui.note('His head smashed in and his heart cut out')
+ return orig(*args, **kwargs)
+ """
+ def dec(wrapper):
+ self._functionwrappers.append((container, funcname, wrapper))
+ return wrapper
+ return dec
+
+ def addattr(self, container, funcname):
+ """Decorated function is to be added to the container
+
+ This function takes two arguments, the container and the name of the
+ function to wrap. The wrapping is performed during `uisetup`.
+
+ example::
+
+            @eh.addattr(context.changectx, 'babar')
+ def babar(ctx):
+ return 'babar' in ctx.description
+ """
+ def dec(func):
+ self._duckpunchers.append((container, funcname, func))
+ return func
+ return dec
+
+eh = exthelper()
+uisetup = eh.final_uisetup
+extsetup = eh.final_extsetup
+reposetup = eh.final_reposetup
+
+#####################################################################
+### Obsolescence Caching Logic ###
+#####################################################################
+
+# Obsolescence related logic can be very slow if we don't have efficient cache.
+#
+# This section implements a cache mechanism that did not make it into core for
+# time reasons. It stores meaningful sets of revisions related to obsolescence
+# (obsolete, unstable, ...).
+#
+# Here is:
+#
+# - Computation of meaningful set,
+# - Cache access logic,
+# - Cache invalidation logic,
+# - revset and ctx using this cache.
+#
+
-def obsolete(ctx):
- """is the changeset obsolete by other"""
- if ctx.node()is None:
- return False
- return bool(ctx._repo.obsoletedby(ctx.node())) and ctx.phase()
+### Computation of meaningful set
+#
+# Most sets can be computed with a "simple" revset.
+
+#: { set name -> function to compute this set } mapping
+#: function take a single "repo" argument.
+#:
+#: Use the `cachefor` decorator to register new cache function
+cachefuncs = {}
+def cachefor(name):
+ """Decorator to register a function as computing the cache for a set"""
+ def decorator(func):
+ assert name not in cachefuncs
+ cachefuncs[name] = func
+ return func
+ return decorator
+
+@cachefor('obsolete')
+def _computeobsoleteset(repo):
+ """the set of obsolete revisions"""
+ obs = set()
+ nm = repo.changelog.nodemap
+ for prec in repo.obsstore.precursors:
+ rev = nm.get(prec)
+ if rev is not None:
+ obs.add(rev)
+ return set(repo.revs('%ld - public()', obs))
+
+@cachefor('unstable')
+def _computeunstableset(repo):
+ """the set of non obsolete revisions with obsolete parents"""
+ return set(repo.revs('(obsolete()::) - obsolete()'))
+
+@cachefor('suspended')
+def _computesuspendedset(repo):
+ """the set of obsolete parents with non obsolete descendants"""
+ return set(repo.revs('obsolete() and obsolete()::unstable()'))
+
+@cachefor('extinct')
+def _computeextinctset(repo):
+ """the set of obsolete parents without non obsolete descendants"""
+ return set(repo.revs('obsolete() - obsolete()::unstable()'))
+
+@eh.wrapfunction(obsolete.obsstore, '__init__')
+def _initobsstorecache(orig, obsstore, *args, **kwargs):
+ """add a cache attribute to obsstore"""
+ obsstore.caches = {}
+ return orig(obsstore, *args, **kwargs)
+
+### Cache access
+
+def getobscache(repo, name):
+    """Return the set of revisions that belong to the <name> set
-context.changectx.obsolete = obsolete
+ Such access may compute the set and cache it for future use"""
+ if not repo.obsstore:
+ return ()
+ if name not in repo.obsstore.caches:
+ repo.obsstore.caches[name] = cachefuncs[name](repo)
+ return repo.obsstore.caches[name]
+
+### Cache clean up
+#
+# To keep things simple we need to invalidate the obsolescence cache when:
+#
+# - a new changeset is added,
+# - the public phase is changed,
+# - obsolescence markers are added,
+# - strip is used on a repo.
+
+
+def clearobscaches(repo):
+ """Remove all obsolescence related cache from a repo
+
+    This removes all caches in obsstore if the obsstore already exists on the
+    repo.
+
+ (We could be smarter here)"""
+ if 'obsstore' in repo._filecache:
+ repo.obsstore.caches.clear()
+@eh.wrapfunction(localrepo.localrepository, 'addchangegroup') # new changeset
+@eh.wrapfunction(phases, 'retractboundary') # phase movement
+@eh.wrapfunction(phases, 'advanceboundary') # phase movement
+@eh.wrapfunction(localrepo.localrepository, 'destroyed') # strip
+def wrapclearcache(orig, repo, *args, **kwargs):
+ try:
+ return orig(repo, *args, **kwargs)
+ finally:
+ # we are a bit wide here
+ # we could restrict to:
+ # advanceboundary + phase==public
+ # retractboundary + phase==draft
+ clearobscaches(repo)
+
+@eh.wrapfunction(obsolete.obsstore, 'add') # new marker
+def clearonadd(orig, obsstore, *args, **kwargs):
+ try:
+ return orig(obsstore, *args, **kwargs)
+ finally:
+ obsstore.caches.clear()
+
+### Use the case
+# Functions in core that could benefit from the cache are overwritten by cache-using versions
+
+# changectx method
+
+@eh.addattr(context.changectx, 'unstable')
def unstable(ctx):
"""is the changeset unstable (have obsolete ancestor)"""
if ctx.node() is None:
return False
- return ctx.rev() in ctx._repo._unstableset
+ return ctx.rev() in getobscache(ctx._repo, 'unstable')
-context.changectx.unstable = unstable
+@eh.addattr(context.changectx, 'extinct')
def extinct(ctx):
"""is the changeset extinct by other"""
if ctx.node() is None:
return False
- return ctx.rev() in ctx._repo._extinctset
+ return ctx.rev() in getobscache(ctx._repo, 'extinct')
+
+# revset
+
+@eh.revset('obsolete')
+def revsetobsolete(repo, subset, x):
+ """``obsolete()``
+ Changeset is obsolete.
+ """
+ args = revset.getargs(x, 0, 0, 'obsolete takes no argument')
+ obsoletes = getobscache(repo, 'obsolete')
+ return [r for r in subset if r in obsoletes]
+
+@eh.revset('unstable')
+def revsetunstable(repo, subset, x):
+ """``unstable()``
+ Unstable changesets are non-obsolete with obsolete ancestors.
+ """
+ args = revset.getargs(x, 0, 0, 'unstable takes no arguments')
+ unstables = getobscache(repo, 'unstable')
+ return [r for r in subset if r in unstables]
+
+@eh.revset('extinct')
+def revsetextinct(repo, subset, x):
+ """``extinct()``
+ Obsolete changesets with obsolete descendants only.
+ """
+ args = revset.getargs(x, 0, 0, 'extinct takes no arguments')
+ extincts = getobscache(repo, 'extinct')
+ return [r for r in subset if r in extincts]
+
+#####################################################################
+### Complete troubles computation logic ###
+#####################################################################
-context.changectx.extinct = extinct
+# there are two kinds of troubles not handled by core right now:
+# - latecomer: (successor of a public changeset)
+# - conflicting: (two changesets try to succeed the same precursor)
+#
+# This section adds support for those two additional troubles
+#
+# - Cache computation
+# - revset and ctx method
+# - push warning
+
+### Cache computation
+
+@cachefor('latecomer')
+def _computelatecomerset(repo):
+ """the set of rev trying to obsolete public revision"""
+ query = 'allsuccessors(public()) - obsolete() - public()'
+ return set(repo.revs(query))
+@cachefor('conflicting')
+def _computeconflictingset(repo):
+    """the set of revisions competing to succeed the same precursor"""
+ conflicting = set()
+ obsstore = repo.obsstore
+ newermap = {}
+ for ctx in repo.set('(not public()) - obsolete()'):
+ prec = obsstore.successors.get(ctx.node(), ())
+ toprocess = set(prec)
+ while toprocess:
+ prec = toprocess.pop()[0]
+ if prec not in newermap:
+ newermap[prec] = newerversion(repo, prec)
+ newer = [n for n in newermap[prec] if n] # filter kill
+ if len(newer) > 1:
+ conflicting.add(ctx.rev())
+ break
+ toprocess.update(obsstore.successors.get(prec, ()))
+ return conflicting
+
+### changectx method
+
+@eh.addattr(context.changectx, 'latecomer')
def latecomer(ctx):
"""is the changeset latecomer (Try to succeed to public change)"""
if ctx.node() is None:
return False
- return ctx.rev() in ctx._repo._latecomerset
+ return ctx.rev() in getobscache(ctx._repo, 'latecomer')
-context.changectx.latecomer = latecomer
-
+@eh.addattr(context.changectx, 'conflicting')
def conflicting(ctx):
"""is the changeset conflicting (Try to succeed to public change)"""
if ctx.node() is None:
return False
- return ctx.rev() in ctx._repo._conflictingset
+ return ctx.rev() in getobscache(ctx._repo, 'conflicting')
+
+### revset symbol
+
+@eh.revset('latecomer')
+def revsetlatecomer(repo, subset, x):
+ """``latecomer()``
+ Changesets marked as successors of public changesets.
+ """
+ args = revset.getargs(x, 0, 0, 'latecomer takes no arguments')
+ lates = getobscache(repo, 'latecomer')
+ return [r for r in subset if r in lates]
+
+@eh.revset('conflicting')
+def revsetconflicting(repo, subset, x):
+ """``conflicting()``
+ Changesets marked as successors of a same changeset.
+ """
+ args = revset.getargs(x, 0, 0, 'conflicting takes no arguments')
+ conf = getobscache(repo, 'conflicting')
+ return [r for r in subset if r in conf]
-context.changectx.conflicting = conflicting
+
+### Discovery wrapping
+
+@eh.wrapfunction(discovery, 'checkheads')
+def wrapcheckheads(orig, repo, remote, outgoing, *args, **kwargs):
+ """wrap mercurial.discovery.checkheads
+
+ * prevent latecomer and unstable to be pushed
+ """
+ # do not push instability
+ for h in outgoing.missingheads:
+ # Checking heads is enough, obsolete descendants are either
+ # obsolete or unstable.
+ ctx = repo[h]
+ if ctx.latecomer():
+ raise util.Abort(_("push includes a latecomer changeset: %s!")
+ % ctx)
+ if ctx.conflicting():
+ raise util.Abort(_("push includes a conflicting changeset: %s!")
+ % ctx)
+ return orig(repo, remote, outgoing, *args, **kwargs)
-### revset
-#############################
+#####################################################################
+### Additional Utilities ###
+#####################################################################
-def revsethidden(repo, subset, x):
- """hidden changesets"""
- args = revset.getargs(x, 0, 0, 'hidden takes no argument')
- return [r for r in subset if r in repo.changelog.hiddenrevs]
+# This section contains a lot of small utility functions and methods
-def revsetobsolete(repo, subset, x):
- """obsolete changesets"""
- args = revset.getargs(x, 0, 0, 'obsolete takes no argument')
- return [r for r in subset if r in repo._obsoleteset and repo._phasecache.phase(repo, r) > 0]
+# - Function to create markers
+# - useful alias pstatus and pdiff (should probably go in evolve)
+# - "troubles" method on changectx
+# - function to travel through the obsolescence graph
+# - function to find useful changeset to stabilize
+
+### Marker Create
-# XXX Backward compatibility, to be removed once stabilized
-if '_phasecache' not in vars(localrepo.localrepository): # new api
- def revsetobsolete(repo, subset, x):
- """obsolete changesets"""
- args = revset.getargs(x, 0, 0, 'obsolete takes no argument')
- return [r for r in subset if r in repo._obsoleteset and repo._phaserev[r] > 0]
+def createmarkers(repo, relations, metadata=None, flag=0):
+ """Add obsolete markers between changeset in a repo
+
+ <relations> must be an iterable of (<old>, (<new>, ...)) tuple.
+ `old` and `news` are changectx.
+
+ Current user and date are used except if specified otherwise in the
+ metadata attribute.
-def revsetunstable(repo, subset, x):
- """non obsolete changesets descendant of obsolete one"""
- args = revset.getargs(x, 0, 0, 'unstable takes no arguments')
- return [r for r in subset if r in repo._unstableset]
+    /!\ assumes the repo has been locked by the user /!\
+ """
+ # prepare metadata
+ if metadata is None:
+ metadata = {}
+ if 'date' not in metadata:
+ metadata['date'] = '%i %i' % util.makedate()
+ if 'user' not in metadata:
+ metadata['user'] = repo.ui.username()
+ # check future marker
+ tr = repo.transaction('add-obsolescence-marker')
+ try:
+ for prec, sucs in relations:
+ if not prec.mutable():
+ raise util.Abort("Cannot obsolete immutable changeset: %s" % prec)
+ nprec = prec.node()
+ nsucs = tuple(s.node() for s in sucs)
+ if nprec in nsucs:
+ raise util.Abort("Changeset %s cannot obsolete himself" % prec)
+ repo.obsstore.create(tr, nprec, nsucs, flag, metadata)
+ clearobscaches(repo)
+ tr.close()
+ finally:
+ tr.release()
-def revsetsuspended(repo, subset, x):
- """obsolete changesets with non obsolete descendants"""
- args = revset.getargs(x, 0, 0, 'suspended takes no arguments')
- return [r for r in subset if r in repo._suspendedset]
+
+### Useful alias
-def revsetextinct(repo, subset, x):
- """obsolete changesets without obsolete descendants"""
- args = revset.getargs(x, 0, 0, 'extinct takes no arguments')
- return [r for r in subset if r in repo._extinctset]
+@eh.uisetup
+def _installalias(ui):
+ if ui.config('alias', 'pstatus', None) is None:
+ ui.setconfig('alias', 'pstatus', 'status --rev .^')
+ if ui.config('alias', 'pdiff', None) is None:
+ ui.setconfig('alias', 'pdiff', 'diff --rev .^')
+
+# - "troubles" method on changectx
+
+@eh.addattr(context.changectx, 'troubles')
+def troubles(ctx):
+ """Return a tuple listing all the troubles that affect a changeset
-def revsetlatecomer(repo, subset, x):
- """latecomer, Try to succeed to public change"""
- args = revset.getargs(x, 0, 0, 'latecomer takes no arguments')
- return [r for r in subset if r in repo._latecomerset]
+ Troubles may be "unstable", "latecomer" or "conflicting".
+ """
+ troubles = []
+ if ctx.unstable():
+ troubles.append('unstable')
+ if ctx.latecomer():
+ troubles.append('latecomer')
+ if ctx.conflicting():
+ troubles.append('conflicting')
+ return tuple(troubles)
-def revsetconflicting(repo, subset, x):
- """conflicting, Try to succeed to public change"""
- args = revset.getargs(x, 0, 0, 'conflicting takes no arguments')
- return [r for r in subset if r in repo._conflictingset]
+
+### Obsolescence graph
+
+# XXX SOME MAJOR CLEAN UP TO DO HERE XXX
def _precursors(repo, s):
"""Precursor of a changeset"""
@@ -214,12 +665,6 @@
cs.add(pr)
return cs
-def revsetprecursors(repo, subset, x):
- """precursors of a subset"""
- s = revset.getset(repo, range(len(repo)), x)
- cs = _precursors(repo, s)
- return [r for r in subset if r in cs]
-
def _allprecursors(repo, s): # XXX we need a better naming
"""transitive precursors of a subset"""
toproceed = [repo[r].node() for r in s]
@@ -240,12 +685,6 @@
cs.add(pr)
return cs
-def revsetallprecursors(repo, subset, x):
- """obsolete parents"""
- s = revset.getset(repo, range(len(repo)), x)
- cs = _allprecursors(repo, s)
- return [r for r in subset if r in cs]
-
def _successors(repo, s):
"""Successors of a changeset"""
cs = set()
@@ -259,12 +698,6 @@
cs.add(sr)
return cs
-def revsetsuccessors(repo, subset, x):
- """successors of a subset"""
- s = revset.getset(repo, range(len(repo)), x)
- cs = _successors(repo, s)
- return [r for r in subset if r in cs]
-
def _allsuccessors(repo, s): # XXX we need a better naming
"""transitive successors of a subset"""
toproceed = [repo[r].node() for r in s]
@@ -287,32 +720,219 @@
cs.add(sr)
return cs
+
+
+def newerversion(repo, obs):
+ """Return the newer version of an obsolete changeset"""
+ toproceed = set([(obs,)])
+ # XXX known optimization available
+ newer = set()
+ objectrels = repo.obsstore.precursors
+ while toproceed:
+ current = toproceed.pop()
+ assert len(current) <= 1, 'splitting not handled yet. %r' % current
+ current = [n for n in current if n != nullid]
+ if current:
+ n, = current
+ if n in objectrels:
+ markers = objectrels[n]
+ for mark in markers:
+ toproceed.add(tuple(mark[1]))
+ else:
+ newer.add(tuple(current))
+ else:
+ newer.add(())
+ return sorted(newer)
+
+
+#####################################################################
+### Extending revset and template ###
+#####################################################################
+
+# this section adds several useful revset symbols not yet in core.
+# they are subject to change
+
+### hidden revset is not in core yet
+
+@eh.revset('hidden')
+def revsethidden(repo, subset, x):
+ """``hidden()``
+ Changeset is hidden.
+ """
+ args = revset.getargs(x, 0, 0, 'hidden takes no argument')
+ return [r for r in subset if r in repo.hiddenrevs]
+
+### XXX I'm not sure this revset is useful
+@eh.revset('suspended')
+def revsetsuspended(repo, subset, x):
+ """``suspended()``
+ Obsolete changesets with non-obsolete descendants.
+ """
+ args = revset.getargs(x, 0, 0, 'suspended takes no arguments')
+ suspended = getobscache(repo, 'suspended')
+ return [r for r in subset if r in suspended]
+
+
+@eh.revset('precursors')
+def revsetprecursors(repo, subset, x):
+ """``precursors(set)``
+ Immediate precursors of changesets in set.
+ """
+ s = revset.getset(repo, range(len(repo)), x)
+ cs = _precursors(repo, s)
+ return [r for r in subset if r in cs]
+
+
+@eh.revset('allprecursors')
+def revsetallprecursors(repo, subset, x):
+ """``allprecursors(set)``
+ Transitive precursors of changesets in set.
+ """
+ s = revset.getset(repo, range(len(repo)), x)
+ cs = _allprecursors(repo, s)
+ return [r for r in subset if r in cs]
+
+
+@eh.revset('successors')
+def revsetsuccessors(repo, subset, x):
+ """``successors(set)``
+ Immediate successors of changesets in set.
+ """
+ s = revset.getset(repo, range(len(repo)), x)
+ cs = _successors(repo, s)
+ return [r for r in subset if r in cs]
+
+@eh.revset('allsuccessors')
def revsetallsuccessors(repo, subset, x):
- """obsolete parents"""
+ """``allsuccessors(set)``
+ Transitive successors of changesets in set.
+ """
s = revset.getset(repo, range(len(repo)), x)
cs = _allsuccessors(repo, s)
return [r for r in subset if r in cs]
+### template keywords
+# XXX it does not handle troubles well :-/
-### template keywords
-#####################
-
+@eh.templatekw('obsolete')
def obsoletekw(repo, ctx, templ, **args):
""":obsolete: String. The obsolescence level of the node, could be
``stable``, ``unstable``, ``suspended`` or ``extinct``.
"""
rev = ctx.rev()
- if rev in repo._extinctset:
- return 'extinct'
- if rev in repo._suspendedset:
- return 'suspended'
- if rev in repo._unstableset:
+ if ctx.obsolete():
+ if ctx.extinct():
+ return 'extinct'
+ else:
+ return 'suspended'
+ elif ctx.unstable():
return 'unstable'
return 'stable'
-### Other Extension compat
-############################
+#####################################################################
+### Various trouble warning ###
+#####################################################################
+
+# This section takes care of issuing warnings to the user when troubles appear
+
+@eh.wrapcommand("update")
+@eh.wrapcommand("pull")
+def wrapmayobsoletewc(origfn, ui, repo, *args, **opts):
+ """Warn that the working directory parent is an obsolete changeset"""
+ res = origfn(ui, repo, *args, **opts)
+ if repo['.'].obsolete():
+ ui.warn(_('Working directory parent is obsolete\n'))
+ return res
+
+# XXX this could wrap transaction code
+# XXX (but this is a bit a layer violation)
+@eh.wrapcommand("commit")
+@eh.wrapcommand("push")
+@eh.wrapcommand("pull")
+@eh.wrapcommand("graft")
+@eh.wrapcommand("phase")
+@eh.wrapcommand("unbundle")
+def warnobserrors(orig, ui, repo, *args, **kwargs):
+    """display a warning if the command resulted in more unstable changesets"""
+ priorunstables = len(repo.revs('unstable()'))
+ priorlatecomers = len(repo.revs('latecomer()'))
+ priorconflictings = len(repo.revs('conflicting()'))
+ try:
+ return orig(ui, repo, *args, **kwargs)
+ finally:
+ newunstables = len(repo.revs('unstable()')) - priorunstables
+ newlatecomers = len(repo.revs('latecomer()')) - priorlatecomers
+ newconflictings = len(repo.revs('conflicting()')) - priorconflictings
+ if newunstables > 0:
+ ui.warn(_('%i new unstables changesets\n') % newunstables)
+ if newlatecomers > 0:
+ ui.warn(_('%i new latecomers changesets\n') % newlatecomers)
+ if newconflictings > 0:
+ ui.warn(_('%i new conflictings changesets\n') % newconflictings)
+
+@eh.reposetup
+def _repostabilizesetup(ui, repo):
+ """Add a hint for "hg stabilize" when troubles make push fails
+ """
+ if not repo.local():
+ return
+
+ opush = repo.push
+ class stabilizerrepo(repo.__class__):
+ def push(self, remote, *args, **opts):
+            """wrapper around push that adds a 'hg stabilize' hint on abort"""
+ try:
+ result = opush(remote, *args, **opts)
+ except util.Abort, ex:
+ hint = _("use 'hg stabilize' to get a stable history "
+ "or --force to ignore warnings")
+ if (len(ex.args) >= 1
+ and ex.args[0].startswith('push includes ')
+ and ex.hint is None):
+ ex.hint = hint
+ raise
+ return result
+ repo.__class__ = stabilizerrepo
+
+#####################################################################
+### Core Other extension compat ###
+#####################################################################
+
+# This section makes official history rewriters create obsolescence markers
+
+
+### commit --amend
+# make commit --amend create obsolete marker
+#
+# The precursor is still stripped from the repository.
+
+@eh.wrapfunction(cmdutil, 'amend')
+def wrapcmdutilamend(orig, ui, repo, commitfunc, old, *args, **kwargs):
+ oldnode = old.node()
+ new = orig(ui, repo, commitfunc, old, *args, **kwargs)
+ if new != oldnode:
+ lock = repo.lock()
+ try:
+ tr = repo.transaction('post-amend-obst')
+ try:
+ meta = {
+ 'date': '%i %i' % util.makedate(),
+ 'user': ui.username(),
+ }
+ repo.obsstore.create(tr, oldnode, [new], 0, meta)
+ tr.close()
+ clearobscaches(repo)
+ finally:
+ tr.release()
+ finally:
+ lock.release()
+ return new
+
+### rebase
+#
+# - ignore obsolete changeset
+# - create obsolescence markers *instead of* stripping
def buildstate(orig, repo, dest, rebaseset, *ags, **kws):
"""wrapper for rebase 's buildstate that exclude obsolete changeset"""
@@ -354,67 +974,50 @@
repo._rebasestate = {}
repo._rebasetarget = None
try:
- res = orig(ui, repo, *args, **kwargs)
- if not reallykeep:
- # Filter nullmerge or unrebased entries
- repo._rebasestate = dict(p for p in repo._rebasestate.iteritems()
- if p[1] >= 0)
- if not res and not kwargs.get('abort') and repo._rebasestate:
- # Rebased revisions are assumed to be descendants of
- # targetrev. If a source revision is mapped to targetrev
- # or to another rebased revision, it must have been
- # removed.
- targetrev = repo[repo._rebasetarget].rev()
- newrevs = set([targetrev])
- replacements = {}
- for rev, newrev in sorted(repo._rebasestate.items()):
- oldnode = repo[rev].node()
- if newrev not in newrevs:
- newnode = repo[newrev].node()
- newrevs.add(newrev)
+ l = repo.lock()
+ try:
+ res = orig(ui, repo, *args, **kwargs)
+ if not reallykeep:
+ # Filter nullmerge or unrebased entries
+ repo._rebasestate = dict(p for p in repo._rebasestate.iteritems()
+ if p[1] >= 0)
+ if not res and not kwargs.get('abort') and repo._rebasestate:
+ # Rebased revisions are assumed to be descendants of
+ # targetrev. If a source revision is mapped to targetrev
+ # or to another rebased revision, it must have been
+ # removed.
+ markers = []
+ if kwargs.get('collapse'):
+ # collapse assumes revisions disappear because they are all
+ # in the created revision
+ newrevs = set(repo._rebasestate.values())
+ newrevs.remove(repo._rebasetarget)
+ if newrevs:
+ # we create new revision.
+ # A single one by --collapse design
+ assert len(newrevs) == 1
+ new = tuple(repo[n] for n in newrevs)
+ else:
+ # everybody died. no new changeset created
+ new = (repo[repo._rebasetarget],)
+ for rev, newrev in sorted(repo._rebasestate.items()):
+ markers.append((repo[rev], new))
else:
- newnode = nullid
- replacements[oldnode] = newnode
-
- if kwargs.get('collapse'):
- newnodes = set(n for n in replacements.values() if n != nullid)
- if newnodes:
- # Collapsing into more than one revision?
- assert len(newnodes) == 1, newnodes
- newnode = newnodes.pop()
- else:
- newnode = nullid
- repo.addcollapsedobsolete(replacements, newnode)
- else:
- for oldnode, newnode in replacements.iteritems():
- repo.addobsolete(newnode, oldnode)
- return res
+ # without collapse, assume revisions disappear because they are
+ # contained in their parent
+ for rev, newrev in sorted(repo._rebasestate.items()):
+ markers.append((repo[rev], (repo[newrev],)))
+ createmarkers(repo, markers)
+ return res
+ finally:
+ l.release()
finally:
delattr(repo, '_rebasestate')
delattr(repo, '_rebasetarget')
-
-def extsetup(ui):
-
- revset.symbols["hidden"] = revsethidden
- revset.symbols["obsolete"] = revsetobsolete
- revset.symbols["unstable"] = revsetunstable
- revset.symbols["suspended"] = revsetsuspended
- revset.symbols["extinct"] = revsetextinct
- revset.symbols["latecomer"] = revsetlatecomer
- revset.symbols["conflicting"] = revsetconflicting
- revset.symbols["obsparents"] = revsetprecursors # DEPR
- revset.symbols["precursors"] = revsetprecursors
- revset.symbols["obsancestors"] = revsetallprecursors # DEPR
- revset.symbols["allprecursors"] = revsetallprecursors # bad name
- revset.symbols["successors"] = revsetsuccessors
- revset.symbols["allsuccessors"] = revsetallsuccessors # bad name
-
- templatekw.keywords['obsolete'] = obsoletekw
-
+@eh.extsetup
+def _rebasewrapping(ui):
# warning about more obsolete
- for cmd in ['commit', 'push', 'pull', 'graft', 'phase', 'unbundle']:
- entry = extensions.wrapcommand(commands.table, cmd, warnobserrors)
try:
rebase = extensions.find('rebase')
if rebase:
@@ -426,356 +1029,39 @@
except KeyError:
pass # rebase not found
-# Pushkey mechanism for mutable
-#########################################
-def listmarkers(repo):
- """List markers over pushkey"""
- if not repo.obsstore:
- return {}
- data = repo.obsstore._writemarkers()
- encdata = base85.b85encode(data)
- return {'dump0': encdata,
- 'dump': encdata} # legacy compat
-
-def pushmarker(repo, key, old, new):
- """Push markers over pushkey"""
- if not key.startswith('dump'):
- repo.ui.warn(_('unknown key: %r') % key)
- return 0
- if old:
- repo.ui.warn(_('unexpected old value') % key)
- return 0
- data = base85.b85decode(new)
- lock = repo.lock()
- try:
- try:
- repo.obsstore.mergemarkers(data)
- return 1
- except util.Abort:
- return 0
- finally:
- lock.release()
-
-pushkey.register('obsolete', pushmarker, listmarkers)
-
-### Discovery wrapping
-#############################
-
-class blist(list, object):
- """silly class to have non False but empty list"""
-
- def __nonzero__(self):
- return bool(len(self.orig))
-
-def wrapfindcommonoutgoing(orig, repo, *args, **kwargs):
- """wrap mercurial.discovery.findcommonoutgoing to remove extinct changeset
-
- Such excluded changeset are removed from excluded and will *not* appear
- are excluded secret changeset.
- """
- outgoing = orig(repo, *args, **kwargs)
- orig = outgoing.excluded
- outgoing.excluded = blist(n for n in orig if not repo[n].extinct())
- # when no revision is specified (push everything) a shortcut is taken when
- # nothign was exclude. taking this code path when extinct changeset have
- # been excluded leads to repository corruption.
- outgoing.excluded.orig = orig
- return outgoing
-
-def wrapcheckheads(orig, repo, remote, outgoing, *args, **kwargs):
- """wrap mercurial.discovery.checkheads
+#####################################################################
+### Older format management ###
+#####################################################################
- * prevent unstability to be pushed
- * patch remote to ignore obsolete heads on remote
- """
- # do not push instability
- for h in outgoing.missingheads:
- # checking heads only is enought because any thing base on obsolete
- # changeset is either obsolete or unstable.
- ctx = repo[h]
- if ctx.unstable():
- raise util.Abort(_("push includes an unstable changeset: %s!")
- % ctx)
- if ctx.obsolete():
- raise util.Abort(_("push includes an obsolete changeset: %s!")
- % ctx)
- if ctx.latecomer():
- raise util.Abort(_("push includes an latecomer changeset: %s!")
- % ctx)
- if ctx.conflicting():
- raise util.Abort(_("push includes conflicting changeset: %s!")
- % ctx)
- ### patch remote branch map
- # do not read it this burn eyes
- try:
- if 'oldbranchmap' not in vars(remote):
- remote.oldbranchmap = remote.branchmap
- def branchmap():
- newbm = {}
- oldbm = None
- if (util.safehasattr(phases, 'visiblebranchmap')
- and not util.safehasattr(remote, 'ignorevisiblebranchmap')
- ):
- remote.ignorevisiblebranchmap = False
- remote.branchmap = remote.oldbranchmap
- oldbm = phases.visiblebranchmap(remote)
- remote.branchmap = remote.newbranchmap
- remote.ignorevisiblebranchmap = True
- if oldbm is None:
- oldbm = remote.oldbranchmap()
- for branch, nodes in oldbm.iteritems():
- nodes = list(nodes)
- new = set()
- while nodes:
- n = nodes.pop()
- if n in repo.obsstore.precursors:
- markers = repo.obsstore.precursors[n]
- for mark in markers:
- for newernode in mark[1]:
- if newernode is not None:
- nodes.append(newernode)
- else:
- new.add(n)
- if new:
- newbm[branch] = list(new)
- return newbm
- remote.ignorevisiblebranchmap = True
- remote.branchmap = branchmap
- remote.newbranchmap = branchmap
- return orig(repo, remote, outgoing, *args, **kwargs)
- finally:
- remote.__dict__.pop('branchmap', None) # restore class one
- remote.__dict__.pop('oldbranchmap', None)
- remote.__dict__.pop('newbranchmap', None)
- remote.__dict__.pop('ignorevisiblebranchmap', None)
+# Code related to detection and management of older legacy format never
+# handled by core
-# eye are still burning
-def wrapvisiblebranchmap(orig, repo):
- ignore = getattr(repo, 'ignorevisiblebranchmap', None)
- if ignore is None:
- return orig(repo)
- elif ignore:
- return repo.branchmap()
- else:
- return None # break recursion
-
-def wrapclearcache(orig, repo, *args, **kwargs):
- try:
- return orig(repo, *args, **kwargs)
- finally:
- repo._clearobsoletecache()
-
-
-### New commands
-#############################
-
-cmdtable = {}
-command = cmdutil.command(cmdtable)
-
-@command('debugobsolete', [], _('SUBJECT OBJECT'))
-def cmddebugobsolete(ui, repo, subject, object):
- """add an obsolete relation between two nodes
-
- The subject is expected to be a newer version of the object.
- """
- lock = repo.lock()
- try:
- sub = repo[subject]
- obj = repo[object]
- repo.addobsolete(sub.node(), obj.node())
- finally:
- lock.release()
- return 0
+import json
-@command('debugconvertobsolete', [], '')
-def cmddebugconvertobsolete(ui, repo):
- """import markers from an .hg/obsolete-relations file"""
- cnt = 0
- err = 0
- l = repo.lock()
- some = False
- try:
- repo._importoldobsolete = True
- store = repo.obsstore
- ### very first format
- try:
- f = repo.opener('obsolete-relations')
- try:
- some = True
- for line in f:
- subhex, objhex = line.split()
- suc = bin(subhex)
- prec = bin(objhex)
- sucs = (suc==nullid) and [] or [suc]
- meta = {
- 'date': '%i %i' % util.makedate(),
- 'user': ui.username(),
- }
- try:
- store.create(prec, sucs, 0, meta)
- cnt += 1
- except ValueError:
- repo.ui.write_err("invalid old marker line: %s"
- % (line))
- err += 1
- finally:
- f.close()
- util.unlink(repo.join('obsolete-relations'))
- except IOError:
- pass
- ### second (json) format
- data = repo.sopener.tryread('obsoletemarkers')
- if data:
- some = True
- for oldmark in json.loads(data):
- del oldmark['id'] # dropped for now
- del oldmark['reason'] # unused until then
- oldobject = str(oldmark.pop('object'))
- oldsubjects = [str(s) for s in oldmark.pop('subjects', [])]
- LOOKUP_ERRORS = (error.RepoLookupError, error.LookupError)
- if len(oldobject) != 40:
- try:
- oldobject = repo[oldobject].node()
- except LOOKUP_ERRORS:
- pass
- if any(len(s) != 40 for s in oldsubjects):
- try:
- oldsubjects = [repo[s].node() for s in oldsubjects]
- except LOOKUP_ERRORS:
- pass
-
- oldmark['date'] = '%i %i' % tuple(oldmark['date'])
- meta = dict((k.encode('utf-8'), v.encode('utf-8'))
- for k, v in oldmark.iteritems())
- try:
- succs = [bin(n) for n in oldsubjects]
- succs = [n for n in succs if n != nullid]
- store.create(bin(oldobject), succs,
- 0, meta)
- cnt += 1
- except ValueError:
- repo.ui.write_err("invalid marker %s -> %s\n"
- % (oldobject, oldsubjects))
- err += 1
- util.unlink(repo.sjoin('obsoletemarkers'))
- finally:
- del repo._importoldobsolete
- l.release()
- if not some:
- ui.warn('nothing to do\n')
- ui.status('%i obsolete marker converted\n' % cnt)
- if err:
- ui.write_err('%i conversion failed. check you graph!\n' % err)
-
-@command('debugsuccessors', [], '')
-def cmddebugsuccessors(ui, repo):
- """dump obsolete changesets and their successors
-
- Each line matches an existing marker, the first identifier is the
- obsolete changeset identifier, followed by it successors.
+@eh.reposetup
+def _checkoldobsolete(ui, repo):
+ """Detect that a repo still contains some old obsolete format
"""
- lock = repo.lock()
- try:
- allsuccessors = repo.obsstore.precursors
- for old in sorted(allsuccessors):
- successors = [sorted(m[1]) for m in allsuccessors[old]]
- for i, group in enumerate(sorted(successors)):
- ui.write('%s' % short(old))
- for new in group:
- ui.write(' %s' % short(new))
- ui.write('\n')
- finally:
- lock.release()
-
-### Altering existing command
-#############################
-
-def wrapmayobsoletewc(origfn, ui, repo, *args, **opts):
- res = origfn(ui, repo, *args, **opts)
- if repo['.'].obsolete():
- ui.warn(_('Working directory parent is obsolete\n'))
- return res
-
-def warnobserrors(orig, ui, repo, *args, **kwargs):
- """display warning is the command resulted in more instable changeset"""
- priorunstables = len(repo.revs('unstable()'))
- priorlatecomers = len(repo.revs('latecomer()'))
- priorconflictings = len(repo.revs('conflicting()'))
- #print orig, priorunstables
- #print len(repo.revs('secret() - obsolete()'))
- try:
- return orig(ui, repo, *args, **kwargs)
- finally:
- newunstables = len(repo.revs('unstable()')) - priorunstables
- newlatecomers = len(repo.revs('latecomer()')) - priorlatecomers
- newconflictings = len(repo.revs('conflicting()')) - priorconflictings
- #print orig, newunstables
- #print len(repo.revs('secret() - obsolete()'))
- if newunstables > 0:
- ui.warn(_('%i new unstables changesets\n') % newunstables)
- if newlatecomers > 0:
- ui.warn(_('%i new latecomers changesets\n') % newlatecomers)
- if newconflictings > 0:
- ui.warn(_('%i new conflictings changesets\n') % newconflictings)
-
-def noextinctsvisibleheads(orig, repo):
- repo._turn_extinct_secret()
- return orig(repo)
-
-def wrapcmdutilamend(orig, ui, repo, commitfunc, old, *args, **kwargs):
- oldnode = old.node()
- new = orig(ui, repo, commitfunc, old, *args, **kwargs)
- if new != oldnode:
- lock = repo.lock()
- try:
- meta = {
- 'subjects': [new],
- 'object': oldnode,
- 'date': util.makedate(),
- 'user': ui.username(),
- 'reason': 'commit --amend',
- }
- repo.obsstore.create(oldnode, [new], 0, meta)
- repo._clearobsoletecache()
- repo._turn_extinct_secret()
- finally:
- lock.release()
- return new
-
-def uisetup(ui):
- extensions.wrapcommand(commands.table, "update", wrapmayobsoletewc)
- extensions.wrapcommand(commands.table, "pull", wrapmayobsoletewc)
- if util.safehasattr(cmdutil, 'amend'):
- extensions.wrapfunction(cmdutil, 'amend', wrapcmdutilamend)
- extensions.wrapfunction(discovery, 'findcommonoutgoing', wrapfindcommonoutgoing)
- extensions.wrapfunction(discovery, 'checkheads', wrapcheckheads)
- extensions.wrapfunction(phases, 'visibleheads', noextinctsvisibleheads)
- extensions.wrapfunction(phases, 'advanceboundary', wrapclearcache)
- if util.safehasattr(phases, 'visiblebranchmap'):
- extensions.wrapfunction(phases, 'visiblebranchmap', wrapvisiblebranchmap)
-
-### serialisation
-#############################
-
-def _obsserialise(obssubrels, flike):
- """serialise an obsolete relation mapping in a plain text one
-
- this is for subject -> [objects] mapping
-
- format is::
-
- <subject-full-hex> <object-full-hex>\n"""
- for sub, objs in obssubrels.iteritems():
- for obj in objs:
- if sub is None:
- sub = nullid
- flike.write('%s %s\n' % (hex(sub), hex(obj)))
+ if not repo.local():
+ return
+ for arg in sys.argv:
+ if 'debugc' in arg:
+ break
+ else:
+ data = repo.opener.tryread('obsolete-relations')
+ if not data:
+ data = repo.sopener.tryread('obsoletemarkers')
+ if data:
+ raise util.Abort('old format of obsolete marker detected!\n'
+ 'run `hg debugconvertobsolete` once.')
def _obsdeserialise(flike):
"""read a file like object serialised with _obsserialise
- this desierialize into a {subject -> objects} mapping"""
+ this deserializes into a {subject -> objects} mapping
+
+ this was the very first format ever."""
rels = {}
for line in flike:
subhex, objhex = line.split()
@@ -785,527 +1071,92 @@
rels.setdefault( subnode, set()).add(bin(objhex))
return rels
-### diagnostique tools
-#############################
-
-def unstables(repo):
- """Return all unstable changeset"""
- return scmutil.revrange(repo, ['obsolete():: and (not obsolete())'])
-
-def newerversion(repo, obs):
- """Return the newer version of an obsolete changeset"""
- toproceed = set([(obs,)])
- # XXX known optimization available
- newer = set()
- objectrels = repo.obsstore.precursors
- while toproceed:
- current = toproceed.pop()
- assert len(current) <= 1, 'splitting not handled yet. %r' % current
- current = [n for n in current if n != nullid]
- if current:
- n, = current
- if n in objectrels:
- markers = objectrels[n]
- for mark in markers:
- toproceed.add(tuple(mark[1]))
- else:
- newer.add(tuple(current))
- else:
- newer.add(())
- return sorted(newer)
-
-### obsolete relation storage
-#############################
-def add2set(d, key, mark):
- """add <mark> to a `set` in <d>[<key>]"""
- d.setdefault(key, []).append(mark)
-
-def markerid(marker):
- KEYS = ['subjects', "object", "date", "user", "reason"]
- for key in KEYS:
- assert key in marker
- keys = sorted(marker.keys())
- a = util.sha1()
- for key in keys:
- if key == 'subjects':
- for sub in sorted(marker[key]):
- a.update(sub)
- elif key == 'id':
- pass
- else:
- a.update(str(marker[key]))
- a.update('\0')
- return a.digest()
-
-# mercurial backport
-
-def encodemeta(meta):
- """Return encoded metadata string to string mapping.
-
- Assume no ':' in key and no '\0' in both key and value."""
- for key, value in meta.iteritems():
- if ':' in key or '\0' in key:
- raise ValueError("':' and '\0' are forbidden in metadata key'")
- if '\0' in value:
- raise ValueError("':' are forbidden in metadata value'")
- return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
-
-def decodemeta(data):
- """Return string to string dictionary from encoded version."""
- d = {}
- for l in data.split('\0'):
- if l:
- key, value = l.split(':')
- d[key] = value
- return d
-
-# data used for parsing and writing
-_fmversion = 0
-_fmfixed = '>BIB20s'
-_fmnode = '20s'
-_fmfsize = struct.calcsize(_fmfixed)
-_fnodesize = struct.calcsize(_fmnode)
-
-def _readmarkers(data):
- """Read and enumerate markers from raw data"""
- off = 0
- diskversion = _unpack('>B', data[off:off + 1])[0]
- off += 1
- if diskversion != _fmversion:
- raise util.Abort(_('parsing obsolete marker: unknown version %r')
- % diskversion)
-
- # Loop on markers
- l = len(data)
- while off + _fmfsize <= l:
- # read fixed part
- cur = data[off:off + _fmfsize]
- off += _fmfsize
- nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
- # read replacement
- sucs = ()
- if nbsuc:
- s = (_fnodesize * nbsuc)
- cur = data[off:off + s]
- sucs = _unpack(_fmnode * nbsuc, cur)
- off += s
- # read metadata
- # (metadata will be decoded on demand)
- metadata = data[off:off + mdsize]
- if len(metadata) != mdsize:
- raise util.Abort(_('parsing obsolete marker: metadata is too '
- 'short, %d bytes expected, got %d')
- % (len(metadata), mdsize))
- off += mdsize
- yield (pre, sucs, flags, metadata)
-
-class obsstore(object):
- """Store obsolete markers
-
- Markers can be accessed with two mappings:
- - precursors: old -> set(new)
- - successors: new -> set(old)
- """
-
- def __init__(self):
- self._all = []
- # new markers to serialize
- self._new = []
- self.precursors = {}
- self.successors = {}
-
- def __iter__(self):
- return iter(self._all)
-
- def __nonzero__(self):
- return bool(self._all)
-
- def create(self, prec, succs=(), flag=0, metadata=None):
- """obsolete: add a new obsolete marker
-
- * ensuring it is hashable
- * check mandatory metadata
- * encode metadata
- """
- if metadata is None:
- metadata = {}
- if len(prec) != 20:
- raise ValueError(repr(prec))
- for succ in succs:
- if len(succ) != 20:
- raise ValueError((succs))
- marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
- self.add(marker)
-
- def add(self, marker):
- """Add a new marker to the store
-
- This marker still needs to be written to disk"""
- self._new.append(marker)
- self._load(marker)
-
- def loadmarkers(self, data):
- """Load all markers in data, mark them as known."""
- for marker in _readmarkers(data):
- self._load(marker)
-
- def mergemarkers(self, data):
- other = set(_readmarkers(data))
- local = set(self._all)
- new = other - local
- for marker in new:
- self.add(marker)
-
- def flushmarkers(self, stream):
- """Write all markers to a stream
-
- After this operation, "new" markers are considered "known"."""
- self._writemarkers(stream)
- self._new[:] = []
-
- def _load(self, marker):
- self._all.append(marker)
- pre, sucs = marker[:2]
- self.precursors.setdefault(pre, set()).add(marker)
- for suc in sucs:
- self.successors.setdefault(suc, set()).add(marker)
-
- def _writemarkers(self, stream=None):
- # Kept separate from flushmarkers(), it will be reused for
- # markers exchange.
- if stream is None:
- final = []
- w = final.append
- else:
- w = stream.write
- w(_pack('>B', _fmversion))
- for marker in self._all:
- pre, sucs, flags, metadata = marker
- nbsuc = len(sucs)
- format = _fmfixed + (_fmnode * nbsuc)
- data = [nbsuc, len(metadata), flags, pre]
- data.extend(sucs)
- w(_pack(format, *data))
- w(metadata)
- if stream is None:
- return ''.join(final)
-
-
-### repo subclassing
-#############################
-
-def reposetup(ui, repo):
- if not repo.local():
- return
-
- if not util.safehasattr(repo.opener, 'tryread'):
- raise util.Abort('Obsolete extension requires Mercurial 2.2 (or later)')
- opull = repo.pull
- opush = repo.push
- olock = repo.lock
- o_rollback = repo._rollback
- o_updatebranchcache = repo.updatebranchcache
-
- # /!\ api change in Hg 2.2 (97efd26eb9576f39590812ea9) /!\
- if util.safehasattr(repo, '_journalfiles'): # Hg 2.2
- o_journalfiles = repo._journalfiles
- o_writejournal = repo._writejournal
- o_hook = repo.hook
-
-
- class obsoletingrepo(repo.__class__):
-
- # workaround
- def hook(self, name, throw=False, **args):
- if 'pushkey' in name:
- args.pop('new')
- args.pop('old')
- return o_hook(name, throw=False, **args)
-
- ### Public method
- def obsoletedby(self, node):
- """return the set of node that make <node> obsolete (obj)"""
- others = set()
- for marker in self.obsstore.precursors.get(node, []):
- others.update(marker[1])
- return others
-
- def obsolete(self, node):
- """return the set of node that <node> make obsolete (sub)"""
- return set(marker[0] for marker in self.obsstore.successors.get(node, []))
-
- @storecache('obsstore')
- def obsstore(self):
- if not getattr(self, '_importoldobsolete', False):
- data = repo.opener.tryread('obsolete-relations')
- if not data:
- data = repo.sopener.tryread('obsoletemarkers')
- if data:
- raise util.Abort('old format of obsolete marker detected!\n'
- 'run `hg debugconvertobsolete` once.')
- store = obsstore()
- data = self.sopener.tryread('obsstore')
+cmdtable = {}
+command = cmdutil.command(cmdtable)
+@command('debugconvertobsolete', [], '')
+def cmddebugconvertobsolete(ui, repo):
+ """import markers from an .hg/obsolete-relations file"""
+ cnt = 0
+ err = 0
+ l = repo.lock()
+ some = False
+ try:
+ unlink = []
+ tr = repo.transaction('convert-obsolete')
+ try:
+ repo._importoldobsolete = True
+ store = repo.obsstore
+ ### very first format
+ try:
+ f = repo.opener('obsolete-relations')
+ try:
+ some = True
+ for line in f:
+ subhex, objhex = line.split()
+ suc = bin(subhex)
+ prec = bin(objhex)
+ sucs = (suc==nullid) and [] or [suc]
+ meta = {
+ 'date': '%i %i' % util.makedate(),
+ 'user': ui.username(),
+ }
+ try:
+ store.create(tr, prec, sucs, 0, meta)
+ cnt += 1
+ except ValueError:
+ repo.ui.write_err("invalid old marker line: %s"
+ % (line))
+ err += 1
+ finally:
+ f.close()
+ unlink.append(repo.join('obsolete-relations'))
+ except IOError:
+ pass
+ ### second (json) format
+ data = repo.sopener.tryread('obsoletemarkers')
if data:
- store.loadmarkers(data)
- return store
-
- @util.propertycache
- def _obsoleteset(self):
- """the set of obsolete revision"""
- obs = set()
- nm = self.changelog.nodemap
- for obj in self.obsstore.precursors:
- try: # /!\api change in Hg 2.2 (e8d37b78acfb22ae2c1fb126c2)/!\
- rev = nm.get(obj)
- except TypeError: #XXX to remove while breaking Hg 2.1 support
- rev = nm.get(obj, None)
- if rev is not None:
- obs.add(rev)
- return obs
-
- @util.propertycache
- def _unstableset(self):
- """the set of non obsolete revision with obsolete parent"""
- return set(self.revs('(obsolete()::) - obsolete()'))
-
- @util.propertycache
- def _suspendedset(self):
- """the set of obsolete parent with non obsolete descendant"""
- return set(self.revs('obsolete() and obsolete()::unstable()'))
-
- @util.propertycache
- def _extinctset(self):
- """the set of obsolete parent without non obsolete descendant"""
- return set(self.revs('obsolete() - obsolete()::unstable()'))
-
- @util.propertycache
- def _latecomerset(self):
- """the set of rev trying to obsolete public revision"""
- query = 'allsuccessors(public()) - obsolete() - public()'
- return set(self.revs(query))
-
- @util.propertycache
- def _conflictingset(self):
- """the set of rev trying to obsolete public revision"""
- conflicting = set()
- obsstore = self.obsstore
- newermap = {}
- for ctx in self.set('(not public()) - obsolete()'):
- prec = obsstore.successors.get(ctx.node(), ())
- toprocess = set(prec)
- while toprocess:
- prec = toprocess.pop()[0]
- if prec not in newermap:
- newermap[prec] = newerversion(self, prec)
- newer = [n for n in newermap[prec] if n] # filter kill
- if len(newer) > 1:
- conflicting.add(ctx.rev())
- break
- toprocess.update(obsstore.successors.get(prec, ()))
- return conflicting
-
- def _clearobsoletecache(self):
- if '_obsoleteset' in vars(self):
- del self._obsoleteset
- self._clearunstablecache()
-
- def updatebranchcache(self):
- o_updatebranchcache()
- self._clearunstablecache()
-
- def _clearunstablecache(self):
- if '_unstableset' in vars(self):
- del self._unstableset
- if '_suspendedset' in vars(self):
- del self._suspendedset
- if '_extinctset' in vars(self):
- del self._extinctset
- if '_latecomerset' in vars(self):
- del self._latecomerset
- if '_conflictingset' in vars(self):
- del self._conflictingset
-
- def addobsolete(self, sub, obj):
- """Add a relation marking that node <sub> is a new version of <obj>"""
- assert sub != obj
- if not repo[obj].phase():
- if sub is None:
- self.ui.warn(
- _("trying to kill immutable changeset %(obj)s\n")
- % {'obj': short(obj)})
- if sub is not None:
- self.ui.warn(
- _("%(sub)s try to obsolete immutable changeset %(obj)s\n")
- % {'sub': short(sub), 'obj': short(obj)})
- lock = self.lock()
- try:
- meta = {
- 'date': util.makedate(),
- 'user': ui.username(),
- 'reason': 'unknown',
- }
- subs = (sub == nullid) and [] or [sub]
- mid = self.obsstore.create(obj, subs, 0, meta)
- self._clearobsoletecache()
- self._turn_extinct_secret()
- return mid
- finally:
- lock.release()
-
- def addcollapsedobsolete(self, oldnodes, newnode):
- """Mark oldnodes as collapsed into newnode."""
- # Assume oldnodes are all descendants of a single rev
- rootrevs = self.revs('roots(%ln)', oldnodes)
- assert len(rootrevs) == 1, rootrevs
- rootnode = self[rootrevs[0]].node()
- for n in oldnodes:
- self.addobsolete(newnode, n)
-
- def _turn_extinct_secret(self):
- """ensure all extinct changeset are secret"""
- self._clearobsoletecache()
- # this is mainly for safety purpose
- # both pull and push
- query = '(obsolete() - obsolete()::(unstable() - secret())) - secret()'
- expobs = [c.node() for c in repo.set(query)]
- phases.retractboundary(repo, 2, expobs)
-
- ### Disk IO
+ some = True
+ for oldmark in json.loads(data):
+ del oldmark['id'] # dropped for now
+ del oldmark['reason'] # unused until then
+ oldobject = str(oldmark.pop('object'))
+ oldsubjects = [str(s) for s in oldmark.pop('subjects', [])]
+ LOOKUP_ERRORS = (error.RepoLookupError, error.LookupError)
+ if len(oldobject) != 40:
+ try:
+ oldobject = repo[oldobject].node()
+ except LOOKUP_ERRORS:
+ pass
+ if any(len(s) != 40 for s in oldsubjects):
+ try:
+ oldsubjects = [repo[s].node() for s in oldsubjects]
+ except LOOKUP_ERRORS:
+ pass
- def lock(self, *args, **kwargs):
- l = olock(*args, **kwargs)
- if not getattr(l.releasefn, 'obspatched', False):
- oreleasefn = l.releasefn
- def releasefn(*args, **kwargs):
- if 'obsstore' in vars(self) and self.obsstore._new:
- f = self.sopener('obsstore', 'wb', atomictemp=True)
- try:
- self.obsstore.flushmarkers(f)
- f.close()
- except: # re-raises
- f.discard()
- raise
- oreleasefn(*args, **kwargs)
- releasefn.obspatched = True
- l.releasefn = releasefn
- return l
-
-
- ### pull // push support
-
- def pull(self, remote, *args, **kwargs):
- """wrapper around push that push obsolete relation"""
- l = repo.lock()
- try:
- result = opull(remote, *args, **kwargs)
- remoteobs = remote.listkeys('obsolete')
- if 'dump' in remoteobs:
- remoteobs['dump0'] = remoteobs.pop('dump')
- if 'dump0' in remoteobs:
- for key, values in remoteobs.iteritems():
- if key.startswith('dump'):
- data = base85.b85decode(remoteobs['dump0'])
- self.obsstore.mergemarkers(data)
- self._clearobsoletecache()
- self._turn_extinct_secret()
- return result
- finally:
- l.release()
-
- def push(self, remote, *args, **opts):
- """wrapper around pull that pull obsolete relation"""
- self._turn_extinct_secret()
- try:
- result = opush(remote, *args, **opts)
- except util.Abort, ex:
- hint = _("use 'hg stabilize' to get a stable history (or --force to proceed)")
- if (len(ex.args) >= 1
- and ex.args[0].startswith('push includes ')
- and ex.hint is None):
- ex.hint = hint
- raise
- if 'obsolete' in remote.listkeys('namespaces') and self.obsstore:
- data = self.obsstore._writemarkers()
- r = remote.pushkey('obsolete', 'dump0', '',
- base85.b85encode(data))
- if not r:
- self.ui.warn(_('failed to push obsolete markers!\n'))
- self._turn_extinct_secret()
-
- return result
-
-
- ### rollback support
-
- # /!\ api change in Hg 2.2 (97efd26eb9576f39590812ea9) /!\
- if util.safehasattr(repo, '_journalfiles'): # Hg 2.2
- def _journalfiles(self):
- return o_journalfiles() + (self.sjoin('journal.obsstore'),)
-
- def _writejournal(self, desc):
- """wrapped version of _writejournal that save obsolete data"""
- o_writejournal(desc)
- filename = 'obsstore'
- filepath = self.sjoin(filename)
- if os.path.exists(filepath):
- journalname = 'journal.' + filename
- journalpath = self.sjoin(journalname)
- util.copyfile(filepath, journalpath)
-
- else: # XXX removing this bloc will break Hg 2.1 support
- def _writejournal(self, desc):
- """wrapped version of _writejournal that save obsolete data"""
- entries = list(o_writejournal(desc))
- filename = 'obsstore'
- filepath = self.sjoin(filename)
- if os.path.exists(filepath):
- journalname = 'journal.' + filename
- journalpath = self.sjoin(journalname)
- util.copyfile(filepath, journalpath)
- entries.append(journalpath)
- return tuple(entries)
-
- def _rollback(self, dryrun, force):
- """wrapped version of _rollback that restore obsolete data"""
- ret = o_rollback(dryrun, force)
- if not (ret or dryrun): #rollback did not failed
- src = self.sjoin('undo.obsstore')
- dst = self.sjoin('obsstore')
- if os.path.exists(src):
- util.rename(src, dst)
- elif os.path.exists(dst):
- # If no state was saved because the file did not existed before.
- os.unlink(dst)
- # invalidate cache
- self.__dict__.pop('obsstore', None)
- return ret
-
- @storecache('00changelog.i')
- def changelog(self):
- # << copy pasted from mercurial source
- c = changelog.changelog(self.sopener)
- if 'HG_PENDING' in os.environ:
- p = os.environ['HG_PENDING']
- if p.startswith(self.root):
- c.readpending('00changelog.i.a')
- # >> end of the copy paste
- old = c.__dict__.pop('hiddenrevs', ())
- if old:
- ui.warn("old wasn't empty ? %r" % old)
- def _sethidden(c, value):
- assert not value
-
-
- class hchangelog(c.__class__):
- @util.propertycache
- def hiddenrevs(c):
- shown = ['not obsolete()', '.', 'bookmark()', 'tagged()',
- 'public()']
- basicquery = 'obsolete() - (::(%s))' % (' or '.join(shown))
- # !!! self is repo not changelog
- result = set(scmutil.revrange(self, [basicquery]))
- return result
- c.__class__ = hchangelog
- return c
-
- repo.__class__ = obsoletingrepo
+ oldmark['date'] = '%i %i' % tuple(oldmark['date'])
+ meta = dict((k.encode('utf-8'), v.encode('utf-8'))
+ for k, v in oldmark.iteritems())
+ try:
+ succs = [bin(n) for n in oldsubjects]
+ succs = [n for n in succs if n != nullid]
+ store.create(tr, bin(oldobject), succs,
+ 0, meta)
+ cnt += 1
+ except ValueError:
+ repo.ui.write_err("invalid marker %s -> %s\n"
+ % (oldobject, oldsubjects))
+ err += 1
+ unlink.append(repo.sjoin('obsoletemarkers'))
+ tr.close()
+ for path in unlink:
+ util.unlink(path)
+ finally:
+ tr.release()
+ finally:
+ del repo._importoldobsolete
+ l.release()
+ if not some:
+ ui.warn('nothing to do\n')
+ ui.status('%i obsolete marker converted\n' % cnt)
+ if err:
+ ui.write_err('%i conversion failed. check you graph!\n' % err)