# Move obserror warning into the obsolete extension.
# obsolete.py - introduce the obsolete concept in mercurial.
#
# Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
# Logilab SA <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""Introduce the Obsolete concept to mercurial
General concept
===============
This extension introduces the *obsolete* concept. It adds a new *obsolete*
relation between two changesets. A relation ``<changeset B> obsolete <changeset
A>`` is set to denote that ``<changeset B>`` is new version of ``<changeset
A>``.
The *obsolete* relation act as a **perpendicular history** to the standard
changeset history. Standard changeset history versions files. The *obsolete*
relation versions changesets.
:obsolete: a changeset that has been replaced by another one.
:unstable: a changeset that is not obsolete but has an obsolete ancestor.
:suspended: an obsolete changeset with unstable descendant.
:extinct: an obsolete changeset without unstable descendant.
(subject to garbage collection)
Another name for unstable could be out of sync.
Usage and Feature
=================
Display and Exchange
--------------------
obsolete changesets are hidden. (except if they have non obsolete changeset)
obsolete changesets are not exchanged. This will probably change later but it
was the simpler solution for now.
New commands
------------
Note that rebased changesets are marked obsolete rather than being stripped.
In this experimental extension, this is done by forcing the --keep option.
Trying to use the --keep option of rebase with this experimental extension
will cause such a call to abort. Until a better release, please use the
graft command to rebase and copy changesets.
Context object
--------------
Context gains a ``obsolete`` method that will return True if a changeset is
obsolete False otherwise.
revset
------
Add an ``obsolete()`` entry.
repo extension
--------------
To Do
~~~~~
- refuse to obsolete published changesets
- handle split
- handle conflict
- handle unstable // out of sync
"""
import os
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from mercurial.i18n import _
import base64
import json
import struct
from mercurial import util, base85
_pack = struct.pack
_unpack = struct.unpack
from mercurial import util
from mercurial import context
from mercurial import revset
from mercurial import scmutil
from mercurial import extensions
from mercurial import pushkey
from mercurial import discovery
from mercurial import error
from mercurial import commands
from mercurial import changelog
from mercurial import phases
from mercurial.node import hex, bin, short, nullid
from mercurial.lock import release
from mercurial import localrepo
from mercurial import cmdutil
from mercurial import templatekw
# Compatibility shim: `storecache` appeared in Mercurial 2.2. Calling it
# once probes both its availability (ImportError) and its signature
# (TypeError); older versions fall back to scmutil.filecache in store mode.
try:
    from mercurial.localrepo import storecache
    storecache('babar') # to trigger import
except (TypeError, ImportError):
    def storecache(*args):
        return scmutil.filecache(*args, instore=True)
### Patch changectx
#############################
def obsolete(ctx):
    """Return whether this changeset has been made obsolete by another one.

    The working directory context (node None) is never obsolete, and a
    public changeset (phase 0) is never reported obsolete either.
    """
    node = ctx.node()
    if node is None:
        return False
    return bool(ctx._repo.obsoletedby(node)) and ctx.phase()
context.changectx.obsolete = obsolete
def unstable(ctx):
    """Return True when the changeset is unstable: not obsolete itself
    but based on an obsolete ancestor."""
    # the working directory context (node None) is never unstable
    return ctx.node() is not None and ctx.rev() in ctx._repo._unstableset
context.changectx.unstable = unstable
def extinct(ctx):
    """Return True when the changeset is extinct: obsolete with no
    non-obsolete descendant."""
    # the working directory context (node None) is never extinct
    return ctx.node() is not None and ctx.rev() in ctx._repo._extinctset
context.changectx.extinct = extinct
def latecomer(ctx):
    """Return True when the changeset is a latecomer: it tries to
    succeed a public changeset."""
    # the working directory context (node None) is never a latecomer
    return ctx.node() is not None and ctx.rev() in ctx._repo._latecomerset
context.changectx.latecomer = latecomer
def conflicting(ctx):
    """Return True when the changeset is conflicting: several candidate
    successors compete for the same precursor."""
    # the working directory context (node None) is never conflicting
    return ctx.node() is not None and ctx.rev() in ctx._repo._conflictingset
context.changectx.conflicting = conflicting
### revset
#############################
def revsethidden(repo, subset, x):
    """``hidden()`` — changesets hidden from normal display"""
    # no arguments accepted; getargs raises otherwise
    revset.getargs(x, 0, 0, 'hidden takes no argument')
    hidden = repo.changelog.hiddenrevs
    return [r for r in subset if r in hidden]
def revsetobsolete(repo, subset, x):
    """``obsolete()`` — non-public changesets with an obsolescence marker"""
    # no arguments accepted; getargs raises otherwise
    revset.getargs(x, 0, 0, 'obsolete takes no argument')
    getphase = repo._phasecache.phase
    return [r for r in subset
            if r in repo._obsoleteset and getphase(repo, r) > 0]
# XXX Backward compatibility, to be removed once stabilized
# Mercurial versions without the `_phasecache` attribute expose phases
# through the `_phaserev` list; redefine the revset predicate accordingly.
if '_phasecache' not in vars(localrepo.localrepository): # new api
    def revsetobsolete(repo, subset, x):
        """obsolete changesets"""
        args = revset.getargs(x, 0, 0, 'obsolete takes no argument')
        return [r for r in subset if r in repo._obsoleteset and repo._phaserev[r] > 0]
def revsetunstable(repo, subset, x):
    """``unstable()`` — non-obsolete changesets with an obsolete ancestor"""
    revset.getargs(x, 0, 0, 'unstable takes no arguments')
    wanted = repo._unstableset
    return [r for r in subset if r in wanted]
def revsetsuspended(repo, subset, x):
    """``suspended()`` — obsolete changesets with non-obsolete descendants"""
    revset.getargs(x, 0, 0, 'suspended takes no arguments')
    wanted = repo._suspendedset
    return [r for r in subset if r in wanted]
def revsetextinct(repo, subset, x):
    """``extinct()`` — obsolete changesets without non-obsolete descendants"""
    revset.getargs(x, 0, 0, 'extinct takes no arguments')
    wanted = repo._extinctset
    return [r for r in subset if r in wanted]
def revsetlatecomer(repo, subset, x):
    """``latecomer()`` — changesets trying to succeed a public changeset"""
    revset.getargs(x, 0, 0, 'latecomer takes no arguments')
    wanted = repo._latecomerset
    return [r for r in subset if r in wanted]
def revsetconflicting(repo, subset, x):
    """``conflicting()`` — changesets with competing successors"""
    revset.getargs(x, 0, 0, 'conflicting takes no arguments')
    wanted = repo._conflictingset
    return [r for r in subset if r in wanted]
def _precursors(repo, s):
"""Precursor of a changeset"""
cs = set()
nm = repo.changelog.nodemap
markerbysubj = repo.obsstore.successors
for r in s:
for p in markerbysubj.get(repo[r].node(), ()):
pr = nm.get(p[0])
if pr is not None:
cs.add(pr)
return cs
def revsetprecursors(repo, subset, x):
    """``precursors(set)`` — direct precursors of changesets in set"""
    candidates = revset.getset(repo, range(len(repo)), x)
    precs = _precursors(repo, candidates)
    return [r for r in subset if r in precs]
def _allprecursors(repo, s): # XXX we need a better naming
"""transitive precursors of a subset"""
toproceed = [repo[r].node() for r in s]
seen = set()
allsubjects = repo.obsstore.successors
while toproceed:
nc = toproceed.pop()
for mark in allsubjects.get(nc, ()):
np = mark[0]
if np not in seen:
seen.add(np)
toproceed.append(np)
nm = repo.changelog.nodemap
cs = set()
for p in seen:
pr = nm.get(p)
if pr is not None:
cs.add(pr)
return cs
def revsetallprecursors(repo, subset, x):
    """``allprecursors(set)`` — transitive precursors of changesets in set"""
    candidates = revset.getset(repo, range(len(repo)), x)
    precs = _allprecursors(repo, candidates)
    return [r for r in subset if r in precs]
def _successors(repo, s):
"""Successors of a changeset"""
cs = set()
nm = repo.changelog.nodemap
markerbyobj = repo.obsstore.precursors
for r in s:
for p in markerbyobj.get(repo[r].node(), ()):
for sub in p[1]:
sr = nm.get(sub)
if sr is not None:
cs.add(sr)
return cs
def revsetsuccessors(repo, subset, x):
    """``successors(set)`` — direct successors of changesets in set"""
    candidates = revset.getset(repo, range(len(repo)), x)
    succs = _successors(repo, candidates)
    return [r for r in subset if r in succs]
def _allsuccessors(repo, s): # XXX we need a better naming
"""transitive successors of a subset"""
toproceed = [repo[r].node() for r in s]
seen = set()
allobjects = repo.obsstore.precursors
while toproceed:
nc = toproceed.pop()
for mark in allobjects.get(nc, ()):
for sub in mark[1]:
if sub not in seen:
seen.add(sub)
toproceed.append(sub)
nm = repo.changelog.nodemap
cs = set()
for s in seen:
sr = nm.get(s)
if sr is not None:
cs.add(sr)
return cs
def revsetallsuccessors(repo, subset, x):
    """``allsuccessors(set)`` — transitive successors of changesets in set"""
    candidates = revset.getset(repo, range(len(repo)), x)
    succs = _allsuccessors(repo, candidates)
    return [r for r in subset if r in succs]
### template keywords
#####################
def obsoletekw(repo, ctx, templ, **args):
    """:obsolete: String. The obsolescence level of the node, could be
    ``stable``, ``unstable``, ``suspended`` or ``extinct``.
    """
    # order matters: extinct and suspended are both subsets of obsolete;
    # attribute access is kept sequential so the cached sets stay lazy
    r = ctx.rev()
    if r in repo._extinctset:
        return 'extinct'
    elif r in repo._suspendedset:
        return 'suspended'
    elif r in repo._unstableset:
        return 'unstable'
    return 'stable'
### Other Extension compat
############################
def buildstate(orig, repo, dest, rebaseset, *ags, **kws):
    """wrapper for rebase's buildstate that drops extinct changesets
    from the set of revisions to rebase"""
    pruned = repo.revs('%ld - extinct()', rebaseset)
    return orig(repo, dest, pruned, *ags, **kws)
def defineparents(orig, repo, rev, target, state, *args, **kwargs):
    """wrapper for rebase's defineparents that snapshots the rebase state

    When cmdrebase installed a ``_rebasestate`` attribute on the repo, a
    copy of the current state mapping and the target are stored there so
    obsolete markers can be derived after the rebase completes.
    """
    if getattr(repo, '_rebasestate', None) is not None:
        repo._rebasestate = dict(state)
        repo._rebasetarget = target
    return orig(repo, rev, target, state, *args, **kwargs)
def concludenode(orig, repo, rev, p1, *args, **kwargs):
    """wrapper for rebase's concludenode that records the rebase mapping

    Each rebased revision is mapped to its replacement revision, or to
    ``p1`` when no new commit was produced.
    """
    newrev = orig(repo, rev, p1, *args, **kwargs)
    if getattr(repo, '_rebasestate', None) is not None:
        if newrev is not None:
            repo._rebasestate[rev] = repo[newrev].rev()
        else:
            repo._rebasestate[rev] = p1
    return newrev
def cmdrebase(orig, ui, repo, *args, **kwargs):
    """wrapper for the rebase command that obsoletes instead of stripping

    The --keep option is forced so nothing is stripped; after a
    successful run, obsolete markers are created from the state sampled
    by the wrapped defineparents()/concludenode().
    """
    reallykeep = kwargs.get('keep', False)
    kwargs = dict(kwargs)
    kwargs['keep'] = True
    # We want to mark rebased revision as obsolete and set their
    # replacements if any. Doing it in concludenode() prevents
    # aborting the rebase, and is not called with all relevant
    # revisions in --collapse case. Instead, we try to track the
    # rebase state structure by sampling/updating it in
    # defineparents() and concludenode(). The obsolete markers are
    # added from this state after a successful call.
    repo._rebasestate = {}
    repo._rebasetarget = None
    try:
        res = orig(ui, repo, *args, **kwargs)
        if not reallykeep:
            # Filter nullmerge or unrebased entries
            repo._rebasestate = dict(p for p in repo._rebasestate.iteritems()
                                     if p[1] >= 0)
            if not res and not kwargs.get('abort') and repo._rebasestate:
                # Rebased revisions are assumed to be descendants of
                # targetrev. If a source revision is mapped to targetrev
                # or to another rebased revision, it must have been
                # removed.
                targetrev = repo[repo._rebasetarget].rev()
                newrevs = set([targetrev])
                replacements = {}
                for rev, newrev in sorted(repo._rebasestate.items()):
                    oldnode = repo[rev].node()
                    if newrev not in newrevs:
                        newnode = repo[newrev].node()
                        newrevs.add(newrev)
                    else:
                        # mapped to target or to an already-seen new rev:
                        # the changeset was dropped (killed)
                        newnode = nullid
                    replacements[oldnode] = newnode
                if kwargs.get('collapse'):
                    newnodes = set(n for n in replacements.values()
                                   if n != nullid)
                    if newnodes:
                        # Collapsing into more than one revision?
                        assert len(newnodes) == 1, newnodes
                        newnode = newnodes.pop()
                    else:
                        newnode = nullid
                    repo.addcollapsedobsolete(replacements, newnode)
                else:
                    for oldnode, newnode in replacements.iteritems():
                        repo.addobsolete(newnode, oldnode)
        return res
    finally:
        # always drop the sampling attributes installed above
        delattr(repo, '_rebasestate')
        delattr(repo, '_rebasetarget')
def extsetup(ui):
    """register revset predicates, the template keyword and command wrappers"""
    revset.symbols["hidden"] = revsethidden
    revset.symbols["obsolete"] = revsetobsolete
    revset.symbols["unstable"] = revsetunstable
    revset.symbols["suspended"] = revsetsuspended
    revset.symbols["extinct"] = revsetextinct
    revset.symbols["latecomer"] = revsetlatecomer
    revset.symbols["conflicting"] = revsetconflicting
    revset.symbols["obsparents"] = revsetprecursors # DEPR
    revset.symbols["precursors"] = revsetprecursors
    revset.symbols["obsancestors"] = revsetallprecursors # DEPR
    revset.symbols["allprecursors"] = revsetallprecursors # bad name
    revset.symbols["successors"] = revsetsuccessors
    revset.symbols["allsuccessors"] = revsetallsuccessors # bad name
    templatekw.keywords['obsolete'] = obsoletekw
    # warn when these commands create more unstable/latecomer/conflicting
    # changesets (see warnobserrors)
    for cmd in ['commit', 'push', 'pull', 'graft', 'phase', 'unbundle']:
        entry = extensions.wrapcommand(commands.table, cmd, warnobserrors)
    try:
        rebase = extensions.find('rebase')
        if rebase:
            entry = extensions.wrapcommand(rebase.cmdtable, 'rebase', warnobserrors)
            extensions.wrapfunction(rebase, 'buildstate', buildstate)
            extensions.wrapfunction(rebase, 'defineparents', defineparents)
            extensions.wrapfunction(rebase, 'concludenode', concludenode)
            extensions.wrapcommand(rebase.cmdtable, "rebase", cmdrebase)
    except KeyError:
        pass # rebase not found
# Pushkey mechanism for mutable
#########################################
def listmarkers(repo):
    """List markers over pushkey

    Returns an empty mapping when the store is empty, otherwise a single
    'dump' key holding the base85-encoded binary dump of all markers.
    """
    store = repo.obsstore
    if not store:
        return {}
    return {'dump': base85.b85encode(store._writemarkers())}
def pushmarker(repo, key, old, new):
    """Push markers over pushkey

    Only the 'dump' key is supported; ``new`` is a base85-encoded binary
    dump of markers merged into the local store under the repo lock.
    Returns 1 on success, 0 on failure (pushkey convention).
    """
    if key != 'dump':
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        # bug fix: the original formatted a literal with no conversion
        # specifier ('unexpected old value' % key), raising TypeError
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    lock = repo.lock()
    try:
        repo.obsstore.mergemarkers(data)
        return 1
    finally:
        lock.release()
pushkey.register('obsolete', pushmarker, listmarkers)
### Discovery wrapping
#############################
class blist(list, object):
    """silly class to have non False but empty list

    Truthiness is delegated to the ``orig`` attribute (the original
    excluded list, set by wrapfindcommonoutgoing) so an emptied list can
    still evaluate to True when the original was non-empty.
    """
    def __nonzero__(self):
        return bool(len(self.orig))
def wrapfindcommonoutgoing(orig, repo, *args, **kwargs):
    """wrap mercurial.discovery.findcommonoutgoing to remove extinct changesets

    Extinct changesets are filtered out of ``outgoing.excluded`` so they do
    not appear as excluded secret changesets.
    """
    outgoing = orig(repo, *args, **kwargs)
    orig = outgoing.excluded
    outgoing.excluded = blist(n for n in orig if not repo[n].extinct())
    # when no revision is specified (push everything) a shortcut is taken when
    # nothing was excluded. Taking this code path when extinct changesets have
    # been excluded leads to repository corruption, so blist keeps the
    # original list's truthiness through its ``orig`` attribute.
    outgoing.excluded.orig = orig
    return outgoing
def wrapcheckheads(orig, repo, remote, outgoing, *args, **kwargs):
    """wrap mercurial.discovery.checkheads

    * prevent unstable/obsolete/latecomer/conflicting changesets from
      being pushed
    * temporarily patch the remote peer so obsolete heads on the remote
      are replaced by their successors in its branch map
    """
    # do not push instability
    for h in outgoing.missingheads:
        # checking heads only is enough because anything based on an obsolete
        # changeset is either obsolete or unstable.
        ctx = repo[h]
        hint = _("use 'hg stabilize' to get a stable history (or --force to proceed)")
        if ctx.unstable():
            raise util.Abort(_("Trying to push unstable changeset: %s!") % ctx,
                             hint=hint)
        if ctx.obsolete():
            raise util.Abort(_("Trying to push obsolete changeset: %s!") % ctx,
                             hint=hint)
        if ctx.latecomer():
            raise util.Abort(_("Trying to push latecomer changeset: %s!") % ctx,
                             hint=hint)
        if ctx.conflicting():
            raise util.Abort(_("Trying to push conflicting changeset: %s!") % ctx,
                             hint=hint)
    ### patch remote branch map
    # warning: monkey-patching of the remote peer below; restored in finally
    try:
        if 'oldbranchmap' not in vars(remote):
            remote.oldbranchmap = remote.branchmap
        def branchmap():
            # rebuild the remote branch map, replacing every obsolete node
            # by its known successors (transitively)
            newbm = {}
            oldbm = None
            if (util.safehasattr(phases, 'visiblebranchmap')
                and not util.safehasattr(remote, 'ignorevisiblebranchmap')
               ):
                remote.ignorevisiblebranchmap = False
                remote.branchmap = remote.oldbranchmap
                oldbm = phases.visiblebranchmap(remote)
                remote.branchmap = remote.newbranchmap
                remote.ignorevisiblebranchmap = True
            if oldbm is None:
                oldbm = remote.oldbranchmap()
            for branch, nodes in oldbm.iteritems():
                nodes = list(nodes)
                new = set()
                while nodes:
                    n = nodes.pop()
                    if n in repo.obsstore.precursors:
                        markers = repo.obsstore.precursors[n]
                        for mark in markers:
                            for newernode in mark[1]:
                                if newernode is not None:
                                    nodes.append(newernode)
                    else:
                        new.add(n)
                if new:
                    newbm[branch] = list(new)
            return newbm
        remote.ignorevisiblebranchmap = True
        remote.branchmap = branchmap
        remote.newbranchmap = branchmap
        return orig(repo, remote, outgoing, *args, **kwargs)
    finally:
        remote.__dict__.pop('branchmap', None) # restore class one
        remote.__dict__.pop('oldbranchmap', None)
        remote.__dict__.pop('newbranchmap', None)
        remote.__dict__.pop('ignorevisiblebranchmap', None)
# eye are still burning
def wrapvisiblebranchmap(orig, repo):
    """wrapped phases.visiblebranchmap honoring the patching flag

    The ``ignorevisiblebranchmap`` attribute is installed on remote
    peers by wrapcheckheads; when absent, behave as the original.
    """
    flag = getattr(repo, 'ignorevisiblebranchmap', None)
    if flag is None:
        return orig(repo)
    if flag:
        return repo.branchmap()
    return None # break recursion
def wrapclearcache(orig, repo, *args, **kwargs):
    """wrapper for phases.advanceboundary that clears obsolete caches

    The cache is cleared even when the wrapped call raises."""
    try:
        return orig(repo, *args, **kwargs)
    finally:
        repo._clearobsoletecache()
### New commands
#############################
# command table collecting the debug commands defined below
cmdtable = {}
command = cmdutil.command(cmdtable)
@command('debugobsolete', [], _('SUBJECT OBJECT'))
def cmddebugobsolete(ui, repo, subject, object):
    """add an obsolete relation between two nodes

    The subject is expected to be a newer version of the object.
    """
    # NOTE(review): 'object' shadows the builtin; kept for CLI compatibility
    lock = repo.lock()
    try:
        sub = repo[subject]
        obj = repo[object]
        repo.addobsolete(sub.node(), obj.node())
    finally:
        lock.release()
    return 0
@command('debugconvertobsolete', [], '')
def cmddebugconvertobsolete(ui, repo):
    """import markers from an .hg/obsolete-relations file

    Converts both legacy formats (the plain-text obsolete-relations file
    and the json obsoletemarkers file) into the binary obsstore, then
    removes the legacy file. Prints a conversion summary.
    """
    cnt = 0  # markers successfully converted
    err = 0  # markers that failed conversion
    l = repo.lock()
    some = False  # whether any legacy data was found at all
    try:
        # flag read by the obsstore property to bypass the
        # "old format detected" abort during conversion
        repo._importoldobsolete = True
        store = repo.obsstore
        ### very first format: one "<subject-hex> <object-hex>" pair per line
        try:
            f = repo.opener('obsolete-relations')
            try:
                some = True
                for line in f:
                    subhex, objhex = line.split()
                    suc = bin(subhex)
                    prec = bin(objhex)
                    # a null subject means the changeset was dropped
                    sucs = (suc==nullid) and [] or [suc]
                    meta = {
                        'date': '%i %i' % util.makedate(),
                        'user': ui.username(),
                        }
                    try:
                        store.create(prec, sucs, 0, meta)
                        cnt += 1
                    except ValueError:
                        repo.ui.write_err("invalid old marker line: %s"
                                          % (line))
                        err += 1
            finally:
                f.close()
            util.unlink(repo.join('obsolete-relations'))
        except IOError:
            pass  # no legacy file: nothing to convert for this format
        ### second (json) format
        data = repo.sopener.tryread('obsoletemarkers')
        if data:
            some = True
            for oldmark in json.loads(data):
                del oldmark['id'] # dropped for now
                del oldmark['reason'] # unused until then
                oldobject = str(oldmark.pop('object'))
                oldsubjects = [str(s) for s in oldmark.pop('subjects', [])]
                LOOKUP_ERRORS = (error.RepoLookupError, error.LookupError)
                # identifiers that are not full 40-char hex may be local
                # revision identifiers; try to resolve them
                if len(oldobject) != 40:
                    try:
                        oldobject = repo[oldobject].node()
                    except LOOKUP_ERRORS:
                        pass
                if any(len(s) != 40 for s in oldsubjects):
                    try:
                        oldsubjects = [repo[s].node() for s in oldsubjects]
                    except LOOKUP_ERRORS:
                        pass
                oldmark['date'] = '%i %i' % tuple(oldmark['date'])
                meta = dict((k.encode('utf-8'), v.encode('utf-8'))
                             for k, v in oldmark.iteritems())
                try:
                    succs = [bin(n) for n in oldsubjects]
                    succs = [n for n in succs if n != nullid]
                    store.create(bin(oldobject), succs,
                                 0, meta)
                    cnt += 1
                except ValueError:
                    repo.ui.write_err("invalid marker %s -> %s\n"
                                      % (oldobject, oldsubjects))
                    err += 1
            util.unlink(repo.sjoin('obsoletemarkers'))
    finally:
        del repo._importoldobsolete
        l.release()
    if not some:
        ui.warn('nothing to do\n')
    ui.status('%i obsolete marker converted\n' % cnt)
    if err:
        ui.write_err('%i conversion failed. check you graph!\n' % err)
@command('debugsuccessors', [], '')
def cmddebugsuccessors(ui, repo):
    """dump obsolete changesets and their successors

    Each line matches an existing marker: the first identifier is the
    obsolete changeset, followed by its successors.
    """
    lock = repo.lock()
    try:
        allsuccessors = repo.obsstore.precursors
        for old in sorted(allsuccessors):
            # one output line per marker group, successors sorted for
            # stable output
            successors = [sorted(m[1]) for m in allsuccessors[old]]
            for i, group in enumerate(sorted(successors)):
                ui.write('%s' % short(old))
                for new in group:
                    ui.write(' %s' % short(new))
                ui.write('\n')
    finally:
        lock.release()
### Altering existing command
#############################
def wrapmayobsoletewc(origfn, ui, repo, *args, **opts):
    """run the command, then warn if the working directory parent ended
    up obsolete"""
    result = origfn(ui, repo, *args, **opts)
    wdparent = repo['.']
    if wdparent.obsolete():
        ui.warn(_('Working directory parent is obsolete\n'))
    return result
def warnobserrors(orig, ui, repo, *args, **kwargs):
    """display a warning if the command created more troubled changesets

    Counts unstable/latecomer/conflicting changesets before and after the
    wrapped command; the comparison runs in a finally block so partial
    results are reported even when the command aborts.
    """
    priorunstables = len(repo.revs('unstable()'))
    priorlatecomers = len(repo.revs('latecomer()'))
    priorconflictings = len(repo.revs('conflicting()'))
    #print orig, priorunstables
    #print len(repo.revs('secret() - obsolete()'))
    try:
        return orig(ui, repo, *args, **kwargs)
    finally:
        newunstables = len(repo.revs('unstable()')) - priorunstables
        newlatecomers = len(repo.revs('latecomer()')) - priorlatecomers
        newconflictings = len(repo.revs('conflicting()')) - priorconflictings
        #print orig, newunstables
        #print len(repo.revs('secret() - obsolete()'))
        if newunstables > 0:
            ui.warn(_('%i new unstables changesets\n') % newunstables)
        if newlatecomers > 0:
            ui.warn(_('%i new latecomers changesets\n') % newlatecomers)
        if newconflictings > 0:
            ui.warn(_('%i new conflictings changesets\n') % newconflictings)
def noextinctsvisibleheads(orig, repo):
    # turn extinct changesets secret first so they drop out of the heads
    # computed by the wrapped phases.visibleheads
    repo._turn_extinct_secret()
    return orig(repo)
def wrapcmdutilamend(orig, ui, repo, commitfunc, old, *args, **kwargs):
    """wrap cmdutil.amend to mark the pre-amend changeset obsolete

    A marker <old> -> <new> is recorded whenever the amend actually
    produced a different node.
    """
    oldnode = old.node()
    new = orig(ui, repo, commitfunc, old, *args, **kwargs)
    if new != oldnode:
        lock = repo.lock()
        try:
            # NOTE(review): 'subjects'/'object'/'reason' mirror the old
            # dict-marker fields; the binary store only encodes them as
            # opaque metadata
            meta = {
                'subjects': [new],
                'object': oldnode,
                'date': util.makedate(),
                'user': ui.username(),
                'reason': 'commit --amend',
                }
            repo.obsstore.create(oldnode, [new], 0, meta)
            repo._clearobsoletecache()
            repo._turn_extinct_secret()
        finally:
            lock.release()
    return new
def uisetup(ui):
    """wrap core commands and discovery/phase functions at startup"""
    extensions.wrapcommand(commands.table, "update", wrapmayobsoletewc)
    extensions.wrapcommand(commands.table, "pull", wrapmayobsoletewc)
    # cmdutil.amend only exists on Mercurial versions with commit --amend
    if util.safehasattr(cmdutil, 'amend'):
        extensions.wrapfunction(cmdutil, 'amend', wrapcmdutilamend)
    extensions.wrapfunction(discovery, 'findcommonoutgoing', wrapfindcommonoutgoing)
    extensions.wrapfunction(discovery, 'checkheads', wrapcheckheads)
    extensions.wrapfunction(phases, 'visibleheads', noextinctsvisibleheads)
    extensions.wrapfunction(phases, 'advanceboundary', wrapclearcache)
    if util.safehasattr(phases, 'visiblebranchmap'):
        extensions.wrapfunction(phases, 'visiblebranchmap', wrapvisiblebranchmap)
### serialisation
#############################
def _obsserialise(obssubrels, flike):
    """serialise an obsolete relation mapping in a plain text one

    this is for a subject -> [objects] mapping; a None subject is written
    as the null node

    format is::

        <subject-full-hex> <object-full-hex>\n"""
    for sub, objs in obssubrels.iteritems():
        for obj in objs:
            if sub is None:
                sub = nullid
            flike.write('%s %s\n' % (hex(sub), hex(obj)))
def _obsdeserialise(flike):
    """read a file like object serialised with _obsserialise

    this deserialises into a {subject -> objects} mapping; the null node
    subject is mapped back to None"""
    rels = {}
    for line in flike:
        subhex, objhex = line.split()
        subnode = bin(subhex)
        if subnode == nullid:
            subnode = None
        rels.setdefault( subnode, set()).add(bin(objhex))
    return rels
### diagnostique tools
#############################
def unstables(repo):
    """Return all unstable changesets: non-obsolete descendants of
    obsolete changesets"""
    return scmutil.revrange(repo, ['obsolete():: and (not obsolete())'])
def newerversion(repo, obs):
    """Return the newer versions of an obsolete changeset

    Follows successor markers transitively and returns a sorted list of
    tuples of successor nodes. An empty tuple means the changeset was
    dropped without replacement.
    """
    toproceed = set([(obs,)])
    # XXX known optimization available
    newer = set()
    objectrels = repo.obsstore.precursors
    while toproceed:
        current = toproceed.pop()
        assert len(current) <= 1, 'splitting not handled yet. %r' % current
        # drop null nodes: they stand for "killed" rather than replaced
        current = [n for n in current if n != nullid]
        if current:
            n, = current
            if n in objectrels:
                # node was itself obsoleted: keep following its successors
                markers = objectrels[n]
                for mark in markers:
                    toproceed.add(tuple(mark[1]))
            else:
                newer.add(tuple(current))
        else:
            newer.add(())
    return sorted(newer)
### obsolete relation storage
#############################
def add2set(d, key, mark):
    """append <mark> to the list at <d>[<key>], creating it if needed

    Despite the name, the container is a list, not a set."""
    if key not in d:
        d[key] = []
    d[key].append(mark)
def markerid(marker):
    """Return a stable sha1 digest identifying a dict-style marker

    All mandatory keys must be present; the optional 'id' key never
    contributes to the digest.
    """
    KEYS = ['subjects', "object", "date", "user", "reason"]
    for key in KEYS:
        assert key in marker
    keys = sorted(marker.keys())
    a = util.sha1()
    for key in keys:
        if key == 'subjects':
            # hash subjects in sorted order for stability
            for sub in sorted(marker[key]):
                a.update(sub)
        elif key == 'id':
            pass
        else:
            a.update(str(marker[key]))
        # field separator so adjacent values cannot collide
        a.update('\0')
    return a.digest()
# mercurial backport
def encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Keys may not contain ':' or '\0'; values may not contain '\0'.
    The result is '\0'-joined 'key:value' pairs in sorted key order.
    """
    # .items() instead of .iteritems(): same iteration in py2, py3-safe
    for key, value in meta.items():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata key'")
        if '\0' in value:
            # bug fix: the message wrongly blamed ':' for the value check
            raise ValueError("'\0' is forbidden in metadata value'")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
def decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            # bug fix: split on the first ':' only — values may contain
            # ':' (encodemeta only forbids it in keys), and a bare
            # split(':') raised ValueError on such entries
            key, value = l.split(':', 1)
            d[key] = value
    return d
# data used for parsing and writing
_fmversion = 0  # on-disk format version written as the first byte
# fixed marker part: nb-successors (B), metadata size (I), flags (B),
# precursor node (20s) — matches the unpack order in _readmarkers
_fmfixed = '>BIB20s'
_fmnode = '20s'  # one binary sha1 node
_fmfsize = struct.calcsize(_fmfixed)
_fnodesize = struct.calcsize(_fmnode)
def _readmarkers(data):
    """Read and enumerate markers from raw data

    Yields (precursor, successors, flags, metadata) tuples; the metadata
    is left encoded and decoded on demand by callers. Aborts on an
    unknown format version or truncated metadata.
    """
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion != _fmversion:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)
    # Loop on markers
    l = len(data)
    while off + _fmfsize <= l:
        # read fixed part
        cur = data[off:off + _fmfsize]
        off += _fmfsize
        nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
        # read replacement
        sucs = ()
        if nbsuc:
            s = (_fnodesize * nbsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fmnode * nbsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            # bug fix: the expected/got values were swapped in the message
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        yield (pre, sucs, flags, metadata)
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - precursors: old -> set(markers obsoleting old)
    - successors: new -> set(markers where new is a successor)
    """

    def __init__(self):
        self._all = []
        # new markers to serialize
        self._new = []
        self.precursors = {}
        self.successors = {}

    def __iter__(self):
        return iter(self._all)

    def __nonzero__(self):
        # py2 truthiness: the store is true when it holds any marker
        return bool(self._all)

    def create(self, prec, succs=(), flag=0, metadata=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata
        """
        if metadata is None:
            metadata = {}
        if len(prec) != 20:
            raise ValueError(repr(prec))
        for succ in succs:
            if len(succ) != 20:
                # bug fix: report the offending successor instead of the
                # whole successor tuple
                raise ValueError(repr(succ))
        marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
        self.add(marker)

    def add(self, marker):
        """Add a new marker to the store

        This marker still needs to be written to disk"""
        self._new.append(marker)
        self._load(marker)

    def loadmarkers(self, data):
        """Load all markers in data, mark them as known."""
        for marker in _readmarkers(data):
            self._load(marker)

    def mergemarkers(self, data):
        # merge markers read from <data>, skipping already-known ones
        other = set(_readmarkers(data))
        local = set(self._all)
        new = other - local
        for marker in new:
            self.add(marker)

    def flushmarkers(self, stream):
        """Write all markers to a stream

        After this operation, "new" markers are considered "known"."""
        self._writemarkers(stream)
        self._new[:] = []

    def _load(self, marker):
        # index the marker in both direction mappings
        self._all.append(marker)
        pre, sucs = marker[:2]
        self.precursors.setdefault(pre, set()).add(marker)
        for suc in sucs:
            self.successors.setdefault(suc, set()).add(marker)

    def _writemarkers(self, stream=None):
        # Kept separate from flushmarkers(), it will be reused for
        # markers exchange. Returns the serialized string when no stream
        # is given, otherwise writes to the stream and returns None.
        if stream is None:
            final = []
            w = final.append
        else:
            w = stream.write
        w(_pack('>B', _fmversion))
        for marker in self._all:
            pre, sucs, flags, metadata = marker
            nbsuc = len(sucs)
            format = _fmfixed + (_fmnode * nbsuc)
            data = [nbsuc, len(metadata), flags, pre]
            data.extend(sucs)
            w(_pack(format, *data))
            w(metadata)
        if stream is None:
            return ''.join(final)
### repo subclassing
#############################
def reposetup(ui, repo):
    """install the obsolete-aware repository subclass on local repos

    Captures the original (unwrapped) bound methods in closure variables,
    then swaps ``repo.__class__`` for a subclass adding obsolescence
    storage, caches, exchange and rollback support.
    """
    if not repo.local():
        return
    if not util.safehasattr(repo.opener, 'tryread'):
        raise util.Abort('Obsolete extension require Mercurial 2.2 (or later)')

    # original bound methods, used by the subclass below
    opull = repo.pull
    opush = repo.push
    olock = repo.lock
    o_rollback = repo._rollback
    o_updatebranchcache = repo.updatebranchcache

    # /!\ api change in Hg 2.2 (97efd26eb9576f39590812ea9) /!\
    if util.safehasattr(repo, '_journalfiles'): # Hg 2.2
        o_journalfiles = repo._journalfiles
    o_writejournal = repo._writejournal

    class obsoletingrepo(repo.__class__):

        ### Public method
        def obsoletedby(self, node):
            """return the set of nodes that make <node> obsolete (obj)"""
            others = set()
            for marker in self.obsstore.precursors.get(node, []):
                others.update(marker[1])
            return others

        def obsolete(self, node):
            """return the set of nodes that <node> makes obsolete (sub)"""
            return set(marker[0] for marker
                       in self.obsstore.successors.get(node, []))

        @storecache('obsstore')
        def obsstore(self):
            # refuse to run over legacy on-disk data unless we are in the
            # middle of converting it (see cmddebugconvertobsolete)
            if not getattr(self, '_importoldobsolete', False):
                data = repo.opener.tryread('obsolete-relations')
                if not data:
                    data = repo.sopener.tryread('obsoletemarkers')
                if data:
                    raise util.Abort('old format of obsolete marker detected!\n'
                                     'run `hg debugconvertobsolete` once.')
            store = obsstore()
            data = self.sopener.tryread('obsstore')
            if data:
                store.loadmarkers(data)
            return store

        @util.propertycache
        def _obsoleteset(self):
            """the set of obsolete revisions"""
            obs = set()
            nm = self.changelog.nodemap
            for obj in self.obsstore.precursors:
                try: # /!\api change in Hg 2.2 (e8d37b78acfb22ae2c1fb126c2)/!\
                    rev = nm.get(obj)
                except TypeError: #XXX to remove while breaking Hg 2.1 support
                    rev = nm.get(obj, None)
                if rev is not None:
                    obs.add(rev)
            return obs

        @util.propertycache
        def _unstableset(self):
            """the set of non obsolete revisions with an obsolete ancestor"""
            return set(self.revs('(obsolete()::) - obsolete()'))

        @util.propertycache
        def _suspendedset(self):
            """the set of obsolete revisions with non obsolete descendants"""
            return set(self.revs('obsolete() and obsolete()::unstable()'))

        @util.propertycache
        def _extinctset(self):
            """the set of obsolete revisions without non obsolete descendants"""
            return set(self.revs('obsolete() - obsolete()::unstable()'))

        @util.propertycache
        def _latecomerset(self):
            """the set of revs trying to obsolete public revisions"""
            query = 'allsuccessors(public()) - obsolete() - public()'
            return set(self.revs(query))

        @util.propertycache
        def _conflictingset(self):
            """the set of revs competing with another successor for the
            same precursor"""
            conflicting = set()
            obsstore = self.obsstore
            newermap = {}
            for ctx in self.set('(not public()) - obsolete()'):
                prec = obsstore.successors.get(ctx.node(), ())
                toprocess = set(prec)
                while toprocess:
                    prec = toprocess.pop()[0]
                    if prec not in newermap:
                        newermap[prec] = newerversion(self, prec)
                    newer = [n for n in newermap[prec] if n] # filter kill
                    if len(newer) > 1:
                        conflicting.add(ctx.rev())
                        break
                    toprocess.update(obsstore.successors.get(prec, ()))
            return conflicting

        def _clearobsoletecache(self):
            # drop the cached property values; they are recomputed lazily
            if '_obsoleteset' in vars(self):
                del self._obsoleteset
            self._clearunstablecache()

        def updatebranchcache(self):
            o_updatebranchcache()
            self._clearunstablecache()

        def _clearunstablecache(self):
            if '_unstableset' in vars(self):
                del self._unstableset
            if '_suspendedset' in vars(self):
                del self._suspendedset
            if '_extinctset' in vars(self):
                del self._extinctset
            if '_latecomerset' in vars(self):
                del self._latecomerset
            if '_conflictingset' in vars(self):
                del self._conflictingset

        def addobsolete(self, sub, obj):
            """Add a relation marking that node <sub> is a new version of <obj>"""
            assert sub != obj
            if not repo[obj].phase():
                # warn (but proceed) when obsoleting an immutable changeset
                if sub is None:
                    self.ui.warn(
                        _("trying to kill immutable changeset %(obj)s\n")
                        % {'obj': short(obj)})
                if sub is not None:
                    self.ui.warn(
                        _("%(sub)s try to obsolete immutable changeset %(obj)s\n")
                        % {'sub': short(sub), 'obj': short(obj)})
            lock = self.lock()
            try:
                meta = {
                    'date': util.makedate(),
                    'user': ui.username(),
                    'reason': 'unknown',
                    }
                # a null sub means <obj> is dropped without replacement
                subs = (sub == nullid) and [] or [sub]
                # NOTE(review): obsstore.create returns None, so mid is
                # always None here
                mid = self.obsstore.create(obj, subs, 0, meta)
                self._clearobsoletecache()
                self._turn_extinct_secret()
                return mid
            finally:
                lock.release()

        def addcollapsedobsolete(self, oldnodes, newnode):
            """Mark oldnodes as collapsed into newnode."""
            # Assume oldnodes are all descendants of a single rev
            rootrevs = self.revs('roots(%ln)', oldnodes)
            assert len(rootrevs) == 1, rootrevs
            rootnode = self[rootrevs[0]].node()
            for n in oldnodes:
                self.addobsolete(newnode, n)

        def _turn_extinct_secret(self):
            """ensure all extinct changesets are secret"""
            self._clearobsoletecache()
            # this is mainly for safety purpose
            # both pull and push
            # NOTE(review): uses the closed-over `repo` rather than self
            query = '(obsolete() - obsolete()::(unstable() - secret())) - secret()'
            expobs = [c.node() for c in repo.set(query)]
            phases.retractboundary(repo, 2, expobs)

        ### Disk IO
        def lock(self, *args, **kwargs):
            # patch the lock's releasefn (once) so pending markers are
            # flushed to .hg/store/obsstore when the lock is released
            l = olock(*args, **kwargs)
            if not getattr(l.releasefn, 'obspatched', False):
                oreleasefn = l.releasefn
                def releasefn(*args, **kwargs):
                    if 'obsstore' in vars(self) and self.obsstore._new:
                        f = self.sopener('obsstore', 'wb', atomictemp=True)
                        try:
                            self.obsstore.flushmarkers(f)
                            f.close()
                        except: # re-raises
                            f.discard()
                            raise
                    oreleasefn(*args, **kwargs)
                releasefn.obspatched = True
                l.releasefn = releasefn
            return l

        ### pull // push support
        def pull(self, remote, *args, **kwargs):
            """wrapper around pull that also pulls obsolete relations"""
            l = repo.lock()
            try:
                result = opull(remote, *args, **kwargs)
                remoteobs = remote.listkeys('obsolete')
                if 'dump' in remoteobs:
                    data = base85.b85decode(remoteobs['dump'])
                    self.obsstore.mergemarkers(data)
                    self._clearobsoletecache()
                self._turn_extinct_secret()
                return result
            finally:
                l.release()

        def push(self, remote, *args, **opts):
            """wrapper around push that also pushes obsolete relations"""
            self._turn_extinct_secret()
            result = opush(remote, *args, **opts)
            if 'obsolete' in remote.listkeys('namespaces') and self.obsstore:
                data = self.obsstore._writemarkers()
                r = remote.pushkey('obsolete', 'dump', '',
                                   base85.b85encode(data))
                if not r:
                    self.ui.warn(_('failed to push obsolete markers!\n'))
            self._turn_extinct_secret()
            return result

        ### rollback support

        # /!\ api change in Hg 2.2 (97efd26eb9576f39590812ea9) /!\
        if util.safehasattr(repo, '_journalfiles'): # Hg 2.2
            def _journalfiles(self):
                # include the obsstore copy among the journal files
                return o_journalfiles() + (self.sjoin('journal.obsstore'),)

            def _writejournal(self, desc):
                """wrapped version of _writejournal that saves obsolete data"""
                o_writejournal(desc)
                filename = 'obsstore'
                filepath = self.sjoin(filename)
                if os.path.exists(filepath):
                    journalname = 'journal.' + filename
                    journalpath = self.sjoin(journalname)
                    util.copyfile(filepath, journalpath)
        else: # XXX removing this bloc will break Hg 2.1 support
            def _writejournal(self, desc):
                """wrapped version of _writejournal that saves obsolete data"""
                entries = list(o_writejournal(desc))
                filename = 'obsstore'
                filepath = self.sjoin(filename)
                if os.path.exists(filepath):
                    journalname = 'journal.' + filename
                    journalpath = self.sjoin(journalname)
                    util.copyfile(filepath, journalpath)
                    entries.append(journalpath)
                return tuple(entries)

        def _rollback(self, dryrun, force):
            """wrapped version of _rollback that restores obsolete data"""
            ret = o_rollback(dryrun, force)
            if not (ret or dryrun): #rollback did not failed
                src = self.sjoin('undo.obsstore')
                dst = self.sjoin('obsstore')
                if os.path.exists(src):
                    util.rename(src, dst)
                elif os.path.exists(dst):
                    # If no state was saved because the file did not existed before.
                    os.unlink(dst)
                # invalidate cache
                self.__dict__.pop('obsstore', None)
            return ret

        @storecache('00changelog.i')
        def changelog(self):
            # << copy pasted from mercurial source
            c = changelog.changelog(self.sopener)
            if 'HG_PENDING' in os.environ:
                p = os.environ['HG_PENDING']
                if p.startswith(self.root):
                    c.readpending('00changelog.i.a')
            # >> end of the copy paste
            old = c.__dict__.pop('hiddenrevs', ())
            if old:
                ui.warn("old wasn't empty ? %r" % old)
            def _sethidden(c, value):
                assert not value
            class hchangelog(c.__class__):
                @util.propertycache
                def hiddenrevs(c):
                    # hide obsolete changesets unless an ancestor of
                    # something we must keep visible
                    shown = ['not obsolete()', '.', 'bookmark()', 'tagged()',
                             'public()']
                    basicquery = 'obsolete() - (::(%s))' % (' or '.join(shown))
                    # !!! self is repo not changelog
                    result = set(scmutil.revrange(self, [basicquery]))
                    return result
            c.__class__ = hchangelog
            return c
    repo.__class__ = obsoletingrepo