# HG changeset patch
# User Pierre-Yves David
# Date 1533248387 -7200
# Node ID 9af0e2b0bdd72a8c70a1cf296129747ab7e80c1d
# Parent  d55a6fef9eb9cdf933197e53029d80d7b7c1010e
# Parent  d00f0c369bc7d72cbab752ef8f612c3c5c7ffed2
test-compat: merge mercurial-4.5 into mercurial-4.4

diff -r d00f0c369bc7 -r 9af0e2b0bdd7 hgext3rd/evolve/compat.py
--- a/hgext3rd/evolve/compat.py	Thu Aug 02 02:47:44 2018 +0200
+++ b/hgext3rd/evolve/compat.py	Fri Aug 03 00:19:47 2018 +0200
@@ -11,6 +11,7 @@
 from mercurial import (
     context,
     copies,
+    encoding,
     mdiff,
     obsolete,
     obsutil,
@@ -453,5 +454,333 @@
     return copy, movewithdir, diverge, renamedelete, dirmove

+# code imported from Mercurial core at 4.3 + patch
+def fixoldmergecopies(repo, c1, c2, base):
+
+    from mercurial import pathutil
+
+    # avoid silly behavior for update from empty dir
+    if not c1 or not c2 or c1 == c2:
+        return {}, {}, {}, {}, {}
+
+    # avoid silly behavior for parent -> working dir
+    if c2.node() is None and c1.node() == repo.dirstate.p1():
+        return repo.dirstate.copies(), {}, {}, {}, {}
+
+    # Copy trace disabling is explicitly below the node == p1 logic above
+    # because the logic above is required for a simple copy to be kept across a
+    # rebase.
+    if repo.ui.configbool('experimental', 'disablecopytrace'):
+        return {}, {}, {}, {}, {}
+
+    # In certain scenarios (e.g. graft, update or rebase), base can be
+    # overridden We still need to know a real common ancestor in this case We
+    # can't just compute _c1.ancestor(_c2) and compare it to ca, because there
+    # can be multiple common ancestors, e.g. in case of bidmerge. Because our
+    # caller may not know if the revision passed in lieu of the CA is a genuine
+    # common ancestor or not without explicitly checking it, it's better to
+    # determine that here.
+    #
+    # base.descendant(wc) and base.descendant(base) are False, work around that
+    _c1 = c1.p1() if c1.rev() is None else c1
+    _c2 = c2.p1() if c2.rev() is None else c2
+    # an endpoint is "dirty" if it isn't a descendant of the merge base
+    # if we have a dirty endpoint, we need to trigger graft logic, and also
+    # keep track of which endpoint is dirty
+    dirtyc1 = not (base == _c1 or base.descendant(_c1))
+    dirtyc2 = not (base == _c2 or base.descendant(_c2))
+    graft = dirtyc1 or dirtyc2
+    tca = base
+    if graft:
+        tca = _c1.ancestor(_c2)
+
+    limit = copies._findlimit(repo, c1.rev(), c2.rev())
+    if limit is None:
+        # no common ancestor, no copies
+        return {}, {}, {}, {}, {}
+    repo.ui.debug("  searching for copies back to rev %d\n" % limit)
+
+    m1 = c1.manifest()
+    m2 = c2.manifest()
+    mb = base.manifest()
+
+    # gather data from _checkcopies:
+    # - diverge = record all diverges in this dict
+    # - copy = record all non-divergent copies in this dict
+    # - fullcopy = record all copies in this dict
+    # - incomplete = record non-divergent partial copies here
+    # - incompletediverge = record divergent partial copies here
+    diverge = {} # divergence data is shared
+    incompletediverge = {}
+    data1 = {'copy': {},
+             'fullcopy': {},
+             'incomplete': {},
+             'diverge': diverge,
+             'incompletediverge': incompletediverge,
+             }
+    data2 = {'copy': {},
+             'fullcopy': {},
+             'incomplete': {},
+             'diverge': diverge,
+             'incompletediverge': incompletediverge,
+             }
+
+    # find interesting file sets from manifests
+    addedinm1 = m1.filesnotin(mb)
+    addedinm2 = m2.filesnotin(mb)
+    bothnew = sorted(addedinm1 & addedinm2)
+    if tca == base:
+        # unmatched file from base
+        u1r, u2r = copies._computenonoverlap(repo, c1, c2, addedinm1, addedinm2)
+        u1u, u2u = u1r, u2r
+    else:
+        # unmatched file from base (DAG rotation in the graft case)
+        u1r, u2r = copies._computenonoverlap(repo, c1, c2, addedinm1, addedinm2,
+                                             baselabel='base')
+        # unmatched file from topological common ancestors (no DAG rotation)
+        # need to recompute this for directory move handling when grafting
+        mta = tca.manifest()
+        u1u, u2u = copies._computenonoverlap(repo, c1, c2, m1.filesnotin(mta),
+                                             m2.filesnotin(mta),
+                                             baselabel='topological common ancestor')
+
+    for f in u1u:
+        copies._checkcopies(c1, c2, f, base, tca, dirtyc1, limit, data1)
+
+    for f in u2u:
+        copies._checkcopies(c2, c1, f, base, tca, dirtyc2, limit, data2)
+
+    copy = dict(data1['copy'])
+    copy.update(data2['copy'])
+    fullcopy = dict(data1['fullcopy'])
+    fullcopy.update(data2['fullcopy'])
+
+    if dirtyc1:
+        copies._combinecopies(data2['incomplete'], data1['incomplete'], copy, diverge,
+                              incompletediverge)
+    else:
+        copies._combinecopies(data1['incomplete'], data2['incomplete'], copy, diverge,
+                              incompletediverge)
+
+    renamedelete = {}
+    renamedeleteset = set()
+    divergeset = set()
+    for of, fl in diverge.items():
+        if len(fl) == 1 or of in c1 or of in c2:
+            del diverge[of] # not actually divergent, or not a rename
+            if of not in c1 and of not in c2:
+                # renamed on one side, deleted on the other side, but filter
+                # out files that have been renamed and then deleted
+                renamedelete[of] = [f for f in fl if f in c1 or f in c2]
+                renamedeleteset.update(fl) # reverse map for below
+        else:
+            divergeset.update(fl) # reverse map for below
+
+    if bothnew:
+        repo.ui.debug("  unmatched files new in both:\n   %s\n"
+                      % "\n   ".join(bothnew))
+    bothdiverge = {}
+    bothincompletediverge = {}
+    remainder = {}
+    both1 = {'copy': {},
+             'fullcopy': {},
+             'incomplete': {},
+             'diverge': bothdiverge,
+             'incompletediverge': bothincompletediverge
+             }
+    both2 = {'copy': {},
+             'fullcopy': {},
+             'incomplete': {},
+             'diverge': bothdiverge,
+             'incompletediverge': bothincompletediverge
+             }
+    for f in bothnew:
+        copies._checkcopies(c1, c2, f, base, tca, dirtyc1, limit, both1)
+        copies._checkcopies(c2, c1, f, base, tca, dirtyc2, limit, both2)
+    if dirtyc1 and dirtyc2:
+        pass
+    elif dirtyc1:
+        # incomplete copies may only be found on the "dirty" side for bothnew
+        assert not both2['incomplete']
+        remainder = copies._combinecopies({}, both1['incomplete'], copy, bothdiverge,
+                                          bothincompletediverge)
+    elif dirtyc2:
+        assert not both1['incomplete']
+        remainder = copies._combinecopies({}, both2['incomplete'], copy, bothdiverge,
+                                          bothincompletediverge)
+    else:
+        # incomplete copies and divergences can't happen outside grafts
+        assert not both1['incomplete']
+        assert not both2['incomplete']
+        assert not bothincompletediverge
+    for f in remainder:
+        assert f not in bothdiverge
+        ic = remainder[f]
+        if ic[0] in (m1 if dirtyc1 else m2):
+            # backed-out rename on one side, but watch out for deleted files
+            bothdiverge[f] = ic
+    for of, fl in bothdiverge.items():
+        if len(fl) == 2 and fl[0] == fl[1]:
+            copy[fl[0]] = of # not actually divergent, just matching renames
+
+    if fullcopy and repo.ui.debugflag:
+        repo.ui.debug("  all copies found (* = to merge, ! = divergent, "
+                      "% = renamed and deleted):\n")
+        for f in sorted(fullcopy):
+            note = ""
+            if f in copy:
+                note += "*"
+            if f in divergeset:
+                note += "!"
+            if f in renamedeleteset:
+                note += "%"
+            repo.ui.debug("   src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
+                                                              note))
+    del divergeset
+
+    if not fullcopy:
+        return copy, {}, diverge, renamedelete, {}
+
+    repo.ui.debug("  checking for directory renames\n")
+
+    # generate a directory move map
+    d1, d2 = c1.dirs(), c2.dirs()
+    # Hack for adding '', which is not otherwise added, to d1 and d2
+    d1.addpath('/')
+    d2.addpath('/')
+    invalid = set()
+    dirmove = {}
+
+    # examine each file copy for a potential directory move, which is
+    # when all the files in a directory are moved to a new directory
+    for dst, src in fullcopy.iteritems():
+        dsrc, ddst = pathutil.dirname(src), pathutil.dirname(dst)
+        if dsrc in invalid:
+            # already seen to be uninteresting
+            continue
+        elif dsrc in d1 and ddst in d1:
+            # directory wasn't entirely moved locally
+            invalid.add(dsrc + "/")
+        elif dsrc in d2 and ddst in d2:
+            # directory wasn't entirely moved remotely
+            invalid.add(dsrc + "/")
+        elif dsrc + "/" in dirmove and dirmove[dsrc + "/"] != ddst + "/":
+            # files from the same directory moved to two different places
+            invalid.add(dsrc + "/")
+        else:
+            # looks good so far
+            dirmove[dsrc + "/"] = ddst + "/"
+
+    for i in invalid:
+        if i in dirmove:
+            del dirmove[i]
+    del d1, d2, invalid
+
+    if not dirmove:
+        return copy, {}, diverge, renamedelete, {}
+
+    for d in dirmove:
+        repo.ui.debug("   discovered dir src: '%s' -> dst: '%s'\n" %
+                      (d, dirmove[d]))
+
+    movewithdir = {}
+    # check unaccounted nonoverlapping files against directory moves
+    for f in u1r + u2r:
+        if f not in fullcopy:
+            for d in dirmove:
+                if f.startswith(d):
+                    # new file added in a directory that was moved, move it
+                    df = dirmove[d] + f[len(d):]
+                    if df not in copy:
+                        movewithdir[f] = df
+                        repo.ui.debug(("   pending file src: '%s' -> "
+                                       "dst: '%s'\n") % (f, df))
+                    break
+
+    return copy, movewithdir, diverge, renamedelete, dirmove
+
 if util.safehasattr(copies, '_fullcopytracing'):
     copies._fullcopytracing = fixedcopytracing
+elif util.safehasattr(copies, 'mergecopies'):
+    # compat fix for hg <= 4.3
+    copies.mergecopies = fixoldmergecopies
+
+if not util.safehasattr(obsutil, "_succs"):
+    class _succs(list):
+        """small class to represent a successors with some metadata about it"""
+
+        def __init__(self, *args, **kwargs):
+            super(_succs, self).__init__(*args, **kwargs)
+            self.markers = set()
+
+        def copy(self):
+            new = _succs(self)
+            new.markers = self.markers.copy()
+            return new
+
+        @util.propertycache
+        def _set(self):
+            # immutable
+            return set(self)
+
+        def canmerge(self, other):
+            return self._set.issubset(other._set)
+else:
+    from mercurial.obsutil import _succs
+
+def wrap_succs(succs):
+    """ Wrap old data format of successorsets (tuple) only if it's not yet a
+    _succs instance
+    """
+
+    if not util.safehasattr(succs, "markers"):
+        return _succs(succs)
+    else:
+        return succs
+
+if not util.safehasattr(obsutil, "markersdates"):
+    MARKERS_DATE_COMPAT = True
+else:
+    MARKERS_DATE_COMPAT = False
+
+def markersdates(markers):
+    """returns the list of dates for a list of markers
+    """
+    if MARKERS_DATE_COMPAT is False:
+        return obsutil.markersdates(markers)
+
+    return [m[4] for m in markers]
+
+if not util.safehasattr(obsutil, "markersusers"):
+    MARKERS_USERS_COMPAT = True
+else:
+    MARKERS_USERS_COMPAT = False
+
+def markersusers(markers):
+    """ Returns a sorted list of markers users without duplicates
+    """
+    if MARKERS_USERS_COMPAT is False:
+        return obsutil.markersusers(markers)
+
+    markersmeta = [dict(m[3]) for m in markers]
+    users = set(encoding.tolocal(meta['user']) for meta in markersmeta
+                if meta.get('user'))
+
+    return sorted(users)
+
+if not util.safehasattr(obsutil, "markersoperations"):
+    MARKERS_OPERATIONS_COMPAT = True
+else:
+    MARKERS_OPERATIONS_COMPAT = False
+
+def markersoperations(markers):
+    """ Returns a sorted list of markers operations without duplicates
+    """
+    if MARKERS_OPERATIONS_COMPAT is False:
+        return obsutil.markersoperations(markers)
+
+    markersmeta = [dict(m[3]) for m in markers]
+    operations = set(meta.get('operation') for meta in markersmeta
+                     if meta.get('operation'))
+
+    return sorted(operations)
diff -r d00f0c369bc7 -r 9af0e2b0bdd7 hgext3rd/evolve/obshistory.py
--- a/hgext3rd/evolve/obshistory.py	Thu Aug 02 02:47:44 2018 +0200
+++ b/hgext3rd/evolve/obshistory.py	Fri Aug 03 00:19:47 2018 +0200
@@ -107,7 +107,7 @@
     fullsuccessorsets = [] # successor set + markers
     for sset in ssets:
         if sset:
-            fullsuccessorsets.append(sset)
+            fullsuccessorsets.append(compat.wrap_succs(sset))
         else:
             # successorsset return an empty set() when ctx or one of its
             # successors is pruned.
@@ -123,11 +123,11 @@
             for mark in succsmap.get(ctx.node(), ()):
                 if not mark[1]:
                     foundany = True
-                    sset = obsutil._succs()
+                    sset = compat._succs()
                     sset.markers.add(mark)
                     fullsuccessorsets.append(sset)
             if not foundany:
-                fullsuccessorsets.append(obsutil._succs())
+                fullsuccessorsets.append(compat._succs())

     values = []
     for sset in fullsuccessorsets:
@@ -673,7 +673,7 @@
             label="evolve.node")

    # Operations
-    operations = obsutil.markersoperations(markers)
+    operations = compat.markersoperations(markers)
    if operations:
        fm.plain(' using ')
        fm.write('operation', '%s', ", ".join(operations), label="evolve.operation")
@@ -681,13 +681,13 @@
    fm.plain(' by ')

    # Users
-    users = obsutil.markersusers(markers)
+    users = compat.markersusers(markers)
    fm.write('user', '%s', ", ".join(users),
             label="evolve.user")
    fm.plain(' ')

    # Dates
-    dates = obsutil.markersdates(markers)
+    dates = compat.markersdates(markers)
    if dates:
        min_date = min(dates)
        max_date = max(dates)
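
Note (not part of the patch): every compat shim above follows the same feature-detection pattern, probe the running Mercurial with util.safehasattr and either delegate to the new obsutil helper or recompute the value from raw obsolescence-marker tuples. A minimal standalone sketch of that pattern, assuming only mercurial's obsutil/util modules and using markersusers as the example:

    # Illustrative sketch of the compat pattern used in this patch; the helper
    # name and fallback body mirror the patch but are reduced for clarity.
    from mercurial import obsutil, util

    if util.safehasattr(obsutil, "markersusers"):
        # hg >= 4.4 ships the helper: just delegate.
        def markersusers(markers):
            return obsutil.markersusers(markers)
    else:
        # older hg: rebuild the answer from the raw marker tuples,
        # where m[3] holds the metadata key/value pairs.
        def markersusers(markers):
            meta = [dict(m[3]) for m in markers]
            return sorted(set(m['user'] for m in meta if m.get('user')))

Callers such as obshistory.py then always go through compat.markersusers(markers) instead of obsutil directly, which is exactly what the second file of the diff changes.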