@@ -406 +407 @@
             extensions.wrapfunction(rebase, 'defineparents', defineparents)
             extensions.wrapfunction(rebase, 'concludenode', concludenode)
             extensions.wrapcommand(rebase.cmdtable, "rebase", cmdrebase)
     except KeyError:
         pass # rebase not found
+
+# Pushkey mechanism for mutable
+#########################################
+
+def listmarkers(repo):
+    """List markers over pushkey"""
+    if not repo.obsstore:
+        return {}
+    data = repo.obsstore._writemarkers()
+    return {'dump': base85.b85encode(data)}
+
+def pushmarker(repo, key, old, new):
+    """Push markers over pushkey"""
+    if key != 'dump':
+        repo.ui.warn(_('unknown key: %r') % key)
+        return 0
+    if old:
+        repo.ui.warn(_('unexpected old value'))
+        return 0
+    data = base85.b85decode(new)
+    lock = repo.lock()
+    try:
+        repo.obsstore.mergemarkers(data)
+        return 1
+    finally:
+        lock.release()
+
+pushkey.register('obsolete', pushmarker, listmarkers)
 
 ### Discovery wrapping
 #############################
 
 class blist(list, object):
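The two functions added above are the server half of the marker exchange: listmarkers() answers listkeys('obsolete') with a base85-encoded dump of the store, and pushmarker() accepts such a dump under the single 'dump' key. The pull()/push() overrides later in this patch are the client half; a condensed sketch of that round trip, where repo and remote are stand-ins for a local repository and a peer object:

    # Condensed sketch of the client side of the 'obsolete' pushkey namespace
    # defined above; 'repo' and 'remote' are stand-in objects, not real handles.
    from mercurial import base85

    def pullmarkers(repo, remote):
        remoteobs = remote.listkeys('obsolete')   # served by listmarkers()
        if 'dump' in remoteobs:
            data = base85.b85decode(remoteobs['dump'])
            repo.obsstore.mergemarkers(data)

    def pushmarkers(repo, remote):
        data = repo.obsstore._writemarkers()      # accepted by pushmarker()
        return remote.pushkey('obsolete', 'dump', '', base85.b85encode(data))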
@@ -601 +644 @@
     res = origfn(ui, repo, *args, **opts)
     if repo['.'].obsolete():
         ui.warn(_('Working directory parent is obsolete\n'))
     return res
 
-def wrapmaycreateobsmarker(origfn, ui, repo, *args, **opts):
-    lock = repo.lock()
-    try:
-        res = origfn(ui, repo, *args, **opts)
-        repo._turn_extinct_secret()
-    finally:
-        lock.release()
-    return res
-
 def noextinctsvisibleheads(orig, repo):
     repo._turn_extinct_secret()
     return orig(repo)
 
 def wrapcmdutilamend(orig, ui, repo, commitfunc, old, *args, **kwargs):
@@ -634 +671 @@
     return new
 
 def uisetup(ui):
     extensions.wrapcommand(commands.table, "update", wrapmayobsoletewc)
     extensions.wrapcommand(commands.table, "pull", wrapmayobsoletewc)
-    extensions.wrapcommand(commands.table, "debugobsolete", wrapmaycreateobsmarker)
     if util.safehasattr(cmdutil, 'amend'):
         extensions.wrapfunction(cmdutil, 'amend', wrapcmdutilamend)
     extensions.wrapfunction(discovery, 'findcommonoutgoing', wrapfindcommonoutgoing)
     extensions.wrapfunction(discovery, 'checkheads', wrapcheckheads)
     extensions.wrapfunction(phases, 'visibleheads', noextinctsvisibleheads)
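uisetup() above relies on the standard wrapping helpers, whose convention is that the wrapper receives the original callable as its first argument. A minimal, self-contained sketch of that convention (wrapping 'status' here is purely illustrative and not part of this patch):

    # Minimal sketch of the extensions.wrapcommand() convention used in uisetup();
    # the choice of the 'status' command is arbitrary.
    from mercurial import commands, extensions

    def statuswrapper(origfn, ui, repo, *args, **opts):
        res = origfn(ui, repo, *args, **opts)   # run the wrapped command first
        ui.status('status finished\n')          # then add extra behaviour
        return res

    def uisetup(ui):
        extensions.wrapcommand(commands.table, 'status', statuswrapper)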
@@ -725 +761 @@
         else:
             a.update(str(marker[key]))
         a.update('\0')
     return a.digest()
 
+# mercurial backport
+
+def encodemeta(meta):
+    """Return encoded metadata string to string mapping.
+
+    Assume no ':' in key and no '\0' in either key or value."""
+    for key, value in meta.iteritems():
+        if ':' in key or '\0' in key:
+            raise ValueError("':' and '\0' are forbidden in metadata key")
+        if '\0' in value:
+            raise ValueError("'\0' is forbidden in metadata value")
+    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
+
+def decodemeta(data):
+    """Return string to string dictionary from encoded version."""
+    d = {}
+    for l in data.split('\0'):
+        if l:
+            key, value = l.split(':')
+            d[key] = value
+    return d
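A quick round trip through the two helpers above; the metadata content is invented and only needs to respect the stated constraints (no ':' in keys, no '\0' anywhere):

    # Round trip through encodemeta()/decodemeta() as defined above;
    # the metadata values are invented for the example.
    meta = {'date': '1339000000 0', 'user': 'alice'}
    blob = encodemeta(meta)
    assert blob == 'date:1339000000 0\x00user:alice'   # sorted keys, NUL separated
    assert decodemeta(blob) == meta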
|
+
+# data used for parsing and writing
+_fmversion = 0
+_fmfixed = '>BIB20s'
+_fmnode = '20s'
+_fmfsize = struct.calcsize(_fmfixed)
+_fnodesize = struct.calcsize(_fmnode)
+
+def _readmarkers(data):
+    """Read and enumerate markers from raw data"""
+    off = 0
+    diskversion = _unpack('>B', data[off:off + 1])[0]
+    off += 1
+    if diskversion != _fmversion:
+        raise util.Abort(_('parsing obsolete marker: unknown version %r')
+                         % diskversion)
+
+    # Loop on markers
+    l = len(data)
+    while off + _fmfsize <= l:
+        # read fixed part
+        cur = data[off:off + _fmfsize]
+        off += _fmfsize
+        nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
+        # read replacement
+        sucs = ()
+        if nbsuc:
+            s = (_fnodesize * nbsuc)
+            cur = data[off:off + s]
+            sucs = _unpack(_fmnode * nbsuc, cur)
+            off += s
+        # read metadata
+        # (metadata will be decoded on demand)
+        metadata = data[off:off + mdsize]
+        if len(metadata) != mdsize:
+            raise util.Abort(_('parsing obsolete marker: metadata is too '
+                               'short, %d bytes expected, got %d')
+                             % (mdsize, len(metadata)))
+        off += mdsize
+        yield (pre, sucs, flags, metadata)
+
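Each marker therefore consists of the fixed '>BIB20s' header (successor count, metadata size, flags, precursor node), followed by the successor nodes and the raw metadata, the whole stream being prefixed by one version byte. A hand-packed example, assuming the helpers above are in scope; the node values are invented:

    # Pack a single marker by hand, then parse it back with _readmarkers();
    # prec/succ are invented 20-byte node values.
    import struct

    prec = '\x11' * 20
    succ = '\x22' * 20
    meta = encodemeta({'user': 'alice'})
    data = struct.pack('>B', _fmversion)                    # stream version
    data += struct.pack(_fmfixed, 1, len(meta), 0, prec)    # nbsuc, mdsize, flags, pre
    data += struct.pack(_fmnode * 1, succ)                  # successor nodes
    data += meta                                            # raw metadata

    for pre, sucs, flags, metadata in _readmarkers(data):
        assert (pre, sucs, flags) == (prec, (succ,), 0)
        assert decodemeta(metadata) == {'user': 'alice'}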
|
+class obsstore(object):
+    """Store obsolete markers
+
+    Markers can be accessed with two mappings:
+    - precursors: old -> set(new)
+    - successors: new -> set(old)
+    """
+
+    def __init__(self):
+        self._all = []
+        # new markers to serialize
+        self._new = []
+        self.precursors = {}
+        self.successors = {}
+
+    def __iter__(self):
+        return iter(self._all)
+
+    def __nonzero__(self):
+        return bool(self._all)
+
+    def create(self, prec, succs=(), flag=0, metadata=None):
+        """obsolete: add a new obsolete marker
+
+        * ensuring it is hashable
+        * check mandatory metadata
+        * encode metadata
+        """
+        if metadata is None:
+            metadata = {}
+        if len(prec) != 20:
+            raise ValueError(prec)
+        for succ in succs:
+            if len(succ) != 20:
+                raise ValueError(succ)
+        marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
+        self.add(marker)
+
+    def add(self, marker):
+        """Add a new marker to the store
+
+        This marker still needs to be written to disk"""
+        self._new.append(marker)
+        self._load(marker)
+
+    def loadmarkers(self, data):
+        """Load all markers in data, mark them as known."""
+        for marker in _readmarkers(data):
+            self._load(marker)
+
+    def mergemarkers(self, data):
+        other = set(_readmarkers(data))
+        local = set(self._all)
+        new = other - local
+        for marker in new:
+            self.add(marker)
+
+    def flushmarkers(self, stream):
+        """Write all markers to a stream
+
+        After this operation, "new" markers are considered "known"."""
+        self._writemarkers(stream)
+        self._new[:] = []
+
+    def _load(self, marker):
+        self._all.append(marker)
+        pre, sucs = marker[:2]
+        self.precursors.setdefault(pre, set()).add(marker)
+        for suc in sucs:
+            self.successors.setdefault(suc, set()).add(marker)
+
+    def _writemarkers(self, stream=None):
+        # Kept separate from flushmarkers(), it will be reused for
+        # markers exchange.
+        if stream is None:
+            final = []
+            w = final.append
+        else:
+            w = stream.write
+        w(_pack('>B', _fmversion))
+        for marker in self._all:
+            pre, sucs, flags, metadata = marker
+            nbsuc = len(sucs)
+            format = _fmfixed + (_fmnode * nbsuc)
+            data = [nbsuc, len(metadata), flags, pre]
+            data.extend(sucs)
+            w(_pack(format, *data))
+            w(metadata)
+        if stream is None:
+            return ''.join(final)
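The store keeps every marker in memory and indexes it twice, by precursor and by successor; _writemarkers() and loadmarkers() round-trip the binary form described above. A short usage sketch, with invented node values:

    # Usage sketch for the obsstore class above; node values are invented.
    old, new = '\xaa' * 20, '\xbb' * 20

    store = obsstore()
    store.create(old, (new,), flag=0, metadata={'user': 'bob'})
    assert old in store.precursors and new in store.successors

    # serialising and reloading yields the same markers
    other = obsstore()
    other.loadmarkers(store._writemarkers())
    assert set(other) == set(store)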
|
 
 
 ### repo subclassing
 #############################
 
@@ -760 +948 @@
 
         def obsolete(self, node):
950 """return the set of node that <node> make obsolete (sub)""" |
762 """return the set of node that <node> make obsolete (sub)""" |
             return set(marker[0] for marker in self.obsstore.successors.get(node, []))
 
+        @storecache('obsstore')
+        def obsstore(self):
+            if not getattr(self, '_importoldobsolete', False):
+                data = repo.opener.tryread('obsolete-relations')
+                if not data:
+                    data = repo.sopener.tryread('obsoletemarkers')
+                if data:
+                    raise util.Abort('old format of obsolete marker detected!\n'
+                                     'run `hg debugconvertobsolete` once.')
+            store = obsstore()
+            data = self.sopener.tryread('obsstore')
+            if data:
+                store.loadmarkers(data)
+            return store
+
         @util.propertycache
         def _obsoleteset(self):
970 """the set of obsolete revision""" |
767 """the set of obsolete revision""" |
|
-            data = repo.opener.tryread('obsolete-relations')
-            if not data:
-                data = repo.sopener.tryread('obsoletemarkers')
-            if data:
-                raise util.Abort('old format of obsolete marker detected!\n'
-                                 'run `hg debugconvertobsolete` once.')
             obs = set()
             nm = self.changelog.nodemap
-            for prec in self.obsstore.precursors:
-                rev = nm.get(prec)
+            for obj in self.obsstore.precursors:
+                try: # /!\api change in Hg 2.2 (e8d37b78acfb22ae2c1fb126c2)/!\
+                    rev = nm.get(obj)
+                except TypeError: #XXX to remove while breaking Hg 2.1 support
+                    rev = nm.get(obj, None)
                 if rev is not None:
                     obs.add(rev)
             return obs
 
         @util.propertycache
@@ -848 +1049 @@
         def addcollapsedobsolete(self, oldnodes, newnode):
            """Mark oldnodes as collapsed into newnode."""
             # Assume oldnodes are all descendants of a single rev
             rootrevs = self.revs('roots(%ln)', oldnodes)
             assert len(rootrevs) == 1, rootrevs
-            #rootnode = self[rootrevs[0]].node()
+            rootnode = self[rootrevs[0]].node()
             for n in oldnodes:
                 self.addobsolete(newnode, n)
 
         def _turn_extinct_secret(self):
1059 """ensure all extinct changeset are secret""" |
858 """ensure all extinct changeset are secret""" |
@@ -861 +1062 @@
             # both pull and push
             query = '(obsolete() - obsolete()::(unstable() - secret())) - secret()'
             expobs = [c.node() for c in repo.set(query)]
             phases.retractboundary(repo, 2, expobs)
 
+        ### Disk IO
+
+        def lock(self, *args, **kwargs):
+            l = olock(*args, **kwargs)
+            if not getattr(l.releasefn, 'obspatched', False):
+                oreleasefn = l.releasefn
+                def releasefn(*args, **kwargs):
+                    if 'obsstore' in vars(self) and self.obsstore._new:
+                        f = self.sopener('obsstore', 'wb', atomictemp=True)
+                        try:
+                            self.obsstore.flushmarkers(f)
+                            f.close()
+                        except: # re-raises
+                            f.discard()
+                            raise
+                    oreleasefn(*args, **kwargs)
+                releasefn.obspatched = True
+                l.releasefn = releasefn
+            return l
+
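The lock() override added above defers writing pending markers until the lock's releasefn runs, and flags the wrapped releasefn so it is never wrapped twice. A stripped-down sketch of that pattern with hypothetical stand-ins instead of Mercurial's lock and opener objects:

    # Stripped-down sketch of the flush-on-release pattern used by lock() above;
    # FakeLock and writedata are hypothetical stand-ins, not Mercurial classes.
    class FakeLock(object):
        def __init__(self):
            self.releasefn = lambda: None
        def release(self):
            self.releasefn()

    def wraplock(lock, store, writedata):
        if not getattr(lock.releasefn, 'obspatched', False):
            orig = lock.releasefn
            def releasefn():
                if store._new:                      # flush only when new markers exist
                    writedata(store._writemarkers())
                    store._new[:] = []
                orig()
            releasefn.obspatched = True             # guard against double wrapping
            lock.releasefn = releasefn
        return lock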
|
+
         ### pull // push support
 
         def pull(self, remote, *args, **kwargs):
1091 """wrapper around push that push obsolete relation""" |
869 """wrapper around push that push obsolete relation""" |
             l = repo.lock()
             try:
                 result = opull(remote, *args, **kwargs)
-                self._turn_extinct_secret()
-                return result
+                remoteobs = remote.listkeys('obsolete')
+                if 'dump' in remoteobs:
+                    data = base85.b85decode(remoteobs['dump'])
+                    self.obsstore.mergemarkers(data)
+                    self._clearobsoletecache()
+                self._turn_extinct_secret()
+                return result
             finally:
                 l.release()
 
         def push(self, remote, *args, **opts):
1106 """wrapper around pull that pull obsolete relation""" |
879 """wrapper around pull that pull obsolete relation""" |
             self._turn_extinct_secret()
             result = opush(remote, *args, **opts)
+            if 'obsolete' in self.listkeys('namespaces') and self.obsstore:
+                data = self.obsstore._writemarkers()
+                r = remote.pushkey('obsolete', 'dump', '',
+                                   base85.b85encode(data))
+                if not r:
+                    self.ui.warn(_('failed to push obsolete markers!\n'))
             self._turn_extinct_secret()
+
             return result
 
 
         ### rollback support
 
         # /!\ api change in Hg 2.2 (97efd26eb9576f39590812ea9) /!\
+        if util.safehasattr(repo, '_journalfiles'): # Hg 2.2
             def _journalfiles(self):
                 return o_journalfiles() + (self.sjoin('journal.obsstore'),)
 
             def _writejournal(self, desc):
                 """wrapped version of _writejournal that saves obsolete data"""
                 o_writejournal(desc)
                 filename = 'obsstore'
                 filepath = self.sjoin(filename)
                 if os.path.exists(filepath):
                     journalname = 'journal.' + filename
                     journalpath = self.sjoin(journalname)
                     util.copyfile(filepath, journalpath)
 
|
+        else: # XXX removing this block will break Hg 2.1 support
+            def _writejournal(self, desc):
+                """wrapped version of _writejournal that saves obsolete data"""
+                entries = list(o_writejournal(desc))
+                filename = 'obsstore'
+                filepath = self.sjoin(filename)
+                if os.path.exists(filepath):
+                    journalname = 'journal.' + filename
+                    journalpath = self.sjoin(journalname)
+                    util.copyfile(filepath, journalpath)
+                    entries.append(journalpath)
+                return tuple(entries)
 
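Rollback support follows the usual journal pattern: before a transaction touches the store, the obsstore file is copied aside as journal.obsstore, and _rollback() below puts it back. A plain-filesystem sketch of the same idea; the helper names and the '.journal' suffix are hypothetical:

    # Plain-filesystem sketch of the journal/restore idea used above;
    # the helper names and the '.journal' suffix are hypothetical.
    import os
    import shutil

    def savejournal(storepath):
        journalpath = storepath + '.journal'
        if os.path.exists(storepath):
            shutil.copyfile(storepath, journalpath)
        return journalpath

    def restorejournal(storepath):
        journalpath = storepath + '.journal'
        if os.path.exists(journalpath):
            shutil.copyfile(journalpath, storepath)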
         def _rollback(self, dryrun, force):
             """wrapped version of _rollback that restores obsolete data"""
             ret = o_rollback(dryrun, force)
             if not (ret or dryrun): # rollback did not fail