405 pass # rebase not found |
411 pass # rebase not found |
406 |
412 |
# Pushkey mechanism for mutable
#########################################

def listmarkers(repo):
    """List markers over pushkey (listkeys side).

    Returns {} when the store is empty, otherwise a single 'dump' key
    holding the base85-encoded binary marker stream."""
    if not repo.obsstore:
        return {}
    data = repo.obsstore._writemarkers()
    return {'dump': base85.b85encode(data)}

def pushmarker(repo, key, old, new):
    """Push markers over pushkey.

    Per pushkey convention, returns 1 on success and 0 on failure
    (unknown key, or an unexpected non-empty old value)."""
    if key != 'dump':
        repo.ui.warn(_('unknown key: %r') % key)
        return 0
    if old:
        # BUG FIX: the original format string had no placeholder, so
        # applying `% key` raised TypeError instead of printing the warning.
        repo.ui.warn(_('unexpected old value for %r') % key)
        return 0
    data = base85.b85decode(new)
    # take the repo lock: mergemarkers mutates the obsstore
    lock = repo.lock()
    try:
        repo.obsstore.mergemarkers(data)
        return 1
    finally:
        lock.release()

pushkey.register('obsolete', pushmarker, listmarkers)
440 |
434 ### Discovery wrapping |
441 ### Discovery wrapping |
435 ############################# |
442 ############################# |
436 |
443 |
437 class blist(list, object): |
444 class blist(list, object): |
@command('debugconvertobsolete', [], '')
def cmddebugconvertobsolete(ui, repo):
    """import markers from an .hg/obsolete-relations file

    Converts both legacy on-disk formats (plain-text relations and the
    json marker dump) into the binary obsstore, then removes the old
    files.  Prints how many markers were converted."""
    cnt = 0
    l = repo.lock()
    some = False
    try:
        # flag read by the obsstore property to skip the old-format guard
        repo._importoldobsolete = True
        store = repo.obsstore
        ### very first format
        try:
            f = repo.opener('obsolete-relations')
            try:
                some = True
                for line in f:
                    subhex, objhex = line.split()
                    suc = bin(subhex)
                    prec = bin(objhex)
                    # BUG FIX: `(suc == nullid) and [] or [suc]` always
                    # produced [suc] because [] is falsy, so "killed"
                    # changesets (suc == nullid) wrongly got a successor.
                    sucs = [] if suc == nullid else [suc]
                    meta = {
                        'date': '%i %i' % util.makedate(),
                        'user': ui.username(),
                        }
                    store.create(prec, sucs, 0, meta)
                    cnt += 1
            finally:
                f.close()
            util.unlink(repo.join('obsolete-relations'))
        except IOError:
            # no old-format file: nothing to convert for this format
            pass
        ### second (json) format
        data = repo.sopener.tryread('obsoletemarkers')
        if data:
            some = True
            for oldmark in json.loads(data):
                del oldmark['id']  # dropped for now
                del oldmark['reason']  # unused until then
                oldmark['subjects'] = [bin(n) for n in oldmark['subjects']]
                oldmark['object'] = bin(oldmark['object'])
                oldmark['date'] = '%i %i' % tuple(oldmark['date'])
                # remaining keys of oldmark become the marker metadata
                store.create(oldmark.pop('object'),
                             oldmark.pop('subjects'),
                             0, oldmark)
                cnt += 1
            util.unlink(repo.sjoin('obsoletemarkers'))
    finally:
        del repo._importoldobsolete
        l.release()
    if not some:
        ui.warn('nothing to do\n')
    ui.status('%i obsolete marker converted\n' % cnt)
597 @command('debugsuccessors', [], '') |
620 @command('debugsuccessors', [], '') |
598 def cmddebugsuccessors(ui, repo): |
621 def cmddebugsuccessors(ui, repo): |
599 """dump obsolete changesets and their successors |
622 """dump obsolete changesets and their successors |
738 else: |
761 else: |
739 a.update(str(marker[key])) |
762 a.update(str(marker[key])) |
740 a.update('\0') |
763 a.update('\0') |
741 return a.digest() |
764 return a.digest() |
742 |
765 |
# mercurial backport

def encodemeta(meta):
    """Return encoded metadata string to string mapping.

    Assume no ':' in key and no '\0' in both key and value.
    Keys are emitted in sorted order so the encoding is deterministic."""
    # .items() behaves identically to the former .iteritems() here and
    # keeps the helper importable on modern interpreters.
    for key, value in meta.items():
        if ':' in key or '\0' in key:
            raise ValueError("':' and '\0' are forbidden in metadata key'")
        if '\0' in value:
            # BUG FIX: the message previously blamed ':' although the
            # check (and the encoding constraint) is on '\0'.
            raise ValueError("'\0' is forbidden in metadata value'")
    return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])

def decodemeta(data):
    """Return string to string dictionary from encoded version."""
    d = {}
    for l in data.split('\0'):
        if l:
            key, value = l.split(':')
            d[key] = value
    return d
|
787 |
|
# data used for parsing and writing
_fmversion = 0
_fmfixed = '>BIB20s'
_fmnode = '20s'
_fmfsize = struct.calcsize(_fmfixed)
_fnodesize = struct.calcsize(_fmnode)

def _readmarkers(data):
    """Read and enumerate markers from raw data.

    Yields (precursor, successors, flags, metadata) tuples; metadata is
    kept as the raw encoded string (decoded on demand by the caller).
    Aborts on an unknown format version or truncated metadata."""
    off = 0
    diskversion = _unpack('>B', data[off:off + 1])[0]
    off += 1
    if diskversion != _fmversion:
        raise util.Abort(_('parsing obsolete marker: unknown version %r')
                         % diskversion)

    # Loop on markers
    l = len(data)
    while off + _fmfsize <= l:
        # read fixed part
        cur = data[off:off + _fmfsize]
        off += _fmfsize
        nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
        # read replacement
        sucs = ()
        if nbsuc:
            s = (_fnodesize * nbsuc)
            cur = data[off:off + s]
            sucs = _unpack(_fmnode * nbsuc, cur)
            off += s
        # read metadata
        # (metadata will be decoded on demand)
        metadata = data[off:off + mdsize]
        if len(metadata) != mdsize:
            # BUG FIX: "expected" and "got" arguments were swapped, the
            # message printed the actual length as the expected one.
            raise util.Abort(_('parsing obsolete marker: metadata is too '
                               'short, %d bytes expected, got %d')
                             % (mdsize, len(metadata)))
        off += mdsize
        yield (pre, sucs, flags, metadata)
|
827 |
|
class obsstore(object):
    """Store obsolete markers

    Markers can be accessed with two mappings:
    - precursors: old -> set(new)
    - successors: new -> set(old)

    Each marker is a (precursor, successors, flags, metadata) tuple with
    20-byte binary nodes and an encodemeta()-encoded metadata string.
    """

    def __init__(self):
        # every marker known to this store, in load order
        self._all = []
        # new markers to serialize (not yet written to disk)
        self._new = []
        self.precursors = {}
        self.successors = {}

    def __iter__(self):
        return iter(self._all)

    def __nonzero__(self):
        # Python 2 truthiness: the store is truthy when non-empty
        return bool(self._all)

    def create(self, prec, succs=(), flag=0, metadata=None):
        """obsolete: add a new obsolete marker

        * ensuring it is hashable
        * check mandatory metadata
        * encode metadata
        """
        if metadata is None:
            metadata = {}
        if len(prec) != 20:
            raise ValueError(prec)
        for succ in succs:
            if len(succ) != 20:
                # BUG FIX: previously raised ValueError(prec), hiding the
                # actual offending successor node.
                raise ValueError(succ)
        marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
        self.add(marker)

    def add(self, marker):
        """Add a new marker to the store

        This marker still needs to be written to disk"""
        self._new.append(marker)
        self._load(marker)

    def loadmarkers(self, data):
        """Load all markers in data, mark them as known."""
        for marker in _readmarkers(data):
            self._load(marker)

    def mergemarkers(self, data):
        """Add every marker from data that the store does not know yet."""
        other = set(_readmarkers(data))
        local = set(self._all)
        new = other - local
        for marker in new:
            self.add(marker)

    def flushmarkers(self, stream):
        """Write all markers to a stream

        After this operation, "new" markers are considered "known"."""
        self._writemarkers(stream)
        self._new[:] = []

    def _load(self, marker):
        # index one marker into _all and both direction mappings
        self._all.append(marker)
        pre, sucs = marker[:2]
        self.precursors.setdefault(pre, set()).add(marker)
        for suc in sucs:
            self.successors.setdefault(suc, set()).add(marker)

    def _writemarkers(self, stream=None):
        """Serialize all markers; write to stream, or return a string.

        Kept separate from flushmarkers(), it will be reused for
        markers exchange."""
        if stream is None:
            final = []
            w = final.append
        else:
            w = stream.write
        w(_pack('>B', _fmversion))
        for marker in self._all:
            pre, sucs, flags, metadata = marker
            nbsuc = len(sucs)
            format = _fmfixed + (_fmnode * nbsuc)
            data = [nbsuc, len(metadata), flags, pre]
            data.extend(sucs)
            w(_pack(format, *data))
            w(metadata)
        if stream is None:
            return ''.join(final)
811 |
918 |
812 |
919 |
813 ### repo subclassing |
920 ### repo subclassing |
814 ############################# |
921 ############################# |
815 |
922 |
833 |
940 |
### Public method
def obsoletedby(self, node):
    """return the set of node that make <node> obsolete (obj)"""
    markers = self.obsstore.precursors.get(node, [])
    # collect every successor node listed by any marker on <node>
    return set(succ for marker in markers for succ in marker[1])
841 |
948 |
def obsolete(self, node):
    """return the set of node that <node> make obsolete (sub)"""
    result = set()
    # each marker records its precursor at index 0
    for marker in self.obsstore.successors.get(node, []):
        result.add(marker[0])
    return result
845 |
952 |
@storecache('obsstore')
def obsstore(self):
    """Return the repository's obsstore, populated from disk.

    Aborts when a legacy on-disk format is still present and
    `hg debugconvertobsolete` has not been run yet."""
    if not getattr(self, '_importoldobsolete', False):
        # NOTE(review): reads through the closure-level `repo` while the
        # rest of the method uses `self` -- presumably both name this
        # repository; confirm against the enclosing reposetup.
        legacy = (repo.opener.tryread('obsolete-relations')
                  or repo.sopener.tryread('obsoletemarkers'))
        if legacy:
            raise util.Abort('old format of obsolete marker detected!\n'
                             'run `hg debugconvertobsolete` once.')
    store = obsstore()
    data = self.sopener.tryread('obsstore')
    if data:
        store.loadmarkers(data)
    return store
863 |
967 |
864 @util.propertycache |
968 @util.propertycache |
865 def _obsoleteset(self): |
969 def _obsoleteset(self): |
866 """the set of obsolete revision""" |
970 """the set of obsolete revision""" |
867 obs = set() |
971 obs = set() |
868 nm = self.changelog.nodemap |
972 nm = self.changelog.nodemap |
869 for obj in self.obsoletestore.objects: |
973 for obj in self.obsstore.precursors: |
870 try: # /!\api change in Hg 2.2 (e8d37b78acfb22ae2c1fb126c2)/!\ |
974 try: # /!\api change in Hg 2.2 (e8d37b78acfb22ae2c1fb126c2)/!\ |
871 rev = nm.get(obj) |
975 rev = nm.get(obj) |
872 except TypeError: #XXX to remove while breaking Hg 2.1 support |
976 except TypeError: #XXX to remove while breaking Hg 2.1 support |
873 rev = nm.get(obj, None) |
977 rev = nm.get(obj, None) |
874 if rev is not None: |
978 if rev is not None: |
def lock(self, *args, **kwargs):
    """Acquire the repo lock; flush pending obsstore markers on release."""
    lck = olock(*args, **kwargs)
    if getattr(lck.releasefn, 'obspatched', False):
        # already wrapped: do not stack the patch twice
        return lck
    inner_release = lck.releasefn

    def patched_release(*a, **kw):
        # only write when the store was instantiated and has new markers
        if 'obsstore' in vars(self) and self.obsstore._new:
            f = self.sopener('obsstore', 'wb', atomictemp=True)
            try:
                self.obsstore.flushmarkers(f)
                f.close()
            except: # re-raises
                f.discard()
                raise
        inner_release(*a, **kw)

    patched_release.obspatched = True
    lck.releasefn = patched_release
    return lck
977 |
|
def _readobsrels(self):
    """Read obsolete relation on disk; {} when the file is absent."""
    # XXX handle lock
    try:
        rels_file = self.opener('obsolete-relations')
        try:
            return _obsdeserialise(rels_file)
        finally:
            rels_file.close()
    except IOError:
        # no obsolete-relations file: nothing recorded yet
        return {}
|
989 |
1086 |
990 |
1087 |
### pull // push support

def pull(self, remote, *args, **kwargs):
    """wrapper around pull that pulls obsolete markers too

    After the regular pull, fetch the remote 'obsolete' pushkey
    namespace and merge any received markers into the local obsstore.
    (BUG FIX: the docstring previously said "wrapper around push".)
    """
    l = repo.lock()
    try:
        result = opull(remote, *args, **kwargs)
        remoteobs = remote.listkeys('obsolete')
        if 'dump' in remoteobs:
            data = base85.b85decode(remoteobs['dump'])
            self.obsstore.mergemarkers(data)
            # markers changed: drop the cached obsolete/unstable sets
            self._clearobsoletecache()
        self._turn_extinct_secret()
        return result
    finally:
        l.release()
1010 |
1104 |
def push(self, remote, *args, **opts):
    """wrapper around push that pushes obsolete markers too

    After the regular push, dump the local obsstore and send it through
    the 'obsolete' pushkey namespace.
    (BUG FIX: the docstring previously said "wrapper around pull".)
    """
    self._turn_extinct_secret()
    result = opush(remote, *args, **opts)
    # NOTE(review): this tests the *local* listkeys('namespaces');
    # deciding whether to push usually depends on the remote's support
    # for the 'obsolete' namespace -- confirm the intended peer here.
    if 'obsolete' in self.listkeys('namespaces') and self.obsstore:
        data = self.obsstore._writemarkers()
        r = remote.pushkey('obsolete', 'dump', '',
                           base85.b85encode(data))
        if not r:
            self.ui.warn(_('failed to push obsolete markers!\n'))
    self._turn_extinct_secret()

    return result
1022 |
1118 |
1023 |
1119 |
1024 ### rollback support |
1120 ### rollback support |
1025 |
1121 |
1026 # /!\ api change in Hg 2.2 (97efd26eb9576f39590812ea9) /!\ |
1122 # /!\ api change in Hg 2.2 (97efd26eb9576f39590812ea9) /!\ |
1027 if util.safehasattr(repo, '_journalfiles'): # Hg 2.2 |
1123 if util.safehasattr(repo, '_journalfiles'): # Hg 2.2 |
def _journalfiles(self):
    """Also journal the obsstore file for transaction rollback."""
    extra = (self.sjoin('journal.obsstore'),)
    return o_journalfiles() + extra
1030 |
1126 |
def _writejournal(self, desc):
    """wrapped version of _writejournal that save obsolete data"""
    o_writejournal(desc)
    name = 'obsstore'
    path = self.sjoin(name)
    # snapshot the obsstore so rollback can restore it
    if os.path.exists(path):
        util.copyfile(path, self.sjoin('journal.' + name))
1040 |
1136 |
1041 else: # XXX removing this bloc will break Hg 2.1 support |
1137 else: # XXX removing this bloc will break Hg 2.1 support |
1042 def _writejournal(self, desc): |
1138 def _writejournal(self, desc): |
1043 """wrapped version of _writejournal that save obsolete data""" |
1139 """wrapped version of _writejournal that save obsolete data""" |
1044 entries = list(o_writejournal(desc)) |
1140 entries = list(o_writejournal(desc)) |
1045 filename = 'obsoletemarkers' |
1141 filename = 'obsstore' |
1046 filepath = self.sjoin(filename) |
1142 filepath = self.sjoin(filename) |
1047 if os.path.exists(filepath): |
1143 if os.path.exists(filepath): |
1048 journalname = 'journal.' + filename |
1144 journalname = 'journal.' + filename |
1049 journalpath = self.sjoin(journalname) |
1145 journalpath = self.sjoin(journalname) |
1050 util.copyfile(filepath, journalpath) |
1146 util.copyfile(filepath, journalpath) |
1053 |
1149 |
def _rollback(self, dryrun, force):
    """wrapped version of _rollback that restore obsolete data"""
    ret = o_rollback(dryrun, force)
    if ret or dryrun:
        # rollback failed or was a dry run: nothing to restore
        return ret
    src = self.sjoin('undo.obsstore')
    dst = self.sjoin('obsstore')
    if os.path.exists(src):
        util.rename(src, dst)
    elif os.path.exists(dst):
        # no state was saved because the file did not exist before
        os.unlink(dst)
    # invalidate the cached obsstore property
    self.__dict__.pop('obsstore', None)
    return ret
1068 |
1164 |
1069 @storecache('00changelog.i') |
1165 @storecache('00changelog.i') |
1070 def changelog(self): |
1166 def changelog(self): |
1071 # << copy pasted from mercurial source |
1167 # << copy pasted from mercurial source |