obscache: extract code to update from new revisions
Code cleanup to prepare for upstreaming. This makes the code more likely to work
with the key validation returning an iterator of revs to update the cache with.
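
As an illustration only (not part of this patch), here is a minimal sketch of how
the extracted helper could later be fed by key validation returning an iterator of
revs; the _validatecachekey() helper and the reset logic are hypothetical names,
not existing API:

    def update(self, repo):
        # Hypothetical: _validatecachekey() returns None when the stored key
        # no longer matches the repository, otherwise an iterator of the revs
        # added since the cache was last written (possibly empty).
        revs = self._validatecachekey(repo)
        if revs is None:
            self._data = []              # hypothetical full reset of the cache
            revs = repo.changelog.revs()
        self._updaterevs(repo, revs)     # obsmarker processing omitted here
        self._cachekey = getcachekey(repo)
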
--- a/hgext3rd/evolve/obscache.py Wed May 03 18:33:53 2017 +0200
+++ b/hgext3rd/evolve/obscache.py Wed May 03 18:46:48 2017 +0200
@@ -253,25 +253,7 @@
         # process the new changesets
         cl = repo.changelog
         if startrev is not None:
-            node = cl.node
-            # Note:
-            #
-            # Newly added changeset might be affected by obsolescence markers
-            # we already have locally. So we needs to have soem global
-            # knowledge about the markers to handle that question. Right this
-            # requires parsing all markers in the obsstore. However, we could
-            # imagine using various optimisation (eg: bloom filter, other on
-            # disk cache) to remove this full parsing.
-            #
-            # For now we stick to the simpler approach or paying the
-            # performance cost on new changesets.
-            succs = repo.obsstore.successors
-            for r in cl.revs(startrev):
-                if node(r) in succs:
-                    val = 1
-                else:
-                    val = 0
-                self._data.append(val)
+            self._updaterevs(repo, cl.revs(startrev))
         assert len(self._data) == len(cl), (len(self._data), len(cl))

         # process the new obsmarkers
@@ -303,6 +285,28 @@
         self._cachekey = getcachekey(repo)

+    def _updaterevs(self, repo, revs):
+        """update the cache with new revisions
+
+        Newly added changesets might be affected by obsolescence markers
+        we already have locally, so we need some global knowledge about
+        the markers to handle that question. Right now this requires
+        parsing all markers in the obsstore. However, we could imagine
+        using various optimisations (eg: bloom filter, other on-disk
+        cache) to remove this full parsing.
+
+        For now we stick to the simpler approach of paying the
+        performance cost on new changesets.
+ """
+ node = repo.changelog.node
+ succs = repo.obsstore.successors
+ for r in revs:
+ if node(r) in succs:
+ val = 1
+ else:
+ val = 0
+ self._data.append(val)
+
def save(self, repo):
"""save the data to disk"""