# HG changeset patch
# User Aurelien Campeas <aurelien.campeas@logilab.fr>
# Date 1416840198 -3600
# Node ID 7d9256aab3373c527f42dc395a075edbd9a61798
# Parent  341b63331e4ba6ba6e5a10f7b4b2bf2c63a4bbf6
[source/native/backup_restore] have a unique tunable blocksize for the dump phase

This makes memory errors much less likely.

Closes #4618949.

diff -r 341b63331e4b -r 7d9256aab337 server/sources/native.py
--- a/server/sources/native.py	Wed Sep 17 10:31:50 2014 +0200
+++ b/server/sources/native.py	Mon Nov 24 15:43:18 2014 +0100
@@ -1706,6 +1706,8 @@
     Tables are saved in chunks in different files in order to prevent
     a too high memory consumption.
     """
+    blocksize = 100
+
     def __init__(self, source):
         """
         :param: source an instance of the system source
@@ -1790,10 +1792,7 @@
         sql = 'SELECT * FROM %s' % table
         columns, rows_iterator = self._get_cols_and_rows(sql)
         self.logger.info('number of rows: %d', rowcount)
-        if table.startswith('cw_'): # entities
-            blocksize = 2000
-        else: # relations and metadata
-            blocksize = 10000
+        blocksize = self.blocksize
         if rowcount > 0:
             for i, start in enumerate(xrange(0, rowcount, blocksize)):
                 rows = list(itertools.islice(rows_iterator, blocksize))
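
The dump loop above now reads every table through one class-level blocksize, consuming the row iterator in fixed-size slices so that only a single chunk of rows is held in memory at a time. A minimal sketch of that pattern follows; the TableDumper and BigChunkDumper names and the dump_table method are hypothetical stand-ins for illustration, not the actual CubicWeb dumper API.

import itertools

class TableDumper(object):
    # Hypothetical stand-in for the patched dumper class: a single
    # class-level blocksize governs every table, entities and relations alike.
    blocksize = 100

    def dump_table(self, rows_iterator, rowcount):
        # Consume the row iterator in slices of at most `blocksize` rows,
        # so one chunk is the most that ever lives in memory.
        chunks = []
        for start in range(0, rowcount, self.blocksize):
            rows = list(itertools.islice(rows_iterator, self.blocksize))
            chunks.append(len(rows))  # a real dumper would serialize `rows` here
        return chunks

# A deployment with more memory headroom can raise the value in one place,
# e.g. by subclassing (assumption: nothing else caches the old value).
class BigChunkDumper(TableDumper):
    blocksize = 5000

print(BigChunkDumper().dump_table(iter(range(12000)), 12000))  # -> [5000, 5000, 2000]

Replacing the old per-table split (2000 rows for cw_* entity tables, 10000 for relations and metadata) with one small, overridable default keeps peak memory use predictable and leaves a single knob to turn when a deployment can afford larger chunks.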