[source/native/backup_restore] use a single tunable blocksize for the dump phase
A single overridable value lets the chunk size be tuned to the host's available memory, which makes memory errors during the dump much less likely.
Closes #4618949.
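
For illustration, a minimal tuning sketch, assuming the attribute sits on the backup helper class in server/sources/native.py (the import path and class name below are assumptions, not dictated by this patch):

    # Hypothetical deployment tuning: shrink the dump chunks on a host that
    # is short on memory; class name assumed from server/sources/native.py.
    from cubicweb.server.sources.native import DatabaseIndependentBackupRestore

    # one knob for every table (entities, relations and metadata alike);
    # smaller values trade dump speed for a lower peak memory footprint
    DatabaseIndependentBackupRestore.blocksize = 50
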
--- a/server/sources/native.py Wed Sep 17 10:31:50 2014 +0200
+++ b/server/sources/native.py Mon Nov 24 15:43:18 2014 +0100
@@ -1706,6 +1706,8 @@
     Tables are saved in chunks in different files in order to prevent
     a too high memory consumption.
     """
+    blocksize = 100
+
     def __init__(self, source):
         """
         :param: source an instance of the system source
@@ -1790,10 +1792,7 @@
         sql = 'SELECT * FROM %s' % table
         columns, rows_iterator = self._get_cols_and_rows(sql)
         self.logger.info('number of rows: %d', rowcount)
-        if table.startswith('cw_'): # entities
-            blocksize = 2000
-        else: # relations and metadata
-            blocksize = 10000
+        blocksize = self.blocksize
         if rowcount > 0:
             for i, start in enumerate(xrange(0, rowcount, blocksize)):
                 rows = list(itertools.islice(rows_iterator, blocksize))
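
The loop above bounds peak memory by materializing at most blocksize rows per pass. Here is a self-contained sketch of that pattern; dump_in_chunks and the dump_chunk callback are illustrative names, not CubicWeb API, and range stands in for the Python 2 xrange used in the patched code:

    import itertools

    def dump_in_chunks(rows_iterator, rowcount, blocksize, dump_chunk):
        # mirror of the patched loop: at most `blocksize` rows live in memory
        # at once, since islice() consumes the shared iterator and each pass
        # resumes where the previous chunk stopped
        for i, start in enumerate(range(0, rowcount, blocksize)):
            rows = list(itertools.islice(rows_iterator, blocksize))
            dump_chunk(i, rows)  # e.g. serialize `rows` into file number `i`

    # usage sketch: 10 rows with blocksize 3 yield chunks of 3, 3, 3 and 1
    chunks = []
    dump_in_chunks(iter(range(10)), 10, 3, lambda i, rows: chunks.append(rows))
    assert [len(c) for c in chunks] == [3, 3, 3, 1]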