--- a/cubicweb/dataimport/massive_store.py Thu Jan 28 16:08:08 2016 +0100
+++ b/cubicweb/dataimport/massive_store.py Thu Jan 28 18:14:55 2016 +0100
@@ -429,9 +429,9 @@
# Clear data cache
self._data_entities[etype] = []
if not self.slave_mode:
- self.flush_meta_data()
+ self.flush_metadata()
 
- def flush_meta_data(self):
+ def flush_metadata(self):
""" Flush the meta data (entities table, is_instance table, ...)
"""
if self.slave_mode:
@@ -450,7 +450,7 @@
if etype not in already_flushed:
# Deals with meta data
self.logger.info('Flushing meta data for %s' % etype)
- self.insert_massive_meta_data(etype)
+ self.insert_massive_metadata(etype)
self.sql('INSERT INTO cwmassive_metadata VALUES (%(e)s)', {'e': etype})
 
def _cleanup_relations(self, rtype):
@@ -463,7 +463,7 @@
# Drop temporary relation table
self.sql('DROP TABLE %(r)s_relation_tmp' % {'r': rtype.lower()})
 
- def insert_massive_meta_data(self, etype):
+ def insert_massive_metadata(self, etype):
""" Massive insertion of meta data for a given etype, based on SQL statements.
"""
# Push data - Use coalesce to avoid NULL (and get 0), if there is no
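
The renames above (flush_meta_data -> flush_metadata, insert_massive_meta_data -> insert_massive_metadata) change names only; signatures and behaviour are untouched, so external callers just need the matching name update. A minimal sketch of the caller side, assuming the generic cubicweb dataimport store API: prepare_insert_entity(), flush() and finish() are not part of this file's hunks, and import_persons(), the 'Person' etype and the rows iterable are purely illustrative.

# Hypothetical import script; only the flush_metadata() call is renamed by this patch.
from cubicweb.dataimport.massive_store import MassiveObjectStore

def import_persons(cnx, rows):
    store = MassiveObjectStore(cnx, slave_mode=False)  # master store
    for row in rows:
        store.prepare_insert_entity('Person', name=row['name'])
    store.flush()
    # The entity flush path already calls flush_metadata() in master mode (first
    # hunk above); the explicit call is kept here only to show the renamed method.
    store.flush_metadata()   # was: store.flush_meta_data()
    store.finish()
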
--- a/cubicweb/dataimport/test/test_massive_store.py Thu Jan 28 16:08:08 2016 +0100
+++ b/cubicweb/dataimport/test/test_massive_store.py Thu Jan 28 18:14:55 2016 +0100
@@ -213,7 +213,7 @@
with self.admin_access.repo_cnx() as cnx:
master_store = MassiveObjectStore(cnx, slave_mode=False)
slave_store = MassiveObjectStore(cnx, slave_mode=True)
- self.assertRaises(RuntimeError, slave_store.flush_meta_data)
+ self.assertRaises(RuntimeError, slave_store.flush_metadata)
self.assertRaises(RuntimeError, slave_store.finish)
 
def test_simple_insert(self):
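
The updated assertion keeps pinning down the master/slave contract: a store created with slave_mode=True may push data but must not flush metadata or finalize the import. A short sketch of that contract, using only names visible in this patch; check_slave_contract() is an illustrative helper, and obtaining cnx is left out (in the test it comes from admin_access.repo_cnx()).

# Sketch of the behaviour asserted by the test above: metadata flushing and
# finish() are refused in slave mode and raise RuntimeError.
from cubicweb.dataimport.massive_store import MassiveObjectStore

def check_slave_contract(cnx):
    slave_store = MassiveObjectStore(cnx, slave_mode=True)
    for op in (slave_store.flush_metadata, slave_store.finish):
        try:
            op()
        except RuntimeError:
            pass  # expected: these steps are reserved to the master store
        else:
            raise AssertionError('%s should be refused in slave mode' % op.__name__)

In a real import the master store (slave_mode=False) performs those two steps once the slaves have pushed their data, which is why the renamed flush_metadata() raises immediately when called on a slave.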