[dataimport] remove autoflush_metadata from MassiveObjectStore parameters
author Julien Cristau <julien.cristau@logilab.fr>
Mon, 09 Nov 2015 15:44:06 +0100
changeset 10875 75d1b2d66f18
parent 10874 538e17174769
child 10876 11a9b2fb83d0
[dataimport] remove autoflush_metadata from MassiveObjectStore parameters

Hopefully not needed.
dataimport/massive_store.py
dataimport/test/test_massive_store.py
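
A minimal usage sketch after this change (not part of the changeset: the repo connection cnx and the cubicweb.dataimport.massive_store module path are assumed from the tests below; flush() now updates the metadata tables itself, so there is no autoflush knob to remember):

    # Sketch only -- assumes a CubicWeb repo connection `cnx` as set up in
    # the tests below, and the usual cubicweb.dataimport module layout.
    from cubicweb.dataimport.massive_store import MassiveObjectStore

    store = MassiveObjectStore(cnx)            # autoflush_metadata is gone
    store.prepare_insert_entity('Location', name=u'toto')
    store.flush()     # now always flushes metadata along with entity data
    store.commit()    # commit_at_flush (default True) is unchanged
    store.finish()
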
--- a/dataimport/massive_store.py	Mon Nov 09 15:41:27 2015 +0100
+++ b/dataimport/massive_store.py	Mon Nov 09 15:44:06 2015 +0100
@@ -88,7 +88,7 @@
     # max size of the iid, used to create the iid_eid conversion table
     iid_maxsize = 1024
 
-    def __init__(self, cnx, autoflush_metadata=True,
+    def __init__(self, cnx,
                  commit_at_flush=True,
                  on_commit_callback=None, on_rollback_callback=None,
                  slave_mode=False,
@@ -96,9 +96,6 @@
         """ Create a MassiveObject store, with the following attributes:
 
         - cnx: CubicWeb cnx
-        - autoflush_metadata: Boolean.
-                              Automatically flush the metadata after
-                              each flush()
         - commit_at_flush: Boolean. Commit after each flush().
         """
         super(MassiveObjectStore, self).__init__(cnx)
@@ -115,7 +112,6 @@
                             }
         self.sql = self._cnx.system_sql
         self.logger = logging.getLogger('dataio.massiveimport')
-        self.autoflush_metadata = autoflush_metadata
         self.slave_mode = slave_mode
         self.size_constraints = get_size_constraints(cnx.vreg.schema)
         self.default_values = get_default_values(cnx.vreg.schema)
@@ -561,8 +557,7 @@
                 self.on_rollback(exc, etype, data)
             # Clear data cache
             self._data_entities[etype] = []
-        if self.autoflush_metadata:
-            self.flush_meta_data()
+        self.flush_meta_data()
         # Commit if asked
         if self.commit_at_flush:
             self.commit()
--- a/dataimport/test/test_massive_store.py	Mon Nov 09 15:41:27 2015 +0100
+++ b/dataimport/test/test_massive_store.py	Mon Nov 09 15:44:06 2015 +0100
@@ -86,7 +86,7 @@
             crs = cnx.system_sql('SELECT * FROM entities WHERE type=%(t)s',
                                  {'t': 'Location'})
             self.assertEqual(len(crs.fetchall()), 0)
-            store = MassiveObjectStore(cnx, autoflush_metadata=True)
+            store = MassiveObjectStore(cnx)
             store.prepare_insert_entity('Location', name=u'toto')
             store.flush()
             store.commit()
@@ -97,25 +97,6 @@
                                  {'t': 'Location'})
             self.assertEqual(len(crs.fetchall()), 1)
 
-#    def test_no_autoflush_metadata(self):
-#        with self.admin_access.repo_cnx() as cnx:
-#            crs = cnx.system_sql('SELECT * FROM entities WHERE type=%(t)s',
-#                                      {'t': 'Location'})
-#            self.assertEqual(len(crs.fetchall()), 0)
-#        with self.admin_access.repo_cnx() as cnx:
-#            store = MassiveObjectStore(cnx, autoflush_metadata=False)
-#            store.prepare_insert_entity('Location', name=u'toto')
-#            store.flush()
-#            store.commit()
-#            crs = cnx.system_sql('SELECT * FROM entities WHERE type=%(t)s',
-#                                 {'t': 'Location'})
-#            self.assertEqual(len(crs.fetchall()), 0)
-#            store.flush_meta_data()
-#            crs = cnx.system_sql('SELECT * FROM entities WHERE type=%(t)s',
-#                                 {'t': 'Location'})
-#            self.assertEqual(len(crs.fetchall()), 1)
-#            store.finish()
-
     def test_massimport_etype_metadata(self):
         with self.admin_access.repo_cnx() as cnx:
             store = MassiveObjectStore(cnx)
@@ -226,7 +207,7 @@
 
     def test_simple_insert(self):
         with self.admin_access.repo_cnx() as cnx:
-            store = MassiveObjectStore(cnx, autoflush_metadata=True)
+            store = MassiveObjectStore(cnx)
             self.push_geonames_data(osp.join(HERE, 'data/geonames.csv'), store)
             store.flush()
         with self.admin_access.repo_cnx() as cnx:
@@ -237,7 +218,7 @@
 
     def test_index_building(self):
         with self.admin_access.repo_cnx() as cnx:
-            store = MassiveObjectStore(cnx, autoflush_metadata=True)
+            store = MassiveObjectStore(cnx)
             self.push_geonames_data(osp.join(HERE, 'data/geonames.csv'), store)
             store.flush()
 
@@ -260,20 +241,6 @@
             self.assertIn('owned_by_relation_p_key', indexes)
             self.assertIn('owned_by_relation_to_idx', indexes)
 
-    def test_flush_meta_data(self):
-        with self.admin_access.repo_cnx() as cnx:
-            store = MassiveObjectStore(cnx, autoflush_metadata=False)
-            self.push_geonames_data(osp.join(HERE, 'data/geonames.csv'), store)
-            store.flush()
-            curs = cnx.system_sql('SELECT * FROM entities WHERE type=%(t)s',
-                                  {'t': 'Location'})
-            self.assertEqual(len(curs.fetchall()), 0)
-            # Flush metadata -> entities table is updated
-            store.flush_meta_data()
-            curs = cnx.system_sql('SELECT * FROM entities WHERE type=%(t)s',
-                                  {'t': 'Location'})
-            self.assertEqual(len(curs.fetchall()), 4000)
-
     def test_multiple_insert(self):
         with self.admin_access.repo_cnx() as cnx:
             store = MassiveObjectStore(cnx)
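
Callers that used to pass autoflush_metadata=False and drive flush_meta_data() by hand, as the deleted tests above did, will now get a TypeError from __init__. A hedged migration sketch (the method flush_meta_data() itself is untouched by this changeset, only the constructor keyword is removed):

    # Before this changeset (now fails: unexpected keyword argument):
    #   store = MassiveObjectStore(cnx, autoflush_metadata=False)
    #   store.flush()
    #   store.flush_meta_data()
    # After: drop the keyword and the manual call; flush() covers both.
    store = MassiveObjectStore(cnx)
    store.flush()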