server/sources/native.py
changeset 9467 ad66d7b3fd48
parent 9466 c3a5f4507f12
child 9468 39b7a91a3f4c
diff -r c3a5f4507f12 -r ad66d7b3fd48 server/sources/native.py
@@ -91,10 +91,27 @@
     def fetchall(self):
         return self.cu.fetchall()
 
     def fetchone(self):
         return self.cu.fetchone()
+
+
+def make_schema(selected, solution, table, typemap):
+    """return a sql schema to store RQL query result"""
+    sql = []
+    varmap = {}
+    for i, term in enumerate(selected):
+        name = 'C%s' % i
+        key = term.as_string()
+        varmap[key] = '%s.%s' % (table, name)
+        ttype = term.get_type(solution)
+        try:
+            sql.append('%s %s' % (name, typemap[ttype]))
+        except KeyError:
+            # assert not schema(ttype).final
+            sql.append('%s %s' % (name, typemap['Int']))
+    return ','.join(sql), varmap
 
 
 def sql_or_clauses(sql, clauses):
     select, restr = sql.split(' WHERE ', 1)
     restrclauses = restr.split(' AND ')
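For illustration only (not part of the changeset): the new make_schema helper turns the selected terms of an RQL query into a list of column definitions named C0, C1, ... plus a mapping from each term's string form to its column in the temporary table. The sketch below copies make_schema from the hunk above and feeds it a hypothetical FakeTerm stub and type map, just to show the shape of the result; the real type map comes from self.dbhelper.TYPE_MAPPING.

def make_schema(selected, solution, table, typemap):
    """return a sql schema to store RQL query result"""
    sql = []
    varmap = {}
    for i, term in enumerate(selected):
        name = 'C%s' % i
        key = term.as_string()
        varmap[key] = '%s.%s' % (table, name)
        ttype = term.get_type(solution)
        try:
            sql.append('%s %s' % (name, typemap[ttype]))
        except KeyError:
            # assert not schema(ttype).final
            sql.append('%s %s' % (name, typemap['Int']))
    return ','.join(sql), varmap

class FakeTerm(object):
    """stand-in for an RQL selection term (hypothetical, demo only)"""
    def __init__(self, rql, etype):
        self.rql, self.etype = rql, etype
    def as_string(self):
        return self.rql
    def get_type(self, solution):
        return self.etype

# hypothetical type map; the real one is backend specific
TYPEMAP = {'Int': 'integer', 'String': 'text'}

schema, varmap = make_schema([FakeTerm('X', 'Int'), FakeTerm('N', 'String')],
                             {}, 'TMP1', TYPEMAP)
print(schema)  # -> C0 integer,C1 text
print(varmap)  # -> {'X': 'TMP1.C0', 'N': 'TMP1.C1'} (key order may vary)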
@@ -261,10 +278,11 @@
                                              ATTR_MAP.copy())
         # full text index helper
         self.do_fti = not repo.config['delay-full-text-indexation']
         # sql queries cache
         self._cache = QueryCache(repo.config['rql-cache-size'])
+        self._temp_table_data = {}
         # we need a lock to protect eid attribution function (XXX, really?
         # explain)
         self._eid_cnx_lock = Lock()
         self._eid_creation_cnx = None
         # (etype, attr) / storage mapping
@@ -495,10 +513,56 @@
             else:
                 raise
         results = self.process_result(cursor, cbs, session=session)
         assert dbg_results(results)
         return results
+
+    def flying_insert(self, table, session, union, args=None, varmap=None):
+        """similar to .syntax_tree_search, but inserts data into the
+        temporary table (on the fly if possible, e.g. for the system
+        source from which the given cursor comes). If that is not possible,
+        inserts all data by calling .executemany().
+        """
+        assert dbg_st_search(
+            self.uri, union, varmap, args,
+            prefix='ON THE FLY temp data insertion into %s from' % table)
+        # generate sql queries if we are able to do so
+        sql, qargs, cbs = self._rql_sqlgen.generate(union, args, varmap)
+        query = 'INSERT INTO %s %s' % (table, sql.encode(self._dbencoding))
+        self.doexec(session, query, self.merge_args(args, qargs))
+
+    def manual_insert(self, results, table, session):
+        """insert the given results into a temporary table on the system source"""
+        if server.DEBUG & server.DBG_RQL:
+            print '  manual insertion of', len(results), 'results into', table
+        if not results:
+            return
+        query_args = ['%%(%s)s' % i for i in xrange(len(results[0]))]
+        query = 'INSERT INTO %s VALUES(%s)' % (table, ','.join(query_args))
+        kwargs_list = []
+        for row in results:
+            kwargs = {}
+            row = tuple(row)
+            for index, cell in enumerate(row):
+                if isinstance(cell, Binary):
+                    cell = self._binary(cell.getvalue())
+                kwargs[str(index)] = cell
+            kwargs_list.append(kwargs)
+        self.doexecmany(session, query, kwargs_list)
+
+    def clean_temp_data(self, session, temptables):
+        """remove temporary data, usually associated with temporary tables"""
+        if temptables:
+            for table in temptables:
+                try:
+                    self.doexec(session, 'DROP TABLE %s' % table)
+                except Exception:
+                    pass
+                try:
+                    del self._temp_table_data[table]
+                except KeyError:
+                    continue
 
     @contextmanager
     def _storage_handler(self, entity, event):
         # 1/ memorize values as they are before the storage is called.
         #    For instance, the BFSStorage will replace the `data`
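Two notes on the insertion helpers above, for illustration only. flying_insert relies on the RQL-to-SQL generator producing a SELECT statement, so the query it executes takes the 'INSERT INTO <table> SELECT ...' shape. manual_insert, the fallback, builds a pyformat INSERT whose parameter names are the stringified column positions. The sketch below mirrors that construction outside the class; the table name 'TMP1' and the rows are made up, and Binary handling is omitted.

# mirrors the query/parameter construction in manual_insert above
results = [(1, u'foo'), (2, u'bar')]

query_args = ['%%(%s)s' % i for i in range(len(results[0]))]
query = 'INSERT INTO %s VALUES(%s)' % ('TMP1', ','.join(query_args))
# query == 'INSERT INTO TMP1 VALUES(%(0)s,%(1)s)'

kwargs_list = [dict((str(i), cell) for i, cell in enumerate(row))
               for row in results]
# kwargs_list == [{'0': 1, '1': u'foo'}, {'0': 2, '1': u'bar'}]
# these are the values ultimately passed to doexecmany(session, query, kwargs_list)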
@@ -799,15 +863,29 @@
                 return result[0]
         except Exception:
             pass
         return None
 
+    def make_temp_table_name(self, table):
+        return self.dbhelper.temporary_table_name(table)
+
+    def temp_table_def(self, selected, sol, table):
+        return make_schema(selected, sol, table, self.dbhelper.TYPE_MAPPING)
+
+    def create_temp_table(self, session, table, schema):
+        # we don't want ON COMMIT DROP: it may cause problems when running
+        # with an ldap source, and the table will be dropped manually on
+        # commit anyway
+        sql = self.dbhelper.sql_temporary_table(table, schema, False)
+        self.doexec(session, sql)
+
     def _create_eid_sqlite(self, session):
         with self._eid_cnx_lock:
             for sql in self.dbhelper.sqls_increment_sequence('entities_id_seq'):
                 cursor = self.doexec(session, sql)
             return cursor.fetchone()[0]
+
 
     def create_eid(self, session): # pylint: disable=E0202
         # lock needed to prevent 'Connection is busy with results for another
         # command (0)' errors with SQLServer
         with self._eid_cnx_lock:
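Taken together, the methods added in this changeset cover the lifecycle of a temporary table holding intermediate RQL results: naming it, deriving its schema, creating it, filling it (on the fly or row by row) and dropping it. The sketch below is an assumed usage sequence, not code from the repository; source, session, union, selected and solution stand for objects provided by the surrounding server code, and 'T00001' is a made-up base table name.

def materialize_temp_results(source, session, union, selected, solution,
                             prefetched_rows=None):
    # derive a backend-specific temporary table name (hypothetical base name)
    table = source.make_temp_table_name('T00001')
    # column definitions plus mapping from RQL terms to table columns
    schema, varmap = source.temp_table_def(selected, solution, table)
    source.create_temp_table(session, table, schema)
    if prefetched_rows is None:
        # fill the table directly with an INSERT ... SELECT on the system source
        source.flying_insert(table, session, union, varmap=varmap)
    else:
        # rows computed elsewhere are inserted with one dict of parameters per row
        source.manual_insert(prefetched_rows, table, session)
    return table, varmap

# once the table is no longer needed:
#     source.clean_temp_data(session, [table])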