262 self.open_pool_connections() |
262 self.open_pool_connections() |
263 |
263 |
def init(self):
    """Initialize this source: delegate to the entity-creation setup.

    The visible revision simply forwards to `init_creating`; no other
    work is done here.
    """
    self.init_creating()
266 |
266 |
def map_attribute(self, etype, attr, cb, sourcedb=True):
    """Register callback `cb` for values of attribute `attr` of entity
    type `etype`.

    The callback is stored in the SQL generator's `attr_map` keyed on
    ``'Etype.attr'`` together with the `sourcedb` flag (defaults to
    True for backward compatibility with callers that don't pass it).
    """
    # XXX deprecates [un]map_attribute ?
    key = '%s.%s' % (etype, attr)
    self._rql_sqlgen.attr_map[key] = (cb, sourcedb)
269 |
270 |
def unmap_attribute(self, etype, attr):
    """Remove any callback registered for ``etype.attr``.

    Uses ``dict.pop`` with a default so unmapping an attribute that was
    never mapped is a silent no-op.
    """
    key = '%s.%s' % (etype, attr)
    self._rql_sqlgen.attr_map.pop(key, None)
272 |
273 |
def set_storage(self, etype, attr, storage):
    """Plug `storage` in as the handler for attribute `attr` of entity
    type `etype`.

    Records the storage in the per-etype `_storages` mapping, then maps
    the attribute to the storage's callback so generated SQL goes
    through it (the storage decides via `is_source_callback` whether
    the callback runs against the source database).
    """
    self._storages.setdefault(etype, {})[attr] = storage
    self.map_attribute(etype, attr,
                       storage.callback, storage.is_source_callback)
277 |
279 |
278 def unset_storage(self, etype, attr): |
280 def unset_storage(self, etype, attr): |
279 self._storages[etype].pop(attr) |
281 self._storages[etype].pop(attr) |
280 # if etype has no storage left, remove the entry |
282 # if etype has no storage left, remove the entry |
281 if not self._storages[etype]: |
283 if not self._storages[etype]: |
346 assert dbg_st_search(self.uri, union, varmap, args, cachekey) |
348 assert dbg_st_search(self.uri, union, varmap, args, cachekey) |
347 # remember number of actually selected term (sql generation may append some) |
349 # remember number of actually selected term (sql generation may append some) |
348 if cachekey is None: |
350 if cachekey is None: |
349 self.no_cache += 1 |
351 self.no_cache += 1 |
350 # generate sql query if we are able to do so (not supported types...) |
352 # generate sql query if we are able to do so (not supported types...) |
351 sql, query_args = self._rql_sqlgen.generate(union, args, varmap) |
353 sql, qargs, cbs = self._rql_sqlgen.generate(union, args, varmap) |
352 else: |
354 else: |
353 # sql may be cached |
355 # sql may be cached |
354 try: |
356 try: |
355 sql, query_args = self._cache[cachekey] |
357 sql, qargs, cbs = self._cache[cachekey] |
356 self.cache_hit += 1 |
358 self.cache_hit += 1 |
357 except KeyError: |
359 except KeyError: |
358 self.cache_miss += 1 |
360 self.cache_miss += 1 |
359 sql, query_args = self._rql_sqlgen.generate(union, args, varmap) |
361 sql, qargs, cbs = self._rql_sqlgen.generate(union, args, varmap) |
360 self._cache[cachekey] = sql, query_args |
362 self._cache[cachekey] = sql, qargs, cbs |
361 args = self.merge_args(args, query_args) |
363 args = self.merge_args(args, qargs) |
362 assert isinstance(sql, basestring), repr(sql) |
364 assert isinstance(sql, basestring), repr(sql) |
363 try: |
365 try: |
364 cursor = self.doexec(session, sql, args) |
366 cursor = self.doexec(session, sql, args) |
365 except (self.OperationalError, self.InterfaceError): |
367 except (self.OperationalError, self.InterfaceError): |
366 # FIXME: better detection of deconnection pb |
368 # FIXME: better detection of deconnection pb |
367 self.info("request failed '%s' ... retry with a new cursor", sql) |
369 self.info("request failed '%s' ... retry with a new cursor", sql) |
368 session.pool.reconnect(self) |
370 session.pool.reconnect(self) |
369 cursor = self.doexec(session, sql, args) |
371 cursor = self.doexec(session, sql, args) |
370 results = self.process_result(cursor) |
372 results = self.process_result(cursor, cbs) |
371 assert dbg_results(results) |
373 assert dbg_results(results) |
372 return results |
374 return results |
373 |
375 |
def flying_insert(self, table, session, union, args=None, varmap=None):
    """similar as .syntax_tree_search, but instead of returning rows,
    inserts them into `table` with a single statement of the form
    ``INSERT INTO <table> <select sql>`` executed on this source.
    """
    assert dbg_st_search(
        self.uri, union, varmap, args,
        prefix='ON THE FLY temp data insertion into %s from' % table)
    # generate sql queries if we are able to do so
    select_sql, qargs, cbs = self._rql_sqlgen.generate(union, args, varmap)
    # NOTE(review): `cbs` is unused here — presumably row callbacks don't
    # apply to a bulk server-side insert; confirm against .syntax_tree_search
    query = 'INSERT INTO %s %s' % (table, select_sql.encode(self._dbencoding))
    self.doexec(session, query, self.merge_args(args, qargs))
387 |
389 |
388 def manual_insert(self, results, table, session): |
390 def manual_insert(self, results, table, session): |
389 """insert given result into a temporary table on the system source""" |
391 """insert given result into a temporary table on the system source""" |
390 if server.DEBUG & server.DBG_RQL: |
392 if server.DEBUG & server.DBG_RQL: |
391 print ' manual insertion of', results, 'into', table |
393 print ' manual insertion of', results, 'into', table |