merge
authorSylvain Thénault <sylvain.thenault@logilab.fr>
Tue, 09 Mar 2010 11:01:44 +0100
changeset 4844 ad78b118b124
parent 4843 5f7363416765 (diff)
parent 4830 10e8bc190695 (current diff)
child 4845 dc351b96f596
--- a/__pkginfo__.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/__pkginfo__.py	Tue Mar 09 11:01:44 2010 +0100
@@ -30,7 +30,7 @@
 
 web = 'http://www.cubicweb.org'
 ftp = 'ftp://ftp.logilab.org/pub/cubicweb'
-pyversions = ['2.4', '2.5']
+pyversions = ['2.5', '2.6']
 
 classifiers = [
            'Environment :: Web Environment',
--- a/cwconfig.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/cwconfig.py	Tue Mar 09 11:01:44 2010 +0100
@@ -998,7 +998,7 @@
 
 _EXT_REGISTERED = False
 def register_stored_procedures():
-    from logilab.common.adbh import FunctionDescr
+    from logilab.db import FunctionDescr
     from rql.utils import register_function, iter_funcnode_variables
 
     global _EXT_REGISTERED
@@ -1010,8 +1010,7 @@
         supported_backends = ('postgres', 'sqlite',)
         rtype = 'String'
 
-        @classmethod
-        def st_description(cls, funcnode, mainindex, tr):
+        def st_description(self, funcnode, mainindex, tr):
             return ', '.join(sorted(term.get_description(mainindex, tr)
                                     for term in iter_funcnode_variables(funcnode)))
 
@@ -1023,6 +1022,7 @@
 
     register_function(CONCAT_STRINGS) # XXX bw compat
 
+
     class GROUP_CONCAT(CONCAT_STRINGS):
         supported_backends = ('mysql', 'postgres', 'sqlite',)
 
@@ -1033,8 +1033,7 @@
         supported_backends = ('postgres', 'sqlite',)
         rtype = 'String'
 
-        @classmethod
-        def st_description(cls, funcnode, mainindex, tr):
+        def st_description(self, funcnode, mainindex, tr):
             return funcnode.children[0].get_description(mainindex, tr)
 
     register_function(LIMIT_SIZE)
@@ -1046,7 +1045,6 @@
     register_function(TEXT_LIMIT_SIZE)
 
 
-
     class FSPATH(FunctionDescr):
         supported_backends = ('postgres', 'sqlite',)
         rtype = 'Bytes'
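
Note: with logilab.db, stored procedures are declared as plain FunctionDescr subclasses and st_description becomes an instance method, as the hunk above shows. A minimal registration sketch, assuming logilab.db and rql are installed (CONCAT_ALL is a hypothetical function name used for illustration):

    from logilab.db import FunctionDescr
    from rql.utils import register_function, iter_funcnode_variables

    class CONCAT_ALL(FunctionDescr):
        # hypothetical example mirroring CONCAT_STRINGS above
        supported_backends = ('postgres', 'sqlite',)
        rtype = 'String'

        def st_description(self, funcnode, mainindex, tr):
            # instance method (no longer a classmethod as with logilab.common.adbh)
            return ', '.join(sorted(term.get_description(mainindex, tr)
                                    for term in iter_funcnode_variables(funcnode)))

    register_function(CONCAT_ALL)
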
--- a/debian/control	Mon Mar 08 19:11:47 2010 +0100
+++ b/debian/control	Tue Mar 09 11:01:44 2010 +0100
@@ -7,10 +7,10 @@
            Adrien Di Mascio <Adrien.DiMascio@logilab.fr>,
            Aurélien Campéas <aurelien.campeas@logilab.fr>,
            Nicolas Chauvat <nicolas.chauvat@logilab.fr>
-Build-Depends: debhelper (>= 5), python-dev (>=2.4), python-central (>= 0.5)
+Build-Depends: debhelper (>= 5), python-dev (>=2.5), python-central (>= 0.5)
 Standards-Version: 3.8.0
 Homepage: http://www.cubicweb.org
-XS-Python-Version: >= 2.4, << 2.6
+XS-Python-Version: >= 2.5, << 2.6
 
 Package: cubicweb
 Architecture: all
@@ -33,7 +33,7 @@
 Conflicts: cubicweb-multisources
 Replaces: cubicweb-multisources
 Provides: cubicweb-multisources
-Depends: ${python:Depends}, cubicweb-common (= ${source:Version}), cubicweb-ctl (= ${source:Version}), python-indexer (>= 0.6.1), python-psycopg2 | python-mysqldb | python-pysqlite2
+Depends: ${python:Depends}, cubicweb-common (= ${source:Version}), cubicweb-ctl (= ${source:Version}), python-logilab-database, python-psycopg2 | python-mysqldb | python-pysqlite2
 Recommends: pyro, cubicweb-documentation (= ${source:Version})
 Description: server part of the CubicWeb framework
  CubicWeb is a semantic web application framework.
--- a/devtools/fake.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/devtools/fake.py	Tue Mar 09 11:01:44 2010 +0100
@@ -7,9 +7,7 @@
 """
 __docformat__ = "restructuredtext en"
 
-from logilab.common.adbh import get_adv_func_helper
-
-from indexer import get_indexer
+from logilab.db import get_db_helper
 
 from cubicweb.req import RequestSessionBase
 from cubicweb.cwvreg import CubicWebVRegistry
@@ -118,17 +116,6 @@
     def validate_cache(self):
         pass
 
-    # session compatibility (in some test are using this class to test server
-    # side views...)
-    def actual_session(self):
-        """return the original parent session if any, else self"""
-        return self
-
-    def unsafe_execute(self, *args, **kwargs):
-        """return the original parent session if any, else self"""
-        kwargs.pop('propagate', None)
-        return self.execute(*args, **kwargs)
-
 
 class FakeUser(object):
     login = 'toto'
@@ -138,18 +125,19 @@
 
 
 class FakeSession(RequestSessionBase):
+    read_security = write_security = True
+    set_read_security = set_write_security = lambda *args, **kwargs: None
+
     def __init__(self, repo=None, user=None):
         self.repo = repo
         self.vreg = getattr(self.repo, 'vreg', CubicWebVRegistry(FakeConfig(), initlog=False))
         self.pool = FakePool()
         self.user = user or FakeUser()
         self.is_internal_session = False
-        self.is_super_session = self.user.eid == -1
         self.transaction_data = {}
 
-    def execute(self, *args):
+    def execute(self, *args, **kwargs):
         pass
-    unsafe_execute = execute
 
     def commit(self, *args):
         self.transaction_data.clear()
@@ -200,12 +188,7 @@
 
 
 class FakeSource(object):
-    dbhelper = get_adv_func_helper('sqlite')
-    indexer = get_indexer('sqlite', 'UTF8')
-    dbhelper.fti_uid_attr = indexer.uid_attr
-    dbhelper.fti_table = indexer.table
-    dbhelper.fti_restriction_sql = indexer.restriction_sql
-    dbhelper.fti_need_distinct_query = indexer.need_distinct
+    dbhelper = get_db_helper('sqlite')
     def __init__(self, uri):
         self.uri = uri
 
--- a/devtools/repotest.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/devtools/repotest.py	Tue Mar 09 11:01:44 2010 +0100
@@ -178,9 +178,10 @@
         self._dumb_sessions = []
 
     def get_max_eid(self):
-        return self.session.unsafe_execute('Any MAX(X)')[0][0]
+        return self.session.execute('Any MAX(X)')[0][0]
     def cleanup(self):
-        self.session.unsafe_execute('DELETE Any X WHERE X eid > %s' % self.maxeid)
+        self.session.set_pool()
+        self.session.execute('DELETE Any X WHERE X eid > %s' % self.maxeid)
 
     def tearDown(self):
         undo_monkey_patch()
--- a/devtools/testlib.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/devtools/testlib.py	Tue Mar 09 11:01:44 2010 +0100
@@ -228,7 +228,9 @@
     @property
     def session(self):
         """return current server side session (using default manager account)"""
-        return self.repo._sessions[self.cnx.sessionid]
+        session = self.repo._sessions[self.cnx.sessionid]
+        session.set_pool()
+        return session
 
     @property
     def adminsession(self):
--- a/doc/tools/generate_modules.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/doc/tools/generate_modules.py	Tue Mar 09 11:01:44 2010 +0100
@@ -16,7 +16,7 @@
     cw_gen = ModuleGenerator('cubicweb', '../..')
     cw_gen.generate("../book/en/annexes/api_cubicweb.rst",
                     EXCLUDE_DIRS + ('cwdesklets', 'misc', 'skel', 'skeleton'))
-    for modname in ('indexer', 'logilab', 'rql', 'yams'):
+    for modname in ('logilab', 'rql', 'yams'):
         cw_gen = ModuleGenerator(modname, '../../../' + modname)
         cw_gen.generate("../book/en/annexes/api_%s.rst" % modname,
                         EXCLUDE_DIRS + ('tools',))
--- a/entities/authobjs.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/entities/authobjs.py	Tue Mar 09 11:01:44 2010 +0100
@@ -93,15 +93,10 @@
         return self.groups == frozenset(('guests', ))
 
     def owns(self, eid):
-        if hasattr(self._cw, 'unsafe_execute'):
-            # use unsafe_execute on the repository side, in case
-            # session's user doesn't have access to CWUser
-            execute = self._cw.unsafe_execute
-        else:
-            execute = self._cw.execute
         try:
-            return execute('Any X WHERE X eid %(x)s, X owned_by U, U eid %(u)s',
-                           {'x': eid, 'u': self.eid}, 'x')
+            return self._cw.execute(
+                'Any X WHERE X eid %(x)s, X owned_by U, U eid %(u)s',
+                {'x': eid, 'u': self.eid}, 'x')
         except Unauthorized:
             return False
     owns = cached(owns, keyarg=1)
--- a/entities/test/unittest_wfobjs.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/entities/test/unittest_wfobjs.py	Tue Mar 09 11:01:44 2010 +0100
@@ -1,5 +1,7 @@
+from __future__ import with_statement
 from cubicweb.devtools.testlib import CubicWebTC
 from cubicweb import ValidationError
+from cubicweb.server.session import security_enabled
 
 def add_wf(self, etype, name=None, default=False):
     if name is None:
@@ -126,10 +128,11 @@
         wf = add_wf(self, 'CWUser')
         s = wf.add_state(u'foo', initial=True)
         self.commit()
-        ex = self.assertRaises(ValidationError, self.session.unsafe_execute,
+        with security_enabled(self.session, write=False):
+            ex = self.assertRaises(ValidationError, self.session.execute,
                                'SET X in_state S WHERE X eid %(x)s, S eid %(s)s',
                                {'x': self.user().eid, 's': s.eid}, 'x')
-        self.assertEquals(ex.errors, {'in_state': "state doesn't belong to entity's workflow. "
+            self.assertEquals(ex.errors, {'in_state': "state doesn't belong to entity's workflow. "
                                       "You may want to set a custom workflow for this entity first."})
 
     def test_fire_transition(self):
@@ -505,7 +508,7 @@
                      {'wf': self.wf.eid})
         self.commit()
 
-    # XXX currently, we've to rely on hooks to set initial state, or to use unsafe_execute
+    # XXX currently, we've to rely on hooks to set initial state, or to use execute
     # def test_initial_state(self):
     #     cnx = self.login('stduser')
     #     cu = cnx.cursor()
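
Note: tests that used unsafe_execute to bypass write security now wrap the call in the security_enabled context manager instead, as above. A usage sketch, assuming a server-side session object is available (user_eid and state_eid are illustrative placeholders):

    from __future__ import with_statement  # required on Python 2.5
    from cubicweb.server.session import security_enabled

    with security_enabled(session, write=False):
        # write security is disabled inside the block only and restored on
        # exit, even if the statement raises
        session.execute('SET X in_state S WHERE X eid %(x)s, S eid %(s)s',
                        {'x': user_eid, 's': state_eid}, 'x')
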
--- a/entities/wfobjs.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/entities/wfobjs.py	Tue Mar 09 11:01:44 2010 +0100
@@ -158,7 +158,7 @@
             todelstate = self.state_by_name(todelstate)
         if not hasattr(replacement, 'eid'):
             replacement = self.state_by_name(replacement)
-        execute = self._cw.unsafe_execute
+        execute = self._cw.execute
         execute('SET X in_state S WHERE S eid %(s)s', {'s': todelstate.eid}, 's')
         execute('SET X from_state NS WHERE X to_state OS, OS eid %(os)s, NS eid %(ns)s',
                 {'os': todelstate.eid, 'ns': replacement.eid}, 's')
--- a/entity.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/entity.py	Tue Mar 09 11:01:44 2010 +0100
@@ -20,6 +20,7 @@
 from cubicweb.rset import ResultSet
 from cubicweb.selectors import yes
 from cubicweb.appobject import AppObject
+from cubicweb.req import _check_cw_unsafe
 from cubicweb.schema import RQLVocabularyConstraint, RQLConstraint
 from cubicweb.rqlrewrite import RQLRewriter
 
@@ -531,8 +532,8 @@
             # if some outer join are included to fetch inlined relations
             rql = 'Any %s,%s %s' % (V, ','.join(var for attr, var in selected),
                                     ','.join(rql))
-            execute = getattr(self._cw, 'unsafe_execute', self._cw.execute)
-            rset = execute(rql, {'x': self.eid}, 'x', build_descr=False)[0]
+            rset = self._cw.execute(rql, {'x': self.eid}, 'x',
+                                    build_descr=False)[0]
             # handle attributes
             for i in xrange(1, lastattr):
                 self[str(selected[i-1][0])] = rset[i]
@@ -560,11 +561,8 @@
             if not self.is_saved():
                 return None
             rql = "Any A WHERE X eid %%(x)s, X %s A" % name
-            # XXX should we really use unsafe_execute here? I think so (syt),
-            # see #344874
-            execute = getattr(self._cw, 'unsafe_execute', self._cw.execute)
             try:
-                rset = execute(rql, {'x': self.eid}, 'x')
+                rset = self._cw.execute(rql, {'x': self.eid}, 'x')
             except Unauthorized:
                 self[name] = value = None
             else:
@@ -595,10 +593,7 @@
             pass
         assert self.has_eid()
         rql = self.related_rql(rtype, role)
-        # XXX should we really use unsafe_execute here? I think so (syt),
-        # see #344874
-        execute = getattr(self._cw, 'unsafe_execute', self._cw.execute)
-        rset = execute(rql, {'x': self.eid}, 'x')
+        rset = self._cw.execute(rql, {'x': self.eid}, 'x')
         self.set_related_cache(rtype, role, rset)
         return self.related(rtype, role, limit, entities)
 
@@ -800,8 +795,9 @@
 
     # raw edition utilities ###################################################
 
-    def set_attributes(self, _cw_unsafe=False, **kwargs):
+    def set_attributes(self, **kwargs):
         assert kwargs
+        _check_cw_unsafe(kwargs)
         relations = []
         for key in kwargs:
             relations.append('X %s %%(%s)s' % (key, key))
@@ -809,25 +805,18 @@
         self.update(kwargs)
         # and now update the database
         kwargs['x'] = self.eid
-        if _cw_unsafe:
-            self._cw.unsafe_execute(
-                'SET %s WHERE X eid %%(x)s' % ','.join(relations), kwargs, 'x')
-        else:
-            self._cw.execute('SET %s WHERE X eid %%(x)s' % ','.join(relations),
-                             kwargs, 'x')
+        self._cw.execute('SET %s WHERE X eid %%(x)s' % ','.join(relations),
+                         kwargs, 'x')
 
-    def set_relations(self, _cw_unsafe=False, **kwargs):
+    def set_relations(self, **kwargs):
         """add relations to the given object. To set a relation where this entity
         is the object of the relation, use 'reverse_'<relation> as argument name.
 
         Values may be an entity, a list of entity, or None (meaning that all
         relations of the given type from or to this object should be deleted).
         """
-        if _cw_unsafe:
-            execute = self._cw.unsafe_execute
-        else:
-            execute = self._cw.execute
         # XXX update cache
+        _check_cw_unsafe(kwargs)
         for attr, values in kwargs.iteritems():
             if attr.startswith('reverse_'):
                 restr = 'Y %s X' % attr[len('reverse_'):]
@@ -839,14 +828,14 @@
                 continue
             if not isinstance(values, (tuple, list, set, frozenset)):
                 values = (values,)
-            execute('SET %s WHERE X eid %%(x)s, Y eid IN (%s)' % (
+            self._cw.execute('SET %s WHERE X eid %%(x)s, Y eid IN (%s)' % (
                 restr, ','.join(str(r.eid) for r in values)),
-                    {'x': self.eid}, 'x')
+                             {'x': self.eid}, 'x')
 
-    def delete(self):
+    def delete(self, **kwargs):
         assert self.has_eid(), self.eid
         self._cw.execute('DELETE %s X WHERE X eid %%(x)s' % self.e_schema,
-                         {'x': self.eid})
+                         {'x': self.eid}, **kwargs)
 
     # server side utilities ###################################################
 
@@ -894,12 +883,12 @@
         """used by the full text indexer to get words to index
 
         this method should only be used on the repository side since it depends
-        on the indexer package
+        on the logilab.db package
 
         :rtype: list
         :return: the list of indexable word of this entity
         """
-        from indexer.query_objects import tokenize
+        from logilab.db.fti import tokenize
         # take care to cases where we're modyfying the schema
         pending = self._cw.transaction_data.setdefault('pendingrdefs', set())
         words = []
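
Note: set_attributes, set_relations and delete lose their _cw_unsafe handling; passing the old flag still works but only triggers a deprecation warning (see _check_cw_unsafe in req.py below). A usage sketch against an arbitrary persistent entity (attribute and relation names are illustrative):

    # before 3.7:  entity.set_attributes(name=u'babar', _cw_unsafe=True)
    entity.set_attributes(name=u'babar')
    # use the 'reverse_' prefix when the entity is the object of the relation
    entity.set_relations(reverse_owned_by=user)
    entity.delete()
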
--- a/goa/appobjects/dbmgmt.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/goa/appobjects/dbmgmt.py	Tue Mar 09 11:01:44 2010 +0100
@@ -172,7 +172,7 @@
     skip_etypes = ('CWGroup', 'CWUser')
 
     def call(self):
-        # XXX should use unsafe_execute with all hooks deactivated
+        # XXX should use unsafe execute with all hooks deactivated
         # XXX step by catching datastore errors?
         for eschema in self.schema.entities():
             if eschema.final or eschema in self.skip_etypes:
--- a/goa/dbinit.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/goa/dbinit.py	Tue Mar 09 11:01:44 2010 +0100
@@ -84,7 +84,7 @@
             Put(gaeentity)
 
 def init_persistent_schema(ssession, schema):
-    execute = ssession.unsafe_execute
+    execute = ssession.execute
     rql = ('INSERT CWEType X: X name %(name)s, X description %(descr)s,'
            'X final FALSE')
     eschema = schema.eschema('CWEType')
@@ -96,7 +96,7 @@
                       'descr': unicode(eschema.description)})
 
 def insert_versions(ssession, config):
-    execute = ssession.unsafe_execute
+    execute = ssession.execute
     # insert versions
     execute('INSERT CWProperty X: X pkey %(pk)s, X value%(v)s',
             {'pk': u'system.version.cubicweb',
--- a/hooks/email.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/hooks/email.py	Tue Mar 09 11:01:44 2010 +0100
@@ -26,7 +26,7 @@
 
     def precommit_event(self):
         if self.condition():
-            self.session.unsafe_execute(
+            self.session.execute(
                 'SET X %s Y WHERE X eid %%(x)s, Y eid %%(y)s' % self.rtype,
                 {'x': self.entity.eid, 'y': self.email.eid}, 'x')
 
--- a/hooks/integrity.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/hooks/integrity.py	Tue Mar 09 11:01:44 2010 +0100
@@ -35,13 +35,12 @@
     RQLUniqueConstraint in two different transactions, as explained in
     http://intranet.logilab.fr/jpl/ticket/36564
     """
-    asession = session.actual_session()
-    if 'uniquecstrholder' in asession.transaction_data:
+    if 'uniquecstrholder' in session.transaction_data:
         return
     _UNIQUE_CONSTRAINTS_LOCK.acquire()
-    asession.transaction_data['uniquecstrholder'] = True
+    session.transaction_data['uniquecstrholder'] = True
     # register operation responsible to release the lock on commit/rollback
-    _ReleaseUniqueConstraintsOperation(asession)
+    _ReleaseUniqueConstraintsOperation(session)
 
 def _release_unique_cstr_lock(session):
     if 'uniquecstrholder' in session.transaction_data:
@@ -69,7 +68,7 @@
             return
         if self.rtype in self.session.transaction_data.get('pendingrtypes', ()):
             return
-        if self.session.unsafe_execute(*self._rql()).rowcount < 1:
+        if self.session.execute(*self._rql()).rowcount < 1:
             etype = self.session.describe(self.eid)[0]
             _ = self.session._
             msg = _('at least one relation %(rtype)s is required on %(etype)s (%(eid)s)')
@@ -99,12 +98,8 @@
     __abstract__ = True
     category = 'integrity'
 
-class UserIntegrityHook(IntegrityHook):
-    __abstract__ = True
-    __select__ = IntegrityHook.__select__ & hook.regular_session()
 
-
-class CheckCardinalityHook(UserIntegrityHook):
+class CheckCardinalityHook(IntegrityHook):
     """check cardinalities are satisfied"""
     __regid__ = 'checkcard'
     events = ('after_add_entity', 'before_delete_relation')
@@ -176,7 +171,7 @@
         pass
 
 
-class CheckConstraintHook(UserIntegrityHook):
+class CheckConstraintHook(IntegrityHook):
     """check the relation satisfy its constraints
 
     this is delayed to a precommit time operation since other relation which
@@ -194,7 +189,7 @@
                                rdef=(self.eidfrom, self.rtype, self.eidto))
 
 
-class CheckAttributeConstraintHook(UserIntegrityHook):
+class CheckAttributeConstraintHook(IntegrityHook):
     """check the attribute relation satisfy its constraints
 
     this is delayed to a precommit time operation since other relation which
@@ -214,7 +209,7 @@
                                         rdef=(self.entity.eid, attr, None))
 
 
-class CheckUniqueHook(UserIntegrityHook):
+class CheckUniqueHook(IntegrityHook):
     __regid__ = 'checkunique'
     events = ('before_add_entity', 'before_update_entity')
 
@@ -227,7 +222,7 @@
                 if val is None:
                     continue
                 rql = '%s X WHERE X %s %%(val)s' % (entity.e_schema, attr)
-                rset = self._cw.unsafe_execute(rql, {'val': val})
+                rset = self._cw.execute(rql, {'val': val})
                 if rset and rset[0][0] != entity.eid:
                     msg = self._cw._('the value "%s" is already used, use another one')
                     raise ValidationError(entity.eid, {attr: msg % val})
@@ -244,9 +239,9 @@
         if not (session.deleted_in_transaction(self.eid) or
                 session.added_in_transaction(self.eid)):
             etype = session.describe(self.eid)[0]
-            session.unsafe_execute('DELETE %s X WHERE X eid %%(x)s, NOT %s'
-                                   % (etype, self.relation),
-                                   {'x': self.eid}, 'x')
+            session.execute('DELETE %s X WHERE X eid %%(x)s, NOT %s'
+                            % (etype, self.relation),
+                            {'x': self.eid}, 'x')
 
 
 class DeleteCompositeOrphanHook(IntegrityHook):
@@ -290,7 +285,7 @@
             self.entity['name'] = newname
 
 
-class TidyHtmlFields(UserIntegrityHook):
+class TidyHtmlFields(IntegrityHook):
     """tidy HTML in rich text strings"""
     __regid__ = 'htmltidy'
     events = ('before_add_entity', 'before_update_entity')
--- a/hooks/metadata.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/hooks/metadata.py	Tue Mar 09 11:01:44 2010 +0100
@@ -19,7 +19,7 @@
     # eschema.eid is None if schema has been readen from the filesystem, not
     # from the database (eg during tests)
     if eschema.eid is None:
-        eschema.eid = session.unsafe_execute(
+        eschema.eid = session.execute(
             'Any X WHERE X is CWEType, X name %(name)s',
             {'name': str(eschema)})[0][0]
     return eschema.eid
@@ -103,18 +103,17 @@
     events = ('after_add_entity',)
 
     def __call__(self):
-        asession = self._cw.actual_session()
-        if not asession.is_internal_session:
-            self._cw.add_relation(self.entity.eid, 'owned_by', asession.user.eid)
-            _SetCreatorOp(asession, entity=self.entity)
+        if not self._cw.is_internal_session:
+            self._cw.add_relation(self.entity.eid, 'owned_by', self._cw.user.eid)
+            _SetCreatorOp(self._cw, entity=self.entity)
 
 
 class _SyncOwnersOp(hook.Operation):
     def precommit_event(self):
-        self.session.unsafe_execute('SET X owned_by U WHERE C owned_by U, C eid %(c)s,'
-                                    'NOT EXISTS(X owned_by U, X eid %(x)s)',
-                                    {'c': self.compositeeid, 'x': self.composedeid},
-                                    ('c', 'x'))
+        self.session.execute('SET X owned_by U WHERE C owned_by U, C eid %(c)s,'
+                             'NOT EXISTS(X owned_by U, X eid %(x)s)',
+                             {'c': self.compositeeid, 'x': self.composedeid},
+                             ('c', 'x'))
 
 
 class SyncCompositeOwner(MetaDataHook):
--- a/hooks/notification.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/hooks/notification.py	Tue Mar 09 11:01:44 2010 +0100
@@ -103,20 +103,19 @@
 class EntityUpdateHook(NotificationHook):
     __regid__ = 'notifentityupdated'
     __abstract__ = True # do not register by default
-
+    __select__ = NotificationHook.__select__ & hook.from_dbapi_query()
     events = ('before_update_entity',)
     skip_attrs = set()
 
     def __call__(self):
         session = self._cw
-        if self.entity.eid in session.transaction_data.get('neweids', ()):
+        if session.added_in_transaction(self.entity.eid):
             return # entity is being created
-        if session.is_super_session:
-            return # ignore changes triggered by hooks
         # then compute changes
         changes = session.transaction_data.setdefault('changes', {})
         thisentitychanges = changes.setdefault(self.entity.eid, set())
-        attrs = [k for k in self.entity.edited_attributes if not k in self.skip_attrs]
+        attrs = [k for k in self.entity.edited_attributes
+                 if not k in self.skip_attrs]
         if not attrs:
             return
         rqlsel, rqlrestr = [], ['X eid %(x)s']
@@ -125,7 +124,7 @@
             rqlsel.append(var)
             rqlrestr.append('X %s %s' % (attr, var))
         rql = 'Any %s WHERE %s' % (','.join(rqlsel), ','.join(rqlrestr))
-        rset = session.unsafe_execute(rql, {'x': self.entity.eid}, 'x')
+        rset = session.execute(rql, {'x': self.entity.eid}, 'x')
         for i, attr in enumerate(attrs):
             oldvalue = rset[0][i]
             newvalue = self.entity[attr]
@@ -139,13 +138,11 @@
 
 class SomethingChangedHook(NotificationHook):
     __regid__ = 'supervising'
+    __select__ = NotificationHook.__select__ & hook.from_dbapi_query()
     events = ('before_add_relation', 'before_delete_relation',
               'after_add_entity', 'before_update_entity')
 
     def __call__(self):
-        # XXX use proper selectors
-        if self._cw.is_super_session or self._cw.repo.config.repairing:
-            return # ignore changes triggered by hooks or maintainance shell
         dest = self._cw.vreg.config['supervising-addrs']
         if not dest: # no supervisors, don't do this for nothing...
             return
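
Note: instead of testing is_super_session, notification hooks are now selected with from_dbapi_query(), so they only fire for queries issued through the dbapi, not for changes made by other hooks. A sketch of a hook using that selector (class name, __regid__ and event are illustrative):

    from cubicweb.server import hook

    class SomethingHappenedHook(hook.Hook):
        __regid__ = 'something-happened'
        __select__ = hook.Hook.__select__ & hook.from_dbapi_query()
        events = ('after_add_entity',)
        category = 'notification'

        def __call__(self):
            # only reached when session.running_dbapi_query is true
            pass
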
--- a/hooks/security.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/hooks/security.py	Tue Mar 09 11:01:44 2010 +0100
@@ -9,6 +9,7 @@
 __docformat__ = "restructuredtext en"
 
 from cubicweb import Unauthorized
+from cubicweb.selectors import objectify_selector, lltrace
 from cubicweb.server import BEFORE_ADD_RELATIONS, ON_COMMIT_ADD_RELATIONS, hook
 
 
@@ -53,10 +54,17 @@
         pass
 
 
+@objectify_selector
+@lltrace
+def write_security_enabled(cls, req, **kwargs):
+    if req is None or not req.write_security:
+        return 0
+    return 1
+
 class SecurityHook(hook.Hook):
     __abstract__ = True
     category = 'security'
-    __select__ = hook.Hook.__select__ & hook.regular_session()
+    __select__ = hook.Hook.__select__ & write_security_enabled()
 
 
 class AfterAddEntitySecurityHook(SecurityHook):
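
Note: security hooks are now enabled through the session's write_security flag rather than the former regular_session() selector. The same objectify_selector pattern could be applied to the read side; a hypothetical counterpart, for illustration only:

    from cubicweb.selectors import objectify_selector, lltrace

    @objectify_selector
    @lltrace
    def read_security_enabled(cls, req, **kwargs):
        # hypothetical selector mirroring write_security_enabled above
        if req is None or not req.read_security:
            return 0
        return 1
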
--- a/hooks/syncschema.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/hooks/syncschema.py	Tue Mar 09 11:01:44 2010 +0100
@@ -801,7 +801,7 @@
         if name in CORE_ETYPES:
             raise ValidationError(self.entity.eid, {None: self._cw._('can\'t be deleted')})
         # delete every entities of this type
-        self._cw.unsafe_execute('DELETE %s X' % name)
+        self._cw.execute('DELETE %s X' % name)
         DropTable(self._cw, table=SQL_PREFIX + name)
         MemSchemaCWETypeDel(self._cw, name)
 
@@ -986,7 +986,7 @@
             if not (subjschema.eid in pendings or objschema.eid in pendings):
                 session.execute('DELETE X %s Y WHERE X is %s, Y is %s'
                                 % (rschema, subjschema, objschema))
-        execute = session.unsafe_execute
+        execute = session.execute
         rset = execute('Any COUNT(X) WHERE X is %s, X relation_type R,'
                        'R eid %%(x)s' % rdeftype, {'x': self.eidto})
         lastrel = rset[0][0] == 0
--- a/hooks/workflow.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/hooks/workflow.py	Tue Mar 09 11:01:44 2010 +0100
@@ -19,8 +19,8 @@
     nocheck = session.transaction_data.setdefault('skip-security', set())
     nocheck.add((x, 'in_state', oldstate))
     nocheck.add((x, 'in_state', newstate))
-    # delete previous state first in case we're using a super session,
-    # unless in_state isn't stored in the system source
+    # delete previous state first unless in_state isn't stored in the system
+    # source
     fromsource = session.describe(x)[1]
     if fromsource == 'system' or \
            not session.repo.sources_by_uri[fromsource].support_relation('in_state'):
@@ -42,9 +42,7 @@
                and entity.current_workflow:
             state = entity.current_workflow.initial
             if state:
-                # use super session to by-pass security checks
-                session.super_session.add_relation(entity.eid, 'in_state',
-                                                   state.eid)
+                session.add_relation(entity.eid, 'in_state', state.eid)
 
 
 class _FireAutotransitionOp(hook.Operation):
@@ -122,14 +120,7 @@
             msg = session._('exiting from subworkflow %s')
             msg %= session._(forentity.current_workflow.name)
             session.transaction_data[(forentity.eid, 'subwfentrytr')] = True
-            # XXX iirk
-            req = forentity._cw
-            forentity._cw = session.super_session
-            try:
-                trinfo = forentity.change_state(tostate, msg, u'text/plain',
-                                                tr=wftr)
-            finally:
-                forentity._cw = req
+            forentity.change_state(tostate, msg, u'text/plain', tr=wftr)
 
 
 # hooks ########################################################################
@@ -195,7 +186,8 @@
             raise ValidationError(entity.eid, {None: msg})
         # True if we are coming back from subworkflow
         swtr = session.transaction_data.pop((forentity.eid, 'subwfentrytr'), None)
-        cowpowers = session.is_super_session or 'managers' in session.user.groups
+        cowpowers = ('managers' in session.user.groups
+                     or not session.write_security)
         # no investigate the requested state change...
         try:
             treid = entity['by_transition']
@@ -266,7 +258,7 @@
 
 
 class CheckInStateChangeAllowed(WorkflowHook):
-    """check state apply, in case of direct in_state change using unsafe_execute
+    """check state apply, in case of direct in_state change using unsafe execute
     """
     __regid__ = 'wfcheckinstate'
     __select__ = WorkflowHook.__select__ & hook.match_rtype('in_state')
@@ -307,8 +299,7 @@
             return
         entity = self._cw.entity_from_eid(self.eidfrom)
         try:
-            entity.set_attributes(modification_date=datetime.now(),
-                                  _cw_unsafe=True)
+            entity.set_attributes(modification_date=datetime.now())
         except RepositoryError, ex:
             # usually occurs if entity is coming from a read-only source
             # (eg ldap user)
--- a/mail.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/mail.py	Tue Mar 09 11:01:44 2010 +0100
@@ -215,16 +215,9 @@
         """return a list of either 2-uple (email, language) or user entity to
         who this email should be sent
         """
-        # use super_session when available, we don't want to consider security
-        # when selecting recipients_finder
-        try:
-            req = self._cw.super_session
-        except AttributeError:
-            req = self._cw
-        finder = self._cw.vreg['components'].select('recipients_finder', req,
-                                                    rset=self.cw_rset,
-                                                    row=self.cw_row or 0,
-                                                    col=self.cw_col or 0)
+        finder = self._cw.vreg['components'].select(
+            'recipients_finder', self._cw, rset=self.cw_rset,
+            row=self.cw_row or 0, col=self.cw_col or 0)
         return finder.recipients()
 
     def send_now(self, recipients, msg):
--- a/misc/migration/bootstrapmigration_repository.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/misc/migration/bootstrapmigration_repository.py	Tue Mar 09 11:01:44 2010 +0100
@@ -20,8 +20,7 @@
 if applcubicwebversion == (3, 6, 0) and cubicwebversion >= (3, 6, 0):
     _add_relation_definition_no_perms('CWAttribute', 'update_permission', 'CWGroup')
     _add_relation_definition_no_perms('CWAttribute', 'update_permission', 'RQLExpression')
-    session.set_pool()
-    session.unsafe_execute('SET X update_permission Y WHERE X is CWAttribute, X add_permission Y')
+    rql('SET X update_permission Y WHERE X is CWAttribute, X add_permission Y')
     drop_relation_definition('CWAttribute', 'add_permission', 'CWGroup')
     drop_relation_definition('CWAttribute', 'add_permission', 'RQLExpression')
     drop_relation_definition('CWAttribute', 'delete_permission', 'CWGroup')
@@ -29,10 +28,9 @@
 
 elif applcubicwebversion < (3, 6, 0) and cubicwebversion >= (3, 6, 0):
     session.set_pool()
-    session.execute = session.unsafe_execute
     permsdict = ss.deserialize_ertype_permissions(session)
 
-    config.disabled_hooks_categories.add('integrity')
+    changes = session.disable_hook_categories('integrity')
     for rschema in repo.schema.relations():
         rpermsdict = permsdict.get(rschema.eid, {})
         for rdef in rschema.rdefs.values():
@@ -72,7 +70,8 @@
     for action in ('read', 'add', 'delete'):
         drop_relation_definition('CWRType', '%s_permission' % action, 'CWGroup', commit=False)
         drop_relation_definition('CWRType', '%s_permission' % action, 'RQLExpression')
-    config.disabled_hooks_categories.remove('integrity')
+    if changes:
+        session.enable_hook_categories(*changes)
 
 if applcubicwebversion < (3, 4, 0) and cubicwebversion >= (3, 4, 0):
 
@@ -80,13 +79,11 @@
     deactivate_verification_hooks()
     add_relation_type('cwuri')
     base_url = session.base_url()
-    # use an internal session since some entity might forbid modifications to admin
-    isession = repo.internal_session()
     for eid, in rql('Any X', ask_confirm=False):
         type, source, extid = session.describe(eid)
         if source == 'system':
-            isession.execute('SET X cwuri %(u)s WHERE X eid %(x)s',
-                             {'x': eid, 'u': base_url + u'eid/%s' % eid})
+            rql('SET X cwuri %(u)s WHERE X eid %(x)s',
+                {'x': eid, 'u': base_url + u'eid/%s' % eid})
     isession.commit()
     reactivate_verification_hooks()
     session.set_shared_data('do-not-insert-cwuri', False)
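
Note: migration scripts now toggle hooks per session instead of through the global configuration. A sketch of the disable/enable pair inside a migration script (where both session and rql are available), assuming disable_hook_categories returns the categories it actually disabled, as the code above relies on:

    changes = session.disable_hook_categories('integrity')
    try:
        rql('SET X update_permission Y WHERE X is CWAttribute, X add_permission Y')
    finally:
        if changes:
            session.enable_hook_categories(*changes)
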
--- a/misc/migration/postcreate.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/misc/migration/postcreate.py	Tue Mar 09 11:01:44 2010 +0100
@@ -42,8 +42,8 @@
 
 # need this since we already have at least one user in the database (the default admin)
 for user in rql('Any X WHERE X is CWUser').entities():
-    session.unsafe_execute('SET X in_state S WHERE X eid %(x)s, S eid %(s)s',
-                           {'x': user.eid, 's': activated.eid}, 'x')
+    rql('SET X in_state S WHERE X eid %(x)s, S eid %(s)s',
+        {'x': user.eid, 's': activated.eid}, 'x')
 
 # on interactive mode, ask for level 0 persistent options
 if interactive_mode:
--- a/req.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/req.py	Tue Mar 09 11:01:44 2010 +0100
@@ -22,6 +22,12 @@
 CACHE_REGISTRY = {}
 
 
+def _check_cw_unsafe(kwargs):
+    if kwargs.pop('_cw_unsafe', False):
+        warn('[3.7] _cw_unsafe argument is deprecated, now unsafe by '
+             'default, control it using cw_[read|write]_security.',
+             DeprecationWarning, stacklevel=3)
+
 class Cache(dict):
     def __init__(self):
         super(Cache, self).__init__()
@@ -110,19 +116,18 @@
     # XXX move to CWEntityManager or even better as factory method (unclear
     # where yet...)
 
-    def create_entity(self, etype, _cw_unsafe=False, **kwargs):
+    def create_entity(self, etype, **kwargs):
         """add a new entity of the given type
 
         Example (in a shell session):
 
-        c = create_entity('Company', name=u'Logilab')
-        create_entity('Person', works_for=c, firstname=u'John', lastname=u'Doe')
+        >>> c = create_entity('Company', name=u'Logilab')
+        >>> create_entity('Person', firstname=u'John', lastname=u'Doe',
+        ...               works_for=c)
 
         """
-        if _cw_unsafe:
-            execute = self.unsafe_execute
-        else:
-            execute = self.execute
+        _check_cw_unsafe(kwargs)
+        execute = self.execute
         rql = 'INSERT %s X' % etype
         relations = []
         restrictions = set()
@@ -162,7 +167,7 @@
                 restr = 'X %s Y' % attr
             execute('SET %s WHERE X eid %%(x)s, Y eid IN (%s)' % (
                 restr, ','.join(str(r.eid) for r in values)),
-                         {'x': created.eid}, 'x')
+                    {'x': created.eid}, 'x', build_descr=False)
         return created
 
     def ensure_ro_rql(self, rql):
@@ -281,7 +286,7 @@
             userinfo['name'] = "cubicweb"
             userinfo['email'] = ""
             return userinfo
-        user = self.actual_session().user
+        user = self.user
         userinfo['login'] = user.login
         userinfo['name'] = user.name()
         userinfo['email'] = user.get_email()
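
Note: _check_cw_unsafe merely pops the legacy keyword and warns. A self-contained illustration of the behaviour callers can expect (a standalone copy for demonstration, not an import of the private helper):

    from warnings import warn

    def _check_cw_unsafe(kwargs):
        # same logic as above: drop the legacy flag, warn if it was given
        if kwargs.pop('_cw_unsafe', False):
            warn('[3.7] _cw_unsafe argument is deprecated, now unsafe by '
                 'default, control it using cw_[read|write]_security.',
                 DeprecationWarning, stacklevel=3)

    kwargs = {'name': u'Logilab', '_cw_unsafe': True}
    _check_cw_unsafe(kwargs)   # emits a DeprecationWarning
    assert kwargs == {'name': u'Logilab'}
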
--- a/schema.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/schema.py	Tue Mar 09 11:01:44 2010 +0100
@@ -704,7 +704,7 @@
         rql = 'Any %s WHERE %s' % (self.mainvars,  restriction)
         if self.distinct_query:
             rql = 'DISTINCT ' + rql
-        return session.unsafe_execute(rql, args, ck, build_descr=False)
+        return session.execute(rql, args, ck, build_descr=False)
 
 
 class RQLConstraint(RepoEnforcedRQLConstraintMixIn, RQLVocabularyConstraint):
@@ -830,13 +830,10 @@
                 return True
             return False
         if keyarg is None:
-            # on the server side, use unsafe_execute, but this is not available
-            # on the client side (session is actually a request)
-            execute = getattr(session, 'unsafe_execute', session.execute)
             kwargs.setdefault('u', session.user.eid)
             cachekey = kwargs.keys()
             try:
-                rset = execute(rql, kwargs, cachekey, build_descr=True)
+                rset = session.execute(rql, kwargs, cachekey, build_descr=True)
             except NotImplementedError:
                 self.critical('cant check rql expression, unsupported rql %s', rql)
                 if self.eid is not None:
@@ -1084,10 +1081,10 @@
     elif form is not None:
         cw = form._cw
     if cw is not None:
-        if hasattr(cw, 'is_super_session'):
+        if hasattr(cw, 'write_security'): # test it's a session and not a request
             # cw is a server session
-            hasperm = cw.is_super_session or \
-                      not cw.vreg.config.is_hook_category_activated('integrity') or \
+            hasperm = not cw.write_security or \
+                      not cw.is_hook_category_activated('integrity') or \
                       cw.user.has_permission(PERM_USE_TEMPLATE_FORMAT)
         else:
             hasperm = cw.user.has_permission(PERM_USE_TEMPLATE_FORMAT)
--- a/selectors.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/selectors.py	Tue Mar 09 11:01:44 2010 +0100
@@ -23,17 +23,15 @@
 You can log the selectors involved for *calendar* by replacing the line
 above by::
 
-    # in Python2.5
     from cubicweb.selectors import traced_selection
     with traced_selection():
         self.view('calendar', myrset)
 
-    # in Python2.4
-    from cubicweb import selectors
-    selectors.TRACED_OIDS = ('calendar',)
-    self.view('calendar', myrset)
-    selectors.TRACED_OIDS = ()
+With Python 2.5, remember to add::
 
+    from __future__ import with_statement
+
+at the top of your module.
 
 :organization: Logilab
 :copyright: 2001-2010 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
--- a/server/__init__.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/server/__init__.py	Tue Mar 09 11:01:44 2010 +0100
@@ -8,6 +8,8 @@
 :contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
 :license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
 """
+from __future__ import with_statement
+
 __docformat__ = "restructuredtext en"
 
 import sys
@@ -145,12 +147,9 @@
     # can't skip entities table even if system source doesn't support them,
     # they are used sometimes by generated sql. Keeping them empty is much
     # simpler than fixing this...
-    if sqlcnx.logged_user != source['db-user']:
-        schemasql = sqlschema(schema, driver, user=source['db-user'])
-    else:
-        schemasql = sqlschema(schema, driver)
-        #skip_entities=[str(e) for e in schema.entities()
-        #               if not repo.system_source.support_entity(str(e))])
+    schemasql = sqlschema(schema, driver)
+    #skip_entities=[str(e) for e in schema.entities()
+    #               if not repo.system_source.support_entity(str(e))])
     sqlexec(schemasql, execute, pbtitle=_title)
     sqlcursor.close()
     sqlcnx.commit()
@@ -212,33 +211,30 @@
 
 def initialize_schema(config, schema, mhandler, event='create'):
     from cubicweb.server.schemaserial import serialize_schema
-    # deactivate every hooks but those responsible to set metadata
-    # so, NO INTEGRITY CHECKS are done, to have quicker db creation
-    oldmode = config.set_hooks_mode(config.DENY_ALL)
-    changes = config.enable_hook_category('metadata')
+    from cubicweb.server.session import hooks_control
+    session = mhandler.session
     paths = [p for p in config.cubes_path() + [config.apphome]
              if exists(join(p, 'migration'))]
-    # execute cubicweb's pre<event> script
-    mhandler.exec_event_script('pre%s' % event)
-    # execute cubes pre<event> script if any
-    for path in reversed(paths):
-        mhandler.exec_event_script('pre%s' % event, path)
-    # enter instance'schema into the database
-    mhandler.session.set_pool()
-    serialize_schema(mhandler.session, schema)
-    # execute cubicweb's post<event> script
-    mhandler.exec_event_script('post%s' % event)
-    # execute cubes'post<event> script if any
-    for path in reversed(paths):
-        mhandler.exec_event_script('post%s' % event, path)
-    # restore hooks config
-    if changes:
-        config.disable_hook_category(changes)
-    config.set_hooks_mode(oldmode)
+    # deactivate every hooks but those responsible to set metadata
+    # so, NO INTEGRITY CHECKS are done, to have quicker db creation
+    with hooks_control(session, session.HOOKS_DENY_ALL, 'metadata'):
+        # execute cubicweb's pre<event> script
+        mhandler.exec_event_script('pre%s' % event)
+        # execute cubes pre<event> script if any
+        for path in reversed(paths):
+            mhandler.exec_event_script('pre%s' % event, path)
+        # enter instance'schema into the database
+        session.set_pool()
+        serialize_schema(session, schema)
+        # execute cubicweb's post<event> script
+        mhandler.exec_event_script('post%s' % event)
+        # execute cubes'post<event> script if any
+        for path in reversed(paths):
+            mhandler.exec_event_script('post%s' % event, path)
 
 
-# sqlite'stored procedures have to be registered at connexion opening time
-SQL_CONNECT_HOOKS = {}
+# sqlite'stored procedures have to be registered at connection opening time
+from logilab.db import SQL_CONNECT_HOOKS
 
 # add to this set relations which should have their add security checking done
 # *BEFORE* adding the actual relation (done after by default)
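
Note: initialize_schema now relies on the hooks_control context manager instead of saving and restoring the configuration's hook mode by hand. A usage sketch, assuming a server-side session object:

    from __future__ import with_statement
    from cubicweb.server.session import hooks_control

    # deny every hook category except 'metadata' for the duration of the
    # block; the previous mode is restored on exit, even on error
    with hooks_control(session, session.HOOKS_DENY_ALL, 'metadata'):
        pass  # bulk operations run here with integrity checks off
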
--- a/server/checkintegrity.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/server/checkintegrity.py	Tue Mar 09 11:01:44 2010 +0100
@@ -6,6 +6,8 @@
 :contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
 :license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
 """
+from __future__ import with_statement
+
 __docformat__ = "restructuredtext en"
 
 import sys
@@ -15,6 +17,7 @@
 
 from cubicweb.schema import PURE_VIRTUAL_RTYPES
 from cubicweb.server.sqlutils import SQL_PREFIX
+from cubicweb.server.session import security_enabled
 
 def has_eid(sqlcursor, eid, eids):
     """return true if the eid is a valid eid"""
@@ -70,15 +73,9 @@
     # to be updated due to the reindexation
     repo = session.repo
     cursor = session.pool['system']
-    if not repo.system_source.indexer.has_fti_table(cursor):
-        from indexer import get_indexer
+    if not repo.system_source.dbhelper.has_fti_table(cursor):
         print 'no text index table'
-        indexer = get_indexer(repo.system_source.dbdriver)
-        # XXX indexer.init_fti(cursor) once index 0.7 is out
-        indexer.init_extensions(cursor)
-        cursor.execute(indexer.sql_init_fti())
-    repo.config.disabled_hooks_categories.add('metadata')
-    repo.config.disabled_hooks_categories.add('integrity')
+        repo.system_source.dbhelper.init_fti(cursor)
     repo.system_source.do_fti = True  # ensure full-text indexation is activated
     etypes = set()
     for eschema in schema.entities():
@@ -94,9 +91,6 @@
     if withpb:
         pb = ProgressBar(len(etypes) + 1)
     # first monkey patch Entity.check to disable validation
-    from cubicweb.entity import Entity
-    _check = Entity.check
-    Entity.check = lambda self, creation=False: True
     # clear fti table first
     session.system_sql('DELETE FROM %s' % session.repo.system_source.dbhelper.fti_table)
     if withpb:
@@ -106,14 +100,9 @@
     source = repo.system_source
     for eschema in etypes:
         for entity in session.execute('Any X WHERE X is %s' % eschema).entities():
-            source.fti_unindex_entity(session, entity.eid)
             source.fti_index_entity(session, entity)
         if withpb:
             pb.update()
-    # restore Entity.check
-    Entity.check = _check
-    repo.config.disabled_hooks_categories.remove('metadata')
-    repo.config.disabled_hooks_categories.remove('integrity')
 
 
 def check_schema(schema, session, eids, fix=1):
@@ -291,9 +280,10 @@
     # yo, launch checks
     if checks:
         eids_cache = {}
-        for check in checks:
-            check_func = globals()['check_%s' % check]
-            check_func(repo.schema, session, eids_cache, fix=fix)
+        with security_enabled(session, read=False): # ensure no read security
+            for check in checks:
+                check_func = globals()['check_%s' % check]
+                check_func(repo.schema, session, eids_cache, fix=fix)
         if fix:
             cnx.commit()
         else:
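
Note: the full-text index is now (re)created through the db helper rather than a separate indexer package. A sketch, assuming a repository repo and an open cursor on the system source:

    dbhelper = repo.system_source.dbhelper
    if not dbhelper.has_fti_table(cursor):
        # creates the fti table (and any backend extensions it needs)
        dbhelper.init_fti(cursor)
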
--- a/server/hook.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/server/hook.py	Tue Mar 09 11:01:44 2010 +0100
@@ -33,6 +33,8 @@
 :contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
 :license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
 """
+from __future__ import with_statement
+
 __docformat__ = "restructuredtext en"
 
 from warnings import warn
@@ -47,7 +49,7 @@
 from cubicweb.selectors import (objectify_selector, lltrace, ExpectedValueSelector,
                                 implements)
 from cubicweb.appobject import AppObject
-
+from cubicweb.server.session import security_enabled
 
 ENTITIES_HOOKS = set(('before_add_entity',    'after_add_entity',
                       'before_update_entity', 'after_update_entity',
@@ -76,13 +78,20 @@
                     event, obj.__module__, obj.__name__))
         super(HooksRegistry, self).register(obj, **kwargs)
 
-    def call_hooks(self, event, req=None, **kwargs):
+    def call_hooks(self, event, session=None, **kwargs):
         kwargs['event'] = event
-        for hook in sorted(self.possible_objects(req, **kwargs), key=lambda x: x.order):
-            if hook.enabled:
+        if session is None:
+            for hook in sorted(self.possible_objects(session, **kwargs),
+                               key=lambda x: x.order):
                 hook()
-            else:
-                warn('[3.6] %s: enabled is deprecated' % hook.__class__)
+        else:
+            # by default, hooks are executed with security turned off
+            with security_enabled(session, read=False):
+                hooks = sorted(self.possible_objects(session, **kwargs),
+                               key=lambda x: x.order)
+                with security_enabled(session, write=False):
+                    for hook in hooks:
+                        hook()
 
 VRegistry.REGISTRY_FACTORY['hooks'] = HooksRegistry
 
@@ -104,6 +113,14 @@
 
 @objectify_selector
 @lltrace
+def _bw_is_enabled(cls, req, **kwargs):
+    if cls.enabled:
+        return 1
+    warn('[3.6] %s: enabled is deprecated' % cls)
+    return 0
+
+@objectify_selector
+@lltrace
 def match_event(cls, req, **kwargs):
     if kwargs.get('event') in cls.events:
         return 1
@@ -113,19 +130,15 @@
 @lltrace
 def enabled_category(cls, req, **kwargs):
     if req is None:
-        # server startup / shutdown event
-        config = kwargs['repo'].config
-    else:
-        config = req.vreg.config
-    return config.is_hook_activated(cls)
+        return True # XXX how to deactivate server startup / shutdown event
+    return req.is_hook_activated(cls)
 
 @objectify_selector
 @lltrace
-def regular_session(cls, req, **kwargs):
-    if req is None or req.is_super_session:
-        return 0
-    return 1
-
+def from_dbapi_query(cls, req, **kwargs):
+    if req.running_dbapi_query:
+        return 1
+    return 0
 
 class rechain(object):
     def __init__(self, *iterators):
@@ -178,7 +191,7 @@
 
 class Hook(AppObject):
     __registry__ = 'hooks'
-    __select__ = match_event() & enabled_category()
+    __select__ = match_event() & enabled_category() & _bw_is_enabled()
     # set this in derivated classes
     events = None
     category = None
@@ -263,7 +276,7 @@
         else:
             assert self.rtype in self.object_relations
             meid, seid = self.eidto, self.eidfrom
-        self._cw.unsafe_execute(
+        self._cw.execute(
             'SET E %s P WHERE X %s P, X eid %%(x)s, E eid %%(e)s, NOT E %s P'\
             % (self.main_rtype, self.main_rtype, self.main_rtype),
             {'x': meid, 'e': seid}, ('x', 'e'))
@@ -281,7 +294,7 @@
 
     def __call__(self):
         eschema = self._cw.vreg.schema.eschema(self._cw.describe(self.eidfrom)[0])
-        execute = self._cw.unsafe_execute
+        execute = self._cw.execute
         for rel in self.subject_relations:
             if rel in eschema.subjrels:
                 execute('SET R %s P WHERE X eid %%(x)s, P eid %%(p)s, '
@@ -306,7 +319,7 @@
 
     def __call__(self):
         eschema = self._cw.vreg.schema.eschema(self._cw.describe(self.eidfrom)[0])
-        execute = self._cw.unsafe_execute
+        execute = self._cw.execute
         for rel in self.subject_relations:
             if rel in eschema.subjrels:
                 execute('DELETE R %s P WHERE X eid %%(x)s, P eid %%(p)s, '
@@ -510,6 +523,6 @@
 
 class RQLPrecommitOperation(Operation):
     def precommit_event(self):
-        execute = self.session.unsafe_execute
+        execute = self.session.execute
         for rql in self.rqls:
             execute(*rql)
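
Note: call_hooks runs hooks with both read and write security disabled by default; security is only re-established explicitly, e.g. through the security hooks' own selector. The save-and-restore pattern such a context manager follows is illustrated below with a toy (not cubicweb's implementation of security_enabled), working on any object exposing read_security/write_security attributes:

    from contextlib import contextmanager

    @contextmanager
    def toy_security_enabled(session, read=None, write=None):
        # illustration only: remember the current flags, override the ones
        # given, and restore them whatever happens in the block
        oldread, oldwrite = session.read_security, session.write_security
        if read is not None:
            session.read_security = read
        if write is not None:
            session.write_security = write
        try:
            yield session
        finally:
            session.read_security = oldread
            session.write_security = oldwrite
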
--- a/server/migractions.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/server/migractions.py	Tue Mar 09 11:01:44 2010 +0100
@@ -243,17 +243,26 @@
     @property
     def session(self):
         if self.config is not None:
-            return self.repo._get_session(self.cnx.sessionid)
+            session = self.repo._get_session(self.cnx.sessionid)
+            if session.pool is None:
+                session.set_read_security(False)
+                session.set_write_security(False)
+            session.set_pool()
+            return session
         # no access to session on remote instance
         return None
 
     def commit(self):
         if hasattr(self, '_cnx'):
             self._cnx.commit()
+        if self.session:
+            self.session.set_pool()
 
     def rollback(self):
         if hasattr(self, '_cnx'):
             self._cnx.rollback()
+        if self.session:
+            self.session.set_pool()
 
     def rqlexecall(self, rqliter, cachekey=None, ask_confirm=True):
         for rql, kwargs in rqliter:
@@ -313,7 +322,6 @@
                     self.cmd_reactivate_verification_hooks()
 
     def install_custom_sql_scripts(self, directory, driver):
-        self.session.set_pool() # ensure pool is set
         for fpath in glob(osp.join(directory, '*.sql.%s' % driver)):
             newname = osp.basename(fpath).replace('.sql.%s' % driver,
                                                   '.%s.sql' % driver)
@@ -698,10 +706,7 @@
         groupmap = self.group_mapping()
         cstrtypemap = self.cstrtype_mapping()
         # register the entity into CWEType
-        try:
-            execute = self._cw.unsafe_execute
-        except AttributeError:
-            execute = self._cw.execute
+        execute = self._cw.execute
         ss.execschemarql(execute, eschema, ss.eschema2rql(eschema, groupmap))
         # add specializes relation if needed
         self.rqlexecall(ss.eschemaspecialize2rql(eschema), ask_confirm=confirm)
@@ -842,10 +847,7 @@
         """
         reposchema = self.repo.schema
         rschema = self.fs_schema.rschema(rtype)
-        try:
-            execute = self._cw.unsafe_execute
-        except AttributeError:
-            execute = self._cw.execute
+        execute = self._cw.execute
         # register the relation into CWRType and insert necessary relation
         # definitions
         ss.execschemarql(execute, rschema, ss.rschema2rql(rschema, addrdef=False))
@@ -905,10 +907,7 @@
         rschema = self.fs_schema.rschema(rtype)
         if not rtype in self.repo.schema:
             self.cmd_add_relation_type(rtype, addrdef=False, commit=True)
-        try:
-            execute = self._cw.unsafe_execute
-        except AttributeError:
-            execute = self._cw.execute
+        execute = self._cw.execute
         rdef = self._get_rdef(rschema, subjtype, objtype)
         ss.execschemarql(execute, rdef,
                          ss.rdef2rql(rdef, self.cstrtype_mapping(),
@@ -1184,7 +1183,6 @@
         level actions
         """
         if not ask_confirm or self.confirm('Execute sql: %s ?' % sql):
-            self.session.set_pool() # ensure pool is set
             try:
                 cu = self.session.system_sql(sql, args)
             except:
@@ -1203,10 +1201,7 @@
         if not isinstance(rql, (tuple, list)):
             rql = ( (rql, kwargs), )
         res = None
-        try:
-            execute = self._cw.unsafe_execute
-        except AttributeError:
-            execute = self._cw.execute
+        execute = self._cw.execute
         for rql, kwargs in rql:
             if kwargs:
                 msg = '%s (%s)' % (rql, kwargs)
@@ -1223,12 +1218,6 @@
     def rqliter(self, rql, kwargs=None, ask_confirm=True):
         return ForRqlIterator(self, rql, None, ask_confirm)
 
-    def cmd_deactivate_verification_hooks(self):
-        self.config.disabled_hooks_categories.add('integrity')
-
-    def cmd_reactivate_verification_hooks(self):
-        self.config.disabled_hooks_categories.remove('integrity')
-
     # broken db commands ######################################################
 
     def cmd_change_attribute_type(self, etype, attr, newtype, commit=True):
@@ -1279,6 +1268,14 @@
         if commit:
             self.commit()
 
+    @deprecated("[3.7] use session.disable_hook_categories('integrity')")
+    def cmd_deactivate_verification_hooks(self):
+        self.session.disable_hook_categories('integrity')
+
+    @deprecated("[3.7] use session.enable_hook_categories('integrity')")
+    def cmd_reactivate_verification_hooks(self):
+        self.session.enable_hook_categories('integrity')
+
 
 class ForRqlIterator:
     """specific rql iterator to make the loop skipable"""
--- a/server/querier.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/server/querier.py	Tue Mar 09 11:01:44 2010 +0100
@@ -6,6 +6,8 @@
 :contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
 :license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
 """
+from __future__ import with_statement
+
 __docformat__ = "restructuredtext en"
 
 from itertools import repeat
@@ -23,7 +25,7 @@
 from cubicweb.server.utils import cleanup_solutions
 from cubicweb.server.rqlannotation import SQLGenAnnotator, set_qdata
 from cubicweb.server.ssplanner import READ_ONLY_RTYPES, add_types_restriction
-
+from cubicweb.server.session import security_enabled
 
 def empty_rset(rql, args, rqlst=None):
     """build an empty result set object"""
@@ -200,8 +202,11 @@
         return rqlst to actually execute
         """
         noinvariant = set()
-        if security and not self.session.is_super_session:
-            self._insert_security(union, noinvariant)
+        if security and self.session.read_security:
+            # ensure security is turned off while security is inserted,
+            # else we may loop forever...
+            with security_enabled(self.session, read=False):
+                self._insert_security(union, noinvariant)
         self.rqlhelper.simplify(union)
         self.sqlannotate(union)
         set_qdata(self.schema.rschema, union, noinvariant)
@@ -299,7 +304,6 @@
 
         note: rqlst should not have been simplified at this point
         """
-        assert not self.session.is_super_session
         user = self.session.user
         schema = self.schema
         msgs = []
@@ -595,20 +599,20 @@
             try:
                 self.solutions(session, rqlst, args)
             except UnknownEid:
-                # we want queries such as "Any X WHERE X eid 9999"
-                # return an empty result instead of raising UnknownEid
+                # we want queries such as "Any X WHERE X eid 9999" return an
+                # empty result instead of raising UnknownEid
                 return empty_rset(rql, args, rqlst)
             self._rql_cache[cachekey] = rqlst
         orig_rqlst = rqlst
         if not rqlst.TYPE == 'select':
-            if not session.is_super_session:
+            if session.read_security:
                 check_no_password_selected(rqlst)
-            # write query, ensure session's mode is 'write' so connections
-            # won't be released until commit/rollback
+            # write query, ensure session's mode is 'write' so connections won't
+            # be released until commit/rollback
             session.mode = 'write'
             cachekey = None
         else:
-            if not session.is_super_session:
+            if session.read_security:
                 for select in rqlst.children:
                     check_no_password_selected(select)
             # on select query, always copy the cached rqlst so we don't have to
--- a/server/repository.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/server/repository.py	Tue Mar 09 11:01:44 2010 +0100
@@ -15,6 +15,8 @@
 :contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
 :license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
 """
+from __future__ import with_statement
+
 __docformat__ = "restructuredtext en"
 
 import sys
@@ -36,7 +38,7 @@
                       typed_eid)
 from cubicweb import cwvreg, schema, server
 from cubicweb.server import utils, hook, pool, querier, sources
-from cubicweb.server.session import Session, InternalSession
+from cubicweb.server.session import Session, InternalSession, security_enabled
 
 
 class CleanupEidTypeCacheOp(hook.SingleLastOperation):
@@ -80,12 +82,12 @@
     this kind of behaviour has to be done in the repository so we don't have
     hook ordering hazards
     """
-    # XXX now that rql in migraction default to unsafe_execute we don't want to
-    #     skip that for super session (though we can still skip it for internal
-    #     sessions). Also we should imo rely on the orm to first fetch existing
-    #     entity if any then delete it.
+    # skip that for internal session or if integrity explicitly disabled
+    #
+    # XXX we should imo rely on the orm to first fetch existing entity if any
+    # then delete it.
     if session.is_internal_session \
-           or not session.vreg.config.is_hook_category_activated('integrity'):
+           or not session.is_hook_category_activated('integrity'):
         return
     card = session.schema_rproperty(rtype, eidfrom, eidto, 'cardinality')
     # one may be tempted to check for neweids but this may cause more than one
@@ -100,23 +102,15 @@
     rschema = session.repo.schema.rschema(rtype)
     if card[0] in '1?':
         if not rschema.inlined: # inlined relations will be implicitly deleted
-            rset = session.unsafe_execute('Any X,Y WHERE X %s Y, X eid %%(x)s, '
-                                          'NOT Y eid %%(y)s' % rtype,
-                                          {'x': eidfrom, 'y': eidto}, 'x')
-            if rset:
-                safe_delete_relation(session, rschema, *rset[0])
+            with security_enabled(session, read=False):
+                session.execute('DELETE X %s Y WHERE X eid %%(x)s, '
+                                'NOT Y eid %%(y)s' % rtype,
+                                {'x': eidfrom, 'y': eidto}, 'x')
     if card[1] in '1?':
-        rset = session.unsafe_execute('Any X,Y WHERE X %s Y, Y eid %%(y)s, '
-                                      'NOT X eid %%(x)s' % rtype,
-                                      {'x': eidfrom, 'y': eidto}, 'y')
-        if rset:
-            safe_delete_relation(session, rschema, *rset[0])
-
-
-def safe_delete_relation(session, rschema, subject, object):
-    if not rschema.has_perm(session, 'delete', fromeid=subject, toeid=object):
-        raise Unauthorized()
-    session.repo.glob_delete_relation(session, subject, rschema.type, object)
+        with security_enabled(session, read=False):
+            session.execute('DELETE X %s Y WHERE Y eid %%(y)s, '
+                            'NOT X eid %%(x)s' % rtype,
+                            {'x': eidfrom, 'y': eidto}, 'y')
 
 
 class Repository(object):
@@ -918,21 +912,22 @@
         rql = []
         eschema = self.schema.eschema(etype)
         pendingrtypes = session.transaction_data.get('pendingrtypes', ())
-        for rschema, targetschemas, x in eschema.relation_definitions():
-            rtype = rschema.type
-            if rtype in schema.VIRTUAL_RTYPES or rtype in pendingrtypes:
-                continue
-            var = '%s%s' % (rtype.upper(), x.upper())
-            if x == 'subject':
-                # don't skip inlined relation so they are regularly
-                # deleted and so hooks are correctly called
-                selection = 'X %s %s' % (rtype, var)
-            else:
-                selection = '%s %s X' % (var, rtype)
-            rql = 'DELETE %s WHERE X eid %%(x)s' % selection
-            # unsafe_execute since we suppose that if user can delete the entity,
-            # he can delete all its relations without security checking
-            session.unsafe_execute(rql, {'x': eid}, 'x', build_descr=False)
+        with security_enabled(session, read=False, write=False):
+            for rschema, targetschemas, x in eschema.relation_definitions():
+                rtype = rschema.type
+                if rtype in schema.VIRTUAL_RTYPES or rtype in pendingrtypes:
+                    continue
+                var = '%s%s' % (rtype.upper(), x.upper())
+                if x == 'subject':
+                    # don't skip inlined relation so they are regularly
+                    # deleted and so hooks are correctly called
+                    selection = 'X %s %s' % (rtype, var)
+                else:
+                    selection = '%s %s X' % (var, rtype)
+                rql = 'DELETE %s WHERE X eid %%(x)s' % selection
+                # if user can delete the entity, he can delete all its relations
+                # without security checking
+                session.execute(rql, {'x': eid}, 'x', build_descr=False)
 
     def locate_relation_source(self, session, subject, rtype, object):
         subjsource = self.source_from_eid(subject, session)
@@ -988,7 +983,8 @@
             if not rschema.final: # inlined relation
                 relations.append((attr, entity[attr]))
         entity.set_defaults()
-        entity.check(creation=True)
+        if session.is_hook_category_activated('integrity'):
+            entity.check(creation=True)
         source.add_entity(session, entity)
         if source.uri != 'system':
             extid = source.get_extid(entity)
@@ -1035,7 +1031,8 @@
             print 'UPDATE entity', etype, entity.eid, \
                   dict(entity), edited_attributes
         entity.edited_attributes = edited_attributes
-        entity.check()
+        if session.is_hook_category_activated('integrity'):
+            entity.check()
         eschema = entity.e_schema
         session.set_entity_cache(entity)
         only_inline_rels, need_fti_update = True, False
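
Since `entity.check()` is now guarded by the 'integrity' hook category, callers
can skip those checks for a single session; a minimal sketch using the
`hooks_control` context manager (entity type, attribute and data are
hypothetical):

    from cubicweb.server.session import hooks_control

    def bulk_import(session, rows):
        # disable the 'integrity' category for this session while the block
        # runs, restoring the previous state on exit
        with hooks_control(session, session.HOOKS_ALLOW_ALL, 'integrity'):
            for row in rows:
                session.execute('INSERT MyEntity X: X name %(name)s', row)
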
--- a/server/schemaserial.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/server/schemaserial.py	Tue Mar 09 11:01:44 2010 +0100
@@ -218,7 +218,7 @@
     if not quiet:
         _title = '-> storing the schema in the database '
         print _title,
-    execute = cursor.unsafe_execute
+    execute = cursor.execute
     eschemas = schema.entities()
     if not quiet:
         pb_size = (len(eschemas + schema.relations())
--- a/server/serverconfig.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/server/serverconfig.py	Tue Mar 09 11:01:44 2010 +0100
@@ -185,63 +185,6 @@
     # check user's state at login time
     consider_user_state = True
 
-    # XXX hooks control stuff should probably be on the session, not on the config
-
-    # hooks activation configuration
-    # all hooks should be activated during normal execution
-    disabled_hooks_categories = set()
-    enabled_hooks_categories = set()
-    ALLOW_ALL = object()
-    DENY_ALL = object()
-    hooks_mode = ALLOW_ALL
-
-    @classmethod
-    def set_hooks_mode(cls, mode):
-        assert mode is cls.ALLOW_ALL or mode is cls.DENY_ALL
-        oldmode = cls.hooks_mode
-        cls.hooks_mode = mode
-        return oldmode
-
-    @classmethod
-    def disable_hook_category(cls, *categories):
-        changes = set()
-        if cls.hooks_mode is cls.DENY_ALL:
-            for category in categories:
-                if category in cls.enabled_hooks_categories:
-                    cls.enabled_hooks_categories.remove(category)
-                    changes.add(category)
-        else:
-            for category in categories:
-                if category not in cls.disabled_hooks_categories:
-                    cls.disabled_hooks_categories.add(category)
-                    changes.add(category)
-        return changes
-
-    @classmethod
-    def enable_hook_category(cls, *categories):
-        changes = set()
-        if cls.hooks_mode is cls.DENY_ALL:
-            for category in categories:
-                if category not in cls.enabled_hooks_categories:
-                    cls.enabled_hooks_categories.add(category)
-                    changes.add(category)
-        else:
-            for category in categories:
-                if category in cls.disabled_hooks_categories:
-                    cls.disabled_hooks_categories.remove(category)
-                    changes.add(category)
-        return changes
-
-    @classmethod
-    def is_hook_activated(cls, hook):
-        return cls.is_hook_category_activated(hook.category)
-
-    @classmethod
-    def is_hook_category_activated(cls, category):
-        if cls.hooks_mode is cls.DENY_ALL:
-            return category in cls.enabled_hooks_categories
-        return category not in cls.disabled_hooks_categories
-
     # should some hooks be deactivated during [pre|post]create script execution
     free_wheel = False
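
The class-level switches removed above are replaced by per-session (and
per-thread) state on `Session` (see server/session.py below), with the same
two-mode semantics; a minimal sketch, assuming a `session` object (category
names other than 'integrity' are illustrative):

    # HOOKS_ALLOW_ALL (default): everything runs except disabled categories
    session.set_hooks_mode(session.HOOKS_ALLOW_ALL)
    session.disable_hook_categories('integrity')
    assert not session.is_hook_category_activated('integrity')
    assert session.is_hook_category_activated('metadata')

    # HOOKS_DENY_ALL: nothing runs except explicitly enabled categories
    session.set_hooks_mode(session.HOOKS_DENY_ALL)
    session.enable_hook_categories('metadata')
    assert session.is_hook_category_activated('metadata')
    assert not session.is_hook_category_activated('integrity')
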
 
--- a/server/serverctl.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/server/serverctl.py	Tue Mar 09 11:01:44 2010 +0100
@@ -63,9 +63,11 @@
             password = getpass('password: ')
     extra_args = source.get('db-extra-arguments')
     extra = extra_args and {'extra_args': extra_args} or {}
-    return get_connection(driver, dbhost, dbname, user, password=password,
-                          port=source.get('db-port'),
-                          **extra)
+    cnx = get_connection(driver, dbhost, dbname, user, password=password,
+                         port=source.get('db-port'),
+                         **extra)
+    cnx.logged_user = logged_user
+    return cnx
 
 def system_source_cnx(source, dbms_system_base=False,
                       special_privs='CREATE/DROP DATABASE', verbose=True):
@@ -75,8 +77,8 @@
     create/drop the instance database)
     """
     if dbms_system_base:
-        from logilab.common.adbh import get_adv_func_helper
-        system_db = get_adv_func_helper(source['db-driver']).system_database()
+        from logilab.db import get_db_helper
+        system_db = get_db_helper(source['db-driver']).system_database()
         return source_cnx(source, system_db, special_privs=special_privs, verbose=verbose)
     return source_cnx(source, special_privs=special_privs, verbose=verbose)
 
@@ -85,11 +87,11 @@
     or a database
     """
     import logilab.common as lgp
-    from logilab.common.adbh import get_adv_func_helper
+    from logilab.db import get_db_helper
     lgp.USE_MX_DATETIME = False
     special_privs = ''
     driver = source['db-driver']
-    helper = get_adv_func_helper(driver)
+    helper = get_db_helper(driver)
     if user is not None and helper.users_support:
         special_privs += '%s USER' % what
     if db is not None:
@@ -202,10 +204,10 @@
 
     def cleanup(self):
         """remove instance's configuration and database"""
-        from logilab.common.adbh import get_adv_func_helper
+        from logilab.db import get_db_helper
         source = self.config.sources()['system']
         dbname = source['db-name']
-        helper = get_adv_func_helper(source['db-driver'])
+        helper = get_db_helper(source['db-driver'])
         if ASK.confirm('Delete database %s ?' % dbname):
             user = source['db-user'] or None
             cnx = _db_sys_cnx(source, 'DROP DATABASE', user=user)
@@ -285,8 +287,7 @@
         )
     def run(self, args):
         """run the command with its specific arguments"""
-        from logilab.common.adbh import get_adv_func_helper
-        from indexer import get_indexer
+        from logilab.db import get_db_helper
         verbose = self.get('verbose')
         automatic = self.get('automatic')
         appid = pop_arg(args, msg='No instance specified !')
@@ -295,7 +296,7 @@
         dbname = source['db-name']
         driver = source['db-driver']
         create_db = self.config.create_db
-        helper = get_adv_func_helper(driver)
+        helper = get_db_helper(driver)
         if driver == 'sqlite':
             if os.path.exists(dbname) and automatic or \
                    ASK.confirm('Database %s already exists -- do you want to drop it ?' % dbname):
@@ -330,8 +331,7 @@
                 raise
         cnx = system_source_cnx(source, special_privs='LANGUAGE C', verbose=verbose)
         cursor = cnx.cursor()
-        indexer = get_indexer(driver)
-        indexer.init_extensions(cursor)
+        helper.init_fti_extensions(cursor)
         # postgres specific stuff
         if driver == 'postgres':
             # install plpythonu/plpgsql language if not installed by the cube
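
The changes above all boil down to fetching a single helper object per driver
from logilab.db instead of combining logilab.common.adbh with the separate
indexer package; a minimal sketch (the driver name and cursor are illustrative,
and the available methods depend on the backend):

    from logilab.db import get_db_helper

    helper = get_db_helper('postgres')
    print helper.system_database()      # administrative database name
    # full text index setup formerly provided by `indexer`:
    # helper.init_fti_extensions(cursor)
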
--- a/server/session.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/server/session.py	Tue Mar 09 11:01:44 2010 +0100
@@ -5,6 +5,8 @@
 :contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
 :license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
 """
+from __future__ import with_statement
+
 __docformat__ = "restructuredtext en"
 
 import sys
@@ -42,6 +44,68 @@
     return description
 
 
+class hooks_control(object):
+    """context manager to control activated hooks categories.
+
+    If mode is session.`HOOKS_DENY_ALL`, given hooks categories will
+    be enabled.
+
+    If mode is session.`HOOKS_ALLOW_ALL`, given hooks categories will
+    be disabled.
+    """
+    def __init__(self, session, mode, *categories):
+        self.session = session
+        self.mode = mode
+        self.categories = categories
+
+    def __enter__(self):
+        self.oldmode = self.session.set_hooks_mode(self.mode)
+        if self.mode is self.session.HOOKS_DENY_ALL:
+            self.changes = self.session.enable_hook_categories(*self.categories)
+        else:
+            self.changes = self.session.disable_hook_categories(*self.categories)
+
+    def __exit__(self, exctype, exc, traceback):
+        if self.changes:
+            if self.mode is self.session.HOOKS_DENY_ALL:
+                self.session.disable_hook_categories(*self.changes)
+            else:
+                self.session.enable_hook_categories(*self.changes)
+        self.session.set_hooks_mode(self.oldmode)
+
+
+class security_enabled(object):
+    """context manager to control security w/ session.execute, since by
+    default security is disabled on queries executed on the repository
+    side.
+    """
+    def __init__(self, session, read=None, write=None):
+        self.session = session
+        self.read = read
+        self.write = write
+
+    def __enter__(self):
+        if self.read is not None:
+            self.oldread = self.session.set_read_security(self.read)
+        if self.write is not None:
+            self.oldwrite = self.session.set_write_security(self.write)
+
+    def __exit__(self, exctype, exc, traceback):
+        if self.read is not None:
+            self.session.set_read_security(self.oldread)
+        if self.write is not None:
+            self.session.set_write_security(self.oldwrite)
+
+
+
 class Session(RequestSessionBase):
     """tie session id, user, connections pool and other session data all
     together
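
Typical usage of the two context managers defined above (a minimal sketch; the
`session`, `eid` and RQL strings are illustrative):

    # run a block with the 'integrity' hooks disabled, restoring the
    # previous state afterwards
    with hooks_control(session, session.HOOKS_ALLOW_ALL, 'integrity'):
        session.execute('SET X name "tmp" WHERE X eid %(x)s', {'x': eid}, 'x')

    # run a block with both read and write security checks turned off
    with security_enabled(session, read=False, write=False):
        session.execute('DELETE X in_group G WHERE G name "guests", X eid %(x)s',
                        {'x': eid}, 'x')
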
@@ -57,7 +121,6 @@
         self.creation = time()
         self.timestamp = self.creation
         self.is_internal_session = False
-        self.is_super_session = False
         self.default_mode = 'read'
         # short cut to querier .execute method
         self._execute = repo.querier.execute
@@ -78,19 +141,9 @@
     def hijack_user(self, user):
         """return a fake request/session using specified user"""
         session = Session(user, self.repo)
-        session._threaddata = self.actual_session()._threaddata
+        session._threaddata.pool = self.pool
         return session
 
-    def _super_call(self, __cb, *args, **kwargs):
-        if self.is_super_session:
-            __cb(self, *args, **kwargs)
-            return
-        self.is_super_session = True
-        try:
-            __cb(self, *args, **kwargs)
-        finally:
-            self.is_super_session = False
-
     def add_relation(self, fromeid, rtype, toeid):
         """provide direct access to the repository method to add a relation.
 
@@ -102,14 +155,13 @@
         You may use this in hooks when you know both eids of the relation you
         want to add.
         """
-        if self.vreg.schema[rtype].inlined:
-            entity = self.entity_from_eid(fromeid)
-            entity[rtype] = toeid
-            self._super_call(self.repo.glob_update_entity,
-                             entity, set((rtype,)))
-        else:
-            self._super_call(self.repo.glob_add_relation,
-                             fromeid, rtype, toeid)
+        with security_enabled(self, False, False):
+            if self.vreg.schema[rtype].inlined:
+                entity = self.entity_from_eid(fromeid)
+                entity[rtype] = toeid
+                self.repo.glob_update_entity(self, entity, set((rtype,)))
+            else:
+                self.repo.glob_add_relation(self, fromeid, rtype, toeid)
 
     def delete_relation(self, fromeid, rtype, toeid):
         """provide direct access to the repository method to delete a relation.
@@ -122,14 +174,13 @@
         You may use this in hooks when you know both eids of the relation you
         want to delete.
         """
-        if self.vreg.schema[rtype].inlined:
-            entity = self.entity_from_eid(fromeid)
-            entity[rtype] = None
-            self._super_call(self.repo.glob_update_entity,
-                             entity, set((rtype,)))
-        else:
-            self._super_call(self.repo.glob_delete_relation,
-                             fromeid, rtype, toeid)
+        with security_enabled(self, False, False):
+            if self.vreg.schema[rtype].inlined:
+                entity = self.entity_from_eid(fromeid)
+                entity[rtype] = None
+                self.repo.glob_update_entity(self, entity, set((rtype,)))
+            else:
+                self.repo.glob_delete_relation(self, fromeid, rtype, toeid)
 
     # relations cache handling #################################################
 
@@ -198,10 +249,6 @@
 
     # resource accessors ######################################################
 
-    def actual_session(self):
-        """return the original parent session if any, else self"""
-        return self
-
     def system_sql(self, sql, args=None, rollback_on_failure=True):
         """return a sql cursor on the system database"""
         if not sql.split(None, 1)[0].upper() == 'SELECT':
@@ -245,6 +292,162 @@
         rdef = rschema.rdef(subjtype, objtype)
         return rdef.get(rprop)
 
+    # security control #########################################################
+
+    DEFAULT_SECURITY = object() # evaluates to true by design
+
+    @property
+    def read_security(self):
+        """return a boolean telling if read security is activated or not"""
+        try:
+            return self._threaddata.read_security
+        except AttributeError:
+            self._threaddata.read_security = self.DEFAULT_SECURITY
+            return self._threaddata.read_security
+
+    def set_read_security(self, activated):
+        """[de]activate read security, returning the previous value set for
+        later restoration.
+
+        you should usually use the `security_enabled` context manager instead
+        of this to change security settings.
+        """
+        oldmode = self.read_security
+        self._threaddata.read_security = activated
+        # dbapi_query used to detect hooks triggered by a 'dbapi' query (eg not
+        # issued on the session). This is tricky since the execution model of
+        # a (write) user query is:
+        #
+        # repository.execute (security enabled)
+        #  \-> querier.execute
+        #       \-> repo.glob_xxx (add/update/delete entity/relation)
+        #            \-> deactivate security before calling hooks
+        #                 \-> WE WANT TO CHECK QUERY NATURE HERE
+        #                      \-> potentially, other calls to querier.execute
+        #
+        # so we can't rely on simply checking session.read_security;
+        # remembering the first transition from DEFAULT_SECURITY to something
+        # else (actually False) is not perfect but should be enough
+        self._threaddata.dbapi_query = oldmode is self.DEFAULT_SECURITY
+        return oldmode
+
+    @property
+    def write_security(self):
+        """return a boolean telling if write security is activated or not"""
+        try:
+            return self._threaddata.write_security
+        except AttributeError:
+            self._threaddata.write_security = self.DEFAULT_SECURITY
+            return self._threaddata.write_security
+
+    def set_write_security(self, activated):
+        """[de]activate write security, returning the previous value set for
+        later restoration.
+
+        you should usually use the `security_enabled` context manager instead
+        of this to change security settings.
+        """
+        oldmode = self.write_security
+        self._threaddata.write_security = activated
+        return oldmode
+
+    @property
+    def running_dbapi_query(self):
+        """return a boolean telling if it's triggered by a db-api query or by
+        a session query.
+
+        To be used in hooks, else may have a wrong value.
+        """
+        return getattr(self._threaddata, 'dbapi_query', True)
+
+    # hooks activation control #################################################
+    # all hooks should be activated during normal execution
+
+    HOOKS_ALLOW_ALL = object()
+    HOOKS_DENY_ALL = object()
+
+    @property
+    def hooks_mode(self):
+        return getattr(self._threaddata, 'hooks_mode', self.HOOKS_ALLOW_ALL)
+
+    def set_hooks_mode(self, mode):
+        assert mode is self.HOOKS_ALLOW_ALL or mode is self.HOOKS_DENY_ALL
+        oldmode = getattr(self._threaddata, 'hooks_mode', self.HOOKS_ALLOW_ALL)
+        self._threaddata.hooks_mode = mode
+        return oldmode
+
+    @property
+    def disabled_hook_categories(self):
+        try:
+            return self._threaddata.disabled_hook_cats
+        except AttributeError:
+            cats = self._threaddata.disabled_hook_cats = set()
+            return cats
+
+    @property
+    def enabled_hook_categories(self):
+        try:
+            return self._threaddata.enabled_hook_cats
+        except AttributeError:
+            cats = self._threaddata.enabled_hook_cats = set()
+            return cats
+
+    def disable_hook_categories(self, *categories):
+        """disable the given hook categories:
+
+        - on HOOKS_DENY_ALL mode, ensure those categories are not enabled
+        - on HOOKS_ALLOW_ALL mode, ensure those categories are disabled
+        """
+        changes = set()
+        if self.hooks_mode is self.HOOKS_DENY_ALL:
+            enablecats = self.enabled_hook_categories
+            for category in categories:
+                if category in enablecats:
+                    enablecats.remove(category)
+                    changes.add(category)
+        else:
+            disablecats = self.disabled_hook_categories
+            for category in categories:
+                if category not in disablecats:
+                    disablecats.add(category)
+                    changes.add(category)
+        return tuple(changes)
+
+    def enable_hook_categories(self, *categories):
+        """enable the given hook categories:
+
+        - on HOOKS_DENY_ALL mode, ensure those categories are enabled
+        - on HOOKS_ALLOW_ALL mode, ensure those categories are not disabled
+        """
+        changes = set()
+        if self.hooks_mode is self.HOOKS_DENY_ALL:
+            enablecats = self.enabled_hook_categories
+            for category in categories:
+                if category not in enablecats:
+                    enablecats.add(category)
+                    changes.add(category)
+        else:
+            disablecats = self.disabled_hook_categories
+            for category in categories:
+                if category in disablecats:
+                    disablecats.remove(category)
+                    changes.add(category)
+        return tuple(changes)
+
+    def is_hook_category_activated(self, category):
+        """return a boolean telling if the given category is currently activated
+        or not
+        """
+        if self.hooks_mode is self.HOOKS_DENY_ALL:
+            return category in self.enabled_hook_categories
+        return category not in self.disabled_hook_categories
+
+    def is_hook_activated(self, hook):
+        """return a boolean telling if the given hook class is currently
+        activated or not
+        """
+        return self.is_hook_category_activated(hook.category)
+
     # connection management ###################################################
 
     def keep_pool_mode(self, mode):
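
The save/restore pattern `security_enabled` relies on, and the hook-side check,
look like this (a minimal sketch; the `session` object and RQL are
illustrative):

    # temporarily drop read security, always restoring the previous value
    oldread = session.set_read_security(False)
    try:
        rset = session.execute('Any X WHERE X is CWUser')
    finally:
        session.set_read_security(oldread)

    # inside a hook, distinguish user (db-api) queries from internal ones
    if session.running_dbapi_query:
        pass # triggered by a query coming from the db-api/web side
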
@@ -402,46 +605,15 @@
         """return the source where the entity with id <eid> is located"""
         return self.repo.source_from_eid(eid, self)
 
-    def decorate_rset(self, rset, propagate=False):
+    def decorate_rset(self, rset):
         rset.vreg = self.vreg
-        rset.req = propagate and self or self.actual_session()
+        rset.req = self
         return rset
 
-    @property
-    def super_session(self):
-        try:
-            csession = self.childsession
-        except AttributeError:
-            if isinstance(self, (ChildSession, InternalSession)):
-                csession = self
-            else:
-                csession = ChildSession(self)
-            self.childsession = csession
-        # need shared pool set
-        self.set_pool(checkclosed=False)
-        return csession
-
-    def unsafe_execute(self, rql, kwargs=None, eid_key=None, build_descr=True,
-                       propagate=False):
-        """like .execute but with security checking disabled (this method is
-        internal to the server, it's not part of the db-api)
-
-        if `propagate` is true, the super_session will be attached to the result
-        set instead of the parent session, hence further query done through
-        entities fetched from this result set will bypass security as well
-        """
-        return self.super_session.execute(rql, kwargs, eid_key, build_descr,
-                                          propagate)
-
-    def execute(self, rql, kwargs=None, eid_key=None, build_descr=True,
-                propagate=False):
-        """db-api like method directly linked to the querier execute method
-
-        Becare that unlike actual cursor.execute, `build_descr` default to
-        false
-        """
+    def execute(self, rql, kwargs=None, eid_key=None, build_descr=True):
+        """db-api like method directly linked to the querier execute method"""
         rset = self._execute(self, rql, kwargs, eid_key, build_descr)
-        return self.decorate_rset(rset, propagate)
+        return self.decorate_rset(rset)
 
     def _clear_thread_data(self):
         """remove everything from the thread local storage, except pool
@@ -466,58 +638,60 @@
             return
         if self.commit_state:
             return
-        # on rollback, an operation should have the following state
-        # information:
-        # - processed by the precommit/commit event or not
-        # - if processed, is it the failed operation
-        try:
-            for trstate in ('precommit', 'commit'):
-                processed = []
-                self.commit_state = trstate
-                try:
-                    while self.pending_operations:
-                        operation = self.pending_operations.pop(0)
-                        operation.processed = trstate
-                        processed.append(operation)
+        # by default, operations are executed with security turned off
+        with security_enabled(self, False, False):
+            # on rollback, an operation should have the following state
+            # information:
+            # - processed by the precommit/commit event or not
+            # - if processed, is it the failed operation
+            try:
+                for trstate in ('precommit', 'commit'):
+                    processed = []
+                    self.commit_state = trstate
+                    try:
+                        while self.pending_operations:
+                            operation = self.pending_operations.pop(0)
+                            operation.processed = trstate
+                            processed.append(operation)
+                            operation.handle_event('%s_event' % trstate)
+                        self.pending_operations[:] = processed
+                        self.debug('%s session %s done', trstate, self.id)
+                    except:
+                        self.exception('error while %sing', trstate)
+                        # if error on [pre]commit:
+                        #
+                        # * set .failed = True on the operation causing the failure
+                        # * call revert<event>_event on processed operations
+                        # * call rollback_event on *all* operations
+                        #
+                        # that seems more natural than not calling rollback_event
+                        # for processed operations, and allows generic rollback
+                        # instead of having to implement rollback, revertprecommit
+                        # and revertcommit, which will be enough in most cases.
+                        operation.failed = True
+                        for operation in processed:
+                            operation.handle_event('revert%s_event' % trstate)
+                        # XXX use slice notation since self.pending_operations is a
+                        # read-only property.
+                        self.pending_operations[:] = processed + self.pending_operations
+                        self.rollback(reset_pool)
+                        raise
+                self.pool.commit()
+                self.commit_state = trstate = 'postcommit'
+                while self.pending_operations:
+                    operation = self.pending_operations.pop(0)
+                    operation.processed = trstate
+                    try:
                         operation.handle_event('%s_event' % trstate)
-                    self.pending_operations[:] = processed
-                    self.debug('%s session %s done', trstate, self.id)
-                except:
-                    self.exception('error while %sing', trstate)
-                    # if error on [pre]commit:
-                    #
-                    # * set .failed = True on the operation causing the failure
-                    # * call revert<event>_event on processed operations
-                    # * call rollback_event on *all* operations
-                    #
-                    # that seems more natural than not calling rollback_event
-                    # for processed operations, and allow generic rollback
-                    # instead of having to implements rollback, revertprecommit
-                    # and revertcommit, that will be enough in mont case.
-                    operation.failed = True
-                    for operation in processed:
-                        operation.handle_event('revert%s_event' % trstate)
-                    # XXX use slice notation since self.pending_operations is a
-                    # read-only property.
-                    self.pending_operations[:] = processed + self.pending_operations
-                    self.rollback(reset_pool)
-                    raise
-            self.pool.commit()
-            self.commit_state = trstate = 'postcommit'
-            while self.pending_operations:
-                operation = self.pending_operations.pop(0)
-                operation.processed = trstate
-                try:
-                    operation.handle_event('%s_event' % trstate)
-                except:
-                    self.critical('error while %sing', trstate,
-                                  exc_info=sys.exc_info())
-            self.info('%s session %s done', trstate, self.id)
-        finally:
-            self._clear_thread_data()
-            self._touch()
-            if reset_pool:
-                self.reset_pool(ignoremode=True)
+                    except:
+                        self.critical('error while %sing', trstate,
+                                      exc_info=sys.exc_info())
+                self.info('%s session %s done', trstate, self.id)
+            finally:
+                self._clear_thread_data()
+                self._touch()
+                if reset_pool:
+                    self.reset_pool(ignoremode=True)
 
     def rollback(self, reset_pool=True):
         """rollback the current session's transaction"""
@@ -527,21 +701,23 @@
             self._touch()
             self.debug('rollback session %s done (no db activity)', self.id)
             return
-        try:
-            while self.pending_operations:
-                try:
-                    operation = self.pending_operations.pop(0)
-                    operation.handle_event('rollback_event')
-                except:
-                    self.critical('rollback error', exc_info=sys.exc_info())
-                    continue
-            self.pool.rollback()
-            self.debug('rollback for session %s done', self.id)
-        finally:
-            self._clear_thread_data()
-            self._touch()
-            if reset_pool:
-                self.reset_pool(ignoremode=True)
+        # by default, operations are executed with security turned off
+        with security_enabled(self, False, False):
+            try:
+                while self.pending_operations:
+                    try:
+                        operation = self.pending_operations.pop(0)
+                        operation.handle_event('rollback_event')
+                    except:
+                        self.critical('rollback error', exc_info=sys.exc_info())
+                        continue
+                self.pool.rollback()
+                self.debug('rollback for session %s done', self.id)
+            finally:
+                self._clear_thread_data()
+                self._touch()
+                if reset_pool:
+                    self.reset_pool(ignoremode=True)
 
     def close(self):
         """do not close pool on session close, since they are shared now"""
@@ -664,6 +840,25 @@
 
     # deprecated ###############################################################
 
+    @deprecated("[3.7] control security with session.[read|write]_security")
+    def unsafe_execute(self, rql, kwargs=None, eid_key=None, build_descr=True,
+                       propagate=False):
+        """like .execute but with security checking disabled (this method is
+        internal to the server, it's not part of the db-api)
+        """
+        return self.execute(rql, kwargs, eid_key, build_descr)
+
+    @property
+    @deprecated("[3.7] is_super_session is deprecated, test "
+                "session.read_security and or session.write_security")
+    def is_super_session(self):
+        return not self.read_security or not self.write_security
+
+    @deprecated("[3.7] session is actual session")
+    def actual_session(self):
+        """return the original parent session if any, else self"""
+        return self
+
     @property
     @deprecated("[3.6] use session.vreg.schema")
     def schema(self):
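
Code relying on the deprecated attributes above typically migrates as follows
(a minimal sketch; `rql` and `kwargs` are illustrative):

    # before 3.7
    #   rset = session.unsafe_execute(rql, kwargs)
    #   if session.is_super_session: ...

    # from 3.7 on
    with security_enabled(session, read=False, write=False):
        rset = session.execute(rql, kwargs)
    if not session.read_security or not session.write_security:
        pass # equivalent of the old is_super_session test
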
@@ -690,84 +885,6 @@
         return self.entity_from_eid(eid)
 
 
-class ChildSession(Session):
-    """child (or internal) session are used to hijack the security system
-    """
-    cnxtype = 'inmemory'
-
-    def __init__(self, parent_session):
-        self.id = None
-        self.is_internal_session = False
-        self.is_super_session = True
-        # session which has created this one
-        self.parent_session = parent_session
-        self.user = InternalManager()
-        self.user.req = self # XXX remove when "vreg = user.req.vreg" hack in entity.py is gone
-        self.repo = parent_session.repo
-        self.vreg = parent_session.vreg
-        self.data = parent_session.data
-        self.encoding = parent_session.encoding
-        self.lang = parent_session.lang
-        self._ = self.__ = parent_session._
-        # short cut to querier .execute method
-        self._execute = self.repo.querier.execute
-
-    @property
-    def super_session(self):
-        return self
-
-    def get_mode(self):
-        return self.parent_session.mode
-    def set_mode(self, value):
-        self.parent_session.set_mode(value)
-    mode = property(get_mode, set_mode)
-
-    def get_commit_state(self):
-        return self.parent_session.commit_state
-    def set_commit_state(self, value):
-        self.parent_session.set_commit_state(value)
-    commit_state = property(get_commit_state, set_commit_state)
-
-    @property
-    def pool(self):
-        return self.parent_session.pool
-    @property
-    def pending_operations(self):
-        return self.parent_session.pending_operations
-    @property
-    def transaction_data(self):
-        return self.parent_session.transaction_data
-
-    def set_pool(self):
-        """the session need a pool to execute some queries"""
-        self.parent_session.set_pool()
-
-    def reset_pool(self):
-        """the session has no longer using its pool, at least for some time
-        """
-        self.parent_session.reset_pool()
-
-    def actual_session(self):
-        """return the original parent session if any, else self"""
-        return self.parent_session
-
-    def commit(self, reset_pool=True):
-        """commit the current session's transaction"""
-        self.parent_session.commit(reset_pool)
-
-    def rollback(self, reset_pool=True):
-        """rollback the current session's transaction"""
-        self.parent_session.rollback(reset_pool)
-
-    def close(self):
-        """do not close pool on session close, since they are shared now"""
-        self.rollback()
-
-    def user_data(self):
-        """returns a dictionnary with this user's information"""
-        return self.parent_session.user_data()
-
-
 class InternalSession(Session):
     """special session created internaly by the repository"""
 
@@ -777,11 +894,7 @@
         self.user.req = self # XXX remove when "vreg = user.req.vreg" hack in entity.py is gone
         self.cnxtype = 'inmemory'
         self.is_internal_session = True
-        self.is_super_session = True
-
-    @property
-    def super_session(self):
-        return self
+        self.disable_hook_categories('integrity')
 
 
 class InternalManager(object):
--- a/server/sources/extlite.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/server/sources/extlite.py	Tue Mar 09 11:01:44 2010 +0100
@@ -20,12 +20,6 @@
         self.source = source
         self._cnx = None
 
-    @property
-    def logged_user(self):
-        if self._cnx is None:
-            self._cnx = self.source._sqlcnx
-        return self._cnx.logged_user
-
     def cursor(self):
         if self._cnx is None:
             self._cnx = self.source._sqlcnx
--- a/server/sources/native.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/server/sources/native.py	Tue Mar 09 11:01:44 2010 +0100
@@ -21,10 +21,8 @@
 from logilab.common.cache import Cache
 from logilab.common.decorators import cached, clear_cache
 from logilab.common.configuration import Method
-from logilab.common.adbh import get_adv_func_helper
 from logilab.common.shellutils import getlogin
-
-from indexer import get_indexer
+from logilab.db import get_db_helper
 
 from cubicweb import UnknownEid, AuthenticationError, Binary, server
 from cubicweb.cwconfig import CubicWebNoAppConfiguration
@@ -151,16 +149,9 @@
                                 *args, **kwargs)
         # sql generator
         self._rql_sqlgen = self.sqlgen_class(appschema, self.dbhelper,
-                                             self.encoding, ATTR_MAP.copy())
+                                             ATTR_MAP.copy())
         # full text index helper
         self.do_fti = not repo.config['delay-full-text-indexation']
-        if self.do_fti:
-            self.indexer = get_indexer(self.dbdriver, self.encoding)
-            # XXX should go away with logilab.db
-            self.dbhelper.fti_uid_attr = self.indexer.uid_attr
-            self.dbhelper.fti_table = self.indexer.table
-            self.dbhelper.fti_restriction_sql = self.indexer.restriction_sql
-            self.dbhelper.fti_need_distinct_query = self.indexer.need_distinct
         # sql queries cache
         self._cache = Cache(repo.config['rql-cache-size'])
         self._temp_table_data = {}
@@ -207,7 +198,7 @@
         pool.pool_set()
         # check full text index availibility
         if self.do_fti:
-            if not self.indexer.has_fti_table(pool['system']):
+            if not self.dbhelper.has_fti_table(pool['system']):
                 if not self.repo.config.creating:
                     self.critical('no text index table')
                 self.do_fti = False
@@ -321,8 +312,7 @@
         assert isinstance(sql, basestring), repr(sql)
         try:
             cursor = self.doexec(session, sql, args)
-        except (self.dbapi_module.OperationalError,
-                self.dbapi_module.InterfaceError):
+        except (self.OperationalError, self.InterfaceError):
             # FIXME: better detection of deconnection pb
             self.info("request failed '%s' ... retry with a new cursor", sql)
             session.pool.reconnect(self)
@@ -342,7 +332,7 @@
             prefix='ON THE FLY temp data insertion into %s from' % table)
         # generate sql queries if we are able to do so
         sql, query_args = self._rql_sqlgen.generate(union, args, varmap)
-        query = 'INSERT INTO %s %s' % (table, sql.encode(self.encoding))
+        query = 'INSERT INTO %s %s' % (table, sql.encode(self._dbencoding))
         self.doexec(session, query, self.merge_args(args, query_args))
 
     def manual_insert(self, results, table, session):
@@ -359,7 +349,7 @@
             row = tuple(row)
             for index, cell in enumerate(row):
                 if isinstance(cell, Binary):
-                    cell = self.binary(cell.getvalue())
+                    cell = self._binary(cell.getvalue())
                 kwargs[str(index)] = cell
             kwargs_list.append(kwargs)
         self.doexecmany(session, query, kwargs_list)
@@ -614,7 +604,7 @@
         index
         """
         try:
-            self.indexer.cursor_unindex_object(eid, session.pool['system'])
+            self.dbhelper.cursor_unindex_object(eid, session.pool['system'])
         except Exception: # let KeyboardInterrupt / SystemExit propagate
             self.exception('error while unindexing %s', eid)
 
@@ -625,8 +615,8 @@
         try:
             # use cursor_index_object, not cursor_reindex_object since
             # unindexing done in the FTIndexEntityOp
-            self.indexer.cursor_index_object(entity.eid, entity,
-                                             session.pool['system'])
+            self.dbhelper.cursor_index_object(entity.eid, entity,
+                                              session.pool['system'])
         except Exception: # let KeyboardInterrupt / SystemExit propagate
             self.exception('error while reindexing %s', entity)
 
@@ -659,7 +649,7 @@
 
 
 def sql_schema(driver):
-    helper = get_adv_func_helper(driver)
+    helper = get_db_helper(driver)
     tstamp_col_type = helper.TYPE_MAPPING['Datetime']
     schema = """
 /* Create the repository's system database */
@@ -692,7 +682,7 @@
 
 
 def sql_drop_schema(driver):
-    helper = get_adv_func_helper(driver)
+    helper = get_db_helper(driver)
     return """
 %s
 DROP TABLE entities;
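
Full-text indexing operations formerly delegated to the `indexer` object are
now plain methods of the db helper, as used above; a minimal sketch (the
`source`, `session` and `entity` objects are illustrative):

    dbhelper = source.dbhelper
    assert dbhelper.has_fti_table(session.pool['system'])
    # (re)index a single entity
    dbhelper.cursor_index_object(entity.eid, entity, session.pool['system'])
    # later, remove it from the index
    dbhelper.cursor_unindex_object(entity.eid, session.pool['system'])
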
--- a/server/sources/rql2sql.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/server/sources/rql2sql.py	Tue Mar 09 11:01:44 2010 +0100
@@ -332,10 +332,10 @@
     protected by a lock
     """
 
-    def __init__(self, schema, dbms_helper, dbencoding='UTF-8', attrmap=None):
+    def __init__(self, schema, dbms_helper, attrmap=None):
         self.schema = schema
         self.dbms_helper = dbms_helper
-        self.dbencoding = dbencoding
+        self.dbencoding = dbms_helper.dbencoding
         self.keyword_map = {'NOW' : self.dbms_helper.sql_current_timestamp,
                             'TODAY': self.dbms_helper.sql_current_date,
                             }
@@ -977,10 +977,9 @@
 
     def visit_function(self, func):
         """generate SQL name for a function"""
-        # function_description will check function is supported by the backend
-        sqlname = self.dbms_helper.func_sqlname(func.name)
-        return '%s(%s)' % (sqlname, ', '.join(c.accept(self)
-                                              for c in func.children))
+        # func_as_sql will check the function is supported by the backend
+        return self.dbms_helper.func_as_sql(func.name,
+                                            [c.accept(self) for c in func.children])
 
     def visit_constant(self, constant):
         """generate SQL name for a constant"""
--- a/server/sources/storages.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/server/sources/storages.py	Tue Mar 09 11:01:44 2010 +0100
@@ -92,9 +92,8 @@
         cu = sysource.doexec(entity._cw,
                              'SELECT cw_%s FROM cw_%s WHERE cw_eid=%s' % (
                                  attr, entity.__regid__, entity.eid))
-        dbmod = sysource.dbapi_module
-        return dbmod.process_value(cu.fetchone()[0], [None, dbmod.BINARY],
-                                   binarywrap=str)
+        return sysource._process_value(cu.fetchone()[0],
+                                       [None, sysource.dbhelper.dbapi_module.BINARY],
+                                       binarywrap=str)
 
 
 class AddFileOp(Operation):
--- a/server/sqlutils.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/server/sqlutils.py	Tue Mar 09 11:01:44 2010 +0100
@@ -11,21 +11,17 @@
 import subprocess
 from datetime import datetime, date
 
-import logilab.common as lgc
-from logilab.common import db
+from logilab import db, common as lgc
 from logilab.common.shellutils import ProgressBar
-from logilab.common.adbh import get_adv_func_helper
-from logilab.common.sqlgen import SQLGenerator
 from logilab.common.date import todate, todatetime
-
-from indexer import get_indexer
+from logilab.db.sqlgen import SQLGenerator
 
 from cubicweb import Binary, ConfigurationError
 from cubicweb.uilib import remove_html_tags
 from cubicweb.schema import PURE_VIRTUAL_RTYPES
 from cubicweb.server import SQL_CONNECT_HOOKS
 from cubicweb.server.utils import crypt_password
-
+from rql.utils import RQL_FUNCTIONS_REGISTRY
 
 lgc.USE_MX_DATETIME = False
 SQL_PREFIX = 'cw_'
@@ -77,8 +73,8 @@
     w(native.grant_schema(user, set_owner))
     w('')
     if text_index:
-        indexer = get_indexer(driver)
-        w(indexer.sql_grant_user(user))
+        dbhelper = db.get_db_helper(driver)
+        w(dbhelper.sql_grant_user_on_fti(user))
         w('')
     w(grant_schema(schema, user, set_owner, skip_entities=skip_entities, prefix=SQL_PREFIX))
     return '\n'.join(output)
@@ -96,11 +92,10 @@
     w = output.append
     w(native.sql_schema(driver))
     w('')
+    dbhelper = db.get_db_helper(driver)
     if text_index:
-        indexer = get_indexer(driver)
-        w(indexer.sql_init_fti())
+        w(dbhelper.sql_init_fti())
         w('')
-    dbhelper = get_adv_func_helper(driver)
     w(schema2sql(dbhelper, schema, prefix=SQL_PREFIX,
                  skip_entities=skip_entities, skip_relations=skip_relations))
     if dbhelper.users_support and user:
@@ -120,8 +115,8 @@
     w(native.sql_drop_schema(driver))
     w('')
     if text_index:
-        indexer = get_indexer(driver)
-        w(indexer.sql_drop_fti())
+        dbhelper = db.get_db_helper(driver)
+        w(dbhelper.sql_drop_fti())
         w('')
     w(dropschema2sql(schema, prefix=SQL_PREFIX,
                      skip_entities=skip_entities,
@@ -137,55 +132,42 @@
     def __init__(self, source_config):
         try:
             self.dbdriver = source_config['db-driver'].lower()
-            self.dbname = source_config['db-name']
+            dbname = source_config['db-name']
         except KeyError:
             raise ConfigurationError('missing some expected entries in sources file')
-        self.dbhost = source_config.get('db-host')
+        dbhost = source_config.get('db-host')
         port = source_config.get('db-port')
-        self.dbport = port and int(port) or None
-        self.dbuser = source_config.get('db-user')
-        self.dbpasswd = source_config.get('db-password')
-        self.encoding = source_config.get('db-encoding', 'UTF-8')
-        self.dbapi_module = db.get_dbapi_compliant_module(self.dbdriver)
-        self.dbdriver_extra_args = source_config.get('db-extra-arguments')
-        self.binary = self.dbapi_module.Binary
-        self.dbhelper = self.dbapi_module.adv_func_helper
+        dbport = port and int(port) or None
+        dbuser = source_config.get('db-user')
+        dbpassword = source_config.get('db-password')
+        dbencoding = source_config.get('db-encoding', 'UTF-8')
+        dbextraargs = source_config.get('db-extra-arguments')
+        self.dbhelper = db.get_db_helper(self.dbdriver)
+        self.dbhelper.record_connection_info(dbname, dbhost, dbport, dbuser,
+                                             dbpassword, dbextraargs,
+                                             dbencoding)
         self.sqlgen = SQLGenerator()
+        # copy back some commonly accessed attributes
+        dbapi_module = self.dbhelper.dbapi_module
+        self.OperationalError = dbapi_module.OperationalError
+        self.InterfaceError = dbapi_module.InterfaceError
+        self._binary = dbapi_module.Binary
+        self._process_value = dbapi_module.process_value
+        self._dbencoding = dbencoding
 
-    def get_connection(self, user=None, password=None):
+    def get_connection(self):
         """open and return a connection to the database"""
-        if user or self.dbuser:
-            self.info('connecting to %s@%s for user %s', self.dbname,
-                      self.dbhost or 'localhost', user or self.dbuser)
-        else:
-            self.info('connecting to %s@%s', self.dbname,
-                      self.dbhost or 'localhost')
-        extra = {}
-        if self.dbdriver_extra_args:
-            extra = {'extra_args': self.dbdriver_extra_args}
-        cnx = self.dbapi_module.connect(self.dbhost, self.dbname,
-                                        user or self.dbuser,
-                                        password or self.dbpasswd,
-                                        port=self.dbport,
-                                        **extra)
-        init_cnx(self.dbdriver, cnx)
-        #self.dbapi_module.type_code_test(cnx.cursor())
-        return cnx
+        return self.dbhelper.get_connection()
 
     def backup_to_file(self, backupfile):
-        for cmd in self.dbhelper.backup_commands(self.dbname, self.dbhost,
-                                                 self.dbuser, backupfile,
-                                                 dbport=self.dbport,
+        for cmd in self.dbhelper.backup_commands(backupfile,
                                                  keepownership=False):
             if _run_command(cmd):
                 if not confirm('   [Failed] Continue anyway?', default='n'):
                     raise Exception('Failed command: %s' % cmd)
 
     def restore_from_file(self, backupfile, confirm, drop=True):
-        for cmd in self.dbhelper.restore_commands(self.dbname, self.dbhost,
-                                                  self.dbuser, backupfile,
-                                                  self.encoding,
-                                                  dbport=self.dbport,
+        for cmd in self.dbhelper.restore_commands(backupfile,
                                                   keepownership=False,
                                                   drop=drop):
             if _run_command(cmd):
@@ -198,7 +180,7 @@
             for key, val in args.iteritems():
                 # convert cubicweb binary into db binary
                 if isinstance(val, Binary):
-                    val = self.binary(val.getvalue())
+                    val = self._binary(val.getvalue())
                 newargs[key] = val
             # should not collide
             newargs.update(query_args)
@@ -208,10 +190,12 @@
     def process_result(self, cursor):
         """return a list of CubicWeb compliant values from data in the given cursor
         """
+        # begin bind to locals for optimization
         descr = cursor.description
-        encoding = self.encoding
-        process_value = self.dbapi_module.process_value
+        encoding = self._dbencoding
+        process_value = self._process_value
         binary = Binary
+        # /end
         results = cursor.fetchall()
         for i, line in enumerate(results):
             result = []
@@ -242,14 +226,14 @@
                         value = value.getvalue()
                     else:
                         value = crypt_password(value)
-                    value = self.binary(value)
+                    value = self._binary(value)
                 # XXX needed for sqlite but I don't think it is for other backends
                 elif atype == 'Datetime' and isinstance(value, date):
                     value = todatetime(value)
                 elif atype == 'Date' and isinstance(value, datetime):
                     value = todate(value)
                 elif isinstance(value, Binary):
-                    value = self.binary(value.getvalue())
+                    value = self._binary(value.getvalue())
             attrs[SQL_PREFIX+str(attr)] = value
         return attrs
 
@@ -259,12 +243,8 @@
 set_log_methods(SQLAdapterMixIn, getLogger('cubicweb.sqladapter'))
 
 def init_sqlite_connexion(cnx):
-    # XXX should not be publicly exposed
-    #def comma_join(strings):
-    #    return ', '.join(strings)
-    #cnx.create_function("COMMA_JOIN", 1, comma_join)
 
-    class concat_strings(object):
+    class group_concat(object):
         def __init__(self):
             self.values = []
         def step(self, value):
@@ -272,10 +252,7 @@
                 self.values.append(value)
         def finalize(self):
             return ', '.join(self.values)
-    # renamed to GROUP_CONCAT in cubicweb 2.45, keep old name for bw compat for
-    # some time
-    cnx.create_aggregate("CONCAT_STRINGS", 1, concat_strings)
-    cnx.create_aggregate("GROUP_CONCAT", 1, concat_strings)
+    cnx.create_aggregate("GROUP_CONCAT", 1, group_concat)
 
     def _limit_size(text, maxsize, format='text/plain'):
         if len(text) < maxsize:
@@ -293,9 +270,9 @@
     def limit_size2(text, maxsize):
         return _limit_size(text, maxsize)
     cnx.create_function("TEXT_LIMIT_SIZE", 2, limit_size2)
+
     import yams.constraints
-    if hasattr(yams.constraints, 'patch_sqlite_decimal'):
-        yams.constraints.patch_sqlite_decimal()
+    yams.constraints.patch_sqlite_decimal()
 
     def fspath(eid, etype, attr):
         try:
@@ -320,10 +297,5 @@
                 raise
     cnx.create_function('_fsopen', 1, _fsopen)
 
-
 sqlite_hooks = SQL_CONNECT_HOOKS.setdefault('sqlite', [])
 sqlite_hooks.append(init_sqlite_connexion)
-
-def init_cnx(driver, cnx):
-    for hook in SQL_CONNECT_HOOKS.get(driver, ()):
-        hook(cnx)
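
For reference, the GROUP_CONCAT aggregate registered by init_sqlite_connexion above can be reproduced on a bare sqlite3 connection. The sketch below is illustration only and not part of the patch; the throwaway cw_CWGroup table merely mimics the group names exercised by unittest_querier further down.

# Illustration only: not part of the changeset; uses the stdlib sqlite3 module.
import sqlite3

class group_concat(object):
    """aggregate joining non-NULL values with ', ', as registered above"""
    def __init__(self):
        self.values = []
    def step(self, value):
        if value is not None:
            self.values.append(value)
    def finalize(self):
        return ', '.join(self.values)

cnx = sqlite3.connect(':memory:')
cnx.create_aggregate("GROUP_CONCAT", 1, group_concat)
# throwaway table mimicking the group names used by unittest_querier below
cnx.execute('CREATE TABLE cw_CWGroup (cw_name TEXT)')
cnx.executemany('INSERT INTO cw_CWGroup VALUES (?)',
                [('guests',), ('managers',), ('owners',), ('users',)])
row = cnx.execute('SELECT GROUP_CONCAT(cw_name) FROM cw_CWGroup').fetchone()
assert sorted(row[0].split(', ')) == ['guests', 'managers', 'owners', 'users']
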
--- a/server/ssplanner.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/server/ssplanner.py	Tue Mar 09 11:01:44 2010 +0100
@@ -63,6 +63,7 @@
         return {}
     eidconsts = {}
     neweids = session.transaction_data.get('neweids', ())
+    checkread = session.read_security
     for rel in rqlst.where.get_nodes(Relation):
         if rel.r_type == 'eid' and not rel.neged(strict=True):
             lhs, rhs = rel.get_variable_parts()
@@ -71,7 +72,7 @@
                 # check read permission here since it may not be done by
                 # the generated select substep if not emitted (e.g. nothing
                 # to be selected)
-                if eid not in neweids:
+                if checkread and eid not in neweids:
                     eschema(session.describe(eid)[0]).check_perm(session, 'read')
                 eidconsts[lhs.variable] = eid
     return eidconsts
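
The ssplanner hunk above only adds the checkread guard. Below is a minimal, self-contained sketch of that control flow; every class and helper name in it is hypothetical, only the 'if checkread and eid not in neweids' logic comes from the patch.

# Illustration only: stand-in objects, not the CubicWeb API.
class FakeSession(object):
    def __init__(self, read_security):
        self.read_security = read_security
    def describe(self, eid):
        # in a real session this returns a tuple whose first item is the etype
        return ('Personne',)

class FakeEntitySchema(object):
    def check_perm(self, session, action):
        raise Exception('%s not granted' % action)

def check_eid_const(session, eid, neweids, eschema):
    # the permission check only runs when read security is enabled and the
    # entity was not created within the current transaction, which is the
    # guard added by the hunk above
    if session.read_security and eid not in neweids:
        eschema(session.describe(eid)[0]).check_perm(session, 'read')

# with read security disabled, no permission error is raised for eid 42
check_eid_const(FakeSession(read_security=False), 42, set(),
                lambda etype: FakeEntitySchema())
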
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/server/test/data/site_cubicweb.py	Tue Mar 09 11:01:44 2010 +0100
@@ -0,0 +1,23 @@
+"""
+
+:organization: Logilab
+:copyright: 2001-2010 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
+:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
+:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
+"""
+
+from logilab.db import FunctionDescr
+from logilab.db.sqlite import register_sqlite_pyfunc
+from rql.utils import register_function
+
+try:
+    class DUMB_SORT(FunctionDescr):
+        supported_backends = ('sqlite',)
+
+    register_function(DUMB_SORT)
+    def dumb_sort(something):
+        return something
+    register_sqlite_pyfunc(dumb_sort)
+except:
+    # already registered
+    pass
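
register_sqlite_pyfunc now takes over the per-connection wiring that the deleted site_erudi.py (removed just below) did by hand through SQL_CONNECT_HOOKS. As a rough standalone approximation of the effect, the same DUMB_SORT behaviour can be obtained directly on a sqlite3 connection; this is an assumption about the effect, not logilab.db's actual implementation.

# Illustration only: approximates the effect, not logilab.db's implementation.
import sqlite3

def dumb_sort(something):
    return something

cnx = sqlite3.connect(':memory:')
# register_sqlite_pyfunc presumably arranges something like this for each
# new sqlite connection, replacing the manual SQL_CONNECT_HOOKS wiring
cnx.create_function('DUMB_SORT', 1, dumb_sort)
assert cnx.execute("SELECT DUMB_SORT('abc')").fetchone()[0] == 'abc'
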
--- a/server/test/data/site_erudi.py	Mon Mar 08 19:11:47 2010 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,27 +0,0 @@
-"""
-
-:organization: Logilab
-:copyright: 2001-2010 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
-:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
-:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
-"""
-from logilab.common.adbh import FunctionDescr
-from rql.utils import register_function
-
-try:
-    class DUMB_SORT(FunctionDescr):
-        supported_backends = ('sqlite',)
-
-    register_function(DUMB_SORT)
-
-
-    def init_sqlite_connexion(cnx):
-        def dumb_sort(something):
-            return something
-        cnx.create_function("DUMB_SORT", 1, dumb_sort)
-
-    from cubicweb.server import sqlutils
-    sqlutils.SQL_CONNECT_HOOKS['sqlite'].append(init_sqlite_connexion)
-except:
-    # already registered
-    pass
--- a/server/test/unittest_hook.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/server/test/unittest_hook.py	Tue Mar 09 11:01:44 2010 +0100
@@ -108,13 +108,19 @@
 
     def test_call_hook(self):
         self.o.register(AddAnyHook)
-        cw = mock_object(vreg=self.vreg)
-        self.assertRaises(HookCalled, self.o.call_hooks, 'before_add_entity', cw)
+        dis = set()
+        cw = mock_object(vreg=self.vreg,
+                         set_read_security=lambda *a,**k: None,
+                         set_write_security=lambda *a,**k: None,
+                         is_hook_activated=lambda x, cls: cls.category not in dis)
+        self.assertRaises(HookCalled,
+                          self.o.call_hooks, 'before_add_entity', cw)
         self.o.call_hooks('before_delete_entity', cw) # nothing to call
-        config.disabled_hooks_categories.add('cat1')
+        dis.add('cat1')
         self.o.call_hooks('before_add_entity', cw) # disabled hooks category, not called
-        config.disabled_hooks_categories.remove('cat1')
-        self.assertRaises(HookCalled, self.o.call_hooks, 'before_add_entity', cw)
+        dis.remove('cat1')
+        self.assertRaises(HookCalled,
+                          self.o.call_hooks, 'before_add_entity', cw)
         self.o.unregister(AddAnyHook)
         self.o.call_hooks('before_add_entity', cw) # nothing to call
 
--- a/server/test/unittest_querier.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/server/test/unittest_querier.py	Tue Mar 09 11:01:44 2010 +0100
@@ -35,7 +35,7 @@
 SQL_CONNECT_HOOKS['sqlite'].append(init_sqlite_connexion)
 
 
-from logilab.common.adbh import _GenericAdvFuncHelper
+from logilab.db import _GenericAdvFuncHelper
 TYPEMAP = _GenericAdvFuncHelper.TYPE_MAPPING
 
 class MakeSchemaTC(TestCase):
@@ -397,6 +397,18 @@
         rset = self.execute('Note X WHERE NOT Y evaluee X')
         self.assertEquals(len(rset.rows), 1, rset.rows)
 
+    def test_select_date_extraction(self):
+        self.execute("INSERT Personne X: X nom 'foo', X datenaiss %(d)s",
+                     {'d': datetime(2001, 2,3, 12,13)})
+        test_data = [('YEAR', 2001), ('MONTH', 2), ('DAY', 3),
+                     ('HOUR', 12), ('MINUTE', 13)]
+        for funcname, result in test_data:
+            rset = self.execute('Any %s(D) WHERE X is Personne, X datenaiss D'
+                                % funcname)
+            self.assertEquals(len(rset.rows), 1)
+            self.assertEquals(rset.rows[0][0], result)
+            self.assertEquals(rset.description, [('Int',)])
+
     def test_select_aggregat_count(self):
         rset = self.execute('Any COUNT(X)')
         self.assertEquals(len(rset.rows), 1)
@@ -430,7 +442,7 @@
         self.assertEquals(rset.description, [('Int',)])
 
     def test_select_custom_aggregat_concat_string(self):
-        rset = self.execute('Any CONCAT_STRINGS(N) WHERE X is CWGroup, X name N')
+        rset = self.execute('Any GROUP_CONCAT(N) WHERE X is CWGroup, X name N')
         self.failUnless(rset)
         self.failUnlessEqual(sorted(rset[0][0].split(', ')), ['guests', 'managers',
                                                              'owners', 'users'])
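
test_select_date_extraction above relies on YEAR/MONTH/DAY/HOUR/MINUTE being usable on the sqlite test backend, where the generated SQL is a plain MONTH(...) call (see the unittest_rql2sql hunks further down). sqlite has no such function natively, so the sketch below shows one plausible way to provide it on a connection; it is an assumption for illustration, not how logilab.db actually implements it.

# Illustration only: one plausible MONTH() for sqlite, not logilab.db's code.
import sqlite3
from datetime import datetime

def month(value):
    if value is None:
        return None
    # sqlite passes dates around as ISO strings, e.g. '2001-02-03 12:13:00'
    return datetime.strptime(value.split('.')[0], '%Y-%m-%d %H:%M:%S').month

cnx = sqlite3.connect(':memory:')
cnx.create_function('MONTH', 1, month)
assert cnx.execute("SELECT MONTH('2001-02-03 12:13:00')").fetchone()[0] == 2
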
--- a/server/test/unittest_repository.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/server/test/unittest_repository.py	Tue Mar 09 11:01:44 2010 +0100
@@ -184,7 +184,9 @@
         repo = self.repo
         cnxid = repo.connect(self.admlogin, password=self.admpassword)
         # rollback state change which trigger TrInfo insertion
-        user = repo._get_session(cnxid).user
+        session = repo._get_session(cnxid)
+        session.set_pool()
+        user = session.user
         user.fire_transition('deactivate')
         rset = repo.execute(cnxid, 'TrInfo T WHERE T wf_info_for X, X eid %(x)s', {'x': user.eid})
         self.assertEquals(len(rset), 1)
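
The repository test now has to call set_pool() before touching session.user. The stub below only illustrates that ordering constraint; the classes are made up, and the idea that set_pool reserves a database connection for the session is an interpretation rather than something stated by the patch.

# Illustration only: made-up classes showing the ordering constraint.
class FakeSession(object):
    def __init__(self):
        self._pool = None
    def set_pool(self):
        # presumably reserves a database pool/connection for this session
        self._pool = object()
    @property
    def user(self):
        assert self._pool is not None, 'set_pool() must be called first'
        return 'user entity'

session = FakeSession()
session.set_pool()
assert session.user == 'user entity'
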
--- a/server/test/unittest_rql2sql.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/server/test/unittest_rql2sql.py	Tue Mar 09 11:01:44 2010 +0100
@@ -13,7 +13,6 @@
 from logilab.common.testlib import TestCase, unittest_main, mock_object
 
 from rql import BadRQLQuery
-from indexer import get_indexer
 
 #from cubicweb.server.sources.native import remove_unused_solutions
 from cubicweb.server.sources.rql2sql import SQLGenerator, remove_unused_solutions
@@ -1072,7 +1071,7 @@
 WHERE rel_is0.eid_to=2'''),
 
     ]
-from logilab.common.adbh import ADV_FUNC_HELPER_DIRECTORY
+from logilab.db import get_db_helper
 
 class CWRQLTC(RQLGeneratorTC):
     schema = schema
@@ -1106,12 +1105,7 @@
     #capture = True
     def setUp(self):
         RQLGeneratorTC.setUp(self)
-        indexer = get_indexer('postgres', 'utf8')
-        dbms_helper = ADV_FUNC_HELPER_DIRECTORY['postgres']
-        dbms_helper.fti_uid_attr = indexer.uid_attr
-        dbms_helper.fti_table = indexer.table
-        dbms_helper.fti_restriction_sql = indexer.restriction_sql
-        dbms_helper.fti_need_distinct_query = indexer.need_distinct
+        dbms_helper = get_db_helper('postgres')
         self.o = SQLGenerator(schema, dbms_helper)
 
     def _norm_sql(self, sql):
@@ -1212,6 +1206,13 @@
 FROM cw_CWUser AS _X
 WHERE _X.cw_login IS NULL''')
 
+
+    def test_date_extraction(self):
+        self._check("Any MONTH(D) WHERE P is Personne, P creation_date D",
+                    '''SELECT CAST(EXTRACT(MONTH from _P.cw_creation_date) AS INTEGER)
+FROM cw_Personne AS _P''')
+
+
     def test_parser_parse(self):
         for t in self._parse(PARSER):
             yield t
@@ -1409,17 +1410,17 @@
 
     def setUp(self):
         RQLGeneratorTC.setUp(self)
-        indexer = get_indexer('sqlite', 'utf8')
-        dbms_helper = ADV_FUNC_HELPER_DIRECTORY['sqlite']
-        dbms_helper.fti_uid_attr = indexer.uid_attr
-        dbms_helper.fti_table = indexer.table
-        dbms_helper.fti_restriction_sql = indexer.restriction_sql
-        dbms_helper.fti_need_distinct_query = indexer.need_distinct
+        dbms_helper = get_db_helper('sqlite')
         self.o = SQLGenerator(schema, dbms_helper)
 
     def _norm_sql(self, sql):
         return sql.strip().replace(' ILIKE ', ' LIKE ').replace('\nINTERSECT ALL\n', '\nINTERSECT\n')
 
+    def test_date_extraction(self):
+        self._check("Any MONTH(D) WHERE P is Personne, P creation_date D",
+                    '''SELECT MONTH(_P.cw_creation_date)
+FROM cw_Personne AS _P''')
+
     def test_union(self):
         for t in self._parse((
             ('(Any N ORDERBY 1 WHERE X name N, X is State)'
@@ -1517,12 +1518,7 @@
 
     def setUp(self):
         RQLGeneratorTC.setUp(self)
-        indexer = get_indexer('mysql', 'utf8')
-        dbms_helper = ADV_FUNC_HELPER_DIRECTORY['mysql']
-        dbms_helper.fti_uid_attr = indexer.uid_attr
-        dbms_helper.fti_table = indexer.table
-        dbms_helper.fti_restriction_sql = indexer.restriction_sql
-        dbms_helper.fti_need_distinct_query = indexer.need_distinct
+        dbms_helper = get_db_helper('mysql')
         self.o = SQLGenerator(schema, dbms_helper)
 
     def _norm_sql(self, sql):
@@ -1537,6 +1533,11 @@
             latest = firstword
         return '\n'.join(newsql)
 
+    def test_date_extraction(self):
+        self._check("Any MONTH(D) WHERE P is Personne, P creation_date D",
+                    '''SELECT EXTRACT(MONTH from _P.cw_creation_date)
+FROM cw_Personne AS _P''')
+
     def test_from_clause_needed(self):
         queries = [("Any 1 WHERE EXISTS(T is CWGroup, T name 'managers')",
                     '''SELECT 1
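
All three backend-specific test classes now obtain their helper through get_db_helper instead of patching fti_* attributes onto ADV_FUNC_HELPER_DIRECTORY entries by hand. The sketch below recalls the new lookup; get_db_helper and the backend names come from the hunks above, while the comment about bundled full-text-index settings is presumed.

# Illustration only: requires the logilab.db package targeted by this patch.
from logilab.db import get_db_helper

# the returned helper presumably ships its own full-text-index settings,
# which is why the manual fti_* assignments above could be dropped
helper = get_db_helper('postgres')
# self.o = SQLGenerator(schema, helper)   # as in the setUp methods above
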
--- a/server/test/unittest_sqlutils.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/server/test/unittest_sqlutils.py	Tue Mar 09 11:01:44 2010 +0100
@@ -20,13 +20,13 @@
 
     def test_init(self):
         o = SQLAdapterMixIn(BASE_CONFIG)
-        self.assertEquals(o.encoding, 'UTF-8')
+        self.assertEquals(o.dbhelper.dbencoding, 'UTF-8')
 
     def test_init_encoding(self):
         config = BASE_CONFIG.copy()
         config['db-encoding'] = 'ISO-8859-1'
         o = SQLAdapterMixIn(config)
-        self.assertEquals(o.encoding, 'ISO-8859-1')
+        self.assertEquals(o.dbhelper.dbencoding, 'ISO-8859-1')
 
 if __name__ == '__main__':
     unittest_main()
--- a/sobjects/notification.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/sobjects/notification.py	Tue Mar 09 11:01:44 2010 +0100
@@ -33,11 +33,9 @@
     def recipients(self):
         mode = self._cw.vreg.config['default-recipients-mode']
         if mode == 'users':
-            # use unsafe execute else we may don't have the right to see users
-            # to notify...
-            execute = self._cw.unsafe_execute
+            execute = self._cw.execute
             dests = [(u.get_email(), u.property_value('ui.language'))
-                     for u in execute(self.user_rql, build_descr=True, propagate=True).entities()]
+                     for u in execute(self.user_rql, build_descr=True).entities()]
         elif mode == 'default-dest-addrs':
             lang = self._cw.vreg.property_value('ui.language')
             dests = zip(self._cw.vreg.config['default-dest-addrs'], repeat(lang))
@@ -158,7 +156,8 @@
                 if not rdef.has_perm(self._cw, 'read', eid=self.cw_rset[0][0]):
                     continue
             # XXX suppose it's a subject relation...
-            elif not rschema.has_perm(self._cw, 'read', fromeid=self.cw_rset[0][0]): # XXX toeid
+            elif not rschema.has_perm(self._cw, 'read',
+                                      fromeid=self.cw_rset[0][0]):
                 continue
             if attr in self.no_detailed_change_attrs:
                 msg = _('%s updated') % _(attr)
--- a/sobjects/supervising.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/sobjects/supervising.py	Tue Mar 09 11:01:44 2010 +0100
@@ -92,7 +92,7 @@
         return self._cw._('[%s supervision] changes summary') % self._cw.vreg.config.appid
 
     def call(self, changes):
-        user = self._cw.actual_session().user
+        user = self._cw.user
         self.w(self._cw._('user %s has made the following change(s):\n\n')
                % user.login)
         for event, changedescr in filter_changes(changes):
@@ -129,17 +129,16 @@
         self.w(u'  %s' % entity.absolute_url())
 
     def _relation_context(self, changedescr):
-        _ = self._cw._
-        session = self._cw.actual_session()
+        session = self._cw
         def describe(eid):
             try:
-                return _(session.describe(eid)[0]).lower()
+                return session._(session.describe(eid)[0]).lower()
             except UnknownEid:
                 # may occur when an entity has been deleted from an external
                 # source and we're cleaning its relation
-                return _('unknown external entity')
+                return session._('unknown external entity')
         eidfrom, rtype, eidto = changedescr.eidfrom, changedescr.rtype, changedescr.eidto
-        return {'rtype': _(rtype),
+        return {'rtype': session._(rtype),
                 'eidfrom': eidfrom,
                 'frometype': describe(eidfrom),
                 'eidto': eidto,
--- a/test/unittest_entity.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/test/unittest_entity.py	Tue Mar 09 11:01:44 2010 +0100
@@ -436,7 +436,7 @@
 
     def test_complete_relation(self):
         session = self.session
-        eid = session.unsafe_execute(
+        eid = session.execute(
             'INSERT TrInfo X: X comment "zou", X wf_info_for U, X from_state S1, X to_state S2 '
             'WHERE U login "admin", S1 name "activated", S2 name "deactivated"')[0][0]
         trinfo = self.entity('Any X WHERE X eid %(x)s', {'x': eid}, 'x')
--- a/utils.py	Mon Mar 08 19:11:47 2010 +0100
+++ b/utils.py	Tue Mar 09 11:01:44 2010 +0100
@@ -11,6 +11,7 @@
 import decimal
 import datetime
 import random
+from uuid import uuid4
 from warnings import warn
 
 from logilab.mtconverter import xml_escape
@@ -21,28 +22,10 @@
 # initialize random seed from current time
 random.seed()
 
-if sys.version_info[:2] < (2, 5):
-
-    from time import time
-    from md5 import md5
-    from random import randint
-
-    def make_uid(key):
-        """forge a unique identifier
-        XXX not that unique on win32
-        """
-        key = str(key)
-        msg = key + "%.10f" % time() + str(randint(0, 1000000))
-        return key + md5(msg).hexdigest()
-
-else:
-
-    from uuid import uuid4
-
-    def make_uid(key):
-        # remove dash, generated uid are used as identifier sometimes (sql table
-        # names at least)
-        return str(key) + str(uuid4()).replace('-', '')
+def make_uid(key):
+    # remove dashes: generated uids are sometimes used as identifiers (SQL table
+    # names, at least)
+    return str(key) + str(uuid4()).replace('-', '')
 
 
 def dump_class(cls, clsname):