backport stable

author    Sylvain Thénault <sylvain.thenault@logilab.fr>
date      Tue, 30 Mar 2010 14:32:03 +0200
changeset 5082:d6fd82a5a4e8
parent    5052:c9dbd95333f7 (current diff)
parent    5081:2ea98b8512dd (diff)
child     5121:a63d7886fcf5

devtools/testlib.py
server/querier.py
--- a/__init__.py	Fri Mar 26 19:21:17 2010 +0100
+++ b/__init__.py	Tue Mar 30 14:32:03 2010 +0200
@@ -112,7 +112,7 @@
 
 CW_EVENT_MANAGER = CubicWebEventManager()
 
-def onevent(event):
+def onevent(event, *args, **kwargs):
     """decorator to ease event / callback binding
 
     >>> from cubicweb import onevent
@@ -123,6 +123,6 @@
     >>>
     """
     def _decorator(func):
-        CW_EVENT_MANAGER.bind(event, func)
+        CW_EVENT_MANAGER.bind(event, func, *args, **kwargs)
         return func
     return _decorator
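
For context, the widened signature lets callers pre-bind arguments that are handed back to the callback when the event is emitted; the server/repository.py hunk later in this changeset relies on exactly that. A minimal sketch of the pattern (names are illustrative, and it assumes CubicWebEventManager.bind stores the extra arguments for emit time):

    from cubicweb import onevent

    class FakeRepo(object):
        def __init__(self):
            self._sessions = {}
            # `self` is pre-bound: the event manager will pass it back to
            # fix_user_classes whenever 'after-registry-reload' is emitted
            @onevent('after-registry-reload', self)
            def fix_user_classes(repo):
                for session in repo._sessions.values():
                    print 'would patch user class for', session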
--- a/cwvreg.py	Fri Mar 26 19:21:17 2010 +0100
+++ b/cwvreg.py	Tue Mar 30 14:32:03 2010 +0200
@@ -262,6 +262,7 @@
         self.schema = None
         self.initialized = False
         self.reset()
+        # XXX give force_reload (or refactor [re]loading...)
         if self.config.mode != 'test':
             # don't clear rtags during test, this may cause breakage with
             # manually imported appobject modules
--- a/dataimport.py	Fri Mar 26 19:21:17 2010 +0100
+++ b/dataimport.py	Tue Mar 30 14:32:03 2010 +0200
@@ -62,6 +62,7 @@
 from logilab.common.decorators import cached
 from logilab.common.deprecation import deprecated
 
+from cubicweb.server.utils import eschema_eid
 
 def ucsvreader_pb(filepath, encoding='utf-8', separator=',', quote='"',
                   skipfirst=False, withpb=True):
@@ -402,8 +403,9 @@
         self.commit()
 
     def commit(self):
-        self._commit()
+        txuuid = self._commit()
         self.session.set_pool()
+        return txuuid
 
     def rql(self, *args):
         if self._rql is not None:
@@ -558,9 +560,10 @@
         self._nb_inserted_entities = 0
         self._nb_inserted_types = 0
         self._nb_inserted_relations = 0
-        self.rql = session.unsafe_execute
-        # disable undoing
-        session.undo_actions = frozenset()
+        self.rql = session.execute
+        # deactivate security
+        session.set_read_security(False)
+        session.set_write_security(False)
 
     def create_entity(self, etype, **kwargs):
         for k, v in kwargs.iteritems():
@@ -570,9 +573,10 @@
         entity._related_cache = {}
         self.metagen.init_entity(entity)
         entity.update(kwargs)
+        entity.edited_attributes = set(entity)
         session = self.session
         self.source.add_entity(session, entity)
-        self.source.add_info(session, entity, self.source, complete=False)
+        self.source.add_info(session, entity, self.source, None, complete=False)
         for rtype, targeteids in rels.iteritems():
             # targeteids may be a single eid or a list of eids
             inlined = self.rschema(rtype).inlined
@@ -621,7 +625,7 @@
         self.etype_attrs = []
         self.etype_rels = []
         # attributes/relations specific to each entity
-        self.entity_attrs = ['eid', 'cwuri']
+        self.entity_attrs = ['cwuri']
         #self.entity_rels = [] XXX not handled (YAGNI?)
         schema = session.vreg.schema
         rschema = schema.rschema
@@ -650,18 +654,15 @@
         return entity, rels
 
     def init_entity(self, entity):
+        entity.eid = self.source.create_eid(self.session)
         for attr in self.entity_attrs:
             entity[attr] = self.generate(entity, attr)
-        entity.eid = entity['eid']
 
     def generate(self, entity, rtype):
         return getattr(self, 'gen_%s' % rtype)(entity)
 
-    def gen_eid(self, entity):
-        return self.source.create_eid(self.session)
-
     def gen_cwuri(self, entity):
-        return u'%seid/%s' % (self.baseurl, entity['eid'])
+        return u'%seid/%s' % (self.baseurl, entity.eid)
 
     def gen_creation_date(self, entity):
         return self.time
@@ -685,10 +686,8 @@
     # schema has been loaded from the fs (hence entity type schema eids are not
     # known)
     def test_gen_is(self, entity):
-        from cubicweb.hooks.metadata import eschema_eid
         return eschema_eid(self.session, entity.e_schema)
     def test_gen_is_instanceof(self, entity):
-        from cubicweb.hooks.metadata import eschema_eid
         eids = []
         for eschema in entity.e_schema.ancestors() + [entity.e_schema]:
             eids.append(eschema_eid(self.session, eschema))
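
The store's commit() now propagates the transaction uuid returned by the underlying _commit(), so an import script can keep track of the transactions it produced. A rough sketch of how that could be used (the `store` object and the 'Person' entity type are illustrative, assuming an RQLObjectStore-like instance built elsewhere):

    def import_people(store, rows):
        """insert one entity per row, committing per row and recording the
        transaction uuids so a faulty batch can be undone afterwards"""
        txuuids = []
        for row in rows:
            store.create_entity('Person', name=row['name'])
            txuuids.append(store.commit())
        return txuuids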
--- a/devtools/testlib.py	Fri Mar 26 19:21:17 2010 +0100
+++ b/devtools/testlib.py	Tue Mar 30 14:32:03 2010 +0200
@@ -278,20 +278,20 @@
             return req.user
 
     def create_user(self, login, groups=('users',), password=None, req=None,
-                    commit=True):
+                    commit=True, **kwargs):
         """create and return a new user entity"""
         if password is None:
             password = login.encode('utf8')
-        cursor = self._orig_cnx.cursor(req or self.request())
-        rset = cursor.execute('INSERT CWUser X: X login %(login)s, X upassword %(passwd)s',
-                              {'login': unicode(login), 'passwd': password})
-        user = rset.get_entity(0, 0)
-        cursor.execute('SET X in_group G WHERE X eid %%(x)s, G name IN(%s)'
-                       % ','.join(repr(g) for g in groups),
-                       {'x': user.eid}, 'x')
+        if req is None:
+            req = self._orig_cnx.request()
+        user = req.create_entity('CWUser', login=unicode(login),
+                                 upassword=password, **kwargs)
+        req.execute('SET X in_group G WHERE X eid %%(x)s, G name IN(%s)'
+                    % ','.join(repr(g) for g in groups),
+                    {'x': user.eid}, 'x')
         user.clear_related_cache('in_group', 'subject')
         if commit:
-            self._orig_cnx.commit()
+            req.cnx.commit()
         return user
 
     def login(self, login, **kwargs):
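
Since create_user() now forwards extra keyword arguments to req.create_entity, tests can set additional CWUser attributes at creation time, as the workflow tests below do with surname=u'toto'. A minimal sketch of such a test (class name and attribute are illustrative):

    from cubicweb.devtools.testlib import CubicWebTC

    class CreateUserTC(CubicWebTC):
        def test_create_user_extra_attributes(self):
            # extra kwargs go straight to req.create_entity('CWUser', ...)
            user = self.create_user('member', surname=u'toto')
            self.assertEquals(user.surname, u'toto')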
--- a/entities/test/unittest_wfobjs.py	Fri Mar 26 19:21:17 2010 +0100
+++ b/entities/test/unittest_wfobjs.py	Tue Mar 30 14:32:03 2010 +0200
@@ -443,19 +443,21 @@
 
 class AutoTransitionTC(CubicWebTC):
 
-    def setup_database(self):
-        self.wf = add_wf(self, 'CWUser')
-        asleep = self.wf.add_state('asleep', initial=True)
-        dead = self.wf.add_state('dead')
-        self.wf.add_transition('rest', asleep, asleep)
-        self.wf.add_transition('sick', asleep, dead, type=u'auto',
-                               conditions=({'expr': u'U surname "toto"',
-                                            'mainvars': u'U'},))
+    def setup_custom_wf(self):
+        wf = add_wf(self, 'CWUser')
+        asleep = wf.add_state('asleep', initial=True)
+        dead = wf.add_state('dead')
+        wf.add_transition('rest', asleep, asleep)
+        wf.add_transition('sick', asleep, dead, type=u'auto',
+                          conditions=({'expr': u'X surname "toto"',
+                                       'mainvars': u'X'},))
+        return wf
 
     def test_auto_transition_fired(self):
+        wf = self.setup_custom_wf()
         user = self.create_user('member')
         self.execute('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
-                     {'wf': self.wf.eid, 'x': user.eid})
+                     {'wf': wf.eid, 'x': user.eid})
         self.commit()
         user.clear_all_caches()
         self.assertEquals(user.state, 'asleep')
@@ -469,7 +471,7 @@
                           ['rest'])
         self.assertEquals(parse_hist(user.workflow_history),
                           [('asleep', 'asleep', 'rest', None)])
-        self.request().user.set_attributes(surname=u'toto') # fulfill condition
+        user.set_attributes(surname=u'toto') # fulfill condition
         self.commit()
         user.fire_transition('rest')
         self.commit()
@@ -480,6 +482,26 @@
                            ('asleep', 'asleep', 'rest', None),
                            ('asleep', 'dead', 'sick', None),])
 
+    def test_auto_transition_custom_initial_state_fired(self):
+        wf = self.setup_custom_wf()
+        user = self.create_user('member', surname=u'toto')
+        self.execute('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
+                     {'wf': wf.eid, 'x': user.eid})
+        self.commit()
+        self.assertEquals(user.state, 'dead')
+
+    def test_auto_transition_initial_state_fired(self):
+        wf = self.execute('Any WF WHERE ET default_workflow WF, '
+                          'ET name %(et)s', {'et': 'CWUser'}).get_entity(0, 0)
+        dead = wf.add_state('dead')
+        wf.add_transition('sick', wf.state_by_name('activated'), dead,
+                          type=u'auto', conditions=({'expr': u'X surname "toto"',
+                                                     'mainvars': u'X'},))
+        self.commit()
+        user = self.create_user('member', surname=u'toto')
+        self.commit()
+        self.assertEquals(user.state, 'dead')
+
 
 class WorkflowHooksTC(CubicWebTC):
 
--- a/etwist/server.py	Fri Mar 26 19:21:17 2010 +0100
+++ b/etwist/server.py	Tue Mar 30 14:32:03 2010 +0200
@@ -164,7 +164,7 @@
                         datadir = self.config.locate_resource(segments[1])
                         if datadir is None:
                             return None, []
-                    self.info('static file %s from %s', segments[-1], datadir)
+                    self.debug('static file %s from %s', segments[-1], datadir)
                     if segments[0] == 'data':
                         return static.File(str(datadir)), segments[1:]
                     else:
--- a/hooks/integrity.py	Fri Mar 26 19:21:17 2010 +0100
+++ b/hooks/integrity.py	Tue Mar 30 14:32:03 2010 +0200
@@ -17,6 +17,7 @@
 from cubicweb.selectors import implements
 from cubicweb.uilib import soup2xhtml
 from cubicweb.server import hook
+from cubicweb.server.hook import set_operation
 
 # special relations that don't have to be checked for integrity, usually
 # because they are handled internally by hooks (so we trust ourselves)
@@ -62,41 +63,40 @@
     """checking relation cardinality has to be done after commit in
     case the relation is being replaced
     """
-    eid, rtype = None, None
+    role = key = base_rql = None
 
     def precommit_event(self):
-        # recheck pending eids
-        if self.session.deleted_in_transaction(self.eid):
-            return
-        if self.rtype in self.session.transaction_data.get('pendingrtypes', ()):
-            return
-        if self.session.execute(*self._rql()).rowcount < 1:
-            etype = self.session.describe(self.eid)[0]
-            _ = self.session._
-            msg = _('at least one relation %(rtype)s is required on %(etype)s (%(eid)s)')
-            msg %= {'rtype': _(self.rtype), 'etype': _(etype), 'eid': self.eid}
-            qname = role_name(self.rtype, self.role)
-            raise ValidationError(self.eid, {qname: msg})
-
-    def commit_event(self):
-        pass
-
-    def _rql(self):
-        raise NotImplementedError()
+        session = self.session
+        pendingeids = session.transaction_data.get('pendingeids', ())
+        pendingrtypes = session.transaction_data.get('pendingrtypes', ())
+        # popping the key is not optional: if a later operation triggers new
+        # deletion of relations, we'll need a new operation
+        for eid, rtype in session.transaction_data.pop(self.key):
+            # recheck pending eids / relation types
+            if eid in pendingeids:
+                continue
+            if rtype in pendingrtypes:
+                continue
+            if not session.execute(self.base_rql % rtype, {'x': eid}, 'x'):
+                etype = session.describe(eid)[0]
+                _ = session._
+                msg = _('at least one relation %(rtype)s is required on '
+                        '%(etype)s (%(eid)s)')
+                msg %= {'rtype': _(rtype), 'etype': _(etype), 'eid': eid}
+                raise ValidationError(eid, {role_name(rtype, self.role): msg})
 
 
 class _CheckSRelationOp(_CheckRequiredRelationOperation):
     """check required subject relation"""
     role = 'subject'
-    def _rql(self):
-        return 'Any O WHERE S eid %%(x)s, S %s O' % self.rtype, {'x': self.eid}, 'x'
-
+    key = '_cwisrel'
+    base_rql = 'Any O WHERE S eid %%(x)s, S %s O'
 
 class _CheckORelationOp(_CheckRequiredRelationOperation):
     """check required object relation"""
     role = 'object'
-    def _rql(self):
-        return 'Any S WHERE O eid %%(x)s, S %s O' % self.rtype, {'x': self.eid}, 'x'
+    key = '_cwiorel'
+    base_rql = 'Any S WHERE O eid %%(x)s, S %s O'
 
 
 class IntegrityHook(hook.Hook):
@@ -112,14 +112,6 @@
     def __call__(self):
         getattr(self, self.event)()
 
-    def checkrel_if_necessary(self, opcls, rtype, eid):
-        """check an equivalent operation has not already been added"""
-        for op in self._cw.pending_operations:
-            if isinstance(op, opcls) and op.rtype == rtype and op.eid == eid:
-                break
-        else:
-            opcls(self._cw, rtype=rtype, eid=eid)
-
     def after_add_entity(self):
         eid = self.entity.eid
         eschema = self.entity.e_schema
@@ -127,10 +119,14 @@
             # skip automatically handled relations
             if rschema.type in DONT_CHECK_RTYPES_ON_ADD:
                 continue
-            opcls = role == 'subject' and _CheckSRelationOp or _CheckORelationOp
             rdef = rschema.role_rdef(eschema, targetschemas[0], role)
             if rdef.role_cardinality(role) in '1+':
-                self.checkrel_if_necessary(opcls, rschema.type, eid)
+                if role == 'subject':
+                    set_operation(self._cw, '_cwisrel', (eid, rschema.type),
+                                  _CheckSRelationOp)
+                else:
+                    set_operation(self._cw, '_cwiorel', (eid, rschema.type),
+                                  _CheckORelationOp)
 
     def before_delete_relation(self):
         rtype = self.rtype
@@ -138,14 +134,16 @@
             return
         session = self._cw
         eidfrom, eidto = self.eidfrom, self.eidto
-        card = session.schema_rproperty(rtype, eidfrom, eidto, 'cardinality')
         pendingrdefs = session.transaction_data.get('pendingrdefs', ())
         if (session.describe(eidfrom)[0], rtype, session.describe(eidto)[0]) in pendingrdefs:
             return
+        card = session.schema_rproperty(rtype, eidfrom, eidto, 'cardinality')
         if card[0] in '1+' and not session.deleted_in_transaction(eidfrom):
-            self.checkrel_if_necessary(_CheckSRelationOp, rtype, eidfrom)
+            set_operation(self._cw, '_cwisrel', (eidfrom, rtype),
+                          _CheckSRelationOp)
         if card[1] in '1+' and not session.deleted_in_transaction(eidto):
-            self.checkrel_if_necessary(_CheckORelationOp, rtype, eidto)
+            set_operation(self._cw, '_cwiorel', (eidto, rtype),
+                          _CheckORelationOp)
 
 
 class _CheckConstraintsOp(hook.LateOperation):
@@ -291,19 +289,32 @@
 # not really integrity check, they maintain consistency on changes
 
 class _DelayedDeleteOp(hook.Operation):
-    """delete the object of composite relation except if the relation
-    has actually been redirected to another composite
+    """delete the object of composite relation except if the relation has
+    actually been redirected to another composite
     """
+    key = base_rql = None
 
     def precommit_event(self):
         session = self.session
-        # don't do anything if the entity is being created or deleted
-        if not (session.deleted_in_transaction(self.eid) or
-                session.added_in_transaction(self.eid)):
-            etype = session.describe(self.eid)[0]
-            session.execute('DELETE %s X WHERE X eid %%(x)s, NOT %s'
-                            % (etype, self.relation),
-                            {'x': self.eid}, 'x')
+        pendingeids = session.transaction_data.get('pendingeids', ())
+        neweids = session.transaction_data.get('neweids', ())
+        # popping the key is not optional: if a later operation triggers new
+        # deletion of composite relations, we'll need a new operation
+        for eid, rtype in session.transaction_data.pop(self.key):
+            # don't do anything if the entity is being created or deleted
+            if not (eid in pendingeids or eid in neweids):
+                etype = session.describe(eid)[0]
+                session.execute(self.base_rql % (etype, rtype), {'x': eid}, 'x')
+
+class _DelayedDeleteSEntityOp(_DelayedDeleteOp):
+    """delete orphan subject entity of a composite relation"""
+    key = '_cwiscomp'
+    base_rql = 'DELETE %s X WHERE X eid %%(x)s, NOT X %s Y'
+
+class _DelayedDeleteOEntityOp(_DelayedDeleteOp):
+    """check required object relation"""
+    key = '_cwiocomp'
+    base_rql = 'DELETE %s X WHERE X eid %%(x)s, NOT Y %s X'
 
 
 class DeleteCompositeOrphanHook(hook.Hook):
@@ -323,8 +334,8 @@
         composite = self._cw.schema_rproperty(self.rtype, self.eidfrom, self.eidto,
                                               'composite')
         if composite == 'subject':
-            _DelayedDeleteOp(self._cw, eid=self.eidto,
-                             relation='Y %s X' % self.rtype)
+            set_operation(self._cw, '_cwiocomp', (self.eidto, self.rtype),
+                          _DelayedDeleteOEntityOp)
         elif composite == 'object':
-            _DelayedDeleteOp(self._cw, eid=self.eidfrom,
-                             relation='X %s Y' % self.rtype)
+            set_operation(self._cw, '_cwiscomp', (self.eidfrom, self.rtype),
+                          _DelayedDeleteSEntityOp)
--- a/hooks/metadata.py	Fri Mar 26 19:21:17 2010 +0100
+++ b/hooks/metadata.py	Tue Mar 30 14:32:03 2010 +0200
@@ -12,17 +12,7 @@
 
 from cubicweb.selectors import implements
 from cubicweb.server import hook
-
-
-def eschema_eid(session, eschema):
-    """get eid of the CWEType entity for the given yams type"""
-    # eschema.eid is None if schema has been readen from the filesystem, not
-    # from the database (eg during tests)
-    if eschema.eid is None:
-        eschema.eid = session.execute(
-            'Any X WHERE X is CWEType, X name %(name)s',
-            {'name': str(eschema)})[0][0]
-    return eschema.eid
+from cubicweb.server.utils import eschema_eid
 
 
 class MetaDataHook(hook.Hook):
--- a/hooks/workflow.py	Fri Mar 26 19:21:17 2010 +0100
+++ b/hooks/workflow.py	Tue Mar 30 14:32:03 2010 +0200
@@ -45,7 +45,7 @@
             state = entity.current_workflow.initial
             if state:
                 session.add_relation(entity.eid, 'in_state', state.eid)
-
+                _FireAutotransitionOp(session, entity=entity)
 
 class _FireAutotransitionOp(hook.Operation):
     """try to fire auto transition after state changes"""
@@ -86,6 +86,7 @@
                 if entity.current_state.eid != deststate.eid:
                     _change_state(session, entity.eid,
                                   entity.current_state.eid, deststate.eid)
+                    _FireAutotransitionOp(session, entity=entity)
                 return
             msg = session._('workflow changed to "%s"')
             msg %= session._(mainwf.name)
--- a/rset.py	Fri Mar 26 19:21:17 2010 +0100
+++ b/rset.py	Tue Mar 30 14:32:03 2010 +0200
@@ -113,7 +113,7 @@
         # but I tend to think that since we have that, we should not need this
         # method anymore (syt)
         rset = ResultSet(self.rows+rset.rows, self.rql, self.args,
-                         self.description +rset.description)
+                         self.description + rset.description)
         rset.req = self.req
         return rset
 
--- a/server/hook.py	Fri Mar 26 19:21:17 2010 +0100
+++ b/server/hook.py	Tue Mar 30 14:32:03 2010 +0200
@@ -450,6 +450,24 @@
 set_log_methods(Operation, getLogger('cubicweb.session'))
 
 
+def set_operation(session, datakey, value, opcls, **opkwargs):
+    """Search for session.transaction_data[`datakey`] (expected to be a set):
+
+    * if found, simply add `value` to it
+
+    * else, initialize it to set([`value`]) and instantiate the given `opcls`
+      operation class with additional keyword arguments.
+
+    You should use this instead of creating one operation per `value`, since
+    handling operations becomes costly on massive data imports.
+    """
+    try:
+        session.transaction_data[datakey].add(value)
+    except KeyError:
+        opcls(session, *opkwargs)
+        session.transaction_data[datakey] = set((value,))
+
+
 class LateOperation(Operation):
     """special operation which should be called after all possible (ie non late)
     operations
@@ -529,3 +547,40 @@
         execute = self.session.execute
         for rql in self.rqls:
             execute(*rql)
+
+
+class CleanupNewEidsCacheOp(SingleLastOperation):
+    """on rollback of a insert query we have to remove from repository's
+    type/source cache eids of entities added in that transaction.
+
+    NOTE: querier's rqlst/solutions cache may have been polluted too with
+    queries such as Any X WHERE X eid 32 if 32 has been rollbacked however
+    generated queries are unpredictable and analysing all the cache probably
+    too expensive. Notice that there is no pb when using args to specify eids
+    instead of giving them into the rql string.
+    """
+
+    def rollback_event(self):
+        """the observed connections pool has been rollbacked,
+        remove inserted eid from repository type/source cache
+        """
+        try:
+            self.session.repo.clear_caches(
+                self.session.transaction_data['neweids'])
+        except KeyError:
+            pass
+
+class CleanupDeletedEidsCacheOp(SingleLastOperation):
+    """on commit of delete query, we have to remove from repository's
+    type/source cache eids of entities deleted in that transaction.
+    """
+
+    def commit_event(self):
+        """the observed connections pool has been rollbacked,
+        remove inserted eid from repository type/source cache
+        """
+        try:
+            self.session.repo.clear_caches(
+                self.session.transaction_data['pendingeids'])
+        except KeyError:
+            pass
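
The integrity hooks above illustrate the intended usage of set_operation: hooks accumulate values under a transaction_data key and a single operation processes them at precommit time. A condensed sketch of the pattern (hook id, key name and operation class are illustrative; a real hook would also define a proper selector):

    from cubicweb.server import hook
    from cubicweb.server.hook import set_operation

    class _ProcessEidsOp(hook.Operation):
        """process every eid accumulated under '_myapp_eids' in one pass"""
        def precommit_event(self):
            # popping the key is deliberate: if a later operation adds new
            # values, a fresh operation will be instantiated for them
            for eid in self.session.transaction_data.pop('_myapp_eids'):
                self.session.execute('Any X WHERE X eid %(x)s', {'x': eid}, 'x')

    class RecordEntityHook(hook.Hook):
        __regid__ = 'myapp.record_entity'
        events = ('after_add_entity',)

        def __call__(self):
            # one operation per transaction, values accumulated in a set
            set_operation(self._cw, '_myapp_eids', self.entity.eid,
                          _ProcessEidsOp)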
--- a/server/querier.py	Fri Mar 26 19:21:17 2010 +0100
+++ b/server/querier.py	Tue Mar 30 14:32:03 2010 +0200
@@ -613,7 +613,7 @@
                 return empty_rset(rql, args, rqlst)
             self._rql_cache[cachekey] = rqlst
         orig_rqlst = rqlst
-        if not rqlst.TYPE == 'select':
+        if rqlst.TYPE != 'select':
             if session.read_security:
                 check_no_password_selected(rqlst)
             # write query, ensure session's mode is 'write' so connections won't
--- a/server/repository.py	Fri Mar 26 19:21:17 2010 +0100
+++ b/server/repository.py	Tue Mar 30 14:32:03 2010 +0200
@@ -38,42 +38,11 @@
                       UnknownEid, AuthenticationError, ExecutionError,
                       ETypeNotSupportedBySources, MultiSourcesError,
                       BadConnectionId, Unauthorized, ValidationError,
-                      typed_eid)
+                      typed_eid, onevent)
 from cubicweb import cwvreg, schema, server
 from cubicweb.server import utils, hook, pool, querier, sources
-from cubicweb.server.session import Session, InternalSession, security_enabled
-
-
-class CleanupEidTypeCacheOp(hook.SingleLastOperation):
-    """on rollback of a insert query or commit of delete query, we have to
-    clear repository's cache from no more valid entries
-
-    NOTE: querier's rqlst/solutions cache may have been polluted too with
-    queries such as Any X WHERE X eid 32 if 32 has been rollbacked however
-    generated queries are unpredictable and analysing all the cache probably
-    too expensive. Notice that there is no pb when using args to specify eids
-    instead of giving them into the rql string.
-    """
-
-    def commit_event(self):
-        """the observed connections pool has been rollbacked,
-        remove inserted eid from repository type/source cache
-        """
-        try:
-            self.session.repo.clear_caches(
-                self.session.transaction_data['pendingeids'])
-        except KeyError:
-            pass
-
-    def rollback_event(self):
-        """the observed connections pool has been rollbacked,
-        remove inserted eid from repository type/source cache
-        """
-        try:
-            self.session.repo.clear_caches(
-                self.session.transaction_data['neweids'])
-        except KeyError:
-            pass
+from cubicweb.server.session import Session, InternalSession, InternalManager, \
+     security_enabled
 
 
 def del_existing_rel_if_needed(session, eidfrom, rtype, eidto):
@@ -164,6 +133,12 @@
         # open some connections pools
         if config.open_connections_pools:
             self.open_connections_pools()
+        @onevent('after-registry-reload', self)
+        def fix_user_classes(self):
+            usercls = self.vreg['etypes'].etype_class('CWUser')
+            for session in self._sessions.values():
+                if not isinstance(session.user, InternalManager):
+                    session.user.__class__ = usercls
 
     def _bootstrap_hook_registry(self):
         """called during bootstrap since we need the metadata hooks"""
@@ -398,7 +373,8 @@
         session = self.internal_session()
         try:
             rset = session.execute('Any L WHERE U login L, U primary_email M, '
-                                   'M address %(login)s', {'login': login})
+                                   'M address %(login)s', {'login': login},
+                                   build_descr=False)
             if rset.rowcount == 1:
                 login = rset[0][0]
         finally:
@@ -530,13 +506,14 @@
         # for consistency, keep same error as unique check hook (although not required)
         errmsg = session._('the value "%s" is already used, use another one')
         try:
-            if (session.execute('CWUser X WHERE X login %(login)s', {'login': login})
+            if (session.execute('CWUser X WHERE X login %(login)s', {'login': login},
+                                build_descr=False)
                 or session.execute('CWUser X WHERE X use_email C, C address %(login)s',
-                                   {'login': login})):
+                                   {'login': login}, build_descr=False)):
                 qname = role_name('login', 'subject')
                 raise ValidationError(None, {qname: errmsg % login})
             # we have to create the user
-            user = self.vreg['etypes'].etype_class('CWUser')(session, None)
+            user = self.vreg['etypes'].etype_class('CWUser')(session)
             if isinstance(password, unicode):
                 # password should *always* be utf8 encoded
                 password = password.encode('UTF8')
@@ -548,12 +525,13 @@
                             {'x': user.eid})
             if email or '@' in login:
                 d = {'login': login, 'email': email or login}
-                if session.execute('EmailAddress X WHERE X address %(email)s', d):
+                if session.execute('EmailAddress X WHERE X address %(email)s', d,
+                                   build_descr=False):
                     qname = role_name('address', 'subject')
                     raise ValidationError(None, {qname: errmsg % d['email']})
                 session.execute('INSERT EmailAddress X: X address %(email)s, '
                                 'U primary_email X, U use_email X '
-                                'WHERE U login %(login)s', d)
+                                'WHERE U login %(login)s', d, build_descr=False)
             session.commit()
         finally:
             session.close()
@@ -933,31 +911,20 @@
         and index the entity with the full text index
         """
         # begin by inserting eid/type/source/extid into the entities table
-        new = session.transaction_data.setdefault('neweids', set())
-        new.add(entity.eid)
+        hook.set_operation(session, 'neweids', entity.eid,
+                           hook.CleanupNewEidsCacheOp)
         self.system_source.add_info(session, entity, source, extid, complete)
-        CleanupEidTypeCacheOp(session)
 
     def delete_info(self, session, entity, sourceuri, extid):
         """called by external source when some entity known by the system source
         has been deleted in the external source
         """
-        self._prepare_delete_info(session, entity, sourceuri)
+        # mark eid as being deleted in session info and setup cache update
+        # operation
+        hook.set_operation(session, 'pendingeids', entity.eid,
+                           hook.CleanupDeletedEidsCacheOp)
         self._delete_info(session, entity, sourceuri, extid)
 
-    def _prepare_delete_info(self, session, entity, sourceuri):
-        """prepare the repository for deletion of an entity:
-        * update the fti
-        * mark eid as being deleted in session info
-        * setup cache update operation
-        * if undoable, get back all entity's attributes and relation
-        """
-        eid = entity.eid
-        self.system_source.fti_unindex_entity(session, eid)
-        pending = session.transaction_data.setdefault('pendingeids', set())
-        pending.add(eid)
-        CleanupEidTypeCacheOp(session)
-
     def _delete_info(self, session, entity, sourceuri, extid):
                      # attributes=None, relations=None):
         """delete system information on deletion of an entity:
@@ -977,10 +944,9 @@
                 if role == 'subject':
                     # don't skip inlined relation so they are regularly
                     # deleted and so hooks are correctly called
-                    selection = 'X %s Y' % rtype
+                    rql = 'DELETE X %s Y WHERE X eid %%(x)s' % rtype
                 else:
-                    selection = 'Y %s X' % rtype
-                rql = 'DELETE %s WHERE X eid %%(x)s' % selection
+                    rql = 'DELETE Y %s X WHERE X eid %%(x)s' % rtype
                 session.execute(rql, {'x': eid}, 'x', build_descr=False)
         self.system_source.delete_info(session, entity, sourceuri, extid)
 
@@ -1011,6 +977,20 @@
         else:
             raise ETypeNotSupportedBySources(etype)
 
+    def init_entity_caches(self, session, entity, source):
+        """add entity to session entities cache and repo's extid cache.
+        Return entity's ext id if the source isn't the system source.
+        """
+        session.set_entity_cache(entity)
+        suri = source.uri
+        if suri == 'system':
+            extid = None
+        else:
+            extid = source.get_extid(entity)
+            self._extid_cache[(str(extid), suri)] = entity.eid
+        self._type_source_cache[entity.eid] = (entity.__regid__, suri, extid)
+        return extid
+
     def glob_add_entity(self, session, entity):
         """add an entity to the repository
 
@@ -1026,17 +1006,19 @@
             entity.__class__ = entity_.__class__
             entity.__dict__.update(entity_.__dict__)
         eschema = entity.e_schema
-        etype = str(eschema)
-        source = self.locate_etype_source(etype)
-        # attribute an eid to the entity before calling hooks
+        source = self.locate_etype_source(entity.__regid__)
+        # allocate an eid to the entity before calling hooks
         entity.set_eid(self.system_source.create_eid(session))
+        # set caches asap
+        extid = self.init_entity_caches(session, entity, source)
         if server.DEBUG & server.DBG_REPO:
-            print 'ADD entity', etype, entity.eid, dict(entity)
+            print 'ADD entity', entity.__regid__, entity.eid, dict(entity)
         relations = []
         if source.should_call_hooks:
             self.hm.call_hooks('before_add_entity', session, entity=entity)
         # XXX use entity.keys here since edited_attributes is not updated for
-        # inline relations
+        # inline relations XXX not true, right? (see edited_attributes
+        # assignment above)
         for attr in entity.iterkeys():
             rschema = eschema.subjrels[attr]
             if not rschema.final: # inlined relation
@@ -1045,15 +1027,9 @@
         if session.is_hook_category_activated('integrity'):
             entity.check(creation=True)
         source.add_entity(session, entity)
-        if source.uri != 'system':
-            extid = source.get_extid(entity)
-            self._extid_cache[(str(extid), source.uri)] = entity.eid
-        else:
-            extid = None
         self.add_info(session, entity, source, extid, complete=False)
         entity._is_saved = True # entity has an eid and is saved
         # prefill entity relation caches
-        session.set_entity_cache(entity)
         for rschema in eschema.subject_relations():
             rtype = str(rschema)
             if rtype in schema.VIRTUAL_RTYPES:
@@ -1085,9 +1061,8 @@
         """replace an entity in the repository
         the type and the eid of an entity must not be changed
         """
-        etype = str(entity.e_schema)
         if server.DEBUG & server.DBG_REPO:
-            print 'UPDATE entity', etype, entity.eid, \
+            print 'UPDATE entity', entity.__regid__, entity.eid, \
                   dict(entity), edited_attributes
         entity.edited_attributes = edited_attributes
         if session.is_hook_category_activated('integrity'):
@@ -1150,7 +1125,6 @@
         """delete an entity and all related entities from the repository"""
         entity = session.entity_from_eid(eid)
         etype, sourceuri, extid = self.type_and_source_from_eid(eid, session)
-        self._prepare_delete_info(session, entity, sourceuri)
         if server.DEBUG & server.DBG_REPO:
             print 'DELETE entity', etype, eid
         source = self.sources_by_uri[sourceuri]
--- a/server/serverconfig.py	Fri Mar 26 19:21:17 2010 +0100
+++ b/server/serverconfig.py	Tue Mar 30 14:32:03 2010 +0200
@@ -141,6 +141,14 @@
 kept (hence undoable).',
           'group': 'main', 'inputlevel': 1,
           }),
+        ('multi-sources-etypes',
+         {'type' : 'csv', 'default': (),
+          'help': 'defines which entity types from this repository are used \
+by some other instances. You should set this properly so that those instances \
+can detect updates / deletions.',
+          'group': 'main', 'inputlevel': 1,
+          }),
+
         ('delay-full-text-indexation',
          {'type' : 'yn', 'default': False,
           'help': 'When full text indexation of entity has a too important cost'
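
Since the option is csv-typed, an administrator would list the concerned entity types as a comma-separated value in the instance configuration; a hedged example of what such an entry could look like (the entity type names are illustrative):

    # in the instance configuration, main section
    multi-sources-etypes=Card,Project,Version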
--- a/server/session.py	Fri Mar 26 19:21:17 2010 +0100
+++ b/server/session.py	Tue Mar 30 14:32:03 2010 +0200
@@ -21,13 +21,18 @@
 from cubicweb import Binary, UnknownEid, schema
 from cubicweb.req import RequestSessionBase
 from cubicweb.dbapi import ConnectionProperties
-from cubicweb.utils import make_uid
+from cubicweb.utils import make_uid, RepeatList
 from cubicweb.rqlrewrite import RQLRewriter
 
 ETYPE_PYOBJ_MAP[Binary] = 'Bytes'
 
 NO_UNDO_TYPES = schema.SCHEMA_TYPES.copy()
 NO_UNDO_TYPES.add('CWCache')
+# is / is_instance_of are usually added by sql hooks except when using
+# dataimport.NoHookRQLObjectStore, and we don't want to record them
+# anyway in the latter case
+NO_UNDO_TYPES.add('is')
+NO_UNDO_TYPES.add('is_instance_of')
 # XXX rememberme,forgotpwd,apycot,vcsfile
 
 def is_final(rqlst, variable, args):
@@ -829,7 +834,7 @@
             selected = rqlst.children[0].selection
             solution = rqlst.children[0].solutions[0]
             description = _make_description(selected, args, solution)
-            return [tuple(description)] * len(result)
+            return RepeatList(len(result), tuple(description))
         # hard, delegate the work :o)
         return self.manual_build_descr(rqlst, args, result)
 
@@ -858,7 +863,7 @@
                 etype = rqlst.children[0].solutions[0]
                 basedescription.append(term.get_type(etype, args))
         if not todetermine:
-            return [tuple(basedescription)] * len(result)
+            return RepeatList(len(result), tuple(basedescription))
         return self._build_descr(result, basedescription, todetermine)
 
     def _build_descr(self, result, basedescription, todetermine):
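
RepeatList avoids materialising one tuple reference per result row when all rows share the same description. Conceptually it behaves like the sketch below (an illustration of the idea only, not cubicweb.utils.RepeatList itself):

    class RepeatListSketch(object):
        """read-only sequence returning the same item `size` times"""
        def __init__(self, size, item):
            self._size = size
            self._item = item
        def __len__(self):
            return self._size
        def __getitem__(self, idx):
            if isinstance(idx, slice):
                return [self._item] * len(range(*idx.indices(self._size)))
            if not -self._size <= idx < self._size:
                raise IndexError(idx)
            return self._item
        def __iter__(self):
            return iter([self._item] * self._size)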
--- a/server/sources/native.py	Fri Mar 26 19:21:17 2010 +0100
+++ b/server/sources/native.py	Tue Mar 30 14:32:03 2010 +0200
@@ -28,14 +28,15 @@
 from logilab.common.shellutils import getlogin
 from logilab.database import get_db_helper
 
-from cubicweb import UnknownEid, AuthenticationError, Binary, server, neg_role
-from cubicweb import transaction as tx
+from cubicweb import UnknownEid, AuthenticationError, ValidationError, Binary
+from cubicweb import transaction as tx, server, neg_role
 from cubicweb.schema import VIRTUAL_RTYPES
 from cubicweb.cwconfig import CubicWebNoAppConfiguration
 from cubicweb.server import hook
-from cubicweb.server.utils import crypt_password
+from cubicweb.server.utils import crypt_password, eschema_eid
 from cubicweb.server.sqlutils import SQL_PREFIX, SQLAdapterMixIn
 from cubicweb.server.rqlannotation import set_qdata
+from cubicweb.server.hook import CleanupDeletedEidsCacheOp
 from cubicweb.server.session import hooks_control, security_enabled
 from cubicweb.server.sources import AbstractSource, dbg_st_search, dbg_results
 from cubicweb.server.sources.rql2sql import SQLGenerator
@@ -128,6 +129,45 @@
                                'rtype': rdef.rtype,
                                'eid': tentity.eid})
 
+def _undo_rel_info(session, subj, rtype, obj):
+    entities = []
+    for role, eid in (('subject', subj), ('object', obj)):
+        try:
+            entities.append(session.entity_from_eid(eid))
+        except UnknownEid:
+            raise UndoException(session._(
+                "Can't restore relation %(rtype)s, %(role)s entity %(eid)s"
+                " doesn't exist anymore.")
+                                % {'role': session._(role),
+                                   'rtype': session._(rtype),
+                                   'eid': eid})
+    sentity, oentity = entities
+    try:
+        rschema = session.vreg.schema.rschema(rtype)
+        rdef = rschema.rdefs[(sentity.__regid__, oentity.__regid__)]
+    except KeyError:
+        raise UndoException(session._(
+            "Can't restore relation %(rtype)s between %(subj)s and "
+            "%(obj)s, that relation does not exists anymore in the "
+            "schema.")
+                            % {'rtype': session._(rtype),
+                               'subj': subj,
+                               'obj': obj})
+    return sentity, oentity, rdef
+
+def _undo_has_later_transaction(session, eid):
+    return session.system_sql('''\
+SELECT T.tx_uuid FROM transactions AS TREF, transactions AS T
+WHERE TREF.tx_uuid='%(txuuid)s' AND T.tx_uuid!='%(txuuid)s'
+AND T.tx_time>=TREF.tx_time
+AND (EXISTS(SELECT 1 FROM tx_entity_actions AS TEA
+            WHERE TEA.tx_uuid=T.tx_uuid AND TEA.eid=%(eid)s)
+     OR EXISTS(SELECT 1 FROM tx_relation_actions as TRA
+               WHERE TRA.tx_uuid=T.tx_uuid AND (
+                   TRA.eid_from=%(eid)s OR TRA.eid_to=%(eid)s))
+     )''' % {'txuuid': session.transaction_data['undoing_uuid'],
+             'eid': eid}).fetchone()
+
 
 class NativeSQLSource(SQLAdapterMixIn, AbstractSource):
     """adapter for source using the native cubicweb schema (see below)
@@ -191,9 +231,13 @@
         # sql queries cache
         self._cache = Cache(repo.config['rql-cache-size'])
         self._temp_table_data = {}
+        # we need a lock to protect eid attribution function (XXX, really?
+        # explain)
         self._eid_creation_lock = Lock()
         # (etype, attr) / storage mapping
         self._storages = {}
+        # entity types that may be used by other multi-sources instances
+        self.multisources_etypes = set(repo.config['multi-sources-etypes'])
         # XXX no_sqlite_wrap trick since we've a sqlite locking pb when
         # running unittest_multisources with the wrapping below
         if self.dbdriver == 'sqlite' and \
@@ -481,6 +525,13 @@
             sql = self.sqlgen.delete(SQL_PREFIX + entity.__regid__, attrs)
             self.doexec(session, sql, attrs)
 
+    def add_relation(self, session, subject, rtype, object, inlined=False):
+        """add a relation to the source"""
+        self._add_relation(session, subject, rtype, object, inlined)
+        if session.undoable_action('A', rtype):
+            self._record_tx_action(session, 'tx_relation_actions', 'A',
+                                   eid_from=subject, rtype=rtype, eid_to=object)
+
     def _add_relation(self, session, subject, rtype, object, inlined=False):
         """add a relation to the source"""
         if inlined is False:
@@ -493,17 +544,17 @@
                                      ['cw_eid'])
         self.doexec(session, sql, attrs)
 
-    def add_relation(self, session, subject, rtype, object, inlined=False):
-        """add a relation to the source"""
-        self._add_relation(session, subject, rtype, object, inlined)
-        if session.undoable_action('A', rtype):
-            self._record_tx_action(session, 'tx_relation_actions', 'A',
-                                   eid_from=subject, rtype=rtype, eid_to=object)
-
     def delete_relation(self, session, subject, rtype, object):
         """delete a relation from the source"""
         rschema = self.schema.rschema(rtype)
-        if rschema.inlined:
+        self._delete_relation(session, subject, rtype, object, rschema.inlined)
+        if session.undoable_action('R', rtype):
+            self._record_tx_action(session, 'tx_relation_actions', 'R',
+                                   eid_from=subject, rtype=rtype, eid_to=object)
+
+    def _delete_relation(self, session, subject, rtype, object, inlined=False):
+        """delete a relation from the source"""
+        if inlined:
             table = SQL_PREFIX + session.describe(subject)[0]
             column = SQL_PREFIX + rtype
             sql = 'UPDATE %s SET %s=NULL WHERE %seid=%%(eid)s' % (table, column,
@@ -513,9 +564,6 @@
             attrs = {'eid_from': subject, 'eid_to': object}
             sql = self.sqlgen.delete('%s_relation' % rtype, attrs)
         self.doexec(session, sql, attrs)
-        if session.undoable_action('R', rtype):
-            self._record_tx_action(session, 'tx_relation_actions', 'R',
-                                   eid_from=subject, rtype=rtype, eid_to=object)
 
     def doexec(self, session, query, args=None, rollback=True):
         """Execute a query.
@@ -651,30 +699,36 @@
         if self.do_fti and self.need_fti_indexation(entity.__regid__):
             if complete:
                 entity.complete(entity.e_schema.indexable_attributes())
-            FTIndexEntityOp(session, entity=entity)
+            self.index_entity(session, entity=entity)
 
     def update_info(self, session, entity, need_fti_update):
         """mark entity as being modified, fulltext reindex if needed"""
         if self.do_fti and need_fti_update:
             # reindex the entity only if this query is updating at least
             # one indexable attribute
-            FTIndexEntityOp(session, entity=entity)
-        # update entities.mtime
+            self.index_entity(session, entity=entity)
+        # update entities.mtime.
+        # XXX Only if entity.__regid__ in self.multisources_etypes?
         attrs = {'eid': entity.eid, 'mtime': datetime.now()}
         self.doexec(session, self.sqlgen.update('entities', attrs, ['eid']), attrs)
 
     def delete_info(self, session, entity, uri, extid):
-        """delete system information on deletion of an entity by transfering
-        record from the entities table to the deleted_entities table
+        """delete system information on deletion of an entity:
+        * update the fti
+        * remove record from the entities table
+        * transfer it to the deleted_entities table if the entity's type is
+          multi-sources
         """
+        self.fti_unindex_entity(session, entity.eid)
         attrs = {'eid': entity.eid}
         self.doexec(session, self.sqlgen.delete('entities', attrs), attrs)
+        if not entity.__regid__ in self.multisources_etypes:
+            return
         if extid is not None:
             assert isinstance(extid, str), type(extid)
             extid = b64encode(extid)
         attrs = {'type': entity.__regid__, 'eid': entity.eid, 'extid': extid,
-                 'source': uri, 'dtime': datetime.now(),
-                 }
+                 'source': uri, 'dtime': datetime.now()}
         self.doexec(session, self.sqlgen.insert('deleted_entities', attrs), attrs)
 
     def modified_entities(self, session, etypes, mtime):
@@ -685,6 +739,11 @@
         * list of (etype, eid) of entities of the given types which have been
           deleted since the given timestamp
         """
+        for etype in etypes:
+            if not etype in self.multisources_etypes:
+                self.critical('%s not listed as a multi-sources entity type. '
+                              'Modify your configuration' % etype)
+                self.multisources_etypes.add(etype)
         modsql = _modified_sql('entities', etypes)
         cursor = self.doexec(session, modsql, {'time': mtime})
         modentities = cursor.fetchall()
@@ -777,6 +836,7 @@
         restr = {'tx_uuid': txuuid}
         if public:
             restr['txa_public'] = True
+        # XXX use generator to avoid loading everything in memory?
         sql = self.sqlgen.select('tx_entity_actions', restr,
                                  ('txa_action', 'txa_public', 'txa_order',
                                   'etype', 'eid', 'changes'))
@@ -791,11 +851,17 @@
         return sorted(actions, key=lambda x: x.order)
 
     def undo_transaction(self, session, txuuid):
-        """See :class:`cubicweb.dbapi.Connection.undo_transaction`"""
+        """See :class:`cubicweb.dbapi.Connection.undo_transaction`
+
+        important note: while undoing a transaction, only hooks in the
+        'integrity', 'activeintegrity' and 'undo' categories are called.
+        """
         # set mode so pool isn't released subsquently until commit/rollback
         session.mode = 'write'
         errors = []
-        with hooks_control(session, session.HOOKS_DENY_ALL, 'integrity'):
+        session.transaction_data['undoing_uuid'] = txuuid
+        with hooks_control(session, session.HOOKS_DENY_ALL,
+                           'integrity', 'activeintegrity', 'undo'):
             with security_enabled(session, read=False):
                 for action in reversed(self.tx_actions(session, txuuid, False)):
                     undomethod = getattr(self, '_undo_%s' % action.action.lower())
@@ -890,30 +956,6 @@
                     % {'rtype': rtype, 'eid': eid})
             if not rschema.final:
                 assert value is None
-                    # try:
-                    #     tentity = session.entity_from_eid(eid)
-                    # except UnknownEid:
-                    #     err(_("Can't restore %(role)s relation %(rtype)s to "
-                    #           "entity %(eid)s which doesn't exist anymore.")
-                    #         % {'role': _('subject'),
-                    #            'rtype': _(rtype),
-                    #            'eid': eid})
-                    #     continue
-                    # rdef = rdefs[(eschema, tentity.__regid__)]
-                    # try:
-                    #     _undo_check_relation_target(tentity, rdef, 'object')
-                    # except UndoException, ex:
-                    #     err(unicode(ex))
-                    #     continue
-                    # if rschema.inlined:
-                    #     entity[rtype] = value
-                    # else:
-                    #     # restore relation where inlined changed since the deletion
-                    #     del action.changes[column]
-                    #     self._add_relation(session, subject, rtype, object)
-                    # # set related cache
-                    # session.update_rel_cache_add(eid, rtype, value,
-                    #                              rschema.symmetric)
             elif eschema.destination(rtype) in ('Bytes', 'Password'):
                 action.changes[column] = self._binary(value)
                 entity[rtype] = Binary(value)
@@ -922,6 +964,7 @@
             else:
                 entity[rtype] = value
         entity.set_eid(eid)
+        session.repo.init_entity_caches(session, entity, self)
         entity.edited_attributes = set(entity)
         entity.check()
         self.repo.hm.call_hooks('before_add_entity', session, entity=entity)
@@ -929,64 +972,85 @@
         action.changes['cw_eid'] = eid
         sql = self.sqlgen.insert(SQL_PREFIX + etype, action.changes)
         self.doexec(session, sql, action.changes)
+        # explicitly re-add is / is_instance_of, whose deletion is not recorded,
+        # for consistency with addition (done by sql in hooks)
+        self.doexec(session, 'INSERT INTO is_relation(eid_from, eid_to) '
+                    'VALUES(%s, %s)' % (eid, eschema_eid(session, eschema)))
+        for eschema in entity.e_schema.ancestors() + [entity.e_schema]:
+            self.doexec(session, 'INSERT INTO is_instance_of_relation(eid_from,'
+                        'eid_to) VALUES(%s, %s)' % (eid, eschema_eid(session, eschema)))
         # restore record in entities (will update fti if needed)
         self.add_info(session, entity, self, None, True)
-        # remove record from deleted_entities
-        self.doexec(session, 'DELETE FROM deleted_entities WHERE eid=%s' % eid)
+        # remove record from deleted_entities if entity's type is multi-sources
+        if entity.__regid__ in self.multisources_etypes:
+            self.doexec(session,
+                        'DELETE FROM deleted_entities WHERE eid=%s' % eid)
         self.repo.hm.call_hooks('after_add_entity', session, entity=entity)
         return errors
 
     def _undo_r(self, session, action):
         """undo a relation removal"""
         errors = []
-        err = errors.append
-        _ = session._
         subj, rtype, obj = action.eid_from, action.rtype, action.eid_to
-        entities = []
-        for role, eid in (('subject', subj), ('object', obj)):
-            try:
-                entities.append(session.entity_from_eid(eid))
-            except UnknownEid:
-                err(_("Can't restore relation %(rtype)s, %(role)s entity %(eid)s"
-                      " doesn't exist anymore.")
-                    % {'role': _(role),
-                       'rtype': _(rtype),
-                       'eid': eid})
-        if not len(entities) == 2:
-            return errors
-        sentity, oentity = entities
         try:
-            rschema = self.schema.rschema(rtype)
-            rdef = rschema.rdefs[(sentity.__regid__, oentity.__regid__)]
-        except KeyError:
-            err(_("Can't restore relation %(rtype)s between %(subj)s and "
-                  "%(obj)s, that relation does not exists anymore in the "
-                  "schema.")
-                % {'rtype': rtype,
-                   'subj': subj,
-                   'obj': obj})
+            sentity, oentity, rdef = _undo_rel_info(session, subj, rtype, obj)
+        except UndoException, ex:
+            errors.append(unicode(ex))
         else:
             for role, entity in (('subject', sentity),
                                  ('object', oentity)):
                 try:
                     _undo_check_relation_target(entity, rdef, role)
                 except UndoException, ex:
-                    err(unicode(ex))
+                    errors.append(unicode(ex))
                     continue
         if not errors:
             self.repo.hm.call_hooks('before_add_relation', session,
                                     eidfrom=subj, rtype=rtype, eidto=obj)
             # add relation in the database
-            self._add_relation(session, subj, rtype, obj, rschema.inlined)
+            self._add_relation(session, subj, rtype, obj, rdef.rtype.inlined)
             # set related cache
-            session.update_rel_cache_add(subj, rtype, obj, rschema.symmetric)
+            session.update_rel_cache_add(subj, rtype, obj, rdef.rtype.symmetric)
             self.repo.hm.call_hooks('after_add_relation', session,
                                     eidfrom=subj, rtype=rtype, eidto=obj)
         return errors
 
     def _undo_c(self, session, action):
         """undo an entity creation"""
-        return ['undoing of entity creation not yet supported.']
+        eid = action.eid
+        # XXX done to avoid fetching all remaining relations for the entity;
+        # we should find an efficient way to do this (while keeping current
+        # massive deletion performance)
+        if _undo_has_later_transaction(session, eid):
+            msg = session._('some later transaction(s) touch entity, undo them '
+                            'first')
+            raise ValidationError(eid, {None: msg})
+        etype = action.etype
+        # get an entity instance
+        try:
+            entity = self.repo.vreg['etypes'].etype_class(etype)(session)
+        except Exception:
+            return [session._(
+                "Can't undo creation of entity %s of type %s, type "
+                "no more supported" % (eid, etype))]
+        entity.set_eid(eid)
+        # for proper eid/type cache update
+        hook.set_operation(session, 'pendingeids', eid,
+                           CleanupDeletedEidsCacheOp)
+        self.repo.hm.call_hooks('before_delete_entity', session, entity=entity)
+        # remove is / is_instance_of, which are added using sql by hooks and
+        # hence invisible as transaction actions
+        self.doexec(session, 'DELETE FROM is_relation WHERE eid_from=%s' % eid)
+        self.doexec(session, 'DELETE FROM is_instance_of_relation WHERE eid_from=%s' % eid)
+        # XXX check removal of inlined relation?
+        # delete the entity
+        attrs = {'cw_eid': eid}
+        sql = self.sqlgen.delete(SQL_PREFIX + entity.__regid__, attrs)
+        self.doexec(session, sql, attrs)
+        # remove record from entities (will update fti if needed)
+        self.delete_info(session, entity, self.uri, None)
+        self.repo.hm.call_hooks('after_delete_entity', session, entity=entity)
+        return ()
 
     def _undo_u(self, session, action):
         """undo an entity update"""
@@ -994,7 +1058,35 @@
 
     def _undo_a(self, session, action):
         """undo a relation addition"""
-        return ['undoing of relation addition not yet supported.']
+        errors = []
+        subj, rtype, obj = action.eid_from, action.rtype, action.eid_to
+        try:
+            sentity, oentity, rdef = _undo_rel_info(session, subj, rtype, obj)
+        except UndoException, ex:
+            errors.append(unicode(ex))
+        else:
+            rschema = rdef.rtype
+            if rschema.inlined:
+                sql = 'SELECT 1 FROM cw_%s WHERE cw_eid=%s and cw_%s=%s'\
+                      % (sentity.__regid__, subj, rtype, obj)
+            else:
+                sql = 'SELECT 1 FROM %s_relation WHERE eid_from=%s and eid_to=%s'\
+                      % (rtype, subj, obj)
+            cu = self.doexec(session, sql)
+            if cu.fetchone() is None:
+                errors.append(session._(
+                    "Can't undo addition of relation %s from %s to %s, doesn't "
+                    "exist anymore" % (rtype, subj, obj)))
+        if not errors:
+            self.repo.hm.call_hooks('before_delete_relation', session,
+                                    eidfrom=subj, rtype=rtype, eidto=obj)
+            # delete relation from the database
+            self._delete_relation(session, subj, rtype, obj, rschema.inlined)
+            # set related cache
+            session.update_rel_cache_del(subj, rtype, obj, rschema.symmetric)
+            self.repo.hm.call_hooks('after_delete_relation', session,
+                                    eidfrom=subj, rtype=rtype, eidto=obj)
+        return errors
 
     # full text index handling #################################################
 
@@ -1011,7 +1103,7 @@
         """create an operation to [re]index textual content of the given entity
         on commit
         """
-        FTIndexEntityOp(session, entity=entity)
+        hook.set_operation(session, 'ftindex', entity.eid, FTIndexEntityOp)
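hook.set_operation centralises a pattern used throughout these hunks: record a
value under a transaction_data key and instantiate the operation class only the
first time that key appears in the transaction. A rough sketch of the behaviour
relied upon here (not the actual implementation)::

    def set_operation(session, datakey, value, opcls):
        # accumulate values for the current transaction, making sure a single
        # opcls operation will process them at commit time
        try:
            session.transaction_data[datakey].add(value)
        except KeyError:
            session.transaction_data[datakey] = set((value,))
            opcls(session)  # registers itself on the session's operation queue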
 
     def fti_unindex_entity(self, session, eid):
         """remove text content for entity with the given eid from the full text
@@ -1045,21 +1137,18 @@
 
     def precommit_event(self):
         session = self.session
-        entity = self.entity
-        if entity.eid in session.transaction_data.get('pendingeids', ()):
-            return # entity added and deleted in the same transaction
-        alreadydone = session.transaction_data.setdefault('indexedeids', set())
-        if entity.eid in alreadydone:
-            self.debug('skipping reindexation of %s, already done', entity.eid)
-            return
-        alreadydone.add(entity.eid)
         source = session.repo.system_source
-        for container in entity.fti_containers():
-            source.fti_unindex_entity(session, container.eid)
-            source.fti_index_entity(session, container)
-
-    def commit_event(self):
-        pass
+        pendingeids = session.transaction_data.get('pendingeids', ())
+        done = session.transaction_data.setdefault('indexedeids', set())
+        for eid in session.transaction_data.pop('ftindex', ()):
+            if eid in pendingeids or eid in done:
+                # skip entities added and deleted in the same transaction or
+                # already processed
+                continue
+            done.add(eid)
+            for container in session.entity_from_eid(eid).fti_containers():
+                source.fti_unindex_entity(session, container.eid)
+                source.fti_index_entity(session, container)
 
 
 def sql_schema(driver):
--- a/server/ssplanner.py	Fri Mar 26 19:21:17 2010 +0100
+++ b/server/ssplanner.py	Tue Mar 30 14:32:03 2010 +0200
@@ -18,6 +18,7 @@
 from cubicweb.schema import VIRTUAL_RTYPES
 from cubicweb.rqlrewrite import add_types_restriction
 from cubicweb.server.session import security_enabled
+from cubicweb.server.hook import CleanupDeletedEidsCacheOp
 
 READ_ONLY_RTYPES = set(('eid', 'has_text', 'is', 'is_instance_of', 'identity'))
 
@@ -507,11 +508,17 @@
     def execute(self):
         """execute this step"""
         results = self.execute_child()
-        todelete = frozenset(typed_eid(eid) for eid, in self.execute_child())
+        todelete = frozenset(typed_eid(eid) for eid, in results)
         session = self.plan.session
         delete = session.repo.glob_delete_entity
-        # register pending eids first to avoid multiple deletion
-        pending = session.transaction_data.setdefault('pendingeids', set())
+        # mark eids as being deleted in session info and set up the cache
+        # update operation (register pending eids before the actual deletion to
+        # avoid multiple calls to glob_delete_entity)
+        try:
+            pending = session.transaction_data['pendingeids']
+        except KeyError:
+            pending = session.transaction_data['pendingeids'] = set()
+            CleanupDeletedEidsCacheOp(session)
         actual = todelete - pending
         pending |= actual
         for eid in actual:
--- a/server/test/unittest_ldapuser.py	Fri Mar 26 19:21:17 2010 +0100
+++ b/server/test/unittest_ldapuser.py	Tue Mar 30 14:32:03 2010 +0200
@@ -189,7 +189,7 @@
         self.sexecute('Any X, Y WHERE X copain Y, X login "comme", Y login "cochon"')
 
     def test_multiple_entities_from_different_sources(self):
-        self.create_user('cochon', req=self.session)
+        self.create_user('cochon')
         self.failUnless(self.sexecute('Any X,Y WHERE X login %(syt)s, Y login "cochon"', {'syt': SYT}))
 
     def test_exists1(self):
@@ -202,15 +202,15 @@
         self.assertEquals(rset.rows, [['admin', 'activated'], [SYT, 'activated']])
 
     def test_exists2(self):
-        self.create_user('comme', req=self.session)
-        self.create_user('cochon', req=self.session)
+        self.create_user('comme')
+        self.create_user('cochon')
         self.sexecute('SET X copain Y WHERE X login "comme", Y login "cochon"')
         rset = self.sexecute('Any GN ORDERBY GN WHERE X in_group G, G name GN, (G name "managers" OR EXISTS(X copain T, T login in ("comme", "cochon")))')
         self.assertEquals(rset.rows, [['managers'], ['users']])
 
     def test_exists3(self):
-        self.create_user('comme', req=self.session)
-        self.create_user('cochon', req=self.session)
+        self.create_user('comme')
+        self.create_user('cochon')
         self.sexecute('SET X copain Y WHERE X login "comme", Y login "cochon"')
         self.failUnless(self.sexecute('Any X, Y WHERE X copain Y, X login "comme", Y login "cochon"'))
         self.sexecute('SET X copain Y WHERE X login %(syt)s, Y login "cochon"', {'syt': SYT})
@@ -219,9 +219,9 @@
         self.assertEquals(sorted(rset.rows), [['managers', 'admin'], ['users', 'comme'], ['users', SYT]])
 
     def test_exists4(self):
-        self.create_user('comme', req=self.session)
-        self.create_user('cochon', groups=('users', 'guests'), req=self.session)
-        self.create_user('billy', req=self.session)
+        self.create_user('comme')
+        self.create_user('cochon', groups=('users', 'guests'))
+        self.create_user('billy')
         self.sexecute('SET X copain Y WHERE X login "comme", Y login "cochon"')
         self.sexecute('SET X copain Y WHERE X login "cochon", Y login "cochon"')
         self.sexecute('SET X copain Y WHERE X login "comme", Y login "billy"')
@@ -241,9 +241,9 @@
         self.assertEquals(sorted(rset.rows), sorted(all.rows))
 
     def test_exists5(self):
-        self.create_user('comme', req=self.session)
-        self.create_user('cochon', groups=('users', 'guests'), req=self.session)
-        self.create_user('billy', req=self.session)
+        self.create_user('comme')
+        self.create_user('cochon', groups=('users', 'guests'))
+        self.create_user('billy')
         self.sexecute('SET X copain Y WHERE X login "comme", Y login "cochon"')
         self.sexecute('SET X copain Y WHERE X login "cochon", Y login "cochon"')
         self.sexecute('SET X copain Y WHERE X login "comme", Y login "billy"')
@@ -273,7 +273,7 @@
                           sorted(r[0] for r in afeids + ueids))
 
     def _init_security_test(self):
-        self.create_user('iaminguestsgrouponly', groups=('guests',), req=self.session)
+        self.create_user('iaminguestsgrouponly', groups=('guests',))
         cnx = self.login('iaminguestsgrouponly')
         return cnx.cursor()
 
--- a/server/test/unittest_undo.py	Fri Mar 26 19:21:17 2010 +0100
+++ b/server/test/unittest_undo.py	Tue Mar 30 14:32:03 2010 +0200
@@ -24,6 +24,15 @@
         self.session.undo_support = set()
         super(UndoableTransactionTC, self).tearDown()
 
+    def check_transaction_deleted(self, txuuid):
+        # also check transaction actions have been properly deleted
+        cu = self.session.system_sql(
+            "SELECT * from tx_entity_actions WHERE tx_uuid='%s'" % txuuid)
+        self.failIf(cu.fetchall())
+        cu = self.session.system_sql(
+            "SELECT * from tx_relation_actions WHERE tx_uuid='%s'" % txuuid)
+        self.failIf(cu.fetchall())
+
     def test_undo_api(self):
         self.failUnless(self.txuuid)
         # test transaction api
@@ -154,13 +163,7 @@
         self.assertEquals(len(txs), 2)
         self.assertRaises(NoSuchTransaction,
                           self.cnx.transaction_info, txuuid)
-        # also check transaction actions have been properly deleted
-        cu = self.session.system_sql(
-            "SELECT * from tx_entity_actions WHERE tx_uuid='%s'" % txuuid)
-        self.failIf(cu.fetchall())
-        cu = self.session.system_sql(
-            "SELECT * from tx_relation_actions WHERE tx_uuid='%s'" % txuuid)
-        self.failIf(cu.fetchall())
+        self.check_transaction_deleted(txuuid)
         # the final test: check we can login with the previously deleted user
         self.login('toto')
 
@@ -196,11 +199,74 @@
         g.delete()
         self.commit()
         errors = self.cnx.undo_transaction(txuuid)
-        self.assertRaises(ValidationError, self.commit)
+        self.assertEquals(errors,
+                          [u"Can't restore relation in_group, object entity "
+                          "%s doesn't exist anymore." % g.eid])
+        ex = self.assertRaises(ValidationError, self.commit)
+        self.assertEquals(ex.entity, self.toto.eid)
+        self.assertEquals(ex.errors,
+                          {'in_group-subject': u'at least one relation in_group is '
+                           'required on CWUser (%s)' % self.toto.eid})
+
+    def test_undo_creation_1(self):
+        session = self.session
+        c = session.create_entity('Card', title=u'hop', content=u'hop')
+        p = session.create_entity('Personne', nom=u'louis', fiche=c)
+        txuuid = self.commit()
+        errors = self.cnx.undo_transaction(txuuid)
+        self.commit()
+        self.failIf(errors)
+        self.failIf(self.execute('Any X WHERE X eid %(x)s', {'x': c.eid}, 'x'))
+        self.failIf(self.execute('Any X WHERE X eid %(x)s', {'x': p.eid}, 'x'))
+        self.failIf(self.execute('Any X,Y WHERE X fiche Y'))
+        self.session.set_pool()
+        for eid in (p.eid, c.eid):
+            self.failIf(session.system_sql(
+                'SELECT * FROM entities WHERE eid=%s' % eid).fetchall())
+            self.failIf(session.system_sql(
+                'SELECT 1 FROM owned_by_relation WHERE eid_from=%s' % eid).fetchall())
+            # added by sql in hooks (except when using dataimport)
+            self.failIf(session.system_sql(
+                'SELECT 1 FROM is_relation WHERE eid_from=%s' % eid).fetchall())
+            self.failIf(session.system_sql(
+                'SELECT 1 FROM is_instance_of_relation WHERE eid_from=%s' % eid).fetchall())
+        self.check_transaction_deleted(txuuid)
+
 
-    def test_undo_creation(self):
-        # XXX what about relation / composite entities which have been created
-        # afterwhile and linked to the undoed addition ?
-        self.skip('not implemented')
+    def test_undo_creation_integrity_1(self):
+        session = self.session
+        tutu = self.create_user('tutu', commit=False)
+        txuuid = self.commit()
+        email = self.request().create_entity('EmailAddress', address=u'tutu@cubicweb.org')
+        prop = self.request().create_entity('CWProperty', pkey=u'ui.default-text-format',
+                                            value=u'text/html')
+        tutu.set_relations(use_email=email, reverse_for_user=prop)
+        self.commit()
+        ex = self.assertRaises(ValidationError,
+                               self.cnx.undo_transaction, txuuid)
+        self.assertEquals(ex.entity, tutu.eid)
+        self.assertEquals(ex.errors,
+                          {None: 'some later transaction(s) touch entity, undo them first'})
+
+    def test_undo_creation_integrity_2(self):
+        session = self.session
+        g = session.create_entity('CWGroup', name=u'staff')
+        txuuid = self.commit()
+        session.execute('DELETE U in_group G WHERE U eid %(x)s', {'x': self.toto.eid})
+        self.toto.set_relations(in_group=g)
+        self.commit()
+        ex = self.assertRaises(ValidationError,
+                               self.cnx.undo_transaction, txuuid)
+        self.assertEquals(ex.entity, g.eid)
+        self.assertEquals(ex.errors,
+                          {None: 'some later transaction(s) touch entity, undo them first'})
+        # self.assertEquals(errors,
+        #                   [u"Can't restore relation in_group, object entity "
+        #                   "%s doesn't exist anymore." % g.eid])
+        # ex = self.assertRaises(ValidationError, self.commit)
+        # self.assertEquals(ex.entity, self.toto.eid)
+        # self.assertEquals(ex.errors,
+        #                   {'in_group-subject': u'at least one relation in_group is '
+        #                    'required on CWUser (%s)' % self.toto.eid})
 
     # test implicit 'replacement' of an inlined relation
--- a/server/utils.py	Fri Mar 26 19:21:17 2010 +0100
+++ b/server/utils.py	Tue Mar 30 14:32:03 2010 +0200
@@ -65,6 +65,18 @@
                 del sol[vname]
 
 
+def eschema_eid(session, eschema):
+    """get eid of the CWEType entity for the given yams type. You should use
+    this because when schema has been loaded from the file-system, not from the
+    database, (e.g. during tests), eschema.eid is not set.
+    """
+    if eschema.eid is None:
+        eschema.eid = session.execute(
+            'Any X WHERE X is CWEType, X name %(name)s',
+            {'name': str(eschema)})[0][0]
+    return eschema.eid
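A small usage sketch of the helper above (the yams type name is made up)::

    eschema = session.vreg.schema.eschema('CWUser')
    type_eid = eschema_eid(session, eschema)
    # the eid is cached on the eschema, so later calls issue no RQL at all
    assert eschema_eid(session, eschema) == type_eid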
+
+
 DEFAULT_MSG = 'we need a manager connection on the repository \
 (the server doesn\'t have to run, even should better not)'
 
--- a/test/unittest_utils.py	Fri Mar 26 19:21:17 2010 +0100
+++ b/test/unittest_utils.py	Tue Mar 30 14:32:03 2010 +0200
@@ -11,7 +11,7 @@
 import datetime
 
 from logilab.common.testlib import TestCase, unittest_main
-from cubicweb.utils import make_uid, UStringIO, SizeConstrainedList
+from cubicweb.utils import make_uid, UStringIO, SizeConstrainedList, RepeatList
 
 try:
     import simplejson
@@ -41,6 +41,52 @@
         self.assert_(UStringIO())
 
 
+class RepeatListTC(TestCase):
+
+    def test_base(self):
+        l = RepeatList(3, (1, 3))
+        self.assertEquals(l[0], (1, 3))
+        self.assertEquals(l[2], (1, 3))
+        self.assertEquals(l[-1], (1, 3))
+        self.assertEquals(len(l), 3)
+        # XXX out-of-range indices are not checked: any index returns the item
+        self.assertEquals(l[4], (1, 3))
+
+        self.failIf(RepeatList(0, None))
+
+    def test_slice(self):
+        l = RepeatList(3, (1, 3))
+        self.assertEquals(l[0:1], [(1, 3)])
+        self.assertEquals(l[0:4], [(1, 3)]*3)
+        self.assertEquals(l[:], [(1, 3)]*3)
+
+    def test_iter(self):
+        self.assertEquals(list(RepeatList(3, (1, 3))),
+                          [(1, 3)]*3)
+
+    def test_add(self):
+        l = RepeatList(3, (1, 3))
+        self.assertEquals(l + [(1, 4)], [(1, 3)]*3  + [(1, 4)])
+        self.assertEquals([(1, 4)] + l, [(1, 4)] + [(1, 3)]*3)
+        self.assertEquals(l + RepeatList(2, (2, 3)), [(1, 3)]*3 + [(2, 3)]*2)
+
+        x = l + RepeatList(2, (1, 3))
+        self.assertIsInstance(x, RepeatList)
+        self.assertEquals(len(x), 5)
+        self.assertEquals(x[0], (1, 3))
+
+        x = l + [(1, 3)] * 2
+        self.assertEquals(x, [(1, 3)] * 5)
+
+    def test_eq(self):
+        self.assertEquals(RepeatList(3, (1, 3)),
+                          [(1, 3)]*3)
+
+    def test_pop(self):
+        l = RepeatList(3, (1, 3))
+        l.pop(2)
+        self.assertEquals(l, [(1, 3)]*2)
+
 class SizeConstrainedListTC(TestCase):
 
     def test_append(self):
--- a/utils.py	Fri Mar 26 19:21:17 2010 +0100
+++ b/utils.py	Tue Mar 30 14:32:03 2010 +0200
@@ -12,6 +12,7 @@
 import decimal
 import datetime
 import random
+from itertools import repeat
 from uuid import uuid4
 from warnings import warn
 
@@ -101,6 +102,42 @@
     __iadd__ = extend
 
 
+class RepeatList(object):
+    """fake a list with the same element in each row"""
+    __slots__ = ('_size', '_item')
+    def __init__(self, size, item):
+        self._size = size
+        self._item = item
+    def __len__(self):
+        return self._size
+    def __nonzero__(self):
+        return self._size
+    def __iter__(self):
+        return repeat(self._item, self._size)
+    def __getitem__(self, index):
+        return self._item
+    def __getslice__(self, i, j):
+        # XXX could be more efficient, but do we bother?
+        return ([self._item] * self._size)[i:j]
+    def __add__(self, other):
+        if isinstance(other, RepeatList):
+            if other._item == self._item:
+                return RepeatList(self._size + other._size, self._item)
+            return ([self._item] * self._size) + other[:]
+        return ([self._item] * self._size) + other
+    def __radd__(self, other):
+        if isinstance(other, RepeatList):
+            if other._item == self._item:
+                return RepeatList(self._size + other._size, self._item)
+            return other[:] + ([self._item] * self._size)
+        return other[:] + ([self._item] * self._size)
+    def __eq__(self, other):
+        if isinstance(other, RepeatList):
+            return other._size == self._size and other._item == self._item
+        return self[:] == other
+    def pop(self, i):
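+        # every stored item is identical, so whatever the index, popping only
+        # shrinks the list by one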
+        self._size -= 1
+
 class UStringIO(list):
     """a file wrapper which automatically encode unicode string to an encoding
     specifed in the constructor
--- a/vregistry.py	Fri Mar 26 19:21:17 2010 +0100
+++ b/vregistry.py	Tue Mar 30 14:32:03 2010 +0200
@@ -48,7 +48,21 @@
             subfiles = [join(fileordir, fname) for fname in listdir(fileordir)]
             _toload_info(subfiles, extrapath, _toload)
         elif fileordir[-3:] == '.py':
-            modname = '.'.join(modpath_from_file(fileordir, extrapath))
+            modpath = modpath_from_file(fileordir, extrapath)
+            # omit '__init__' from the package's module path, else the package
+            # would be loaded twice: once as 'package' (when imported by some
+            # other appobject module) and once as 'package.__init__'. This
+            # assumes imports are written as::
+            #
+            #   from package import something
+            #
+            # not::
+            #
+            #   from package.__init__ import something
+            #
+            if modpath[-1] == '__init__':
+                modpath.pop()
+            modname = '.'.join(modpath)
             _toload[0][modname] = fileordir
             _toload[1].append((fileordir, modname))
     return _toload
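To make the '__init__' trimming above concrete, a sketch with a made-up cube
layout (modpath_from_file comes from logilab.common.modutils)::

    # e.g. for .../mycube/views/__init__.py, modpath_from_file returns
    modpath = ['mycube', 'views', '__init__']
    if modpath[-1] == '__init__':
        modpath.pop()
    '.'.join(modpath)   # -> 'mycube.views', not 'mycube.views.__init__'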
--- a/web/formfields.py	Fri Mar 26 19:21:17 2010 +0100
+++ b/web/formfields.py	Tue Mar 30 14:32:03 2010 +0200
@@ -21,7 +21,7 @@
 
 from cubicweb import Binary, tags, uilib
 from cubicweb.web import INTERNAL_FIELD_VALUE, ProcessFormError, eid_param, \
-     formwidgets as fw
+     formwidgets as fw, uicfg
 
 
 class UnmodifiedField(Exception):
@@ -880,6 +880,8 @@
         return [self] + list(self.fields)
 
 
+_AFF_KWARGS = uicfg.autoform_field_kwargs
+
 def guess_field(eschema, rschema, role='subject', skip_meta_attr=True, **kwargs):
     """return the most adapated widget to edit the relation
     'subjschema rschema objschema' according to information found in the schema
@@ -894,14 +896,14 @@
     else:
         targetschema = rdef.subject
     card = rdef.role_cardinality(role)
-    kwargs['required'] = card in '1+'
     kwargs['name'] = rschema.type
     kwargs['role'] = role
+    kwargs['eidparam'] = True
+    kwargs.setdefault('required', card in '1+')
     if role == 'object':
         kwargs.setdefault('label', (eschema.type, rschema.type + '_object'))
     else:
         kwargs.setdefault('label', (eschema.type, rschema.type))
-    kwargs['eidparam'] = True
     kwargs.setdefault('help', rdef.description)
     if rschema.final:
         if skip_meta_attr and rschema in eschema.meta_attributes():
@@ -929,8 +931,10 @@
             for metadata in KNOWN_METAATTRIBUTES:
                 metaschema = eschema.has_metadata(rschema, metadata)
                 if metaschema is not None:
+                    metakwargs = _AFF_KWARGS.etype_get(eschema, metaschema, 'subject')
                     kwargs['%s_field' % metadata] = guess_field(eschema, metaschema,
-                                                                skip_meta_attr=False)
+                                                                skip_meta_attr=False,
+                                                                **metakwargs)
         return fieldclass(**kwargs)
     return RelationField.fromcardinality(card, **kwargs)
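The point of threading metakwargs through guess_field is that uicfg
customisations now also reach the automatically guessed fields of metadata
attributes (format, encoding, name). A hedged illustration of the kind of
cube-level declaration that benefits from it; the entity type and attribute
names are made up::

    from cubicweb.web import uicfg
    # hypothetical: tweak the field guessed for Card.content's
    # 'content_format' metadata attribute, now honoured when guess_field
    # builds the content field's format_field
    uicfg.autoform_field_kwargs.tag_attribute(('Card', 'content_format'),
                                              {'required': False})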
 
--- a/web/test/unittest_pdf.py	Fri Mar 26 19:21:17 2010 +0100
+++ b/web/test/unittest_pdf.py	Tue Mar 30 14:32:03 2010 +0200
@@ -1,12 +1,11 @@
-from unittest import TestCase
 import os.path as osp
+from tempfile import NamedTemporaryFile
+from subprocess import Popen as sub
 from xml.etree.cElementTree import ElementTree, fromstring, tostring, dump
 
-from tempfile import NamedTemporaryFile
-from subprocess import Popen as sub
+from logilab.common.testlib import TestCase, unittest_main
 
 from cubicweb.utils import can_do_pdf_conversion
-
 from cubicweb.ext.xhtml2fo import ReportTransformer
 
 DATADIR = osp.join(osp.dirname(__file__), 'data')
@@ -38,6 +37,5 @@
         self.assertTextEquals(output[150:1500], reference[150:1500])
 
 if __name__ == '__main__':
-    from logilab.common.testlib import unittest_main
     unittest_main()
 
--- a/web/views/editcontroller.py	Fri Mar 26 19:21:17 2010 +0100
+++ b/web/views/editcontroller.py	Tue Mar 30 14:32:03 2010 +0200
@@ -290,5 +290,3 @@
     def _action_delete(self):
         self.delete_entities(self._cw.edited_eids(withtype=True))
         return self.reset()
-
-
--- a/web/views/primary.py	Fri Mar 26 19:21:17 2010 +0100
+++ b/web/views/primary.py	Tue Mar 30 14:32:03 2010 +0200
@@ -15,7 +15,7 @@
 from cubicweb import Unauthorized
 from cubicweb.selectors import match_kwargs
 from cubicweb.view import EntityView
-from cubicweb.schema import display_name
+from cubicweb.schema import VIRTUAL_RTYPES, display_name
 from cubicweb.web import uicfg
 
 
@@ -202,6 +202,8 @@
         rdefs = []
         eschema = entity.e_schema
         for rschema, tschemas, role in eschema.relation_definitions(True):
+            if rschema in VIRTUAL_RTYPES:
+                continue
             matchtschemas = []
             for tschema in tschemas:
                 section = self.rsection.etype_get(eschema, rschema, role,
--- a/web/views/sessions.py	Fri Mar 26 19:21:17 2010 +0100
+++ b/web/views/sessions.py	Tue Mar 30 14:32:03 2010 +0200
@@ -22,6 +22,8 @@
         #assert isinstance(self.authmanager, RepositoryAuthenticationManager)
         self._sessions = {}
 
+    # dump_data / restore_data to avoid losing open sessions on registry
+    # reloading
     def dump_data(self):
         return self._sessions
     def restore_data(self, data):
@@ -38,9 +40,9 @@
         if self.has_expired(session):
             self.close_session(session)
             raise InvalidSession()
-        # give an opportunity to auth manager to hijack the session
-        # (necessary with the RepositoryAuthenticationManager in case
-        #  the connection to the repository has expired)
+        # give an opportunity to auth manager to hijack the session (necessary
+        # with the RepositoryAuthenticationManager in case the connection to the
+        # repository has expired)
         try:
             session = self.authmanager.validate_session(req, session)
             # necessary in case session has been hijacked