[merge] backport stable fixes into default
authorAurelien Campeas <aurelien.campeas@logilab.fr>
Thu, 24 Mar 2011 15:21:13 +0100
changeset 7109 611663348158
parent 7077 784d6f300070 (current diff)
parent 7108 bcdf22734059 (diff)
child 7141 ef29d3ea3909
[merge] backport stable fixes into default
--- a/.hgignore	Fri Mar 11 12:17:32 2011 +0100
+++ b/.hgignore	Thu Mar 24 15:21:13 2011 +0100
@@ -9,3 +9,9 @@
 \#.*?\#$
 \.swp$
 ^doc/book/en/apidoc$
+\.old$
+syntax: regexp
+.*/data/database/.*\.sqlite
+.*/data/database/.*\.config
+.*/data/database/tmpdb.*
+
--- a/.hgtags	Fri Mar 11 12:17:32 2011 +0100
+++ b/.hgtags	Thu Mar 24 15:21:13 2011 +0100
@@ -181,6 +181,7 @@
 bf5d9a1415e3c9abe6b68ba3b24a8ad741f9de3c cubicweb-debian-version-3.10.7-1
 e581a86a68f089946a98c966ebca7aee58a5718f cubicweb-version-3.10.8
 132b525de25bc75ed6389c45aee77e847cb3a437 cubicweb-debian-version-3.10.8-1
+48f468f33704e401a8e7907e258bf1ac61eb8407 cubicweb-version-3.9.x
 37432cede4fe55b97fc2e9be0a2dd20e8837a848 cubicweb-version-3.11.0
 8daabda9f571863e8754f8ab722744c417ba3abf cubicweb-debian-version-3.11.0-1
 d0410eb4d8bbf657d7f32b0c681db09b1f8119a0 cubicweb-version-3.11.1
--- a/__pkginfo__.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/__pkginfo__.py	Thu Mar 24 15:21:13 2011 +0100
@@ -52,7 +52,7 @@
     'Twisted': '',
     # XXX graphviz
     # server dependencies
-    'logilab-database': '>= 1.3.3',
+    'logilab-database': '>= 1.4.0',
     'pysqlite': '>= 2.5.5', # XXX install pysqlite2
     }
 
--- a/appobject.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/appobject.py	Thu Mar 24 15:21:13 2011 +0100
@@ -576,4 +576,8 @@
     def propval(self, propid):
         return self._cw.property_value(self._cwpropkey(propid))
 
+    # these are overridden by set_log_methods below
+    # only defining here to prevent pylint from complaining
+    info = warning = error = critical = exception = debug = lambda msg, *a, **kw: None
+
 set_log_methods(AppObject, getLogger('cubicweb.appobject'))
--- a/cwconfig.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/cwconfig.py	Thu Mar 24 15:21:13 2011 +0100
@@ -1182,6 +1182,13 @@
             SMTP_LOCK.release()
         return True
 
+    # these are overridden by set_log_methods below
+    # only defining here to prevent pylint from complaining
+    @classmethod
+    def debug(cls, msg, *a, **kw):
+        pass
+    info = warning = error = critical = exception = debug
+
 set_log_methods(CubicWebNoAppConfiguration,
                 logging.getLogger('cubicweb.configuration'))
 
--- a/dbapi.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/dbapi.py	Thu Mar 24 15:21:13 2011 +0100
@@ -375,6 +375,9 @@
     def del_session_data(self, key):
         self.session.data.pop(key, None)
 
+    # these are overridden by set_log_methods below
+    # only defining here to prevent pylint from complaining
+    info = warning = error = critical = exception = debug = lambda msg, *a, **kw: None
 
 set_log_methods(DBAPIRequest, getLogger('cubicweb.dbapi'))
 
--- a/debian/control	Fri Mar 11 12:17:32 2011 +0100
+++ b/debian/control	Thu Mar 24 15:21:13 2011 +0100
@@ -33,7 +33,7 @@
 Conflicts: cubicweb-multisources
 Replaces: cubicweb-multisources
 Provides: cubicweb-multisources
-Depends: ${misc:Depends}, ${python:Depends}, cubicweb-common (= ${source:Version}), cubicweb-ctl (= ${source:Version}), python-logilab-database (>= 1.3.3), cubicweb-postgresql-support | cubicweb-mysql-support | python-pysqlite2
+Depends: ${misc:Depends}, ${python:Depends}, cubicweb-common (= ${source:Version}), cubicweb-ctl (= ${source:Version}), python-logilab-database (>= 1.4.0), cubicweb-postgresql-support | cubicweb-mysql-support | python-pysqlite2
 Recommends: pyro (<< 4.0.0), cubicweb-documentation (= ${source:Version})
 Description: server part of the CubicWeb framework
  CubicWeb is a semantic web application framework.
--- a/devtools/__init__.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/devtools/__init__.py	Thu Mar 24 15:21:13 2011 +0100
@@ -17,17 +17,26 @@
 # with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
 """Test tools for cubicweb"""
 
+from __future__ import with_statement
+
 __docformat__ = "restructuredtext en"
 
 import os
 import sys
 import logging
+import shutil
+import pickle
+import glob
+import warnings
 from datetime import timedelta
 from os.path import (abspath, join, exists, basename, dirname, normpath, split,
-                     isfile, isabs, splitext)
+                     isfile, isabs, splitext, isdir, expanduser)
+from functools import partial
+import hashlib
 
 from logilab.common.date import strptime
-from cubicweb import CW_SOFTWARE_ROOT, ConfigurationError, schema, cwconfig
+from logilab.common.decorators import cached, clear_cache
+from cubicweb import CW_SOFTWARE_ROOT, ConfigurationError, schema, cwconfig, BadConnectionId
 from cubicweb.server.serverconfig import ServerConfiguration
 from cubicweb.etwist.twconfig import TwistedConfiguration
 
@@ -78,20 +87,49 @@
                               },
                    }
 
+def turn_repo_off(repo):
+    """ Idea: this is less costly than a full re-creation of the repo object.
+    off:
+    * session are closed,
+    * pools are closed
+    * system source is shutdown
+    """
+    if not repo._needs_refresh:
+        for sessionid in list(repo._sessions):
+            warnings.warn('%s: open session found while turning repository off'
+                          % sessionid, RuntimeWarning)
+            try:
+                repo.close(sessionid)
+            except BadConnectionId: # this is strange? a thread issue?
+                print 'XXX unknown session', sessionid
+        for pool in repo.pools:
+            pool.close(True)
+        repo.system_source.shutdown()
+        repo._needs_refresh = True
+        repo._has_started = False
+
+def turn_repo_on(repo):
+    """Idea: this is less costly than a full re-creation of the repo object.
+    on:
+    * pools are connected
+    * caches are cleared
+    """
+    if repo._needs_refresh:
+        for pool in repo.pools:
+            pool.reconnect()
+        repo._type_source_cache = {}
+        repo._extid_cache = {}
+        repo.querier._rql_cache = {}
+        for source in repo.sources:
+            source.reset_caches()
+        repo._needs_refresh = False
+
 
 class TestServerConfiguration(ServerConfiguration):
     mode = 'test'
     set_language = False
     read_instance_schema = False
     init_repository = True
-    db_require_setup = True
-    options = cwconfig.merge_options(
-        ServerConfiguration.options +
-        tuple((opt, optdict) for opt, optdict in TwistedConfiguration.options
-              if opt in ('anonymous-user', 'anonymous-password')))
-    # By default anonymous login are allow but some test need to deny of to
-    # change the default user. Set it to None to prevent anonymous login.
-    anonymous_credential = ('anon', 'anon')
 
     def __init__(self, appid='data', apphome=None, log_threshold=logging.CRITICAL+10):
         # must be set before calling parent __init__
@@ -106,7 +144,20 @@
         # need this, usually triggered by cubicweb-ctl
         self.load_cwctl_plugins()
 
-    anonymous_user = TwistedConfiguration.anonymous_user.im_func
+    # By default anonymous logins are allowed, but some tests need to deny them
+    # or to change the default user. Set it to None to prevent anonymous login.
+    anonymous_credential = ('anon', 'anon')
+
+    def anonymous_user(self):
+        if not self.anonymous_credential:
+            return None, None
+        return self.anonymous_credential
+
+    def set_anonymous_allowed(self, allowed, anonuser='anon'):
+        if allowed:
+            self.anonymous_credential = (anonuser, anonuser)
+        else:
+            self.anonymous_credential = None
 
     @property
     def apphome(self):
@@ -115,10 +166,6 @@
 
     def load_configuration(self):
         super(TestServerConfiguration, self).load_configuration()
-        if self.anonymous_credential:
-            user, password = self.anonymous_credential
-            self.global_set_option('anonymous-user', user)
-            self.global_set_option('anonymous-password', password)
         # no undo support in tests
         self.global_set_option('undo-support', '')
 
@@ -214,131 +261,402 @@
               self.view('foaf', rset)
 
     """
-    db_require_setup = False    # skip init_db / reset_db steps
     read_instance_schema = True # read schema from database
 
 
 # test database handling #######################################################
 
-def init_test_database(config=None, appid='data', apphome=None):
-    """init a test database for a specific driver"""
-    from cubicweb.dbapi import in_memory_repo_cnx
-    config = config or TestServerConfiguration(appid, apphome=apphome)
-    sources = config.sources()
-    driver = sources['system']['db-driver']
-    if config.db_require_setup:
-        if driver == 'sqlite':
-            init_test_database_sqlite(config)
-        elif driver == 'postgres':
-            init_test_database_postgres(config)
+DEFAULT_EMPTY_DB_ID = '__default_empty_db__'
+
+class TestDataBaseHandler(object):
+    DRIVER = None
+    db_cache = {}
+    explored_glob = set()
+
+    def __init__(self, config):
+        self.config = config
+        self._repo = None
+        # pure consistency check
+        assert self.system_source['db-driver'] == self.DRIVER
+
+    def _ensure_test_backup_db_dir(self):
+        """Return path of directory for database backup.
+
+        The function creates it if necessary."""
+        backupdir = join(self.config.apphome, 'database')
+        if not isdir(backupdir):
+            os.makedirs(backupdir)
+        return backupdir
+
+    def config_path(self, db_id):
+        """Path for config backup of a given database id"""
+        return self.absolute_backup_file(db_id, 'config')
+
+    def absolute_backup_file(self, db_id, suffix):
+        """Path for config backup of a given database id"""
+        dbname = self.dbname.replace('-', '_')
+        assert '.' not in db_id
+        filename = '%s-%s.%s' % (dbname, db_id, suffix)
+        return join(self._ensure_test_backup_db_dir(), filename)
+
+    def db_cache_key(self, db_id, dbname=None):
+        """Build a database cache key for a db_id with the current config
+
+        This key is meant to be used in the cls.db_cache mapping"""
+        if dbname is None:
+            dbname = self.dbname
+        dbname = os.path.basename(dbname)
+        dbname = dbname.replace('-', '_')
+        return (self.config.apphome, dbname, db_id)
+
+    def backup_database(self, db_id):
+        """Store the content of the current database as <db_id>
+
+        The config used is also stored."""
+        backup_data = self._backup_database(db_id)
+        config_path = self.config_path(db_id)
+        # XXX we dump a dict of the config
+        # This is experimental, to help config-dependent setups (like BFSS)
+        # be properly restored
+        with open(config_path, 'wb') as conf_file:
+            conf_file.write(pickle.dumps(dict(self.config)))
+        self.db_cache[self.db_cache_key(db_id)] = (backup_data, config_path)
+
+    def _backup_database(self, db_id):
+        """Actual backup the current database.
+
+        return a value to be stored in db_cache to allow restoration"""
+        raise NotImplementedError()
+
+    def restore_database(self, db_id):
+        """Restore a database.
+
+        Takes the db_id of a backup stored in db_cache by self.backup_database."""
+        # XXX set a clearer error message ???
+        backup_coordinates, config_path = self.db_cache[self.db_cache_key(db_id)]
+        # reload the config used to create the database.
+        with open(config_path, 'rb') as config_file:
+            config = pickle.loads(config_file.read())
+        # shutdown repo before changing database content
+        if self._repo is not None:
+            self._repo.turn_repo_off()
+        self._restore_database(backup_coordinates, config)
+
+    def _restore_database(self, backup_coordinates, config):
+        """Actual restore of the current database.
+
+        Use the value tostored in db_cache as input """
+        raise NotImplementedError()
+
+    def get_repo(self, startup=False):
+        """ return Repository object on the current database.
+
+        (turn the current repo object "on" if there is one or recreate one)
+        if startup is True, server startup server hooks will be called if needed
+        """
+        if self._repo is None:
+            self._repo = self._new_repo(self.config)
+        repo = self._repo
+        repo.turn_repo_on()
+        if startup and not repo._has_started:
+            repo.hm.call_hooks('server_startup', repo=repo)
+            repo._has_started = True
+        return repo
+
+    def _new_repo(self, config):
+        """Factory method to create a new Repository Instance"""
+        from cubicweb.dbapi import in_memory_repo
+        config._cubes = None # avoid assertion error
+        repo = in_memory_repo(config)
+        # extending Repository class
+        repo._has_started = False
+        repo._needs_refresh = False
+        repo.turn_repo_on = partial(turn_repo_on, repo)
+        repo.turn_repo_off = partial(turn_repo_off, repo)
+        return repo
+
+
+    def get_cnx(self):
+        """return Connection object ont he current repository"""
+        from cubicweb.dbapi import in_memory_cnx
+        repo = self.get_repo()
+        sources = self.config.sources()
+        login = unicode(sources['admin']['login'])
+        password = sources['admin']['password'] or 'xxx'
+        cnx = in_memory_cnx(repo, login, password=password)
+        return cnx
+
+    def get_repo_and_cnx(self, db_id=DEFAULT_EMPTY_DB_ID):
+        """Reset database with the current db_id and return (repo, cnx)
+
+        A database *MUST* have been built with the current <db_id> prior to
+        calling this method. See the ``build_db_cache`` method. The returned
+        repository has its startup hooks called and the connection is
+        established as admin."""
+
+        self.restore_database(db_id)
+        repo = self.get_repo(startup=True)
+        cnx  = self.get_cnx()
+        return repo, cnx
+
+    @property
+    def system_source(self):
+        sources = self.config.sources()
+        return sources['system']
+
+    @property
+    def dbname(self):
+        return self.system_source['db-name']
+
+    def init_test_database(self):
+        """actual initialisation of the database"""
+        raise ValueError('no initialization function for driver %r'
+                         % self.DRIVER)
+
+    def has_cache(self, db_id):
+        """Check if a given database id exist in cb cache for the current config"""
+        cache_glob = self.absolute_backup_file('*', '*')
+        if cache_glob not in self.explored_glob:
+            self.discover_cached_db()
+        return self.db_cache_key(db_id) in self.db_cache
+
+    def discover_cached_db(self):
+        """Search available db_if for the current config"""
+        cache_glob = self.absolute_backup_file('*', '*')
+        directory = os.path.dirname(cache_glob)
+        entries = {}
+        candidates = glob.glob(cache_glob)
+        for filepath in candidates:
+            data = os.path.basename(filepath)
+            # database backups are of the form <dbname>-<db_id>.<backuptype>
+            dbname, data = data.split('-', 1)
+            db_id, filetype = data.split('.', 1)
+            entries.setdefault((dbname, db_id), {})[filetype] = filepath
+        for (dbname, db_id), entry in entries.iteritems():
+            # apply necessary transformation from the driver
+            value = self.process_cache_entry(directory, dbname, db_id, entry)
+            assert 'config' in entry
+            if value is not None: # a None value means "not handled by this driver"
+                                  # XXX ignored values are shadowed to other handlers if caches are shared.
+                key = self.db_cache_key(db_id, dbname=dbname)
+                self.db_cache[key] = value, entry['config']
+        self.explored_glob.add(cache_glob)
+
+    def process_cache_entry(self, directory, dbname, db_id, entry):
+        """Transforms potential cache entry to proper backup coordinate
+
+        entry argument is a "filetype" -> "filepath" mapping
+        Return None if an entry should be ignored."""
+        return None
+
+    def build_db_cache(self, test_db_id=DEFAULT_EMPTY_DB_ID, pre_setup_func=None):
+        """Build Database cache for ``test_db_id`` if a cache doesn't exist
+
+        If ``test_db_id is DEFAULT_EMPTY_DB_ID``, self.init_test_database is
+        called. Otherwise, DEFAULT_EMPTY_DB_ID is built/restored and
+        ``pre_setup_func`` is called to set up the database.
+
+        This function backs up any database it builds."""
+
+        if self.has_cache(test_db_id):
+            return # already in cache
+        if test_db_id is DEFAULT_EMPTY_DB_ID:
+            self.init_test_database()
         else:
-            raise ValueError('no initialization function for driver %r' % driver)
-    config._cubes = None # avoid assertion error
-    repo, cnx = in_memory_repo_cnx(config, unicode(sources['admin']['login']),
-                              password=sources['admin']['password'] or 'xxx')
-    if driver == 'sqlite':
-        install_sqlite_patch(repo.querier)
-    return repo, cnx
-
-def reset_test_database(config):
-    """init a test database for a specific driver"""
-    if not config.db_require_setup:
-        return
-    driver = config.sources()['system']['db-driver']
-    if driver == 'sqlite':
-        reset_test_database_sqlite(config)
-    elif driver == 'postgres':
-        init_test_database_postgres(config)
-    else:
-        raise ValueError('no reset function for driver %r' % driver)
-
+            print 'Building %s for database %s' % (test_db_id, self.dbname)
+            self.build_db_cache(DEFAULT_EMPTY_DB_ID)
+            self.restore_database(DEFAULT_EMPTY_DB_ID)
+            repo = self.get_repo(startup=True)
+            cnx = self.get_cnx()
+            session = repo._sessions[cnx.sessionid]
+            session.set_pool()
+            _commit = session.commit
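+            # wrap commit so the session keeps a pool afterwards, letting
+            # pre_setup_func keep using the session transparently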
+            def always_pooled_commit():
+                _commit()
+                session.set_pool()
+            session.commit = always_pooled_commit
+            pre_setup_func(session, self.config)
+            session.commit()
+            cnx.close()
+        self.backup_database(test_db_id)
 
 ### postgres test database handling ############################################
 
-def init_test_database_postgres(config):
-    """initialize a fresh postgresql databse used for testing purpose"""
-    from logilab.database import get_db_helper
-    from cubicweb.server import init_repository
-    from cubicweb.server.serverctl import (createdb, system_source_cnx,
-                                           _db_sys_cnx)
-    source = config.sources()['system']
-    dbname = source['db-name']
-    templdbname = dbname + '_template'
-    helper = get_db_helper('postgres')
-    # connect on the dbms system base to create our base
-    dbcnx = _db_sys_cnx(source, 'CREATE DATABASE and / or USER', verbose=0)
-    cursor = dbcnx.cursor()
-    try:
-        if dbname in helper.list_databases(cursor):
-            cursor.execute('DROP DATABASE %s' % dbname)
-        if not templdbname in helper.list_databases(cursor):
-            source['db-name'] = templdbname
-            createdb(helper, source, dbcnx, cursor)
-            dbcnx.commit()
-            cnx = system_source_cnx(source, special_privs='LANGUAGE C', verbose=0)
+class PostgresTestDataBaseHandler(TestDataBaseHandler):
+
+    # XXX
+    # XXX PostgresTestDataBaseHandler has not been tested at all.
+    # XXX
+    DRIVER = 'postgres'
+
+    @property
+    @cached
+    def helper(self):
+        from logilab.database import get_db_helper
+        return get_db_helper('postgres')
+
+    @property
+    @cached
+    def dbcnx(self):
+        from cubicweb.server.serverctl import _db_sys_cnx
+        return _db_sys_cnx(self.system_source, 'CREATE DATABASE and / or USER', verbose=0)
+
+    @property
+    @cached
+    def cursor(self):
+        return self.dbcnx.cursor()
+
+    def init_test_database(self):
+        """initialize a fresh postgresql databse used for testing purpose"""
+        from cubicweb.server import init_repository
+        from cubicweb.server.serverctl import system_source_cnx, createdb
+        # connect on the dbms system base to create our base
+        try:
+            self._drop(self.dbname)
+
+            createdb(self.helper, self.system_source, self.dbcnx, self.cursor)
+            self.dbcnx.commit()
+            cnx = system_source_cnx(self.system_source, special_privs='LANGUAGE C', verbose=0)
             templcursor = cnx.cursor()
-            # XXX factorize with db-create code
-            helper.init_fti_extensions(templcursor)
-            # install plpythonu/plpgsql language if not installed by the cube
-            langs = sys.platform == 'win32' and ('plpgsql',) or ('plpythonu', 'plpgsql')
-            for extlang in langs:
-                helper.create_language(templcursor, extlang)
-            cnx.commit()
-            templcursor.close()
-            cnx.close()
-            init_repository(config, interactive=False)
-            source['db-name'] = dbname
-    except:
-        dbcnx.rollback()
-        # XXX drop template
-        raise
-    createdb(helper, source, dbcnx, cursor, template=templdbname)
-    dbcnx.commit()
-    dbcnx.close()
+            try:
+                # XXX factorize with db-create code
+                self.helper.init_fti_extensions(templcursor)
+                # install plpythonu/plpgsql language if not installed by the cube
+                langs = sys.platform == 'win32' and ('plpgsql',) or ('plpythonu', 'plpgsql')
+                for extlang in langs:
+                    self.helper.create_language(templcursor, extlang)
+                cnx.commit()
+            finally:
+                templcursor.close()
+                cnx.close()
+            init_repository(self.config, interactive=False)
+        except:
+            self.dbcnx.rollback()
+            print >> sys.stderr, 'building', self.dbname, 'failed'
+            #self._drop(self.dbname)
+            raise
+
+    def helper_clear_cache(self):
+        self.dbcnx.commit()
+        self.dbcnx.close()
+        clear_cache(self, 'dbcnx')
+        clear_cache(self, 'helper')
+        clear_cache(self, 'cursor')
+
+    def __del__(self):
+        self.helper_clear_cache()
+
+    @property
+    def _config_id(self):
+        return hashlib.sha1(self.config.apphome).hexdigest()[:10]
+
+    def _backup_name(self, db_id): # merge me with parent
+        backup_name = '_'.join(('cache', self._config_id, self.dbname, db_id))
+        return backup_name.lower()
+
+    def _drop(self, db_name):
+        if db_name in self.helper.list_databases(self.cursor):
+            #print 'dropping overwritten database:', db_name
+            self.cursor.execute('DROP DATABASE %s' % db_name)
+            self.dbcnx.commit()
+
+    def _backup_database(self, db_id):
+        """Actual backup the current database.
+
+        return a value to be stored in db_cache to allow restoration"""
+        from cubicweb.server.serverctl import createdb
+        orig_name = self.system_source['db-name']
+        try:
+            backup_name = self._backup_name(db_id)
+            #print 'storing postgres backup as', backup_name
+            self._drop(backup_name)
+            self.system_source['db-name'] = backup_name
+            createdb(self.helper, self.system_source, self.dbcnx, self.cursor, template=orig_name)
+            self.dbcnx.commit()
+            return backup_name
+        finally:
+            self.system_source['db-name'] = orig_name
+
+    def _restore_database(self, backup_coordinates, config):
+        """Actually restore the current database.
+
+        Use the value stored in db_cache as input."""
+        from cubicweb.server.serverctl import createdb
+        #print 'restoring postgres backup from', backup_coordinates
+        self._drop(self.dbname)
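+        # re-create the database from the backup database, used as a postgres
+        # template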
+        createdb(self.helper, self.system_source, self.dbcnx, self.cursor,
+                 template=backup_coordinates)
+        self.dbcnx.commit()
+
+
 
 ### sqlserver2005 test database handling #######################################
 
-def init_test_database_sqlserver2005(config):
-    """initialize a fresh sqlserver databse used for testing purpose"""
-    if config.init_repository:
-        from cubicweb.server import init_repository
-        init_repository(config, interactive=False, drop=True)
+class SQLServerTestDataBaseHandler(TestDataBaseHandler):
+    DRIVER = 'sqlserver'
+
+    # XXX complete me
+
+    def init_test_database(self):
+        """initialize a fresh sqlserver databse used for testing purpose"""
+        if self.config.init_repository:
+            from cubicweb.server import init_repository
+            init_repository(self.config, interactive=False, drop=True)
 
 ### sqlite test database handling ##############################################
 
-def cleanup_sqlite(dbfile, removetemplate=False):
-    try:
-        os.remove(dbfile)
-        os.remove('%s-journal' % dbfile)
-    except OSError:
-        pass
-    if removetemplate:
+class SQLiteTestDataBaseHandler(TestDataBaseHandler):
+    DRIVER = 'sqlite'
+
+    @staticmethod
+    def _cleanup_database(dbfile):
         try:
-            os.remove('%s-template' % dbfile)
+            os.remove(dbfile)
+            os.remove('%s-journal' % dbfile)
         except OSError:
             pass
 
-def reset_test_database_sqlite(config):
-    import shutil
-    dbfile = config.sources()['system']['db-name']
-    cleanup_sqlite(dbfile)
-    template = '%s-template' % dbfile
-    if exists(template):
-        shutil.copy(template, dbfile)
-        return True
-    return False
+    def absolute_dbfile(self):
+        """absolute path of current database file"""
+        dbfile = join(self._ensure_test_backup_db_dir(),
+                      self.config.sources()['system']['db-name'])
+        self.config.sources()['system']['db-name'] = dbfile
+        return dbfile
+
+
+    def process_cache_entry(self, directory, dbname, db_id, entry):
+        return entry.get('sqlite')
 
-def init_test_database_sqlite(config):
-    """initialize a fresh sqlite databse used for testing purpose"""
-    # remove database file if it exists
-    dbfile = join(config.apphome, config.sources()['system']['db-name'])
-    config.sources()['system']['db-name'] = dbfile
-    if not reset_test_database_sqlite(config):
+    def _backup_database(self, db_id=DEFAULT_EMPTY_DB_ID):
+        # XXX remove database file if it exists ???
+        dbfile = self.absolute_dbfile()
+        backup_file = self.absolute_backup_file(db_id, 'sqlite')
+        shutil.copy(dbfile, backup_file)
+        # Useful to debug WHO writes a database
+        # backup_stack = self.absolute_backup_file(db_id, '.stack')
+        #with open(backup_stack, 'w') as backup_stack_file:
+        #    import traceback
+        #    traceback.print_stack(file=backup_stack_file)
+        return backup_file
+
+    def _new_repo(self, config):
+        repo = super(SQLiteTestDataBaseHandler, self)._new_repo(config)
+        install_sqlite_patch(repo.querier)
+        return repo
+
+    def _restore_database(self, backup_coordinates, _config):
+        # remove database file if it exists ?
+        dbfile = self.absolute_dbfile()
+        self._cleanup_database(dbfile)
+        #print 'restoring from', backup_coordinates
+        shutil.copy(backup_coordinates, dbfile)
+        self.get_repo() # turn the repo back on over the restored file
+
+    def init_test_database(self):
+        """initialize a fresh sqlite databse used for testing purpose"""
         # initialize the database
-        import shutil
         from cubicweb.server import init_repository
-        init_repository(config, interactive=False)
-        shutil.copy(dbfile, '%s-template' % dbfile)
+        self._cleanup_database(self.absolute_dbfile())
+        init_repository(self.config, interactive=False)
+
 
 def install_sqlite_patch(querier):
     """This patch hotfixes the following sqlite bug :
@@ -377,3 +695,79 @@
         return new_execute
     querier.__class__.execute = wrap_execute(querier.__class__.execute)
     querier.__class__._devtools_sqlite_patched = True
+
+
+
+HANDLERS = {}
+
+def register_handler(handlerkls, overwrite=False):
+    assert handlerkls is not None
+    if overwrite or handlerkls.DRIVER not in HANDLERS:
+        HANDLERS[handlerkls.DRIVER] = handlerkls
+    else:
+        msg = "%s: Handler already exists use overwrite if it's intended\n"\
+              "(existing handler class is %r)"
+        raise ValueError(msg % (handlerkls.DRIVER, HANDLERS[handlerkls.DRIVER]))
+
+register_handler(PostgresTestDataBaseHandler)
+register_handler(SQLiteTestDataBaseHandler)
+register_handler(SQLServerTestDataBaseHandler)
+
+
+class HCache(object):
+    """Handler cache object: store database handler for a given configuration.
+
+    We only keep one handler in cache to prevent too many objects from staying
+    alive (a database handler holds a reference to a repository). As a new
+    handler is currently created for each TestCase class and all test methods
+    are executed sequentially within this class, there should not be more cache
+    misses than with a wider cache, since once a handler stops being used it
+    won't be used again.
+    """
+
+    def __init__(self):
+        self.config = None
+        self.handler = None
+
+    def get(self, config):
+        if config is self.config:
+            return self.handler
+        else:
+            return None
+
+    def set(self, config, handler):
+        self.config = config
+        self.handler = handler
+
+HCACHE = HCache()
+
+
+# XXX a class method on Test ?
+def get_test_db_handler(config):
+    handler = HCACHE.get(config)
+    if handler is not None:
+        return handler
+    sources = config.sources()
+    driver = sources['system']['db-driver']
+    handlerkls = HANDLERS.get(driver, None)
+    if handlerkls is not None:
+        handler = handlerkls(config)
+        HCACHE.set(config, handler)
+        return handler
+    else:
+        raise ValueError('no test database handler for driver %r' % driver)
+
+### compatibility layer ##############################################
+from logilab.common.deprecation import deprecated
+
+@deprecated("please use the new DatabaseHandler mecanism")
+def init_test_database(config=None, configdir='data', apphome=None):
+    """init a test database for a specific driver"""
+    if config is None:
+        config = TestServerConfiguration(apphome=apphome)
+    handler = get_test_db_handler(config)
+    handler.build_db_cache()
+    return handler.get_repo_and_cnx()
+
+
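+# A usage sketch of the new handler mechanism replacing the deprecated helper
+# above (the 'data' appid is the devtools default):
+#
+#   config = TestServerConfiguration('data')
+#   handler = get_test_db_handler(config)
+#   handler.build_db_cache()
+#   repo, cnx = handler.get_repo_and_cnx()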
--- a/devtools/cwwindmill.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/devtools/cwwindmill.py	Thu Mar 24 15:21:13 2011 +0100
@@ -89,11 +89,10 @@
         Instead of toggle `edit_test` value, try `python <test script> -f`
         """
         browser = 'firefox'
-<<<<<<< /home/syt/src/fcubicweb/cubicweb/devtools/cwwindmill.py
 
         edit_test = "-i" in sys.argv # detection for pytest invocation
         # Windmill use case are written with no anonymous user
-        anonymous_logged = False
+        anonymous_allowed = False
 
         tags = CubicWebServerTC.tags & Tags(('windmill',))
 
--- a/devtools/httptest.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/devtools/httptest.py	Thu Mar 24 15:21:13 2011 +0100
@@ -89,12 +89,11 @@
     """Class for running test web server. See :class:`CubicWebServerConfig`.
 
     Class attributes:
-    * `anonymous_logged`: flag telling if anonymous user should be logged-in
-      by default (True by default) XXX (syt) s/logged-in/allowed/ ?
+    * `anonymous_allowed`: flag telling if anonymous browsing should be allowed
     """
     configcls = CubicWebServerConfig
     # anonymous is logged by default in cubicweb test cases
-    anonymous_logged = True
+    anonymous_allowed = True
 
     def start_server(self):
         # use a semaphore to avoid starting test while the http server isn't
@@ -189,6 +188,5 @@
 
     @classmethod
     def init_config(cls, config):
-        if not cls.anonymous_logged:
-            config.anonymous_credential = None
+        config.set_anonymous_allowed(cls.anonymous_allowed)
         super(CubicWebServerTC, cls).init_config(config)
--- a/devtools/test/unittest_httptest.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/devtools/test/unittest_httptest.py	Thu Mar 24 15:21:13 2011 +0100
@@ -42,7 +42,7 @@
 
 class TwistedCWIdentTC(CubicWebServerTC):
 
-    anonymous_logged = False
+    anonymous_allowed = False
     tags = CubicWebServerTC.tags | Tags(('auth',))
 
     def test_response_denied(self):
--- a/devtools/testlib.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/devtools/testlib.py	Thu Mar 24 15:21:13 2011 +0100
@@ -49,7 +49,7 @@
 from cubicweb.server.session import security_enabled
 from cubicweb.server.hook import SendMailOp
 from cubicweb.devtools import SYSTEM_ENTITIES, SYSTEM_RELATIONS, VIEW_VALIDATORS
-from cubicweb.devtools import BASE_URL, fake, htmlparser
+from cubicweb.devtools import BASE_URL, fake, htmlparser, DEFAULT_EMPTY_DB_ID
 from cubicweb.utils import json
 
 # low-level utilities ##########################################################
@@ -61,7 +61,8 @@
     def do_view(self, arg):
         import webbrowser
         data = self._getval(arg)
-        file('/tmp/toto.html', 'w').write(data)
+        with file('/tmp/toto.html', 'w') as toto:
+            toto.write(data)
         webbrowser.open('file:///tmp/toto.html')
 
 def line_context_filter(line_no, center, before=3, after=None):
@@ -83,22 +84,6 @@
         protected_entities = yams.schema.BASE_TYPES.union(SYSTEM_ENTITIES)
     return set(schema.entities()) - protected_entities
 
-def refresh_repo(repo, resetschema=False, resetvreg=False):
-    for pool in repo.pools:
-        pool.close(True)
-    repo.system_source.shutdown()
-    devtools.reset_test_database(repo.config)
-    for pool in repo.pools:
-        pool.reconnect()
-    repo._type_source_cache = {}
-    repo._extid_cache = {}
-    repo.querier._rql_cache = {}
-    for source in repo.sources:
-        source.reset_caches()
-    if resetschema:
-        repo.set_schema(repo.config.load_schema(), resetvreg=resetvreg)
-
-
 # email handling, to test emails sent by an application ########################
 
 MAILBOX = []
@@ -191,6 +176,19 @@
     configcls = devtools.ApptestConfiguration
     reset_schema = reset_vreg = False # reset schema / vreg between tests
     tags = TestCase.tags | Tags('cubicweb', 'cw_repo')
+    test_db_id = DEFAULT_EMPTY_DB_ID
+    _cnxs = set() # established connections
+    _cnx  = None  # current connection
+
+    # Too complicated; the class doesn't need to carry the repo anymore.
+    @classmethod
+    def set_cnx(cls, cnx):
+        cls._cnxs.add(cnx)
+        cls._cnx = cnx
+
+    @property
+    def cnx(self):
+        return self.__class__._cnx
 
     @classproperty
     def config(cls):
@@ -199,6 +197,7 @@
         Configuration is cached on the test class.
         """
         try:
+            assert cls is not CubicWebTC, "Don't use CubicWebTC directly, to prevent database caching issues"
             return cls.__dict__['_config']
         except KeyError:
             home = abspath(join(dirname(sys.modules[cls.__module__].__file__), cls.appid))
@@ -237,35 +236,32 @@
         except: # not in server only configuration
             pass
 
+    #XXX this doesn't need to be a classmethod anymore
     @classmethod
     def _init_repo(cls):
         """init the repository and connection to it.
+        """
+        # setup configuration for test
+        cls.init_config(cls.config)
+        # get or restore a working db.
+        db_handler = devtools.get_test_db_handler(cls.config)
+        db_handler.build_db_cache(cls.test_db_id, cls.pre_setup_database)
 
-        Repository and connection are cached on the test class. Once
-        initialized, we simply reset connections and repository caches.
-        """
-        if not 'repo' in cls.__dict__:
-            cls._build_repo()
-        else:
-            try:
-                cls.cnx.rollback()
-            except ProgrammingError:
-                pass
-            cls._refresh_repo()
-
-    @classmethod
-    def _build_repo(cls):
-        cls.repo, cls.cnx = devtools.init_test_database(config=cls.config)
-        cls.init_config(cls.config)
-        cls.repo.hm.call_hooks('server_startup', repo=cls.repo)
+        cls.repo, cnx = db_handler.get_repo_and_cnx(cls.test_db_id)
+        # no direct assignation to cls.cnx anymore.
+        # cnx is now an instance property that uses a class-protected attribute.
+        cls.set_cnx(cnx)
         cls.vreg = cls.repo.vreg
-        cls.websession = DBAPISession(cls.cnx, cls.admlogin)
-        cls._orig_cnx = (cls.cnx, cls.websession)
+        cls.websession = DBAPISession(cnx, cls.admlogin)
+        cls._orig_cnx = (cnx, cls.websession)
         cls.config.repository = lambda x=None: cls.repo
 
-    @classmethod
-    def _refresh_repo(cls):
-        refresh_repo(cls.repo, cls.reset_schema, cls.reset_vreg)
+    def _close_cnx(self):
+        for cnx in list(self._cnxs):
+            if not cnx._closed:
+                cnx.rollback()
+                cnx.close()
+            self._cnxs.remove(cnx)
 
     # global resources accessors ###############################################
 
@@ -307,34 +303,47 @@
 
     def setUp(self):
         # monkey patch send mail operation so emails are sent synchronously
-        self._old_mail_postcommit_event = SendMailOp.postcommit_event
-        SendMailOp.postcommit_event = SendMailOp.sendmails
+        self._patch_SendMailOp()
         pause_tracing()
         previous_failure = self.__class__.__dict__.get('_repo_init_failed')
         if previous_failure is not None:
             self.skipTest('repository is not initialised: %r' % previous_failure)
         try:
             self._init_repo()
+            self.addCleanup(self._close_cnx)
         except Exception, ex:
             self.__class__._repo_init_failed = ex
             raise
         resume_tracing()
-        self._cnxs = []
         self.setup_database()
         self.commit()
         MAILBOX[:] = [] # reset mailbox
 
     def tearDown(self):
-        if not self.cnx._closed:
-            self.cnx.rollback()
-        for cnx in self._cnxs:
-            if not cnx._closed:
-                cnx.close()
-        SendMailOp.postcommit_event = self._old_mail_postcommit_event
+        # XXX hack until logilab.common.testlib is fixed
+        while self._cleanups:
+            cleanup, args, kwargs = self._cleanups.pop(-1)
+            cleanup(*args, **kwargs)
+
+    def _patch_SendMailOp(self):
+        # monkey patch send mail operation so emails are sent synchronously
+        _old_mail_postcommit_event = SendMailOp.postcommit_event
+        SendMailOp.postcommit_event = SendMailOp.sendmails
+        def reverse_SendMailOp_monkey_patch():
+            SendMailOp.postcommit_event = _old_mail_postcommit_event
+        self.addCleanup(reverse_SendMailOp_monkey_patch)
 
     def setup_database(self):
         """add your database setup code by overriding this method"""
 
+    @classmethod
+    def pre_setup_database(cls, session, config):
+        """add your pre database setup code by overriding this method
+
+        Do not forget to set the cls.test_db_id value to enable caching of the
+        result.
+        """
+
     # user / session management ###############################################
 
     def user(self, req=None):
@@ -371,9 +380,8 @@
         autoclose = kwargs.pop('autoclose', True)
         if not kwargs:
             kwargs['password'] = str(login)
-        self.cnx = repo_connect(self.repo, unicode(login), **kwargs)
+        self.set_cnx(repo_connect(self.repo, unicode(login), **kwargs))
         self.websession = DBAPISession(self.cnx)
-        self._cnxs.append(self.cnx)
         if login == self.vreg.config.anonymous_user()[0]:
             self.cnx.anonymous_connection = True
         if autoclose:
@@ -384,11 +392,8 @@
         if not self.cnx is self._orig_cnx[0]:
             if not self.cnx._closed:
                 self.cnx.close()
-            try:
-                self._cnxs.remove(self.cnx)
-            except ValueError:
-                pass
-        self.cnx, self.websession = self._orig_cnx
+        cnx, self.websession = self._orig_cnx
+        self.set_cnx(cnx)
 
     # db api ##################################################################
 
@@ -660,6 +665,10 @@
     def init_authentication(self, authmode, anonuser=None):
         self.set_option('auth-mode', authmode)
         self.set_option('anonymous-user', anonuser)
+        if anonuser is None:
+            self.config.anonymous_credential = None
+        else:
+            self.config.anonymous_credential = (anonuser, anonuser)
         req = self.request()
         origsession = req.session
         req.session = req.cnx = None
@@ -950,6 +959,8 @@
     """base class for test with auto-populating of the database"""
     __abstract__ = True
 
+    test_db_id = 'autopopulate'
+
     tags = CubicWebTC.tags | Tags('autopopulated')
 
     pdbclass = CubicWebDebugger
@@ -1083,7 +1094,9 @@
     tags = AutoPopulateTest.tags | Tags('web', 'generated')
 
     def setUp(self):
-        AutoPopulateTest.setUp(self)
+        assert self.__class__ is not AutomaticWebTest, 'Please subclass AutomaticWebTest to prevent database caching issues'
+        super(AutomaticWebTest, self).setUp()
+
         # access to self.app for proper initialization of the authentication
         # machinery (else some views may fail)
         self.app
--- a/doc/book/en/admin/setup.rst	Fri Mar 11 12:17:32 2011 +0100
+++ b/doc/book/en/admin/setup.rst	Thu Mar 24 15:21:13 2011 +0100
@@ -36,17 +36,17 @@
 Depending on the distribution you are using, add the appropriate line to your
 list of sources (for example by editing ``/etc/apt/sources.list``).
 
-For Debian Lenny::
+For Debian Squeeze (stable)::
 
-  deb http://ftp.logilab.org/dists/ lenny/
+  deb http://download.logilab.org/production/ squeeze/
 
-For Debian Sid::
+For Debian Sid (unstable)::
 
-  deb http://ftp.logilab.org/dists/ sid/
+  deb http://download.logilab.org/production/ sid/
 
-For Ubuntu Hardy::
+For Ubuntu Lucid (Long Term Support)::
 
-  deb http://ftp.logilab.org/dists/ hardy/
+  deb http://download.logilab.org/production/ lucid/
 
 
 You can now install the required packages with the following command::
@@ -75,11 +75,11 @@
 "apt-get update":
 
 1. become root using sudo
-2. download http://ftp.logilab.org/dists/logilab-dists-key.asc using e.g. wget
+2. download http://download.logilab.org/logilab-dists-key.asc using e.g. wget
 3. run "apt-key add logilab-dists-key.asc"
 4. re-run apt-get update (manually or through the package manager, whichever you prefer)
 
-.. _`Logilab's gnupg key`: http://ftp.logilab.org/dists/logilab-dists-key.asc
+.. _`Logilab's gnupg key`: http://download.logilab.org/logilab-dists-key.asc
 .. _`CubicWeb.org Forge`: http://www.cubicweb.org/project/
 .. _`cubicweb with other database`: DatabaseInstallation_
 .. _`cubicweb with postgresql datatabase` : PostgresqlConfiguration_
@@ -169,11 +169,11 @@
 
 .. _TarballInstallation:
 
-You can download the archive containing the sources from our `ftp site`_ at::
+You can download the archive containing the sources from our `download site`_ at::
 
-  http://ftp.logilab.org/pub/cubicweb/
+  http://download.logilab.org/pub/cubicweb/
 
-.. _`ftp site`: http://ftp.logilab.org/pub/cubicweb/
+.. _`download site`: http://download.logilab.org/pub/cubicweb/
 
 Make sure you also have all the :ref:`InstallDependencies`.
 
@@ -268,12 +268,12 @@
 
 A windows compiled recent version of gettext::
 
-  http://ftp.logilab.org/pub/gettext/gettext-0.17-win32-setup.exe
+  http://download.logilab.org/pub/gettext/gettext-0.17-win32-setup.exe
 
 A pre-compiled version of rql for windows (take care of retrieving the
 most recent version available there)::
 
-  http://ftp.logilab.org/pub/rql/rql-0.23.0.win32-py2.5.exe
+  http://download.logilab.org/pub/rql/rql-0.23.0.win32-py2.5.exe
 
 Pyro enables remote access to cubicweb repository instances. Get it there::
 
--- a/doc/book/en/annexes/rql/debugging.rst	Fri Mar 11 12:17:32 2011 +0100
+++ b/doc/book/en/annexes/rql/debugging.rst	Thu Mar 24 15:21:13 2011 +0100
@@ -33,7 +33,7 @@
 Enable verbose output
 ~~~~~~~~~~~~~~~~~~~~~
 
-It may be interested to enable a verboser output to debug your RQL statements:
+To debug your RQL statements, it can be useful to enable a verbose output:
 
 .. sourcecode:: python
 
--- a/doc/book/en/annexes/rql/language.rst	Fri Mar 11 12:17:32 2011 +0100
+++ b/doc/book/en/annexes/rql/language.rst	Thu Mar 24 15:21:13 2011 +0100
@@ -153,7 +153,7 @@
 - Aggregate Functions: COUNT, MIN, MAX, AVG, SUM, GROUP_CONCAT
 
 Having
-``````
+```````
 
 The HAVING clause, as in SQL, has been originally introduced to restrict a query
 according to value returned by an aggregate function, e.g.::
@@ -214,7 +214,12 @@
 
 
 Exists
-``````
+```````
+
+You can use `EXISTS` when you want to know if some expression is true and do not
+need the complete set of elements that make it true. Testing for existence is
+much faster than fetching the complete set of results.
+
 ::
 
     Any X ORDERBY PN,N
--- a/doc/book/en/devrepo/testing.rst	Fri Mar 11 12:17:32 2011 +0100
+++ b/doc/book/en/devrepo/testing.rst	Thu Mar 24 15:21:13 2011 +0100
@@ -292,6 +292,27 @@
   Take care to not let the imported `AutomaticWebTest` in your test module
   namespace, else both your subclass *and* this parent class will be run.
 
+Cache heavy database setup
+--------------------------
+
+Some test suites require a complex database setup that takes seconds (or even
+minutes) to complete. Doing the whole setup for each individual test makes the
+whole run very slow. The ``CubicWebTC`` class offers a simple way to prepare a
+specific database once for multiple tests. The `test_db_id` class attribute of
+your ``CubicWebTC`` must be set to a unique identifier and the
+:meth:`pre_setup_database` class method builds the cached content. As the
+:meth:`pre_setup_database` method is not guaranteed to be called, you must not
+set there any class attribute to be used during tests. Databases for each
+`test_db_id` are automatically created if not already in cache. Clearing the
+cache is up to the user. Cache files are found in the :file:`data/database`
+subdirectory of your test directory.
+
+.. warning::
+
+  Take care to always have the same :meth:`pre_setup_database` function for all
+  call with a given `test_db_id` otherwise you test will have unpredictable
+  result given the first encountered one.
+
 Testing on a real-life database
 -------------------------------
 
--- a/doc/book/en/tutorials/advanced/part03_bfss.rst	Fri Mar 11 12:17:32 2011 +0100
+++ b/doc/book/en/tutorials/advanced/part03_bfss.rst	Thu Mar 24 15:21:13 2011 +0100
@@ -23,20 +23,20 @@
     from cubicweb.server.sources import storage
 
     class ServerStartupHook(hook.Hook):
-	__regid__ = 'sytweb.serverstartup'
-	events = ('server_startup', 'server_maintenance')
+        __regid__ = 'sytweb.serverstartup'
+        events = ('server_startup', 'server_maintenance')
 
-	def __call__(self):
-	    bfssdir = join(self.repo.config.appdatahome, 'bfss')
-	    if not exists(bfssdir):
-		makedirs(bfssdir)
-		print 'created', bfssdir
-	    storage = storages.BytesFileSystemStorage(bfssdir)
-	    set_attribute_storage(self.repo, 'File', 'data', storage)
+        def __call__(self):
+            bfssdir = join(self.repo.config.appdatahome, 'bfss')
+            if not exists(bfssdir):
+                makedirs(bfssdir)
+                print 'created', bfssdir
+            storage = storages.BytesFileSystemStorage(bfssdir)
+            set_attribute_storage(self.repo, 'File', 'data', storage)
 
 .. Note::
 
-  * how we built the hook's registry identifier (_`_regid__`): you can introduce
+  * how we built the hook's registry identifier (`__regid__`): you can introduce
     'namespaces' by using there python module like naming identifiers. This is
     especially import for hooks where you usually want a new custom hook, not
     overriding / specializing an existant one, but the concept may be applied to
@@ -50,48 +50,48 @@
   * the path given to the storage is the place where file added through the ui
     (or in the database before migration) will be located
 
-  * be ware that by doing this, you can't anymore write queries that will try to
+  * beware that by doing this, you can't anymore write queries that will try to
     restrict on File `data` attribute. Hopefuly we don't do that usually
     on file's content or more generally on attributes for the Bytes type
 
 Now, if you've already added some photos through the web ui, you'll have to
 migrate existing data so file's content will be stored on the file-system instead
 of the database. There is a migration command to do so, let's run it in the
-cubicweb shell (in actual life, you'd have to put it in a migration script as we
-seen last time):
+cubicweb shell (in real life, you would have to put it in a migration script as we
+have seen last time):
 
 ::
 
    $ cubicweb-ctl shell sytweb
-    entering the migration python shell
-    just type migration commands or arbitrary python code and type ENTER to execute it
-    type "exit" or Ctrl-D to quit the shell and resume operation
-    >>> storage_changed('File', 'data')
-    [........................]
+   entering the migration python shell
+   just type migration commands or arbitrary python code and type ENTER to execute it
+   type "exit" or Ctrl-D to quit the shell and resume operation
+   >>> storage_changed('File', 'data')
+   [........................]
 
 
-That's it. Now, file added through the web ui will have their content stored on
+That's it. Now, files added through the web ui will have their content stored on
 the file-system, and you'll also be able to import files from the file-system as
 explained in the next part.
 
 Step 2: importing some data into the instance
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-Hey, we start to have some nice features, let give us a try on this new web
+Hey, we start to have some nice features, let us try out this new web
 site. For instance if I have a 'photos/201005WePyrenees' containing pictures for
 a particular event, I can import it to my web site by typing ::
 
   $ cubicweb-ctl fsimport -F sytweb photos/201005WePyrenees/
   ** importing directory /home/syt/photos/201005WePyrenees
-    importing IMG_8314.JPG
-    importing IMG_8274.JPG
-    importing IMG_8286.JPG
-    importing IMG_8308.JPG
-    importing IMG_8304.JPG
+  importing IMG_8314.JPG
+  importing IMG_8274.JPG
+  importing IMG_8286.JPG
+  importing IMG_8308.JPG
+  importing IMG_8304.JPG
 
 .. Note::
-  The -F option tell that folders should be mapped, hence my photos will be
-  all under a Folder entity corresponding to the file-system folder.
+  The -F option means that folders should be mapped, hence my photos will be
+  linked to a Folder entity corresponding to the file-system folder.
 
 Let's take a look at the web ui:
 
@@ -103,11 +103,11 @@
 
 .. image:: ../../images/tutos-photowebsite_ui2.png
 
-Yeah, it's there! You can also notice that I can see some entities as well as
+Yeah, it's there! You will notice that I can see some entities as well as
 folders and images the anonymous user can't. It just works **everywhere in the
 ui** since it's handled at the repository level, thanks to our security model.
 
-Now if I click on the newly inserted folder, I can see
+Now if I click on the recently inserted folder, I can see
 
 .. image:: ../../images/tutos-photowebsite_ui3.png
 
@@ -124,7 +124,7 @@
 We started to see here an advanced feature of our repository: the ability
 to store some parts of our data-model into a custom storage, outside the
 database. There is currently only the :class:`BytesFileSystemStorage` available,
-but you can expect to see more coming in a near future (our write your own!).
+but you can expect to see more coming in a near future (or write your own!).
 
 Also, we can know start to feed our web-site with some nice pictures!
 The site isn't perfect (far from it actually) but it's usable, and we can
--- a/entities/test/unittest_wfobjs.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/entities/test/unittest_wfobjs.py	Thu Mar 24 15:21:13 2011 +0100
@@ -127,6 +127,7 @@
         # fetch the entity using the new session
         trs = list(cnx.user().cw_adapt_to('IWorkflowable').possible_transitions())
         self.assertEqual(len(trs), 0)
+        cnx.close()
 
     def _test_manager_deactivate(self, user):
         iworkflowable = user.cw_adapt_to('IWorkflowable')
@@ -211,6 +212,7 @@
         with self.assertRaises(ValidationError) as cm:
             iworkflowable.fire_transition('activate')
         self.assertEqual(cm.exception.errors, {'by_transition-subject': "transition may not be fired"})
+        cnx.close()
 
     def test_fire_transition_owned_by(self):
         self.execute('INSERT RQLExpression X: X exprtype "ERQLExpression", '
--- a/hooks/test/unittest_syncschema.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/hooks/test/unittest_syncschema.py	Thu Mar 24 15:21:13 2011 +0100
@@ -30,18 +30,10 @@
 class SchemaModificationHooksTC(CubicWebTC):
     reset_schema = True
 
-    @classmethod
-    def init_config(cls, config):
-        super(SchemaModificationHooksTC, cls).init_config(config)
-        # we have to read schema from the database to get eid for schema entities
-        config._cubes = None
-        cls.repo.fill_schema()
-        cls.schema_eids = schema_eids_idx(cls.repo.schema)
-
-    @classmethod
-    def _refresh_repo(cls):
-        super(SchemaModificationHooksTC, cls)._refresh_repo()
-        restore_schema_eids_idx(cls.repo.schema, cls.schema_eids)
+    def setUp(self):
+        super(SchemaModificationHooksTC, self).setUp()
+        self.repo.fill_schema()
+        self.__class__.schema_eids = schema_eids_idx(self.repo.schema)
 
     def index_exists(self, etype, attr, unique=False):
         self.session.set_pool()
--- a/migration.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/migration.py	Thu Mar 24 15:21:13 2011 +0100
@@ -434,6 +434,9 @@
         if exists(newconfig):
             os.unlink(newconfig)
 
+    # these are overridden by set_log_methods below
+    # only defining here to prevent pylint from complaining
+    info = warning = error = critical = exception = debug = lambda msg, *a, **kw: None
 
 from logging import getLogger
 from cubicweb import set_log_methods
--- a/misc/migration/3.10.9_Any.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/misc/migration/3.10.9_Any.py	Thu Mar 24 15:21:13 2011 +0100
@@ -1,27 +1,29 @@
 from __future__ import with_statement
 import sys
 
-# fix some corrupted entities noticed on several instances
-rql('DELETE CWConstraint X WHERE NOT E constrained_by X')
-rql('SET X is_instance_of Y WHERE X is Y, NOT X is_instance_of Y')
-commit()
+
+if confirm('fix some corrupted entities noticed on several instances?'):
+    rql('DELETE CWConstraint X WHERE NOT E constrained_by X')
+    rql('SET X is_instance_of Y WHERE X is Y, NOT X is_instance_of Y')
+    commit()
 
 if confirm('fix existing cwuri?'):
-    from logilab.common.shellutils import ProgressBar
+    from logilab.common.shellutils import progress
     from cubicweb.server.session import hooks_control
     rset = rql('Any X, XC WHERE X cwuri XC, X cwuri ~= "%/eid/%"')
-    if sys.stdout.isatty():
-        pb = ProgressBar(nbops=rset.rowcount, size=70)
-    else:
-        pb = None
-    with hooks_control(session, session.HOOKS_DENY_ALL, 'integrity'):
-        for i,  e in enumerate(rset.entities()):
-            e.set_attributes(cwuri=e.cwuri.replace('/eid', ''))
-            if i % 100: # commit every 100 entities to limit memory consumption
-                commit(ask_confirm=False)
-            if pb is not None:
+    title = "%i entites to fix" % len(rset)
+    nbops = rset.rowcount
+    enabled = interactive_mode
+    with progress(title=title, nbops=nbops, size=30, enabled=enabled) as pb:
+        with hooks_control(session, session.HOOKS_DENY_ALL, 'integrity'):
+            for i, row in enumerate(rset):
+                data = {'eid': row[0], 'cwuri': row[1].replace(u'/eid', u'')}
+                rql('SET X cwuri %(cwuri)s WHERE X eid %(eid)s', data)
+                if not i % 100: # commit every 100 entities to limit memory consumption
+                    pb.text = "%i committed" % i
+                    commit(ask_confirm=False)
                 pb.update()
-    commit(ask_confirm=False)
+        commit(ask_confirm=False)
 
 try:
     from cubicweb import devtools
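The rewrite above assumes logilab.common.shellutils.progress is a context
manager taking title/nbops/size/enabled keywords and yielding a bar object with
an update() method and a writable text attribute; that protocol is inferred from
this hunk alone. A stand-in making the assumed protocol explicit (not the
library code):

from contextlib import contextmanager
import sys

@contextmanager
def progress(title='', nbops=0, size=30, enabled=True):
    class _Bar(object):
        text = ''   # callers may update this, as the migration script does
        done = 0
        def update(self):
            self.done += 1
            if enabled:
                sys.stderr.write('\r%s [%s/%s] %s' % (title, self.done, nbops, self.text))
    yield _Bar()
    if enabled:
        sys.stderr.write('\n')  # the real bar also draws a gauge of `size` chars
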
--- a/rtags.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/rtags.py	Thu Mar 24 15:21:13 2011 +0100
@@ -160,6 +160,9 @@
             return self.get(etype, rtype, ttype, role)
         return self.get(ttype, rtype, etype, role)
 
+    # these are overridden by set_log_methods below
+    # only defining here to prevent pylint from complaining
+    info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
 
 
 class RelationTagsSet(RelationTags):
--- a/schema.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/schema.py	Thu Mar 24 15:21:13 2011 +0100
@@ -797,6 +797,7 @@
 
 
 class RQLExpression(object):
+
     def __init__(self, expression, mainvars, eid):
         self.eid = eid # eid of the entity representing this rql expression
         if not isinstance(mainvars, unicode):
@@ -943,6 +944,10 @@
     def minimal_rql(self):
         return 'Any %s WHERE %s' % (self.mainvars, self.expression)
 
+    # these are overridden by set_log_methods below
+    # only defining here to prevent pylint from complaining
+    info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
+
 
 class ERQLExpression(RQLExpression):
     def __init__(self, expression, mainvars=None, eid=None):
@@ -1103,6 +1108,9 @@
         """called when a file without handler associated has been found"""
         self.warning('ignoring file %r', filepath)
 
+    # these are overridden by set_log_methods below
+    # only defining here to prevent pylint from complaining
+    info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
 
 class CubicWebSchemaLoader(BootstrapSchemaLoader):
     """cubicweb specific schema loader, automatically adding metadata to the
@@ -1140,6 +1148,9 @@
                 self.info('loading %s', filepath)
                 self.handle_file(filepath)
 
+    # these are overridden by set_log_methods below
+    # only defining here to prevent pylint from complaining
+    info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
 
 set_log_methods(CubicWebSchemaLoader, getLogger('cubicweb.schemaloader'))
 set_log_methods(BootstrapSchemaLoader, getLogger('cubicweb.bootstrapschemaloader'))
--- a/server/__init__.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/server/__init__.py	Thu Mar 24 15:21:13 2011 +0100
@@ -62,9 +62,9 @@
         DEBUG |= debugmode
 
 class debugged(object):
-    """repository debugging context manager / decorator
+    """Context manager and decorator to help debug the repository.
 
-    Can be used either as a context manager:
+    It can be used either as a context manager:
 
     >>> with debugged(server.DBG_RQL | server.DBG_REPO):
     ...     # some code in which you want to debug repository activity,
@@ -77,8 +77,8 @@
     ...     # some code in which you want to debug repository activity,
     ...     # seeing information about RQL being executed and repository events
 
-    debug mode will be reseted at its original value when leaving the "with"
-    block or the decorated function
+    The debug mode will be reset to its original value when leaving the "with"
+    block or the decorated function.
     """
     def __init__(self, debugmode):
         self.debugmode = debugmode
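For readers outside the repository sources, the dual context-manager/decorator
behaviour this docstring describes takes only an __enter__/__exit__ pair plus
__call__. The following is a sketch of the idea under that assumption, not the
committed class (DEBUG stands in for the module's flag):

DEBUG = 0

class debugged_sketch(object):
    def __init__(self, debugmode):
        self.debugmode = debugmode
    def __enter__(self):
        global DEBUG
        self._orig, DEBUG = DEBUG, self.debugmode
    def __exit__(self, exctype, exc, tb):
        global DEBUG
        DEBUG = self._orig   # restore the original debug mode
        return False         # do not swallow exceptions
    def __call__(self, func):
        def wrapped(*args, **kwargs):
            with self:
                return func(*args, **kwargs)
        return wrapped
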
--- a/server/hook.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/server/hook.py	Thu Mar 24 15:21:13 2011 +0100
@@ -454,6 +454,9 @@
     order = 0
     # XXX deprecated
     enabled = True
+    # stop pylint from complaining about missing attributes in Hooks classes
+    eidfrom = eidto = entity = rtype = None
+
 
     @classmethod
     def check_events(cls):
@@ -757,6 +760,10 @@
     def config(self):
         return self.session.repo.config
 
+    # these are overridden by set_log_methods below
+    # only defining here to prevent pylint from complaining
+    info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
+
 set_log_methods(Operation, getLogger('cubicweb.session'))
 
 def _container_add(container, value):
--- a/server/mssteps.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/server/mssteps.py	Thu Mar 24 15:21:13 2011 +0100
@@ -162,6 +162,7 @@
 
     def get_sql(self):
         self.inputmap = inputmap = self.children[-1].outputmap
+        dbhelper = self.plan.syssource.dbhelper
         # get the select clause
         clause = []
         for i, term in enumerate(self.selection):
@@ -218,12 +219,16 @@
                         if not vref.name in grouped:
                             sql[-1] += ', ' + self.inputmap[vref.name]
                             grouped.add(vref.name)
-            sql.append('ORDER BY %s' % ', '.join(clause))
-        if self.limit:
-            sql.append('LIMIT %s' % self.limit)
-        if self.offset:
-            sql.append('OFFSET %s' % self.offset)
-        return ' '.join(sql)
+            sql = dbhelper.sql_add_order_by(' '.join(sql),
+                                            clause,
+                                            None, False,
+                                            self.limit or self.offset)
+        else:
+            sql = ' '.join(sql)
+            clause = None
+
+        sql = dbhelper.sql_add_limit_offset(sql, self.limit, self.offset, clause)
+        return sql
 
     def visit_function(self, function):
         """generate SQL name for a function"""
--- a/server/querier.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/server/querier.py	Thu Mar 24 15:21:13 2011 +0100
@@ -747,6 +747,10 @@
         # return a result set object
         return ResultSet(results, rql, args, descr, orig_rqlst)
 
+    # these are overridden by set_log_methods below
+    # only defining here to prevent pylint from complaining
+    info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
+
 from logging import getLogger
 from cubicweb import set_log_methods
 LOGGER = getLogger('cubicweb.querier')
--- a/server/repository.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/server/repository.py	Thu Mar 24 15:21:13 2011 +0100
@@ -1458,6 +1458,9 @@
                    if not source is self.system_source
                    and source.support_relation(rtype))
 
+    # these are overridden by set_log_methods below
+    # only defining here to prevent pylint from complaining
+    info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
 
 def pyro_unregister(config):
     """unregister the repository from the pyro name server"""
--- a/server/session.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/server/session.py	Thu Mar 24 15:21:13 2011 +0100
@@ -1032,6 +1032,10 @@
         """return a result set for the given eid"""
         return self.entity_from_eid(eid)
 
+    # these are overridden by set_log_methods below
+    # only defining here to prevent pylint from complaining
+    info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
+
 
 class InternalSession(Session):
     """special session created internaly by the repository"""
--- a/server/sources/rql2sql.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/server/sources/rql2sql.py	Thu Mar 24 15:21:13 2011 +0100
@@ -620,24 +620,30 @@
                 sql += '\nHAVING %s' % having
             # sort
             if sorts:
-                sqlsortterms = [self._sortterm_sql(sortterm, fselectidx)
-                                for sortterm in sorts]
-                sqlsortterms = [x for x in sqlsortterms if x is not None]
+                sqlsortterms = []
+                for sortterm in sorts:
+                    _term = self._sortterm_sql(sortterm, fselectidx)
+                    if _term is not None:
+                        sqlsortterms.append(_term)
                 if sqlsortterms:
-                    sql += '\nORDER BY %s' % ','.join(sqlsortterms)
-                    if sorts and fneedwrap:
-                        selection = ['T1.C%s' % i for i in xrange(len(origselection))]
-                        sql = 'SELECT %s FROM (%s) AS T1' % (','.join(selection), sql)
+                    sql = self.dbhelper.sql_add_order_by(sql, sqlsortterms,
+                                                         origselection,
+                                                         fneedwrap,
+                                                         select.limit or select.offset)
+                    ## sql += '\nORDER BY %s' % ','.join(sqlsortterms)
+                    ## if sorts and fneedwrap:
+                    ##     selection = ['T1.C%s' % i for i in xrange(len(origselection))]
+                    ##     sql = 'SELECT %s FROM (%s) AS T1' % (','.join(selection), sql)
+            else:
+                sqlsortterms = None
             state.finalize_source_cbs()
         finally:
             select.selection = origselection
         # limit / offset
-        limit = select.limit
-        if limit:
-            sql += '\nLIMIT %s' % limit
-        offset = select.offset
-        if offset:
-            sql += '\nOFFSET %s' % offset
+        sql = self.dbhelper.sql_add_limit_offset(sql,
+                                                 select.limit,
+                                                 select.offset,
+                                                 sqlsortterms)
         return sql
 
     def _subqueries_sql(self, select, state):
--- a/server/sqlutils.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/server/sqlutils.py	Thu Mar 24 15:21:13 2011 +0100
@@ -284,6 +284,9 @@
         attrs[SQL_PREFIX+'eid'] = entity.eid
         return attrs
 
+    # these are overridden by set_log_methods below
+    # only defining here to prevent pylint from complaining
+    info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
 
 from logging import getLogger
 from cubicweb import set_log_methods
--- a/server/test/unittest_ldapuser.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/server/test/unittest_ldapuser.py	Thu Mar 24 15:21:13 2011 +0100
@@ -24,10 +24,11 @@
 import subprocess
 from socket import socket, error as socketerror
 
-from logilab.common.testlib import TestCase, unittest_main, mock_object
+from logilab.common.testlib import TestCase, unittest_main, mock_object, Tags
 from cubicweb.devtools.testlib import CubicWebTC
 from cubicweb.devtools.repotest import RQLGeneratorTC
 from cubicweb.devtools.httptest import get_available_port
+from cubicweb.devtools import get_test_db_handler
 
 from cubicweb.server.sources.ldapuser import *
 
@@ -64,26 +65,10 @@
 
 def setUpModule(*args):
     create_slapd_configuration(LDAPUserSourceTC.config)
-    global repo
-    try:
-        LDAPUserSourceTC._init_repo()
-        repo = LDAPUserSourceTC.repo
-        add_ldap_source(LDAPUserSourceTC.cnx)
-    except:
-        terminate_slapd()
-        raise
 
 def tearDownModule(*args):
-    global repo
-    repo.shutdown()
-    del repo
     terminate_slapd()
 
-def add_ldap_source(cnx):
-    cnx.request().create_entity('CWSource', name=u'ldapuser', type=u'ldapuser',
-                                config=CONFIG)
-    cnx.commit()
-
 def create_slapd_configuration(config):
     global slapd_process, CONFIG
     basedir = join(config.apphome, "ldapdb")
@@ -127,10 +112,19 @@
             os.kill(slapd_process.pid, signal.SIGTERM)
         slapd_process.wait()
         print "DONE"
-
     del slapd_process
 
 class LDAPUserSourceTC(CubicWebTC):
+    test_db_id = 'ldap-user'
+    tags = CubicWebTC.tags | Tags('ldap')
+
+    @classmethod
+    def pre_setup_database(cls, session, config):
+        session.create_entity('CWSource', name=u'ldapuser', type=u'ldapuser',
+                              config=CONFIG)
+        session.commit()
+        # XXX keep it there
+        session.execute('CWUser U')
 
     def patch_authenticate(self):
         self._orig_authenticate = LDAPUserSource.authenticate
@@ -275,14 +269,16 @@
         self.session.create_entity('CWGroup', name=u'bougloup2')
         self.sexecute('SET U in_group G WHERE G name ~= "bougloup%", U login "admin"')
         self.sexecute('SET U in_group G WHERE G name = "bougloup1", U login %(syt)s', {'syt': SYT})
-        rset = self.sexecute('Any L,SN ORDERBY L WHERE X in_state S, S name SN, X login L, EXISTS(X in_group G, G name ~= "bougloup%")')
+        rset = self.sexecute('Any L,SN ORDERBY L WHERE X in_state S, '
+                             'S name SN, X login L, EXISTS(X in_group G, G name ~= "bougloup%")')
         self.assertEqual(rset.rows, [['admin', 'activated'], [SYT, 'activated']])
 
     def test_exists2(self):
         self.create_user('comme')
         self.create_user('cochon')
         self.sexecute('SET X copain Y WHERE X login "comme", Y login "cochon"')
-        rset = self.sexecute('Any GN ORDERBY GN WHERE X in_group G, G name GN, (G name "managers" OR EXISTS(X copain T, T login in ("comme", "cochon")))')
+        rset = self.sexecute('Any GN ORDERBY GN WHERE X in_group G, G name GN, '
+                             '(G name "managers" OR EXISTS(X copain T, T login in ("comme", "cochon")))')
         self.assertEqual(rset.rows, [['managers'], ['users']])
 
     def test_exists3(self):
@@ -292,7 +288,8 @@
         self.failUnless(self.sexecute('Any X, Y WHERE X copain Y, X login "comme", Y login "cochon"'))
         self.sexecute('SET X copain Y WHERE X login %(syt)s, Y login "cochon"', {'syt': SYT})
         self.failUnless(self.sexecute('Any X, Y WHERE X copain Y, X login %(syt)s, Y login "cochon"', {'syt': SYT}))
-        rset = self.sexecute('Any GN,L WHERE X in_group G, X login L, G name GN, G name "managers" OR EXISTS(X copain T, T login in ("comme", "cochon"))')
+        rset = self.sexecute('Any GN,L WHERE X in_group G, X login L, G name GN, G name "managers" '
+                             'OR EXISTS(X copain T, T login in ("comme", "cochon"))')
         self.assertEqual(sorted(rset.rows), [['managers', 'admin'], ['users', 'comme'], ['users', SYT]])
 
     def test_exists4(self):
@@ -397,8 +394,10 @@
 
     def test_nonregr5(self):
         # original jpl query:
-        # Any X, NOW - CD, P WHERE P is Project, U interested_in P, U is CWUser, U login "sthenault", X concerns P, X creation_date CD ORDERBY CD DESC LIMIT 5
-        rql = 'Any X, NOW - CD, P ORDERBY CD DESC LIMIT 5 WHERE P bookmarked_by U, U login "%s", P is X, X creation_date CD' % self.session.user.login
+        # Any X, NOW - CD, P WHERE P is Project, U interested_in P, U is CWUser,
+        # U login "sthenault", X concerns P, X creation_date CD ORDERBY CD DESC LIMIT 5
+        rql = ('Any X, NOW - CD, P ORDERBY CD DESC LIMIT 5 WHERE P bookmarked_by U, '
+               'U login "%s", P is X, X creation_date CD') % self.session.user.login
         self.sexecute(rql, )#{'x': })
 
     def test_nonregr6(self):
@@ -445,9 +444,20 @@
 
 class RQL2LDAPFilterTC(RQLGeneratorTC):
 
+    tags = RQLGeneratorTC.tags | Tags('ldap')
+
+    @property
+    def schema(self):
+        """return the application schema"""
+        return self._schema
+
     def setUp(self):
-        self.schema = repo.schema
-        RQLGeneratorTC.setUp(self)
+        self.handler = get_test_db_handler(LDAPUserSourceTC.config)
+        self.handler.build_db_cache('ldap-user', LDAPUserSourceTC.pre_setup_database)
+        self.handler.restore_database('ldap-user')
+        self._repo = repo = self.handler.get_repo()
+        self._schema = repo.schema
+        super(RQL2LDAPFilterTC, self).setUp()
         ldapsource = repo.sources[-1]
         self.pool = repo._get_pool()
         session = mock_object(pool=self.pool)
@@ -455,8 +465,8 @@
         self.ldapclasses = ''.join(ldapsource.base_filters)
 
     def tearDown(self):
-        repo._free_pool(self.pool)
-        RQLGeneratorTC.tearDown(self)
+        self._repo.turn_repo_off()
+        super(RQL2LDAPFilterTC, self).tearDown()
 
     def test_base(self):
         rqlst = self._prepare('CWUser X WHERE X login "toto"').children[0]
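The moves above replace module-level repository setup with the devtools
database-cache handler; the calls involved, in outline (API as used in this
patch, with LDAPUserSourceTC taken from the file above):

from cubicweb.devtools import get_test_db_handler

def prepare_ldap_fixture(config):
    # config is a test configuration such as LDAPUserSourceTC.config
    handler = get_test_db_handler(config)
    # build the 'ldap-user' cache once, via the class-level setup hook...
    handler.build_db_cache('ldap-user', LDAPUserSourceTC.pre_setup_database)
    # ...then restore cheaply from that cache whenever a test needs it
    handler.restore_database('ldap-user')
    return handler.get_repo()
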
--- a/server/test/unittest_migractions.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/server/test/unittest_migractions.py	Thu Mar 24 15:21:13 2011 +0100
@@ -23,7 +23,7 @@
 from datetime import date
 from os.path import join
 
-from logilab.common.testlib import TestCase, unittest_main
+from logilab.common.testlib import TestCase, unittest_main, Tags, tag
 
 from yams.constraints import UniqueConstraint
 
@@ -37,14 +37,18 @@
 def tearDownModule(*args):
     global migrschema
     del migrschema
-    del MigrationCommandsTC.origschema
+    if hasattr(MigrationCommandsTC, 'origschema'):
+        del MigrationCommandsTC.origschema
 
 class MigrationCommandsTC(CubicWebTC):
 
+    tags = CubicWebTC.tags | Tags(('server', 'migration', 'migractions'))
+
     @classmethod
-    def init_config(cls, config):
-        super(MigrationCommandsTC, cls).init_config(config)
+    def _init_repo(cls):
+        super(MigrationCommandsTC, cls)._init_repo()
         # we have to read schema from the database to get eid for schema entities
+        config = cls.config
         config._cubes = None
         cls.repo.fill_schema()
         cls.origschema = deepcopy(cls.repo.schema)
@@ -57,18 +61,6 @@
         config._apphome = cls.datadir
         assert 'Folder' in migrschema
 
-    @classmethod
-    def _refresh_repo(cls):
-        super(MigrationCommandsTC, cls)._refresh_repo()
-        cls.repo.set_schema(deepcopy(cls.origschema), resetvreg=False)
-        # reset migration schema eids
-        for eschema in migrschema.entities():
-            eschema.eid = None
-        for rschema in migrschema.relations():
-            rschema.eid = None
-            for rdef in rschema.rdefs.values():
-                rdef.eid = None
-
     def setUp(self):
         CubicWebTC.setUp(self)
         self.mh = ServerMigrationHelper(self.repo.config, migrschema,
@@ -343,6 +335,7 @@
             self.mh.cmd_change_relation_props('Personne', 'adel', 'String',
                                               fulltextindexed=False)
 
+    @tag('longrun')
     def test_sync_schema_props_perms(self):
         cursor = self.mh.session
         cursor.set_pool()
@@ -464,6 +457,7 @@
         finally:
             self.mh.cmd_set_size_constraint('CWEType', 'description', None)
 
+    @tag('longrun')
     def test_add_remove_cube_and_deps(self):
         cubes = set(self.config.cubes())
         schema = self.repo.schema
@@ -527,6 +521,7 @@
             self.commit()
 
 
+    @tag('longrun')
     def test_add_remove_cube_no_deps(self):
         cubes = set(self.config.cubes())
         schema = self.repo.schema
@@ -558,6 +553,7 @@
             self.mh.cmd_remove_cube('file')
         self.assertEqual(str(cm.exception), "can't remove cube file, used as a dependency")
 
+    @tag('longrun')
     def test_introduce_base_class(self):
         self.mh.cmd_add_entity_type('Para')
         self.mh.repo.schema.rebuild_infered_relations()
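Tags and tag come from logilab.common.testlib; the @tag('longrun') markers
added above only annotate the tests so a runner can select or skip them. A toy
equivalent of the marking side (not the library implementation):

def tag(*names):
    def decorator(func):
        func.tags = set(names)   # a runner can filter on this attribute
        return func
    return decorator

@tag('longrun')
def test_example():
    pass

assert test_example.tags == set(['longrun'])
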
--- a/server/test/unittest_msplanner.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/server/test/unittest_msplanner.py	Thu Mar 24 15:21:13 2011 +0100
@@ -430,7 +430,7 @@
         """retrieve CWUser X from both sources and return concatenation of results
         """
         self._test('CWUser X ORDERBY X LIMIT 10 OFFSET 10',
-                   [('AggrStep', 'SELECT table0.C0 FROM table0 ORDER BY table0.C0 LIMIT 10 OFFSET 10', None, [
+                   [('AggrStep', 'SELECT table0.C0 FROM table0\nORDER BY table0.C0\nLIMIT 10\nOFFSET 10', None, [
                        ('FetchStep', [('Any X WHERE X is CWUser', [{'X': 'CWUser'}])],
                         [self.ldap, self.system], {}, {'X': 'table0.C0'}, []),
                        ]),
@@ -515,7 +515,7 @@
 
     def test_complex_ordered(self):
         self._test('Any L ORDERBY L WHERE X login L',
-                   [('AggrStep', 'SELECT table0.C0 FROM table0 ORDER BY table0.C0', None,
+                   [('AggrStep', 'SELECT table0.C0 FROM table0\nORDER BY table0.C0', None,
                      [('FetchStep', [('Any L WHERE X login L, X is CWUser',
                                       [{'X': 'CWUser', 'L': 'String'}])],
                        [self.ldap, self.system], {}, {'X.login': 'table0.C0', 'L': 'table0.C0'}, []),
@@ -524,7 +524,7 @@
 
     def test_complex_ordered_limit_offset(self):
         self._test('Any L ORDERBY L LIMIT 10 OFFSET 10 WHERE X login L',
-                   [('AggrStep', 'SELECT table0.C0 FROM table0 ORDER BY table0.C0 LIMIT 10 OFFSET 10', None,
+                   [('AggrStep', 'SELECT table0.C0 FROM table0\nORDER BY table0.C0\nLIMIT 10\nOFFSET 10', None,
                      [('FetchStep', [('Any L WHERE X login L, X is CWUser',
                                       [{'X': 'CWUser', 'L': 'String'}])],
                        [self.ldap, self.system], {}, {'X.login': 'table0.C0', 'L': 'table0.C0'}, []),
@@ -610,7 +610,7 @@
         2. return content of the table sorted
         """
         self._test('Any X,F ORDERBY F WHERE X firstname F',
-                   [('AggrStep', 'SELECT table0.C0, table0.C1 FROM table0 ORDER BY table0.C1', None,
+                   [('AggrStep', 'SELECT table0.C0, table0.C1 FROM table0\nORDER BY table0.C1', None,
                      [('FetchStep', [('Any X,F WHERE X firstname F, X is CWUser',
                                       [{'X': 'CWUser', 'F': 'String'}])],
                        [self.ldap, self.system], {},
@@ -1344,7 +1344,7 @@
         self._test('Any X ORDERBY FTIRANK(X) WHERE X has_text "bla", X firstname "bla"',
                    [('FetchStep', [('Any X WHERE X firstname "bla", X is CWUser', [{'X': 'CWUser'}])],
                      [self.ldap, self.system], None, {'X': 'table0.C0'}, []),
-                    ('AggrStep', 'SELECT table1.C1 FROM table1 ORDER BY table1.C0', None, [
+                    ('AggrStep', 'SELECT table1.C1 FROM table1\nORDER BY table1.C0', None, [
                         ('FetchStep', [('Any FTIRANK(X),X WHERE X has_text "bla", X is CWUser',
                                         [{'X': 'CWUser'}])],
                          [self.system], {'X': 'table0.C0'}, {'FTIRANK(X)': 'table1.C0', 'X': 'table1.C1'}, []),
@@ -1401,7 +1401,7 @@
 
     def test_sort_func(self):
         self._test('Note X ORDERBY DUMB_SORT(RF) WHERE X type RF',
-                   [('AggrStep', 'SELECT table0.C0 FROM table0 ORDER BY DUMB_SORT(table0.C1)', None, [
+                   [('AggrStep', 'SELECT table0.C0 FROM table0\nORDER BY DUMB_SORT(table0.C1)', None, [
                        ('FetchStep', [('Any X,RF WHERE X type RF, X is Note',
                                        [{'X': 'Note', 'RF': 'String'}])],
                         [self.cards, self.system], {}, {'X': 'table0.C0', 'X.type': 'table0.C1', 'RF': 'table0.C1'}, []),
@@ -1410,7 +1410,7 @@
 
     def test_ambigous_sort_func(self):
         self._test('Any X ORDERBY DUMB_SORT(RF) WHERE X title RF, X is IN (Bookmark, Card, EmailThread)',
-                   [('AggrStep', 'SELECT table0.C0 FROM table0 ORDER BY DUMB_SORT(table0.C1)', None,
+                   [('AggrStep', 'SELECT table0.C0 FROM table0\nORDER BY DUMB_SORT(table0.C1)', None,
                      [('FetchStep', [('Any X,RF WHERE X title RF, X is Card',
                                       [{'X': 'Card', 'RF': 'String'}])],
                        [self.cards, self.system], {},
@@ -1897,7 +1897,7 @@
         try:
             self._test('Any X,AA ORDERBY AA WHERE E eid %(x)s, E see_also X, X modification_date AA',
                        [('AggrStep',
-                         'SELECT table0.C0, table0.C1 FROM table0 ORDER BY table0.C1',
+                         'SELECT table0.C0, table0.C1 FROM table0\nORDER BY table0.C1',
                          None,
                          [('FetchStep',
                            [('Any X,AA WHERE 999999 see_also X, X modification_date AA, X is Note',
@@ -2071,7 +2071,7 @@
         try:
             self._test('Any X,AA ORDERBY AA WHERE E eid %(x)s, E see_also X, X modification_date AA',
                        [('AggrStep',
-                         'SELECT table0.C0, table0.C1 FROM table0 ORDER BY table0.C1',
+                         'SELECT table0.C0, table0.C1 FROM table0\nORDER BY table0.C1',
                          None,
                          [('FetchStep',
                            [('Any X,AA WHERE 999999 see_also X, X modification_date AA, X is Note',
@@ -2118,7 +2118,7 @@
                     ('FetchStep', [('Any X,D WHERE X modification_date D, X is CWUser',
                                     [{'X': 'CWUser', 'D': 'Datetime'}])],
                      [self.ldap, self.system], None, {'X': 'table1.C0', 'X.modification_date': 'table1.C1', 'D': 'table1.C1'}, []),
-                    ('AggrStep', 'SELECT table2.C0 FROM table2 ORDER BY table2.C1 DESC', None, [
+                    ('AggrStep', 'SELECT table2.C0 FROM table2\nORDER BY table2.C1 DESC', None, [
                         ('FetchStep', [('Any X,D WHERE E eid %s, E wf_info_for X, X modification_date D, E is TrInfo, X is Affaire'%treid,
                                         [{'X': 'Affaire', 'E': 'TrInfo', 'D': 'Datetime'}])],
                          [self.system],
@@ -2267,7 +2267,7 @@
                                     [{'X': 'Note', 'Z': 'Datetime'}])],
                      [self.cards, self.system], None, {'X': 'table0.C0', 'X.modification_date': 'table0.C1', 'Z': 'table0.C1'},
                      []),
-                    ('AggrStep', 'SELECT table1.C0 FROM table1 ORDER BY table1.C1 DESC', None,
+                    ('AggrStep', 'SELECT table1.C0 FROM table1\nORDER BY table1.C1 DESC', None,
                      [('FetchStep', [('Any X,Z WHERE X modification_date Z, 999999 see_also X, X is Bookmark',
                                       [{'X': 'Bookmark', 'Z': 'Datetime'}])],
                        [self.system], {},   {'X': 'table1.C0', 'X.modification_date': 'table1.C1',
--- a/server/test/unittest_multisources.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/server/test/unittest_multisources.py	Thu Mar 24 15:21:13 2011 +0100
@@ -20,9 +20,9 @@
 from itertools import repeat
 
 from cubicweb.devtools import TestServerConfiguration, init_test_database
-from cubicweb.devtools.testlib import CubicWebTC, refresh_repo
+from cubicweb.devtools.testlib import CubicWebTC, Tags
 from cubicweb.devtools.repotest import do_monkey_patch, undo_monkey_patch
-
+from cubicweb.devtools import get_test_db_handler
 
 class ExternalSource1Configuration(TestServerConfiguration):
     sourcefile = 'sources_extern'
@@ -52,81 +52,97 @@
                             repeat(u'write')))
 
 
-def setUpModule(*args):
-    global repo2, cnx2, repo3, cnx3
-    cfg1 = ExternalSource1Configuration('data', apphome=TwoSourcesTC.datadir)
-    repo2, cnx2 = init_test_database(config=cfg1)
-    cfg2 = ExternalSource2Configuration('data', apphome=TwoSourcesTC.datadir)
-    repo3, cnx3 = init_test_database(config=cfg2)
-    src = cnx3.request().create_entity('CWSource', name=u'extern',
-                                       type=u'pyrorql', config=EXTERN_SOURCE_CFG)
-    cnx3.commit() # must commit before adding the mapping
-    add_extern_mapping(src)
-    cnx3.commit()
+def pre_setup_database_extern(session, config):
+    session.execute('INSERT Card X: X title "C3: An external card", X wikiid "aaa"')
+    session.execute('INSERT Card X: X title "C4: Ze external card", X wikiid "zzz"')
+    session.execute('INSERT Affaire X: X ref "AFFREF"')
+    session.commit()
 
-    TestServerConfiguration.no_sqlite_wrap = True
-    # hi-jack PyroRQLSource.get_connection to access existing connection (no
-    # pyro connection)
-    PyroRQLSource.get_connection = lambda x: x.uri == 'extern-multi' and cnx3 or cnx2
-    # also necessary since the repository is closing its initial connections
-    # pool though we want to keep cnx2 valid
-    Connection.close = lambda x: None
-
-def tearDownModule(*args):
-    PyroRQLSource.get_connection = PyroRQLSource_get_connection
-    Connection.close = Connection_close
-    global repo2, cnx2, repo3, cnx3
-    repo2.shutdown()
-    repo3.shutdown()
-    del repo2, cnx2, repo3, cnx3
-    #del TwoSourcesTC.config.vreg
-    #del TwoSourcesTC.config
-    TestServerConfiguration.no_sqlite_wrap = False
+def pre_setup_database_multi(session, config):
+    session.create_entity('CWSource', name=u'extern', type=u'pyrorql',
+                          config=EXTERN_SOURCE_CFG)
+    session.commit()
 
 class TwoSourcesTC(CubicWebTC):
     """Main repo -> extern-multi -> extern
                   \-------------/
     """
+    test_db_id = 'cw-server-multisources'
+    tags = CubicWebTC.tags | Tags('multisources')
+
     @classmethod
-    def _refresh_repo(cls):
-        super(TwoSourcesTC, cls)._refresh_repo()
-        cnx2.rollback()
-        refresh_repo(repo2)
-        cnx3.rollback()
-        refresh_repo(repo3)
+    def setUpClass(cls):
+        cls._cfg2 = ExternalSource1Configuration('data', apphome=TwoSourcesTC.datadir)
+        cls._cfg3 = ExternalSource2Configuration('data', apphome=TwoSourcesTC.datadir)
+        TestServerConfiguration.no_sqlite_wrap = True
+        # hi-jack PyroRQLSource.get_connection to access existing connection (no
+        # pyro connection)
+        PyroRQLSource.get_connection = lambda x: x.uri == 'extern-multi' and cls.cnx3 or cls.cnx2
+        # also necessary since the repository is closing its initial connections
+        # pool though we want to keep cnx2 valid
+        Connection.close = lambda x: None
+
+    @classmethod
+    def tearDownClass(cls):
+        PyroRQLSource.get_connection = PyroRQLSource_get_connection
+        Connection.close = Connection_close
+        cls.cnx2.close()
+        cls.cnx3.close()
+        TestServerConfiguration.no_sqlite_wrap = False
+
+
+    @classmethod
+    def _init_repo(cls):
+        repo2_handler = get_test_db_handler(cls._cfg2)
+        repo2_handler.build_db_cache('4cards-1affaire', pre_setup_func=pre_setup_database_extern)
+        cls.repo2, cls.cnx2 = repo2_handler.get_repo_and_cnx('4cards-1affaire')
+
+        repo3_handler = get_test_db_handler(cls._cfg3)
+        repo3_handler.build_db_cache('multisource', pre_setup_func=pre_setup_database_multi)
+        cls.repo3, cls.cnx3 = repo3_handler.get_repo_and_cnx('multisource')
+
+        super(TwoSourcesTC, cls)._init_repo()
 
     def setUp(self):
         CubicWebTC.setUp(self)
+        self.addCleanup(self.cnx2.close)
+        self.addCleanup(self.cnx3.close)
         do_monkey_patch()
 
     def tearDown(self):
         for source in self.repo.sources[1:]:
             self.repo.remove_source(source.uri)
         CubicWebTC.tearDown(self)
+        self.cnx2.close()
+        self.cnx3.close()
         undo_monkey_patch()
 
-    def setup_database(self):
-        cu = cnx2.cursor()
-        self.ec1 = cu.execute('INSERT Card X: X title "C3: An external card", X wikiid "aaa"')[0][0]
-        cu.execute('INSERT Card X: X title "C4: Ze external card", X wikiid "zzz"')
-        self.aff1 = cu.execute('INSERT Affaire X: X ref "AFFREF"')[0][0]
-        cnx2.commit()
-        for uri, config in [('extern', EXTERN_SOURCE_CFG),
+    @staticmethod
+    def pre_setup_database(session, config):
+        for uri, src_config in [('extern', EXTERN_SOURCE_CFG),
                             ('extern-multi', '''
 pyro-ns-id = extern-multi
 cubicweb-user = admin
 cubicweb-password = gingkow
 ''')]:
-            source = self.request().create_entity(
-                'CWSource', name=unicode(uri), type=u'pyrorql',
-                config=unicode(config))
-            self.commit() # must commit before adding the mapping
+            source = session.create_entity('CWSource', name=unicode(uri),
+                                           type=u'pyrorql',
+                                           config=unicode(src_config))
+            session.commit()
             add_extern_mapping(source)
-        self.commit()
+
+        session.commit()
         # trigger discovery
-        self.sexecute('Card X')
-        self.sexecute('Affaire X')
-        self.sexecute('State X')
+        session.execute('Card X')
+        session.execute('Affaire X')
+        session.execute('State X')
+
+    def setup_database(self):
+        cu2 = self.cnx2.cursor()
+        self.ec1 = cu2.execute('Any X WHERE X is Card, X title "C3: An external card", X wikiid "aaa"')[0][0]
+        self.aff1 = cu2.execute('Any X WHERE X is Affaire, X ref "AFFREF"')[0][0]
+        cu2.close()
         # add some entities
         self.ic1 = self.sexecute('INSERT Card X: X title "C1: An internal card", X wikiid "aaai"')[0][0]
         self.ic2 = self.sexecute('INSERT Card X: X title "C2: Ze internal card", X wikiid "zzzi"')[0][0]
@@ -186,25 +202,25 @@
         Connection_close(cnx.cnx) # cnx is a TestCaseConnectionProxy
 
     def test_synchronization(self):
-        cu = cnx2.cursor()
+        cu = self.cnx2.cursor()
         assert cu.execute('Any X WHERE X eid %(x)s', {'x': self.aff1})
         cu.execute('SET X ref "BLAH" WHERE X eid %(x)s', {'x': self.aff1})
         aff2 = cu.execute('INSERT Affaire X: X ref "AFFREUX"')[0][0]
-        cnx2.commit()
+        self.cnx2.commit()
         try:
             # force sync
             self.repo.sources_by_uri['extern'].synchronize(MTIME)
             self.failUnless(self.sexecute('Any X WHERE X has_text "blah"'))
             self.failUnless(self.sexecute('Any X WHERE X has_text "affreux"'))
             cu.execute('DELETE Affaire X WHERE X eid %(x)s', {'x': aff2})
-            cnx2.commit()
+            self.cnx2.commit()
             self.repo.sources_by_uri['extern'].synchronize(MTIME)
             rset = self.sexecute('Any X WHERE X has_text "affreux"')
             self.failIf(rset)
         finally:
             # restore state
             cu.execute('SET X ref "AFFREF" WHERE X eid %(x)s', {'x': self.aff1})
-            cnx2.commit()
+            self.cnx2.commit()
 
     def test_simplifiable_var(self):
         affeid = self.sexecute('Affaire X WHERE X ref "AFFREF"')[0][0]
@@ -234,9 +250,9 @@
     def test_greater_eid(self):
         rset = self.sexecute('Any X WHERE X eid > %s' % (self.ic1 - 1))
         self.assertEqual(len(rset.rows), 2) # self.ic1 and self.ic2
-        cu = cnx2.cursor()
+        cu = self.cnx2.cursor()
         ec2 = cu.execute('INSERT Card X: X title "glup"')[0][0]
-        cnx2.commit()
+        self.cnx2.commit()
         # 'X eid > something' should not trigger discovery
         rset = self.sexecute('Any X WHERE X eid > %s' % (self.ic1 - 1))
         self.assertEqual(len(rset.rows), 2)
@@ -256,16 +272,16 @@
         self.assertEqual(len(rset), 1, rset.rows)
 
     def test_attr_unification_2(self):
-        cu = cnx2.cursor()
+        cu = self.cnx2.cursor()
         ec2 = cu.execute('INSERT Card X: X title "AFFREF"')[0][0]
-        cnx2.commit()
+        self.cnx2.commit()
         try:
             c1 = self.sexecute('INSERT Card C: C title "AFFREF"')[0][0]
             rset = self.sexecute('Any X,Y WHERE X is Card, Y is Affaire, X title T, Y ref T')
             self.assertEqual(len(rset), 2, rset.rows)
         finally:
             cu.execute('DELETE Card X WHERE X eid %(x)s', {'x': ec2})
-            cnx2.commit()
+            self.cnx2.commit()
 
     def test_attr_unification_neq_1(self):
         # XXX complete
@@ -317,22 +333,22 @@
         self.assertSetEqual(notstates, states)
 
     def test_absolute_url_base_url(self):
-        cu = cnx2.cursor()
+        cu = self.cnx2.cursor()
         ceid = cu.execute('INSERT Card X: X title "without wikiid to get eid based url"')[0][0]
-        cnx2.commit()
+        self.cnx2.commit()
         lc = self.sexecute('Card X WHERE X title "without wikiid to get eid based url"').get_entity(0, 0)
         self.assertEqual(lc.absolute_url(), 'http://extern.org/card/eid/%s' % ceid)
         cu.execute('DELETE Card X WHERE X eid %(x)s', {'x':ceid})
-        cnx2.commit()
+        self.cnx2.commit()
 
     def test_absolute_url_no_base_url(self):
-        cu = cnx3.cursor()
+        cu = self.cnx3.cursor()
         ceid = cu.execute('INSERT Card X: X title "without wikiid to get eid based url"')[0][0]
-        cnx3.commit()
+        self.cnx3.commit()
         lc = self.sexecute('Card X WHERE X title "without wikiid to get eid based url"').get_entity(0, 0)
         self.assertEqual(lc.absolute_url(), 'http://testing.fr/cubicweb/card/eid/%s' % lc.eid)
         cu.execute('DELETE Card X WHERE X eid %(x)s', {'x':ceid})
-        cnx3.commit()
+        self.cnx3.commit()
 
     def test_crossed_relation_noeid_needattr(self):
         """http://www.cubicweb.org/ticket/1382452"""
--- a/server/test/unittest_querier.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/server/test/unittest_querier.py	Thu Mar 24 15:21:13 2011 +0100
@@ -27,9 +27,9 @@
 from cubicweb.server.sqlutils import SQL_PREFIX
 from cubicweb.server.utils import crypt_password
 from cubicweb.server.sources.native import make_schema
-from cubicweb.devtools import init_test_database
+from cubicweb.devtools import get_test_db_handler, TestServerConfiguration
+
 from cubicweb.devtools.repotest import tuplify, BaseQuerierTC
-
 from unittest_session import Variable
 
 
@@ -64,7 +64,10 @@
 
 def setUpModule(*args):
     global repo, cnx
-    repo, cnx = init_test_database(apphome=UtilsTC.datadir)
+    config = TestServerConfiguration(apphome=UtilsTC.datadir)
+    handler = get_test_db_handler(config)
+    handler.build_db_cache()
+    repo, cnx = handler.get_repo_and_cnx()
 
 def tearDownModule(*args):
     global repo, cnx
@@ -746,7 +749,7 @@
         rset = self.execute('Tag X WHERE X creation_date TODAY')
         self.assertEqual(len(rset.rows), 2)
         rset = self.execute('Any MAX(D) WHERE X is Tag, X creation_date D')
-        self.failUnless(isinstance(rset[0][0], datetime), type(rset[0][0]))
+        self.failUnless(isinstance(rset[0][0], datetime), (rset[0][0], type(rset[0][0])))
 
     def test_today(self):
         self.execute("INSERT Tag X: X name 'bidule', X creation_date TODAY")
--- a/server/test/unittest_rql2sql.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/server/test/unittest_rql2sql.py	Thu Mar 24 15:21:13 2011 +0100
@@ -18,6 +18,7 @@
 """unit tests for module cubicweb.server.sources.rql2sql"""
 
 import sys
+import os
 
 from logilab.common.testlib import TestCase, unittest_main, mock_object
 
@@ -37,6 +38,24 @@
 except AssertionError, ex:
     pass # already registered
 
+from logilab import database as db
+def monkey_patch_import_driver_module(driver, drivers, quiet=True):
+    if driver not in drivers:
+        raise db.UnknownDriver(driver)
+    for modname in drivers[driver]:
+        try:
+            if not quiet:
+                print >> sys.stderr, 'Trying %s' % modname
+            module = db.load_module_from_name(modname, use_sys=False)
+            break
+        except ImportError:
+            if not quiet:
+                print >> sys.stderr, '%s is not available' % modname
+            continue
+    else:
+        return None, drivers[driver][0]
+    return module, modname
+
 
 def setUpModule():
     global config, schema
@@ -46,10 +65,14 @@
     schema['in_state'].inlined = True
     schema['state_of'].inlined = False
     schema['comments'].inlined = False
+    db._backup_import_driver_module = db._import_driver_module
+    db._import_driver_module = monkey_patch_import_driver_module
 
 def tearDownModule():
     global config, schema
     del config, schema
+    db._import_driver_module = db._backup_import_driver_module
+    del db._backup_import_driver_module
 
 PARSER = [
     (r"Personne P WHERE P nom 'Zig\'oto';",
@@ -93,12 +116,6 @@
     ("Personne P WHERE P eid -1",
      '''SELECT -1'''),
 
-    ("Personne P LIMIT 20 OFFSET 10",
-     '''SELECT _P.cw_eid
-FROM cw_Personne AS _P
-LIMIT 20
-OFFSET 10'''),
-
     ("Personne P WHERE S is Societe, P travaille S, S nom 'Logilab';",
      '''SELECT rel_travaille0.eid_from
 FROM cw_Societe AS _S, travaille_relation AS rel_travaille0
@@ -186,6 +203,14 @@
 WHERE NOT (_X.cw_wikiid=_X.cw_title) AND NOT (_X.cw_title=parent)''')
 ]
 
+BASIC_WITH_LIMIT = [
+    ("Personne P LIMIT 20 OFFSET 10",
+     '''SELECT _P.cw_eid
+FROM cw_Personne AS _P
+LIMIT 20
+OFFSET 10'''),
+    ]
+
 
 ADVANCED = [
     ("Societe S WHERE S nom 'Logilab' OR S nom 'Caesium'",
@@ -279,12 +304,6 @@
 FROM cw_Note AS _S, cw_Personne AS _O
 WHERE (_S.cw_ecrit_par IS NULL OR _S.cw_ecrit_par!=_O.cw_eid) AND _S.cw_eid=1 AND _S.cw_inline1 IS NOT NULL AND _O.cw_inline2=_S.cw_inline1'''),
 
-    ('DISTINCT Any S ORDERBY stockproc(SI) WHERE NOT S ecrit_par O, S para SI',
-     '''SELECT T1.C0 FROM (SELECT DISTINCT _S.cw_eid AS C0, STOCKPROC(_S.cw_para) AS C1
-FROM cw_Note AS _S
-WHERE _S.cw_ecrit_par IS NULL
-ORDER BY 2) AS T1'''),
-
     ('Any N WHERE N todo_by U, N is Note, U eid 2, N filed_under T, T eid 3',
     # N would actually be invariant if U eid 2 had given a specific type to U
      '''SELECT _N.cw_eid
@@ -333,13 +352,6 @@
 WHERE rel_tags0.eid_to=_X.cw_eid AND _X.cw_in_state=32
 GROUP BY _X.cw_eid'''),
 
-    ('Any COUNT(S),CS GROUPBY CS ORDERBY 1 DESC LIMIT 10 WHERE S is Affaire, C is Societe, S concerne C, C nom CS, (EXISTS(S owned_by 1)) OR (EXISTS(S documented_by N, N title "published"))',
-     '''SELECT COUNT(rel_concerne0.eid_from), _C.cw_nom
-FROM concerne_relation AS rel_concerne0, cw_Societe AS _C
-WHERE rel_concerne0.eid_to=_C.cw_eid AND ((EXISTS(SELECT 1 FROM owned_by_relation AS rel_owned_by1 WHERE rel_concerne0.eid_from=rel_owned_by1.eid_from AND rel_owned_by1.eid_to=1)) OR (EXISTS(SELECT 1 FROM documented_by_relation AS rel_documented_by2, cw_Card AS _N WHERE rel_concerne0.eid_from=rel_documented_by2.eid_from AND rel_documented_by2.eid_to=_N.cw_eid AND _N.cw_title=published)))
-GROUP BY _C.cw_nom
-ORDER BY 1 DESC
-LIMIT 10'''),
 
     ('Any X WHERE Y evaluee X, Y is CWUser',
      '''SELECT rel_evaluee0.eid_to
@@ -435,13 +447,6 @@
 GROUP BY _X.cw_data_name,_X.cw_data_format
 ORDER BY 1,2,_X.cw_data_format'''),
 
-    ('DISTINCT Any MAX(X)+MIN(LENGTH(D)), N GROUPBY N ORDERBY 2, DF WHERE X data_name N, X data D, X data_format DF;',
-     '''SELECT T1.C0,T1.C1 FROM (SELECT DISTINCT (MAX(_X.cw_eid) + MIN(LENGTH(_X.cw_data))) AS C0, _X.cw_data_name AS C1, _X.cw_data_format AS C2
-FROM cw_File AS _X
-GROUP BY _X.cw_data_name,_X.cw_data_format
-ORDER BY 2,3) AS T1
-'''),
-
     # ambiguity in EXISTS() -> should union the sub-query
     ('Any T WHERE T is Tag, NOT T name in ("t1", "t2"), EXISTS(T tags X, X is IN (CWUser, CWGroup))',
      '''SELECT _T.cw_eid
@@ -512,6 +517,72 @@
 GROUP BY rel_owned_by0.eid_to
 HAVING COUNT(rel_owned_by0.eid_from)>10'''),
 
+
+    ("Any X WHERE X eid 0, X test TRUE",
+     '''SELECT _X.cw_eid
+FROM cw_Personne AS _X
+WHERE _X.cw_eid=0 AND _X.cw_test=TRUE'''),
+
+    ('Any 1 WHERE X in_group G, X is CWUser',
+     '''SELECT 1
+FROM in_group_relation AS rel_in_group0'''),
+
+    ('CWEType X WHERE X name CV, X description V HAVING NOT V=CV AND NOT V = "parent"',
+     '''SELECT _X.cw_eid
+FROM cw_CWEType AS _X
+WHERE NOT (EXISTS(SELECT 1 WHERE _X.cw_description=parent)) AND NOT (EXISTS(SELECT 1 WHERE _X.cw_description=_X.cw_name))'''),
+    ('CWEType X WHERE X name CV, X description V HAVING V!=CV AND V != "parent"',
+     '''SELECT _X.cw_eid
+FROM cw_CWEType AS _X
+WHERE _X.cw_description!=parent AND _X.cw_description!=_X.cw_name'''),
+    ]
+
+ADVANCED_WITH_GROUP_CONCAT = [
+        ("Any X,GROUP_CONCAT(TN) GROUPBY X ORDERBY XN WHERE T tags X, X name XN, T name TN, X is CWGroup",
+     '''SELECT _X.cw_eid, GROUP_CONCAT(_T.cw_name)
+FROM cw_CWGroup AS _X, cw_Tag AS _T, tags_relation AS rel_tags0
+WHERE rel_tags0.eid_from=_T.cw_eid AND rel_tags0.eid_to=_X.cw_eid
+GROUP BY _X.cw_eid,_X.cw_name
+ORDER BY _X.cw_name'''),
+
+    ("Any X,GROUP_CONCAT(TN) GROUPBY X ORDERBY XN WHERE T tags X, X name XN, T name TN",
+     '''SELECT T1.C0, GROUP_CONCAT(T1.C1) FROM (SELECT _X.cw_eid AS C0, _T.cw_name AS C1, _X.cw_name AS C2
+FROM cw_CWGroup AS _X, cw_Tag AS _T, tags_relation AS rel_tags0
+WHERE rel_tags0.eid_from=_T.cw_eid AND rel_tags0.eid_to=_X.cw_eid
+UNION ALL
+SELECT _X.cw_eid AS C0, _T.cw_name AS C1, _X.cw_name AS C2
+FROM cw_State AS _X, cw_Tag AS _T, tags_relation AS rel_tags0
+WHERE rel_tags0.eid_from=_T.cw_eid AND rel_tags0.eid_to=_X.cw_eid
+UNION ALL
+SELECT _X.cw_eid AS C0, _T.cw_name AS C1, _X.cw_name AS C2
+FROM cw_Tag AS _T, cw_Tag AS _X, tags_relation AS rel_tags0
+WHERE rel_tags0.eid_from=_T.cw_eid AND rel_tags0.eid_to=_X.cw_eid) AS T1
+GROUP BY T1.C0,T1.C2
+ORDER BY T1.C2'''),
+
+]
+
+ADVANCED_WITH_LIMIT_OR_ORDERBY = [
+    ('Any COUNT(S),CS GROUPBY CS ORDERBY 1 DESC LIMIT 10 WHERE S is Affaire, C is Societe, S concerne C, C nom CS, (EXISTS(S owned_by 1)) OR (EXISTS(S documented_by N, N title "published"))',
+     '''SELECT COUNT(rel_concerne0.eid_from), _C.cw_nom
+FROM concerne_relation AS rel_concerne0, cw_Societe AS _C
+WHERE rel_concerne0.eid_to=_C.cw_eid AND ((EXISTS(SELECT 1 FROM owned_by_relation AS rel_owned_by1 WHERE rel_concerne0.eid_from=rel_owned_by1.eid_from AND rel_owned_by1.eid_to=1)) OR (EXISTS(SELECT 1 FROM documented_by_relation AS rel_documented_by2, cw_Card AS _N WHERE rel_concerne0.eid_from=rel_documented_by2.eid_from AND rel_documented_by2.eid_to=_N.cw_eid AND _N.cw_title=published)))
+GROUP BY _C.cw_nom
+ORDER BY 1 DESC
+LIMIT 10'''),
+    ('DISTINCT Any S ORDERBY stockproc(SI) WHERE NOT S ecrit_par O, S para SI',
+     '''SELECT T1.C0 FROM (SELECT DISTINCT _S.cw_eid AS C0, STOCKPROC(_S.cw_para) AS C1
+FROM cw_Note AS _S
+WHERE _S.cw_ecrit_par IS NULL
+ORDER BY 2) AS T1'''),
+
+    ('DISTINCT Any MAX(X)+MIN(LENGTH(D)), N GROUPBY N ORDERBY 2, DF WHERE X data_name N, X data D, X data_format DF;',
+     '''SELECT T1.C0,T1.C1 FROM (SELECT DISTINCT (MAX(_X.cw_eid) + MIN(LENGTH(_X.cw_data))) AS C0, _X.cw_data_name AS C1, _X.cw_data_format AS C2
+FROM cw_File AS _X
+GROUP BY _X.cw_data_name,_X.cw_data_format
+ORDER BY 2,3) AS T1
+'''),
+
     ('DISTINCT Any X ORDERBY stockproc(X) WHERE U login X',
      '''SELECT T1.C0 FROM (SELECT DISTINCT _U.cw_login AS C0, STOCKPROC(_U.cw_login) AS C1
 FROM cw_CWUser AS _U
@@ -546,48 +617,8 @@
 ORDER BY 4 DESC'''),
 
 
-    ("Any X WHERE X eid 0, X test TRUE",
-     '''SELECT _X.cw_eid
-FROM cw_Personne AS _X
-WHERE _X.cw_eid=0 AND _X.cw_test=TRUE'''),
-
-    ("Any X,GROUP_CONCAT(TN) GROUPBY X ORDERBY XN WHERE T tags X, X name XN, T name TN, X is CWGroup",
-     '''SELECT _X.cw_eid, GROUP_CONCAT(_T.cw_name)
-FROM cw_CWGroup AS _X, cw_Tag AS _T, tags_relation AS rel_tags0
-WHERE rel_tags0.eid_from=_T.cw_eid AND rel_tags0.eid_to=_X.cw_eid
-GROUP BY _X.cw_eid,_X.cw_name
-ORDER BY _X.cw_name'''),
-
-    ("Any X,GROUP_CONCAT(TN) GROUPBY X ORDERBY XN WHERE T tags X, X name XN, T name TN",
-     '''SELECT T1.C0, GROUP_CONCAT(T1.C1) FROM (SELECT _X.cw_eid AS C0, _T.cw_name AS C1, _X.cw_name AS C2
-FROM cw_CWGroup AS _X, cw_Tag AS _T, tags_relation AS rel_tags0
-WHERE rel_tags0.eid_from=_T.cw_eid AND rel_tags0.eid_to=_X.cw_eid
-UNION ALL
-SELECT _X.cw_eid AS C0, _T.cw_name AS C1, _X.cw_name AS C2
-FROM cw_State AS _X, cw_Tag AS _T, tags_relation AS rel_tags0
-WHERE rel_tags0.eid_from=_T.cw_eid AND rel_tags0.eid_to=_X.cw_eid
-UNION ALL
-SELECT _X.cw_eid AS C0, _T.cw_name AS C1, _X.cw_name AS C2
-FROM cw_Tag AS _T, cw_Tag AS _X, tags_relation AS rel_tags0
-WHERE rel_tags0.eid_from=_T.cw_eid AND rel_tags0.eid_to=_X.cw_eid) AS T1
-GROUP BY T1.C0,T1.C2
-ORDER BY T1.C2'''),
-
-    ('Any 1 WHERE X in_group G, X is CWUser',
-     '''SELECT 1
-FROM in_group_relation AS rel_in_group0'''),
-
-    ('CWEType X WHERE X name CV, X description V HAVING NOT V=CV AND NOT V = "parent"',
-     '''SELECT _X.cw_eid
-FROM cw_CWEType AS _X
-WHERE NOT (EXISTS(SELECT 1 WHERE _X.cw_description=parent)) AND NOT (EXISTS(SELECT 1 WHERE _X.cw_description=_X.cw_name))'''),
-    ('CWEType X WHERE X name CV, X description V HAVING V!=CV AND V != "parent"',
-     '''SELECT _X.cw_eid
-FROM cw_CWEType AS _X
-WHERE _X.cw_description!=parent AND _X.cw_description!=_X.cw_name'''),
     ]
 
-
 MULTIPLE_SEL = [
     ("DISTINCT Any X,Y where P is Personne, P nom X , P prenom Y;",
      '''SELECT DISTINCT _P.cw_nom, _P.cw_prenom
@@ -712,12 +743,39 @@
      '''SELECT _S.cw_eid
 FROM cw_State AS _S
 WHERE NOT (EXISTS(SELECT 1 FROM cw_CWUser AS _X WHERE _X.cw_in_state=_S.cw_eid AND _S.cw_name=somename))'''),
+    ]
+
+HAS_TEXT_LG_INDEXER = [
+    ('Any X WHERE X has_text "toto tata"',
+     """SELECT DISTINCT appears0.uid
+FROM appears AS appears0
+WHERE appears0.word_id IN (SELECT word_id FROM word WHERE word in ('toto', 'tata'))"""),
+    ('Personne X WHERE X has_text "toto tata"',
+     """SELECT DISTINCT _X.eid
+FROM appears AS appears0, entities AS _X
+WHERE appears0.word_id IN (SELECT word_id FROM word WHERE word in ('toto', 'tata')) AND appears0.uid=_X.eid AND _X.type='Personne'"""),
+    ('Personne X WHERE X has_text %(text)s',
+     """SELECT DISTINCT _X.eid
+FROM appears AS appears0, entities AS _X
+WHERE appears0.word_id IN (SELECT word_id FROM word WHERE word in ('hip', 'hop', 'momo')) AND appears0.uid=_X.eid AND _X.type='Personne'
+"""),
+    ('Any X WHERE X has_text "toto tata", X name "tutu", X is IN (Basket,Folder)',
+     """SELECT DISTINCT _X.cw_eid
+FROM appears AS appears0, cw_Basket AS _X
+WHERE appears0.word_id IN (SELECT word_id FROM word WHERE word in ('toto', 'tata')) AND appears0.uid=_X.cw_eid AND _X.cw_name=tutu
+UNION
+SELECT DISTINCT _X.cw_eid
+FROM appears AS appears0, cw_Folder AS _X
+WHERE appears0.word_id IN (SELECT word_id FROM word WHERE word in ('toto', 'tata')) AND appears0.uid=_X.cw_eid AND _X.cw_name=tutu""")
+    ]
+
+
 
 # XXXFIXME fail
 #         ('Any X,RT WHERE X relation_type RT?, NOT X is CWAttribute',
 #      '''SELECT _X.cw_eid, _X.cw_relation_type
 # FROM cw_CWRelation AS _X'''),
-]
+
 
 OUTER_JOIN = [
     ('Any X,S WHERE X travaille S?',
@@ -965,20 +1023,22 @@
 WHERE (rel_connait0.eid_from=_X.cw_eid AND rel_connait0.eid_to=_P.cw_eid OR rel_connait0.eid_to=_X.cw_eid AND rel_connait0.eid_from=_P.cw_eid) AND _P.cw_nom=nom'''
     ),
 
-    ('Any X ORDERBY X DESC LIMIT 9 WHERE E eid 0, E connait X',
+    ('DISTINCT Any P WHERE P connait S OR S connait P, S nom "chouette"',
+     '''SELECT DISTINCT _P.cw_eid
+FROM connait_relation AS rel_connait0, cw_Personne AS _P, cw_Personne AS _S
+WHERE (rel_connait0.eid_from=_P.cw_eid AND rel_connait0.eid_to=_S.cw_eid OR rel_connait0.eid_to=_P.cw_eid AND rel_connait0.eid_from=_S.cw_eid) AND _S.cw_nom=chouette'''
+     )
+    ]
+
+SYMMETRIC_WITH_LIMIT = [
+    ('Any X ORDERBY X DESC LIMIT 9 WHERE E eid 0, E connait X',
     '''SELECT DISTINCT _X.cw_eid
 FROM connait_relation AS rel_connait0, cw_Personne AS _X
 WHERE (rel_connait0.eid_from=0 AND rel_connait0.eid_to=_X.cw_eid OR rel_connait0.eid_to=0 AND rel_connait0.eid_from=_X.cw_eid)
 ORDER BY 1 DESC
 LIMIT 9'''
      ),
-
-    ('DISTINCT Any P WHERE P connait S OR S connait P, S nom "chouette"',
-     '''SELECT DISTINCT _P.cw_eid
-FROM connait_relation AS rel_connait0, cw_Personne AS _P, cw_Personne AS _S
-WHERE (rel_connait0.eid_from=_P.cw_eid AND rel_connait0.eid_to=_S.cw_eid OR rel_connait0.eid_to=_P.cw_eid AND rel_connait0.eid_from=_S.cw_eid) AND _S.cw_nom=chouette'''
-     )
-    ]
+]
 
 INLINE = [
 
@@ -1244,11 +1304,11 @@
             yield t
 
     def test_basic_parse(self):
-        for t in self._parse(BASIC):
+        for t in self._parse(BASIC + BASIC_WITH_LIMIT):
             yield t
 
     def test_advanced_parse(self):
-        for t in self._parse(ADVANCED):
+        for t in self._parse(ADVANCED + ADVANCED_WITH_LIMIT_OR_ORDERBY + ADVANCED_WITH_GROUP_CONCAT):
             yield t
 
     def test_outer_join_parse(self):
@@ -1357,7 +1417,7 @@
         self.assertRaises(BadRQLQuery, self.o.generate, rqlst)
 
     def test_symmetric(self):
-        for t in self._parse(SYMMETRIC):
+        for t in self._parse(SYMMETRIC + SYMMETRIC_WITH_LIMIT):
             yield t
 
     def test_inline(self):
@@ -1509,6 +1569,111 @@
                     '''SELECT 1
 WHERE NOT (EXISTS(SELECT 1 FROM in_group_relation AS rel_in_group0))''')
 
+class SqlServer2005SQLGeneratorTC(PostgresSQLGeneratorTC):
+    backend = 'sqlserver2005'
+
+    def _norm_sql(self, sql):
+        return sql.strip().replace(' SUBSTR', ' SUBSTRING').replace(' || ', ' + ').replace(' ILIKE ', ' LIKE ')
+
+    def test_has_text(self):
+        for t in self._parse(HAS_TEXT_LG_INDEXER):
+            yield t
+
+    def test_or_having_fake_terms(self):
+        self._check('Any X WHERE X is CWUser, X creation_date D HAVING YEAR(D) = "2010" OR D = NULL',
+                    '''SELECT _X.cw_eid
+FROM cw_CWUser AS _X
+WHERE ((YEAR(_X.cw_creation_date)=2010) OR (_X.cw_creation_date IS NULL))''')
+
+    def test_date_extraction(self):
+        self._check("Any MONTH(D) WHERE P is Personne, P creation_date D",
+                    '''SELECT MONTH(_P.cw_creation_date)
+FROM cw_Personne AS _P''')
+
+    def test_symmetric(self):
+        for t in self._parse(SYMMETRIC):
+            yield t
+
+    def test_basic_parse(self):
+        for t in self._parse(BASIC):  # + BASIC_WITH_LIMIT
+            yield t
+
+    def test_advanced_parse(self):
+        for t in self._parse(ADVANCED):  # + ADVANCED_WITH_LIMIT_OR_ORDERBY
+            yield t
+
+    def test_limit_offset(self):
+        WITH_LIMIT = [
+    ("Personne P LIMIT 20 OFFSET 10",
+     '''WITH orderedrows AS (
+SELECT
+_L01
+, ROW_NUMBER() OVER (ORDER BY _L01) AS __RowNumber
+FROM (
+SELECT _P.cw_eid AS _L01 FROM  cw_Personne AS _P
+) AS _SQ1 )
+SELECT
+_L01
+FROM orderedrows WHERE
+__RowNumber <= 30 AND __RowNumber > 10
+ '''),
+
+    ('Any COUNT(S),CS GROUPBY CS ORDERBY 1 DESC LIMIT 10 WHERE S is Affaire, C is Societe, S concerne C, C nom CS, (EXISTS(S owned_by 1)) OR (EXISTS(S documented_by N, N title "published"))',
+     '''WITH orderedrows AS (
+SELECT
+_L01, _L02
+, ROW_NUMBER() OVER (ORDER BY _L01 DESC) AS __RowNumber
+FROM (
+SELECT COUNT(rel_concerne0.eid_from) AS _L01, _C.cw_nom AS _L02 FROM  concerne_relation AS rel_concerne0, cw_Societe AS _C
+WHERE rel_concerne0.eid_to=_C.cw_eid AND ((EXISTS(SELECT 1 FROM owned_by_relation AS rel_owned_by1 WHERE rel_concerne0.eid_from=rel_owned_by1.eid_from AND rel_owned_by1.eid_to=1)) OR (EXISTS(SELECT 1 FROM documented_by_relation AS rel_documented_by2, cw_Card AS _N WHERE rel_concerne0.eid_from=rel_documented_by2.eid_from AND rel_documented_by2.eid_to=_N.cw_eid AND _N.cw_title=published)))
+GROUP BY _C.cw_nom
+) AS _SQ1 )
+SELECT
+_L01, _L02
+FROM orderedrows WHERE
+__RowNumber <= 10
+     '''),
+
+    ('DISTINCT Any MAX(X)+MIN(LENGTH(D)), N GROUPBY N ORDERBY 2, DF WHERE X data_name N, X data D, X data_format DF;',
+     '''SELECT T1.C0,T1.C1 FROM (SELECT DISTINCT (MAX(_X.cw_eid) + MIN(LENGTH(_X.cw_data))) AS C0, _X.cw_data_name AS C1, _X.cw_data_format AS C2
+FROM cw_File AS _X
+GROUP BY _X.cw_data_name,_X.cw_data_format) AS T1
+ORDER BY T1.C1,T1.C2
+'''),
+
+
+    ('DISTINCT Any X ORDERBY Y WHERE B bookmarked_by X, X login Y',
+     '''SELECT T1.C0 FROM (SELECT DISTINCT _X.cw_eid AS C0, _X.cw_login AS C1
+FROM bookmarked_by_relation AS rel_bookmarked_by0, cw_CWUser AS _X
+WHERE rel_bookmarked_by0.eid_to=_X.cw_eid) AS T1
+ORDER BY T1.C1
+ '''),
+
+    ('DISTINCT Any X ORDERBY SN WHERE X in_state S, S name SN',
+     '''SELECT T1.C0 FROM (SELECT DISTINCT _X.cw_eid AS C0, _S.cw_name AS C1
+FROM cw_Affaire AS _X, cw_State AS _S
+WHERE _X.cw_in_state=_S.cw_eid
+UNION
+SELECT DISTINCT _X.cw_eid AS C0, _S.cw_name AS C1
+FROM cw_CWUser AS _X, cw_State AS _S
+WHERE _X.cw_in_state=_S.cw_eid
+UNION
+SELECT DISTINCT _X.cw_eid AS C0, _S.cw_name AS C1
+FROM cw_Note AS _X, cw_State AS _S
+WHERE _X.cw_in_state=_S.cw_eid) AS T1
+ORDER BY T1.C1'''),
+
+    ('Any O,AA,AB,AC ORDERBY AC DESC '
+     'WHERE NOT S use_email O, S eid 1, O is EmailAddress, O address AA, O alias AB, O modification_date AC, '
+     'EXISTS(A use_email O, EXISTS(A identity B, NOT B in_group D, D name "guests", D is CWGroup), A is CWUser), B eid 2',
+     '''
+SELECT _O.cw_eid, _O.cw_address, _O.cw_alias, _O.cw_modification_date
+FROM cw_EmailAddress AS _O
+WHERE NOT (EXISTS(SELECT 1 FROM use_email_relation AS rel_use_email0 WHERE rel_use_email0.eid_from=1 AND rel_use_email0.eid_to=_O.cw_eid)) AND EXISTS(SELECT 1 FROM use_email_relation AS rel_use_email1 WHERE rel_use_email1.eid_to=_O.cw_eid AND EXISTS(SELECT 1 FROM cw_CWGroup AS _D WHERE rel_use_email1.eid_from=2 AND NOT (EXISTS(SELECT 1 FROM in_group_relation AS rel_in_group2 WHERE rel_in_group2.eid_from=2 AND rel_in_group2.eid_to=_D.cw_eid)) AND _D.cw_name=guests))
+ORDER BY 4 DESC'''),
+        ]
+        for t in self._parse(WITH_LIMIT):  # + ADVANCED_WITH_LIMIT_OR_ORDERBY
+            yield t
+
 
 
 class SqliteSQLGeneratorTC(PostgresSQLGeneratorTC):
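
A note on the expected SQL in the LIMIT/OFFSET tests above: on backends without
native LIMIT/OFFSET, the generator wraps the unlimited query in a CTE, numbers
its rows with ROW_NUMBER() OVER (ORDER BY ...), and keeps rows satisfying
offset < __RowNumber <= offset + limit, which is why "LIMIT 20 OFFSET 10"
produces the bounds 10 and 30. A minimal sketch of that rewriting, using
hypothetical helper names rather than the generator's real internals:

    def emulate_limit_offset(inner_sql, columns, orderby, limit=None, offset=0):
        """Wrap `inner_sql` in a ROW_NUMBER() CTE to emulate LIMIT/OFFSET.

        `columns` are the output column aliases of the inner query,
        `orderby` the expression used to number rows. Assumes at least
        one of limit/offset is set.
        """
        cols = ', '.join(columns)
        conds = []
        if limit is not None:
            conds.append('__RowNumber <= %d' % (offset + limit))
        if offset:
            conds.append('__RowNumber > %d' % offset)
        return ('WITH orderedrows AS (\n'
                'SELECT %s, ROW_NUMBER() OVER (ORDER BY %s) AS __RowNumber\n'
                'FROM (%s) AS _SQ1)\n'
                'SELECT %s FROM orderedrows WHERE %s'
                % (cols, orderby, inner_sql, cols, ' AND '.join(conds)))

    # emulate_limit_offset('SELECT _P.cw_eid AS _L01 FROM cw_Personne AS _P',
    #                      ['_L01'], '_L01', limit=20, offset=10)
    # -> ... WHERE __RowNumber <= 30 AND __RowNumber > 10
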
--- a/server/test/unittest_security.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/server/test/unittest_security.py	Thu Mar 24 15:21:13 2011 +0100
@@ -27,16 +27,15 @@
 
 class BaseSecurityTC(CubicWebTC):
 
-    def setUp(self):
-        CubicWebTC.setUp(self)
+    def setup_database(self):
+        super(BaseSecurityTC, self).setup_database()
         self.create_user('iaminusersgrouponly')
-        self.readoriggroups = self.schema['Personne'].permissions['read']
-        self.addoriggroups = self.schema['Personne'].permissions['add']
-
-    def tearDown(self):
-        CubicWebTC.tearDown(self)
-        self.schema['Personne'].set_action_permissions('read', self.readoriggroups)
-        self.schema['Personne'].set_action_permissions('add', self.addoriggroups)
+        readoriggroups = self.schema['Personne'].permissions['read']
+        addoriggroups = self.schema['Personne'].permissions['add']
+        def fix_perm():
+            self.schema['Personne'].set_action_permissions('read', readoriggroups)
+            self.schema['Personne'].set_action_permissions('add', addoriggroups)
+        self.addCleanup(fix_perm)
 
 
 class LowLevelSecurityFunctionTC(BaseSecurityTC):
@@ -75,7 +74,7 @@
 
     def tearDown(self):
         self.repo.system_source.__dict__.pop('syntax_tree_search', None)
-        BaseSecurityTC.tearDown(self)
+        super(SecurityRewritingTC, self).tearDown()
 
     def test_not_relation_read_security(self):
         cnx = self.login('iaminusersgrouponly')
@@ -86,6 +85,7 @@
         self.execute('Any U WHERE NOT EXISTS(A todo_by U), A is Affaire')
         self.assertEqual(self.query[0][1].as_string(),
                           'Any U WHERE NOT EXISTS(A todo_by U), A is Affaire')
+        cnx.close()
 
 class SecurityTC(BaseSecurityTC):
 
@@ -104,6 +104,7 @@
         cu.execute("INSERT Personne X: X nom 'bidule'")
         self.assertRaises(Unauthorized, cnx.commit)
         self.assertEqual(cu.execute('Personne X').rowcount, 1)
+        cnx.close()
 
     def test_insert_rql_permission(self):
         # test that a user can only add an Affaire related to a Societe he owns
@@ -120,6 +121,7 @@
         cu.execute("INSERT Societe X: X nom 'chouette'")
         cu.execute("SET A concerne S WHERE A sujet 'cool', S nom 'chouette'")
         cnx.commit()
+        cnx.close()
 
     def test_update_security_1(self):
         cnx = self.login('anon')
@@ -147,6 +149,7 @@
         cu.execute("INSERT Personne X: X nom 'biduuule'")
         cu.execute("INSERT Societe X: X nom 'looogilab'")
         cu.execute("SET X travaille S WHERE X nom 'biduuule', S nom 'looogilab'")
+        cnx.close()
 
     def test_update_rql_permission(self):
         self.execute("SET A concerne S WHERE A is Affaire, S is Societe")
@@ -165,6 +168,7 @@
         cu.execute("SET A concerne S WHERE A sujet 'pascool', S nom 'chouette'")
         cu.execute("SET X sujet 'habahsicestcool' WHERE X sujet 'pascool'")
         cnx.commit()
+        cnx.close()
 
     def test_delete_security(self):
         # FIXME: sample below fails because we don't detect "owner" can't delete
@@ -177,6 +181,7 @@
         cnx = self.login('iaminusersgrouponly')
         cu = cnx.cursor()
         self.assertRaises(Unauthorized, cu.execute, "DELETE CWGroup Y WHERE Y name 'staff'")
+        cnx.close()
 
     def test_delete_rql_permission(self):
         self.execute("SET A concerne S WHERE A is Affaire, S is Societe")
@@ -200,6 +205,7 @@
 ##         self.assertRaises(Unauthorized, cu.execute, "DELETE Affaire X")
         cu.execute("DELETE Affaire X WHERE X sujet 'pascool'")
         cnx.commit()
+        cnx.close()
 
 
     def test_insert_relation_rql_permission(self):
@@ -225,6 +231,7 @@
         cu.execute("INSERT Societe X: X nom 'chouette'")
         cu.execute("SET A concerne S WHERE A is Affaire, S nom 'chouette'")
         cnx.commit()
+        cnx.close()
 
     def test_delete_relation_rql_permission(self):
         self.execute("SET A concerne S WHERE A is Affaire, S is Societe")
@@ -249,6 +256,7 @@
         cu.execute("SET A concerne S WHERE A is Affaire, S nom 'chouette'")
         cnx.commit()
         cu.execute("DELETE A concerne S WHERE S nom 'chouette'")
+        cnx.close()
 
 
     def test_user_can_change_its_upassword(self):
@@ -260,6 +268,7 @@
         cnx.commit()
         cnx.close()
         cnx = self.login('user', password='newpwd')
+        cnx.close()
 
     def test_user_cant_change_other_upassword(self):
         ueid = self.create_user('otheruser').eid
@@ -268,6 +277,7 @@
         cu.execute('SET X upassword %(passwd)s WHERE X eid %(x)s',
                    {'x': ueid, 'passwd': 'newpwd'})
         self.assertRaises(Unauthorized, cnx.commit)
+        cnx.close()
 
     # read security test
 
@@ -277,6 +287,7 @@
         cu = cnx.cursor()
         self.assertRaises(Unauthorized,
                           cu.execute, 'Personne U where U nom "managers"')
+        cnx.close()
 
     def test_read_erqlexpr_base(self):
         eid = self.execute("INSERT Affaire X: X sujet 'cool'")[0][0]
@@ -301,6 +312,7 @@
         self.assertEqual(rset.rows, [])
         # test can't update an attribute of an entity that can't be read
         self.assertRaises(Unauthorized, cu.execute, 'SET X sujet "hacked" WHERE X eid %(x)s', {'x': eid})
+        cnx.close()
 
 
     def test_entity_created_in_transaction(self):
@@ -337,6 +349,7 @@
         rset = cu.execute("Any X WHERE X has_text 'cool'")
         self.assertEqual(sorted(eid for eid, in rset.rows),
                           [card1, aff2])
+        cnx.close()
 
     def test_read_erqlexpr_has_text2(self):
         self.execute("INSERT Personne X: X nom 'bidule'")
@@ -349,6 +362,7 @@
         self.assertEqual(len(rset.rows), 1, rset.rows)
         rset = cu.execute('Any N WITH N BEING (Any N WHERE N has_text "bidule")')
         self.assertEqual(len(rset.rows), 1, rset.rows)
+        cnx.close()
 
     def test_read_erqlexpr_optional_rel(self):
         self.execute("INSERT Personne X: X nom 'bidule'")
@@ -359,6 +373,7 @@
         cu = cnx.cursor()
         rset = cu.execute('Any N,U WHERE N has_text "bidule", N owned_by U?')
         self.assertEqual(len(rset.rows), 1, rset.rows)
+        cnx.close()
 
     def test_read_erqlexpr_aggregat(self):
         self.execute("INSERT Affaire X: X sujet 'cool'")[0][0]
@@ -382,6 +397,7 @@
         values = dict(rset)
         self.assertEqual(values['Affaire'], 1)
         self.assertEqual(values['Societe'], 2)
+        cnx.close()
 
 
     def test_attribute_security(self):
@@ -429,6 +445,7 @@
         cnx.commit()
         cu.execute("SET X para 'chouette' WHERE X eid %(x)s", {'x': note2.eid})
         cnx.commit()
+        cnx.close()
 
     def test_attribute_read_security(self):
         # anon is not allowed to see users' login, but they can see users
@@ -446,6 +463,7 @@
         self.assertEqual(x.login, None)
         self.failUnless(x.creation_date)
         cnx.rollback()
+        cnx.close()
 
 class BaseSchemaSecurityTC(BaseSecurityTC):
     """tests related to the base schema permission configuration"""
@@ -472,6 +490,7 @@
         cu.execute('DELETE Affaire X WHERE X ref "ARCT01"')
         cnx.commit()
         self.failIf(cu.execute('Affaire X'))
+        cnx.close()
 
     def test_users_and_groups_non_readable_by_guests(self):
         cnx = self.login('anon')
@@ -498,6 +517,7 @@
         # but can't modify it
         cu.execute('SET X login "toto" WHERE X eid %(x)s', {'x': anon.eid})
         self.assertRaises(Unauthorized, cnx.commit)
+        cnx.close()
 
     def test_in_group_relation(self):
         cnx = self.login('iaminusersgrouponly')
@@ -506,6 +526,7 @@
         self.assertRaises(Unauthorized, cu.execute, rql)
         rql = u"SET U in_group G WHERE U login 'admin', G name 'users'"
         self.assertRaises(Unauthorized, cu.execute, rql)
+        cnx.close()
 
     def test_owned_by(self):
         self.execute("INSERT Personne X: X nom 'bidule'")
@@ -514,6 +535,7 @@
         cu = cnx.cursor()
         rql = u"SET X owned_by U WHERE U login 'iaminusersgrouponly', X is Personne"
         self.assertRaises(Unauthorized, cu.execute, rql)
+        cnx.close()
 
     def test_bookmarked_by_guests_security(self):
         beid1 = self.execute('INSERT Bookmark B: B path "?vid=manage", B title "manage"')[0][0]
@@ -535,6 +557,7 @@
         self.assertRaises(Unauthorized,
                           cu.execute, 'SET B bookmarked_by U WHERE U eid %(x)s, B eid %(b)s',
                           {'x': anoneid, 'b': beid1})
+        cnx.close()
 
 
     def test_ambigous_ordered(self):
@@ -542,6 +565,7 @@
         cu = cnx.cursor()
         names = [t for t, in cu.execute('Any N ORDERBY lower(N) WHERE X name N')]
         self.assertEqual(names, sorted(names, key=lambda x: x.lower()))
+        cnx.close()
 
     def test_in_state_without_update_perm(self):
         """check a user change in_state without having update permission on the
@@ -575,6 +599,7 @@
             # restore orig perms
             for action, perms in affaire_perms.iteritems():
                 self.schema['Affaire'].set_action_permissions(action, perms)
+        cnx.close()
 
     def test_trinfo_security(self):
         aff = self.execute('INSERT Affaire X: X ref "ARCT01"').get_entity(0, 0)
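
The first hunk of this file trades paired setUp/tearDown overrides for
unittest's addCleanup: the restoration is registered right next to the state it
protects and runs (in LIFO order) even when the test fails, or when a later
setup step raises before tearDown would ever have been reached. A generic
sketch of the pattern, independent of CubicWeb:

    import unittest

    PERMS = {'read': ('managers',), 'add': ('managers',)}

    class PermissionSnapshotTC(unittest.TestCase):
        def setUp(self):
            orig = dict(PERMS)
            def restore():
                PERMS.clear()
                PERMS.update(orig)
            # registered immediately after the snapshot, so the state is
            # restored even if the rest of setUp (or the test) blows up
            self.addCleanup(restore)

        def test_can_mutate_freely(self):
            PERMS['read'] = ('managers', 'users')
            self.assertIn('users', PERMS['read'])

    if __name__ == '__main__':
        unittest.main()
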
--- a/test/unittest_migration.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/test/unittest_migration.py	Thu Mar 24 15:21:13 2011 +0100
@@ -97,7 +97,7 @@
         config.__class__.name = 'repository'
 
 
-from cubicweb.devtools import ApptestConfiguration, init_test_database, cleanup_sqlite
+from cubicweb.devtools import ApptestConfiguration, get_test_db_handler
 
 class BaseCreationTC(TestCase):
 
@@ -106,8 +106,8 @@
         config = ApptestConfiguration('data', apphome=self.datadir)
         source = config.sources()['system']
         self.assertEqual(source['db-driver'], 'sqlite')
-        cleanup_sqlite(source['db-name'], removetemplate=True)
-        init_test_database(config=config)
+        handler = get_test_db_handler(config)
+        handler.init_test_database()
 
 
 if __name__ == '__main__':
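
The test above no longer cleans up sqlite files by hand; it goes through a
handler object that encapsulates driver-specific test-database setup. The real
get_test_db_handler lives in cubicweb.devtools; the following is only a
hypothetical illustration of such a facade, not its actual implementation:

    class TestDBHandler(object):
        """hypothetical facade around per-driver test-database management"""

        def __init__(self, config):
            self.config = config

        def init_test_database(self):
            # driver-specific creation and cleanup would be dispatched here,
            # e.g. removing stale sqlite files before (re)creating the db
            pass

    _HANDLERS = {}

    def get_test_db_handler(config):
        # cache one handler per configuration so several tests can share
        # an already-initialized database (config.appid assumed unique)
        return _HANDLERS.setdefault(config.appid, TestDBHandler(config))
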
--- a/vregistry.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/vregistry.py	Thu Mar 24 15:21:13 2011 +0100
@@ -237,6 +237,10 @@
 
     select_best = deprecated('[3.6] select_best is now private')(_select_best)
 
+    # these are overridden by set_log_methods below
+    # only defining here to prevent pylint from complaining
+    info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
+
 
 class VRegistry(dict):
     """class responsible to register, propose and select the various
@@ -517,6 +521,9 @@
                 raise
             self.exception('appobject %s registration failed: %s',
                            appobjectcls, ex)
+    # these are overridden by set_log_methods below
+    # only defining here to prevent pylint from complaining
+    info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
 
 
 # init logging
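
Several hunks in this changeset add the same one-line no-op stubs just before a
set_log_methods call. The stubs exist purely so that static checkers such as
pylint see info/warning/... as class attributes; at import time,
set_log_methods (from logilab.common.logging_ext) rebinds them to a real
logger. A condensed sketch of the mechanism, not the logilab implementation
itself:

    import logging

    def set_log_methods(cls, logger):
        # overwrite the placeholder attributes with the logger's methods
        for name in ('debug', 'info', 'warning', 'error',
                     'critical', 'exception'):
            setattr(cls, name, staticmethod(getattr(logger, name)))

    class Service(object):
        # these are overridden by set_log_methods below; defined only so
        # pylint does not flag self.info(...) as a missing attribute
        info = warning = error = critical = exception = debug = \
            lambda msg, *a, **kw: None

    set_log_methods(Service, logging.getLogger('myapp.service'))

    Service().info('ready: %s', True)  # goes through logging, not the stub
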
--- a/web/application.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/web/application.py	Thu Mar 24 15:21:13 2011 +0100
@@ -234,6 +234,9 @@
         req.remove_cookie(req.get_cookie(), sessioncookie)
         raise LogOut(url=goto_url)
 
+    # these are overridden by set_log_methods below
+    # only defining here to prevent pylint from complaining
+    info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
 
 class CubicWebPublisher(object):
     """the publisher is a singleton hold by the web frontend, and is responsible
@@ -458,6 +461,9 @@
             template = 'main-template'
         return template
 
+    # these are overridden by set_log_methods below
+    # only defining here to prevent pylint from complaining
+    info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
 
 set_log_methods(CubicWebPublisher, LOGGER)
 set_log_methods(CookieSessionHandler, LOGGER)
--- a/web/data/cubicweb.css	Fri Mar 11 12:17:32 2011 +0100
+++ b/web/data/cubicweb.css	Thu Mar 24 15:21:13 2011 +0100
@@ -1,6 +1,6 @@
 /*
  *  :organization: Logilab
- *  :copyright: 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+ *  :copyright: 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
  *  :contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
  */
 
@@ -220,7 +220,7 @@
 /* header */
 
 table#header {
-  background: %(headerBgColor)s url("banner.png") repeat-x top left;
+  background: %(headerBg)s;
   width: 100%;
 }
 
--- a/web/data/cubicweb.old.css	Fri Mar 11 12:17:32 2011 +0100
+++ b/web/data/cubicweb.old.css	Thu Mar 24 15:21:13 2011 +0100
@@ -1,6 +1,6 @@
 /*
  *  :organization: Logilab
- *  :copyright: 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+ *  :copyright: 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
  *  :contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
  */
 
@@ -227,7 +227,7 @@
 /* header */
 
 table#header {
-  background: #ff7700 url("banner.png") left top repeat-x;
+  background: %(headerBg)s;
   width: 100%;
 }
 
--- a/web/data/uiprops.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/web/data/uiprops.py	Thu Mar 24 15:21:13 2011 +0100
@@ -75,6 +75,7 @@
 
 # header
 headerBgColor = '#ff7700'
+headerBg = lazystr('%(headerBgColor)s url("banner.png") repeat-x top left')
 
 # h
 h1FontSize = '1.5em' # 18px
--- a/web/propertysheet.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/web/propertysheet.py	Thu Mar 24 15:21:13 2011 +0100
@@ -115,6 +115,10 @@
     def compile(self, content):
         return self._percent_rgx.sub('%%', content) % self
 
+    # these are overridden by set_log_methods below
+    # only defining here to prevent pylint from complaining
+    info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
+
 from cubicweb.web import LOGGER
 from logilab.common.logging_ext import set_log_methods
 set_log_methods(PropertySheet, LOGGER)
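
compile() above %-formats a CSS source against the sheet, so literal percent
signs (width: 100%) are doubled first; the headerBg value added in uiprops.py
is a lazystr, i.e. a template whose own %(headerBgColor)s reference is resolved
only at lookup time, once every property is defined. A rough standalone sketch
of the two ideas combined (not the real cubicweb.web.propertysheet code):

    import re

    class lazystr(object):
        """a %-template resolved only when the sheet is actually read"""
        def __init__(self, string):
            self.string = string

    class PropertySheet(dict):
        # a '%' not followed by '(' is a literal and must be doubled
        _percent_rgx = re.compile(r'%(?!\()')

        def __getitem__(self, key):
            value = dict.__getitem__(self, key)
            if isinstance(value, lazystr):
                value = value.string % self  # may recurse through the sheet
            return value

        def compile(self, content):
            return self._percent_rgx.sub('%%', content) % self

    sheet = PropertySheet(headerBgColor='#ff7700')
    sheet['headerBg'] = lazystr('%(headerBgColor)s url("banner.png") repeat-x top left')
    print(sheet.compile('table#header { background: %(headerBg)s; width: 100%; }'))
    # table#header { background: #ff7700 url("banner.png") ...; width: 100%; }
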
--- a/web/views/actions.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/web/views/actions.py	Thu Mar 24 15:21:13 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
 # contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
 #
 # This file is part of CubicWeb.
@@ -28,7 +28,7 @@
 from cubicweb.appobject import objectify_selector
 from cubicweb.selectors import (EntitySelector, yes,
     one_line_rset, multi_lines_rset, one_etype_rset, relation_possible,
-    nonempty_rset, non_final_entity,
+    nonempty_rset, non_final_entity, score_entity,
     authenticated_user, match_user_groups, match_search_state,
     has_permission, has_add_permission, is_instance, debug_mode,
     )
@@ -322,7 +322,7 @@
     """when displaying the schema of a CWEType, offer to list entities of that type
     """
     __regid__ = 'entitiesoftype'
-    __select__ = one_line_rset() & is_instance('CWEType')
+    __select__ = one_line_rset() & is_instance('CWEType') & score_entity(lambda x: not x.final)
     category = 'mainactions'
     order = 40
 
--- a/web/views/basecomponents.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/web/views/basecomponents.py	Thu Mar 24 15:21:13 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
 # contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
 #
 # This file is part of CubicWeb.
@@ -99,7 +99,9 @@
     """display the instance name"""
     __regid__ = 'appliname'
 
-    def render(self, w):
+    # XXX support kwargs for compat with other components which get the view
+    # as an argument
+    def render(self, w, **kwargs):
         title = self._cw.property_value('ui.site-title')
         if title:
             w(u'<span id="appliName"><a href="%s">%s</a></span>' % (
--- a/web/views/basetemplates.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/web/views/basetemplates.py	Thu Mar 24 15:21:13 2011 +0100
@@ -74,7 +74,7 @@
         # FIXME Deprecated code ?
         msg = self._cw._('you have been logged out')
         w(u'<h2>%s</h2>\n' % msg)
-        if self._cw.vreg.config['anonymous-user']:
+        if self._cw.vreg.config.anonymous_user()[0]:
             indexurl = self._cw.build_url('view', vid='index', __message=msg)
             w(u'<p><a href="%s">%s</a><p>' % (
                 xml_escape(indexurl),
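
The logged-out page now asks the configuration for the effective anonymous
account instead of reading the raw option; judging from the call site,
anonymous_user() returns a (login, password) pair whose login is None when
anonymous access is disabled. A hedged sketch of such an accessor (the real
method lives on the web configuration):

    class WebConfig(dict):
        def anonymous_user(self):
            """return (login, password) for the anonymous account,
            or (None, None) when anonymous access is disabled"""
            login = self.get('anonymous-user') or None
            return login, self.get('anonymous-password')

    config = WebConfig({'anonymous-user': 'anon', 'anonymous-password': 'anon'})
    if config.anonymous_user()[0]:
        pass  # same test as in the hunk above
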
--- a/web/views/ibreadcrumbs.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/web/views/ibreadcrumbs.py	Thu Mar 24 15:21:13 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
 # contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
 #
 # This file is part of CubicWeb.
@@ -117,7 +117,9 @@
     link_template = u'<a href="%s">%s</a>'
     first_separator = True
 
-    def render(self, w):
+    # XXX support kwargs for compat with other components which get the view
+    # as an argument
+    def render(self, w, **kwargs):
         entity = self.cw_rset.get_entity(0, 0)
         adapter = ibreadcrumb_adapter(entity)
         view = self.cw_extra_kwargs.get('view')
@@ -178,7 +180,9 @@
 class BreadCrumbAnyRSetVComponent(BreadCrumbEntityVComponent):
     __select__ = basecomponents.HeaderComponent.__select__ & any_rset()
 
-    def render(self, w):
+    # XXX support kwargs for compat with other components which get the view
+    # as an argument
+    def render(self, w, **kwargs):
         w(u'<span id="breadcrumbs" class="pathbar">')
         if self.first_separator:
             w(self.separator)
@@ -192,7 +196,7 @@
     def cell_call(self, row, col, **kwargs):
         entity = self.cw_rset.get_entity(row, col)
         desc = xml_escape(uilib.cut(entity.dc_description(), 50))
-        # XXX remember camember : tags.a autoescapes !
+        # NOTE: tags.a autoescapes
         self.w(tags.a(entity.view('breadcrumbtext'),
                       href=entity.absolute_url(), title=desc))
 
--- a/wsgi/handler.py	Fri Mar 11 12:17:32 2011 +0100
+++ b/wsgi/handler.py	Thu Mar 24 15:21:13 2011 +0100
@@ -193,6 +193,10 @@
             content = self.appli.need_login_content(req)
         return WSGIResponse(code, req, content)
 
+    # these are overridden by set_log_methods below
+    # only defining here to prevent pylint from complaining
+    info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
+
 
 from logging import getLogger
 from cubicweb import set_log_methods