merge 3.23
author Sylvain Thénault <sylvain.thenault@logilab.fr>
Wed, 20 Jul 2016 17:58:49 +0200
changeset 11426 f666f484e5f4
parent 11425 740cc1e09322 (diff)
parent 11423 aaa768e886ae (current diff)
child 11427 7d38eb1bcd1f
merge 3.23
cubicweb.spec
cubicweb/__pkginfo__.py
debian/changelog
setup.py
--- a/cubicweb.spec	Tue Jul 12 12:21:41 2016 +0200
+++ b/cubicweb.spec	Wed Jul 20 17:58:49 2016 +0200
@@ -8,7 +8,7 @@
 %{!?python_sitelib: %define python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")}
 
 Name:           cubicweb
-Version:        3.23.2
+Version:        3.23.1
 Release:        logilab.1%{?dist}
 Summary:        CubicWeb is a semantic web application framework
 Source0:        https://pypi.python.org/packages/source/c/cubicweb/cubicweb-%{version}.tar.gz
@@ -49,7 +49,7 @@
 %endif
 
 %install
-NO_SETUPTOOLS=1 %{__python} setup.py --quiet install --no-compile --prefix=%{_prefix} --root="$RPM_BUILD_ROOT"
+%{__python} setup.py --quiet install --no-compile --prefix=%{_prefix} --root="$RPM_BUILD_ROOT"
 mkdir -p $RPM_BUILD_ROOT/var/log/cubicweb
 
 %clean
--- a/cubicweb/__init__.py	Tue Jul 12 12:21:41 2016 +0200
+++ b/cubicweb/__init__.py	Wed Jul 20 17:58:49 2016 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
 # contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
 #
 # This file is part of CubicWeb.
@@ -59,14 +59,14 @@
 CW_SOFTWARE_ROOT = __path__[0]
 
 
-from cubicweb.__pkginfo__ import version as __version__
+from cubicweb.__pkginfo__ import version as __version__   # noqa
 
 
 set_log_methods(sys.modules[__name__], logging.getLogger('cubicweb'))
 
 # make all exceptions accessible from the package
-from cubicweb._exceptions import *
-from logilab.common.registry import ObjectNotFound, NoSelectableObject, RegistryNotFound
+from cubicweb._exceptions import *  # noqa
+from logilab.common.registry import ObjectNotFound, NoSelectableObject, RegistryNotFound  # noqa
 
 
 # '_' is available to mark internationalized string but should not be used to
@@ -81,10 +81,6 @@
 def typed_eid(eid):
     return int(eid)
 
-#def log_thread(f, w, a):
-#    print f.f_code.co_filename, f.f_code.co_name
-#import threading
-#threading.settrace(log_thread)
 
 class Binary(BytesIO):
     """class to hold binary data. Use BytesIO to prevent use of unicode data"""
@@ -92,13 +88,13 @@
 
     def __init__(self, buf=b''):
         assert isinstance(buf, self._allowed_types), \
-               "Binary objects must use bytes/buffer objects, not %s" % buf.__class__
+            "Binary objects must use bytes/buffer objects, not %s" % buf.__class__
         # don't call super, BytesIO may be an old-style class (on python < 2.7.4)
         BytesIO.__init__(self, buf)
 
     def write(self, data):
         assert isinstance(data, self._allowed_types), \
-               "Binary objects must use bytes/buffer objects, not %s" % data.__class__
+            "Binary objects must use bytes/buffer objects, not %s" % data.__class__
         # don't call super, BytesIO may be an old-style class (on python < 2.7.4)
         BytesIO.write(self, data)
 
@@ -114,7 +110,7 @@
             while True:
                 # the 16kB chunksize comes from the shutil module
                 # in stdlib
-                chunk = self.read(16*1024)
+                chunk = self.read(16 * 1024)
                 if not chunk:
                     break
                 fobj.write(chunk)
@@ -135,7 +131,7 @@
                 while True:
                     # the 16kB chunksize comes from the shutil module
                     # in stdlib
-                    chunk = fobj.read(16*1024)
+                    chunk = fobj.read(16 * 1024)
                     if not chunk:
                         break
                     binary.write(chunk)
@@ -149,7 +145,6 @@
             return False
         return self.getvalue() == other.getvalue()
 
-
     # Binary helpers to store/fetch python objects
 
     @classmethod
@@ -168,6 +163,7 @@
     return isinstance(value, (binary_type, Binary))
 BASE_CHECKERS['Password'] = check_password
 
+
 def str_or_binary(value):
     if isinstance(value, Binary):
         return value
@@ -182,17 +178,20 @@
 #     to help in cube renaming
 CW_MIGRATION_MAP = {}
 
+
 def neg_role(role):
     if role == 'subject':
         return 'object'
     return 'subject'
 
+
 def role(obj):
     try:
         return obj.role
     except AttributeError:
         return neg_role(obj.target)
 
+
 def target(obj):
     try:
         return obj.target
@@ -220,7 +219,7 @@
         self.callbacks = {}
 
     def bind(self, event, callback, *args, **kwargs):
-        self.callbacks.setdefault(event, []).append( (callback, args, kwargs) )
+        self.callbacks.setdefault(event, []).append((callback, args, kwargs))
 
     def emit(self, event, context=None):
         for callback, args, kwargs in self.callbacks.get(event, ()):
@@ -231,6 +230,7 @@
 
 CW_EVENT_MANAGER = CubicWebEventManager()
 
+
 def onevent(event, *args, **kwargs):
     """decorator to ease event / callback binding
 
@@ -249,6 +249,7 @@
 
 from yams.schema import role_name as rname
 
+
 def validation_error(entity, errors, substitutions=None, i18nvalues=None):
     """easy way to retrieve a :class:`cubicweb.ValidationError` for an entity or eid.
 
@@ -272,7 +273,7 @@
 
 # exceptions ##################################################################
 
-class ProgrammingError(Exception): #DatabaseError):
+class ProgrammingError(Exception):
     """Exception raised for errors that are related to the database's operation
     and not necessarily under the control of the programmer, e.g. an unexpected
     disconnect occurs, the data source name is not found, a transaction could
--- a/cubicweb/__pkginfo__.py	Tue Jul 12 12:21:41 2016 +0200
+++ b/cubicweb/__pkginfo__.py	Wed Jul 20 17:58:49 2016 +0200
@@ -27,8 +27,8 @@
 
 modname = distname = "cubicweb"
 
-numversion = (3, 23, 2)
-version = '.'.join(str(num) for num in numversion)
+numversion = (3, 24, 0)
+version = '.'.join(str(num) for num in numversion) + '.dev0'
 
 description = "a repository of entities / relations for knowledge management"
 author = "Logilab"
--- a/cubicweb/entities/adapters.py	Tue Jul 12 12:21:41 2016 +0200
+++ b/cubicweb/entities/adapters.py	Wed Jul 20 17:58:49 2016 +0200
@@ -21,7 +21,6 @@
 from cubicweb import _
 
 from itertools import chain
-from hashlib import md5
 
 from logilab.mtconverter import TransformError
 from logilab.common.decorators import cached
@@ -413,9 +412,7 @@
         for rschema, attrschema in eschema.attribute_definitions():
             rdef = rschema.rdef(eschema, attrschema)
             for constraint in rdef.constraints:
-                if cstrname == 'cstr' + md5(
-                        (eschema.type + rschema.type + constraint.type() +
-                         (constraint.serialize() or '')).encode('ascii')).hexdigest():
+                if cstrname == constraint.name_for(rdef):
                     break
             else:
                 continue
--- a/cubicweb/hooks/syncschema.py	Tue Jul 12 12:21:41 2016 +0200
+++ b/cubicweb/hooks/syncschema.py	Wed Jul 20 17:58:49 2016 +0200
@@ -28,7 +28,6 @@
 
 import json
 from copy import copy
-from hashlib import md5
 
 from yams.schema import BASE_TYPES, BadSchemaDefinition, RelationDefinitionSchema
 from yams.constraints import UniqueConstraint
@@ -42,12 +41,11 @@
                              CONSTRAINTS, UNIQUE_CONSTRAINTS, ETYPE_NAME_MAP)
 from cubicweb.server import hook, schemaserial as ss, schema2sql as y2sql
 from cubicweb.server.sqlutils import SQL_PREFIX
-from cubicweb.server.schema2sql import unique_index_name
 from cubicweb.hooks.synccomputed import RecomputeAttributeOperation
 
 # core entity and relation types which can't be removed
 CORE_TYPES = BASE_TYPES | SCHEMA_TYPES | META_RTYPES | set(
-    ('CWUser', 'CWGroup','login', 'upassword', 'name', 'in_group'))
+    ('CWUser', 'CWGroup', 'login', 'upassword', 'name', 'in_group'))
 
 
 def get_constraints(cnx, entity):
@@ -78,7 +76,8 @@
     table = SQL_PREFIX + etype
     column = SQL_PREFIX + rtype
     try:
-        cnx.system_sql(str('ALTER TABLE %s ADD %s integer REFERENCES entities (eid)' % (table, column)),
+        cnx.system_sql(str('ALTER TABLE %s ADD %s integer REFERENCES entities (eid)'
+                           % (table, column)),
                        rollback_on_failure=False)
         cnx.info('added column %s to table %s', column, table)
     except Exception:
@@ -242,7 +241,7 @@
       CWAttribute entities
     * add <meta rtype> relation by creating the necessary CWRelation entity
     """
-    entity = None # make pylint happy
+    entity = None  # make pylint happy
 
     def precommit_event(self):
         cnx = self.cnx
@@ -252,11 +251,9 @@
                                description=entity.description)
         eschema = schema.add_entity_type(etype)
         # create the necessary table
-        tablesql = y2sql.eschema2sql(cnx.repo.system_source.dbhelper,
-                                     eschema, prefix=SQL_PREFIX)
-        for sql in tablesql.split(';'):
-            if sql.strip():
-                cnx.system_sql(sql)
+        for sql in y2sql.eschema2sql(cnx.repo.system_source.dbhelper,
+                                     eschema, prefix=SQL_PREFIX):
+            cnx.system_sql(sql)
         # add meta relations
         gmap = group_mapping(cnx)
         cmap = ss.cstrtype_mapping(cnx)
@@ -326,11 +323,11 @@
                 source.create_index(cnx, new_table, SQL_PREFIX + rschema.type, unique=True)
         for attrs in eschema._unique_together or ():
             columns = ['%s%s' % (SQL_PREFIX, attr) for attr in attrs]
-            old_index_name = unique_index_name(oldname, columns)
+            old_index_name = y2sql.unique_index_name(oldname, columns)
             for sql in dbhelper.sqls_drop_multicol_unique_index(
                     new_table, columns, old_index_name):
                 sqlexec(sql)
-            new_index_name = unique_index_name(newname, columns)
+            new_index_name = y2sql.unique_index_name(newname, columns)
             for sql in dbhelper.sqls_create_multicol_unique_index(
                     new_table, columns, new_index_name):
                 sqlexec(sql)
@@ -364,11 +361,11 @@
                     op.add_data(objtype)
                     op.add_data(subjtype)
         # update the in-memory schema first
-        self.oldvalues = dict( (attr, getattr(rschema, attr)) for attr in self.values)
+        self.oldvalues = dict((attr, getattr(rschema, attr)) for attr in self.values)
         self.rschema.__dict__.update(self.values)
         # then make necessary changes to the system source database
         if 'inlined' not in self.values:
-            return # nothing to do
+            return  # nothing to do
         inlined = self.values['inlined']
         # check in-lining is possible when inlined
         if inlined:
@@ -380,12 +377,10 @@
         if not inlined:
             # need to create the relation if it has not been already done by
             # another event of the same transaction
-            if not rschema.type in cnx.transaction_data.get('createdtables', ()):
-                tablesql = y2sql.rschema2sql(rschema)
+            if rschema.type not in cnx.transaction_data.get('createdtables', ()):
                 # create the necessary table
-                for sql in tablesql.split(';'):
-                    if sql.strip():
-                        sqlexec(sql)
+                for sql in y2sql.rschema2sql(rschema):
+                    sqlexec(sql)
                 cnx.transaction_data.setdefault('createdtables', []).append(
                     rschema.type)
             # copy existant data
@@ -395,7 +390,6 @@
                 sqlexec('INSERT INTO %s_relation SELECT %s, %s FROM %s WHERE NOT %s IS NULL'
                         % (rtype, eidcolumn, column, table, column))
             # drop existant columns
-            #if cnx.repo.system_source.dbhelper.alter_column_support:
             for etype in rschema.subjects():
                 DropColumn.get_instance(cnx).add_data((str(etype), rtype))
         else:
@@ -433,7 +427,7 @@
 
 class CWComputedRTypeUpdateOp(MemSchemaOperation):
     """actually update some properties of a computed relation definition"""
-    rschema = entity = rule = None # make pylint happy
+    rschema = entity = rule = None  # make pylint happy
     old_rule = None
 
     def precommit_event(self):
@@ -455,7 +449,7 @@
 
     constraints are handled by specific hooks
     """
-    entity = None # make pylint happy
+    entity = None  # make pylint happy
 
     def init_rdef(self, **kwargs):
         entity = self.entity
@@ -530,7 +524,7 @@
         try:
             eschema = schema.eschema(rdefdef.subject)
         except KeyError:
-            return # entity type currently being added
+            return  # entity type currently being added
         # propagate attribute to children classes
         rschema = schema.rschema(rdefdef.name)
         # if relation type has been inserted in the same transaction, its final
@@ -541,7 +535,7 @@
         if default is not None:
             default = convert_default_value(self.rdefdef, default)
             cnx.system_sql('UPDATE %s SET %s=%%(default)s' % (table, column),
-                               {'default': default})
+                           {'default': default})
         # if attribute is computed, compute it
         if getattr(entity, 'formula', None):
             # add rtype attribute for RelationDefinitionSchema api compat, this
@@ -569,7 +563,7 @@
 
     constraints are handled by specific hooks
     """
-    entity = None # make pylint happy
+    entity = None  # make pylint happy
 
     def precommit_event(self):
         cnx = self.cnx
@@ -603,9 +597,8 @@
                     rtype in cnx.transaction_data.get('createdtables', ())):
                 rschema = schema.rschema(rtype)
                 # create the necessary table
-                for sql in y2sql.rschema2sql(rschema).split(';'):
-                    if sql.strip():
-                        cnx.system_sql(sql)
+                for sql in y2sql.rschema2sql(rschema):
+                    cnx.system_sql(sql)
                 cnx.transaction_data.setdefault('createdtables', []).append(
                     rtype)
 
@@ -614,7 +607,7 @@
 
 class RDefDelOp(MemSchemaOperation):
     """an actual relation has been removed"""
-    rdef = None # make pylint happy
+    rdef = None  # make pylint happy
 
     def precommit_event(self):
         cnx = self.cnx
@@ -677,7 +670,7 @@
 
 class RDefUpdateOp(MemSchemaOperation):
     """actually update some properties of a relation definition"""
-    rschema = rdefkey = values = None # make pylint happy
+    rschema = rdefkey = values = None  # make pylint happy
     rdef = oldvalues = None
     indexed_changed = null_allowed_changed = False
 
@@ -685,15 +678,15 @@
         cnx = self.cnx
         rdef = self.rdef = self.rschema.rdefs[self.rdefkey]
         # update the in-memory schema first
-        self.oldvalues = dict( (attr, getattr(rdef, attr)) for attr in self.values)
+        self.oldvalues = dict((attr, getattr(rdef, attr)) for attr in self.values)
         rdef.update(self.values)
         # then make necessary changes to the system source database
         syssource = cnx.repo.system_source
         if 'indexed' in self.values:
             syssource.update_rdef_indexed(cnx, rdef)
             self.indexed_changed = True
-        if 'cardinality' in self.values and rdef.rtype.final \
-              and self.values['cardinality'][0] != self.oldvalues['cardinality'][0]:
+        if ('cardinality' in self.values and rdef.rtype.final
+                and self.values['cardinality'][0] != self.oldvalues['cardinality'][0]):
             syssource.update_rdef_null_allowed(self.cnx, rdef)
             self.null_allowed_changed = True
         if 'fulltextindexed' in self.values:
@@ -724,7 +717,7 @@
 
 class CWConstraintDelOp(MemSchemaOperation):
     """actually remove a constraint of a relation definition"""
-    rdef = oldcstr = newcstr = None # make pylint happy
+    rdef = oldcstr = newcstr = None  # make pylint happy
     size_cstr_changed = unique_changed = False
 
     def precommit_event(self):
@@ -760,10 +753,11 @@
         elif cstrtype == 'UniqueConstraint':
             syssource.update_rdef_unique(cnx, rdef)
             self.unique_changed = True
-        if cstrtype in ('BoundaryConstraint', 'IntervalBoundConstraint', 'StaticVocabularyConstraint'):
-            cstrname = 'cstr' + md5((rdef.subject.type + rdef.rtype.type + cstrtype +
-                                     (self.oldcstr.serialize() or '')).encode('utf-8')).hexdigest()
-            cnx.system_sql('ALTER TABLE %s%s DROP CONSTRAINT %s' % (SQL_PREFIX, rdef.subject.type, cstrname))
+        elif cstrtype in ('BoundaryConstraint',
+                          'IntervalBoundConstraint',
+                          'StaticVocabularyConstraint'):
+            cnx.system_sql('ALTER TABLE %s%s DROP CONSTRAINT %s'
+                           % (SQL_PREFIX, rdef.subject, self.oldcstr.name_for(rdef)))
 
     def revertprecommit_event(self):
         # revert changes on in memory schema
@@ -781,7 +775,7 @@
 
 class CWConstraintAddOp(CWConstraintDelOp):
     """actually update constraint of a relation definition"""
-    entity = None # make pylint happy
+    entity = None  # make pylint happy
 
     def precommit_event(self):
         cnx = self.cnx
@@ -809,20 +803,24 @@
         elif cstrtype == 'UniqueConstraint' and oldcstr is None:
             syssource.update_rdef_unique(cnx, rdef)
             self.unique_changed = True
-        if cstrtype in ('BoundaryConstraint', 'IntervalBoundConstraint', 'StaticVocabularyConstraint'):
+        if cstrtype in ('BoundaryConstraint',
+                        'IntervalBoundConstraint',
+                        'StaticVocabularyConstraint'):
+            cstrname, check = y2sql.check_constraint(rdef, newcstr, syssource.dbhelper,
+                                                     prefix=SQL_PREFIX)
+            # oldcstr is the new constraint when the attribute is being added in the same
+            # transaction or when constraint value is updated. So we've to take care...
             if oldcstr is not None:
-                oldcstrname = 'cstr' + md5((rdef.subject.type + rdef.rtype.type + cstrtype +
-                                            (self.oldcstr.serialize() or '')).encode('ascii')).hexdigest()
-                cnx.system_sql('ALTER TABLE %s%s DROP CONSTRAINT %s' %
-                               (SQL_PREFIX, rdef.subject.type, oldcstrname))
-            cstrname, check = y2sql.check_constraint(rdef.subject, rdef.object, rdef.rtype.type,
-                    newcstr, syssource.dbhelper, prefix=SQL_PREFIX)
+                oldcstrname = self.oldcstr.name_for(rdef)
+                if oldcstrname != cstrname:
+                    cnx.system_sql('ALTER TABLE %s%s DROP CONSTRAINT %s'
+                                   % (SQL_PREFIX, rdef.subject, oldcstrname))
             cnx.system_sql('ALTER TABLE %s%s ADD CONSTRAINT %s CHECK(%s)' %
-                           (SQL_PREFIX, rdef.subject.type, cstrname, check))
+                           (SQL_PREFIX, rdef.subject, cstrname, check))
 
 
 class CWUniqueTogetherConstraintAddOp(MemSchemaOperation):
-    entity = None # make pylint happy
+    entity = None  # make pylint happy
 
     def precommit_event(self):
         cnx = self.cnx
@@ -843,8 +841,8 @@
 
 
 class CWUniqueTogetherConstraintDelOp(MemSchemaOperation):
-    entity = cstrname = None # for pylint
-    cols = () # for pylint
+    entity = cstrname = None  # make pylint happy
+    cols = ()  # make pylint happy
 
     def insert_index(self):
         # We need to run before CWConstraintDelOp: if a size constraint is
@@ -875,7 +873,7 @@
 
 class MemSchemaCWETypeDel(MemSchemaOperation):
     """actually remove the entity type from the instance's schema"""
-    etype = None # make pylint happy
+    etype = None  # make pylint happy
 
     def postcommit_event(self):
         # del_entity_type also removes entity's relations
@@ -884,7 +882,7 @@
 
 class MemSchemaCWRTypeAdd(MemSchemaOperation):
     """actually add the relation type to the instance's schema"""
-    rtypedef = None # make pylint happy
+    rtypedef = None  # make pylint happy
 
     def precommit_event(self):
         self.cnx.vreg.schema.add_relation_type(self.rtypedef)
@@ -895,7 +893,7 @@
 
 class MemSchemaCWRTypeDel(MemSchemaOperation):
     """actually remove the relation type from the instance's schema"""
-    rtype = None # make pylint happy
+    rtype = None  # make pylint happy
 
     def postcommit_event(self):
         try:
@@ -908,7 +906,7 @@
 class MemSchemaPermissionAdd(MemSchemaOperation):
     """synchronize schema when a *_permission relation has been added on a group
     """
-    eid = action = group_eid = expr = None # make pylint happy
+    eid = action = group_eid = expr = None  # make pylint happy
 
     def precommit_event(self):
         """the observed connections.cnxset has been commited"""
@@ -963,7 +961,7 @@
 
 
 class MemSchemaSpecializesAdd(MemSchemaOperation):
-    etypeeid = parentetypeeid = None # make pylint happy
+    etypeeid = parentetypeeid = None  # make pylint happy
 
     def precommit_event(self):
         eschema = self.cnx.vreg.schema.schema_by_eid(self.etypeeid)
@@ -975,7 +973,7 @@
 
 
 class MemSchemaSpecializesDel(MemSchemaOperation):
-    etypeeid = parentetypeeid = None # make pylint happy
+    etypeeid = parentetypeeid = None  # make pylint happy
 
     def precommit_event(self):
         try:
@@ -1079,9 +1077,9 @@
             raise validation_error(self.entity, {None: _("can't be deleted")})
         # delete relation definitions using this relation type
         self._cw.execute('DELETE CWAttribute X WHERE X relation_type Y, Y eid %(x)s',
-                        {'x': self.entity.eid})
+                         {'x': self.entity.eid})
         self._cw.execute('DELETE CWRelation X WHERE X relation_type Y, Y eid %(x)s',
-                        {'x': self.entity.eid})
+                         {'x': self.entity.eid})
         MemSchemaCWRTypeDel(self._cw, rtype=name)
 
 
@@ -1187,10 +1185,8 @@
         pendingrdefs = cnx.transaction_data.setdefault('pendingrdefs', set())
         # first delete existing relation if necessary
         if rschema.final:
-            rdeftype = 'CWAttribute'
             pendingrdefs.add((subjschema, rschema))
         else:
-            rdeftype = 'CWRelation'
             pendingrdefs.add((subjschema, rschema, objschema))
         RDefDelOp(cnx, rdef=rdef)
 
@@ -1311,6 +1307,7 @@
         else:
             CWConstraintDelOp(self._cw, rdef=rdef, oldcstr=cstr)
 
+
 # unique_together constraints
 # XXX: use setoperations and before_add_relation here (on constraint_of and relations)
 class AfterAddCWUniqueTogetherConstraintHook(SyncSchemaHook):
@@ -1353,7 +1350,7 @@
         if self._cw.entity_metas(self.eidto)['type'] == 'CWGroup':
             MemSchemaPermissionAdd(self._cw, action=action, eid=self.eidfrom,
                                    group_eid=self.eidto)
-        else: # RQLExpression
+        else:  # RQLExpression
             expr = self._cw.entity_from_eid(self.eidto).expression
             MemSchemaPermissionAdd(self._cw, action=action, eid=self.eidfrom,
                                    expr=expr)
@@ -1374,13 +1371,12 @@
         if self._cw.entity_metas(self.eidto)['type'] == 'CWGroup':
             MemSchemaPermissionDel(self._cw, action=action, eid=self.eidfrom,
                                    group_eid=self.eidto)
-        else: # RQLExpression
+        else:  # RQLExpression
             expr = self._cw.entity_from_eid(self.eidto).expression
             MemSchemaPermissionDel(self._cw, action=action, eid=self.eidfrom,
                                    expr=expr)
 
 
-
 class UpdateFTIndexOp(hook.DataOperationMixIn, hook.SingleLastOperation):
     """operation to update full text indexation of entity whose schema change
 
@@ -1412,11 +1408,8 @@
             cnx.cnxset.commit()
 
 
-
-
 # specializes synchronization hooks ############################################
 
-
 class AfterAddSpecializesHook(SyncSchemaHook):
     __regid__ = 'syncaddspecializes'
     __select__ = SyncSchemaHook.__select__ & hook.match_rtype('specializes')
--- a/cubicweb/hooks/test/unittest_syncschema.py	Tue Jul 12 12:21:41 2016 +0200
+++ b/cubicweb/hooks/test/unittest_syncschema.py	Wed Jul 20 17:58:49 2016 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
 # contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
 #
 # This file is part of CubicWeb.
@@ -17,9 +17,8 @@
 # with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
 """cubicweb.server.hooks.syncschema unit and functional tests"""
 
-from logilab.common.testlib import unittest_main
+from yams.constraints import BoundaryConstraint
 
-from yams.constraints import BoundaryConstraint
 from cubicweb import ValidationError, Binary
 from cubicweb.schema import META_RTYPES
 from cubicweb.devtools import startpgcluster, stoppgcluster, PostgresApptestConfiguration
@@ -87,7 +86,7 @@
             attreid = cnx.execute('INSERT CWAttribute X: X cardinality "11", '
                                   'X defaultval %(default)s, X indexed TRUE, '
                                   'X relation_type RT, X from_entity E, X to_entity F '
-                                   'WHERE RT name "name", E name "Societe2", '
+                                  'WHERE RT name "name", E name "Societe2", '
                                   'F name "String"',
                                    {'default': Binary.zpickle('noname')})[0][0]
             self._set_attr_perms(cnx, attreid)
@@ -111,8 +110,8 @@
             self.assertEqual(rset.rows, [[s2eid]])
             # check that when a relation definition is deleted, existing relations are deleted
             rdefeid = cnx.execute('INSERT CWRelation X: X cardinality "**", X relation_type RT, '
-                                   '   X from_entity E, X to_entity E '
-                                   'WHERE RT name "concerne2", E name "CWUser"')[0][0]
+                                  '   X from_entity E, X to_entity E '
+                                  'WHERE RT name "concerne2", E name "CWUser"')[0][0]
             self._set_perms(cnx, rdefeid)
             cnx.commit()
             cnx.execute('DELETE CWRelation X WHERE X eid %(x)s', {'x': concerne2_rdef_eid})
@@ -136,10 +135,10 @@
         with self.admin_access.repo_cnx() as cnx:
             META_RTYPES.add('custom_meta')
             cnx.execute('INSERT CWRType X: X name "custom_meta", X description "", '
-                         'X final FALSE, X symmetric FALSE')
+                        'X final FALSE, X symmetric FALSE')
             cnx.commit()
             eeid = cnx.execute('INSERT CWEType X: X name "NEWEtype", '
-                                'X description "", X final FALSE')[0][0]
+                               'X description "", X final FALSE')[0][0]
             self._set_perms(cnx, eeid)
             cnx.commit()
             META_RTYPES.remove('custom_meta')
@@ -148,15 +147,15 @@
         with self.admin_access.repo_cnx() as cnx:
             META_RTYPES.add('custom_meta')
             cnx.execute('INSERT CWRType X: X name "custom_meta", X description "", '
-                         'X final FALSE, X symmetric FALSE')
+                        'X final FALSE, X symmetric FALSE')
             cnx.commit()
             rdefeid = cnx.execute('INSERT CWRelation X: X cardinality "**", X relation_type RT, '
-                                   '   X from_entity E, X to_entity E '
-                                   'WHERE RT name "custom_meta", E name "CWUser"')[0][0]
+                                  '   X from_entity E, X to_entity E '
+                                  'WHERE RT name "custom_meta", E name "CWUser"')[0][0]
             self._set_perms(cnx, rdefeid)
             cnx.commit()
             eeid = cnx.execute('INSERT CWEType X: X name "NEWEtype", '
-                                'X description "", X final FALSE')[0][0]
+                               'X description "", X final FALSE')[0][0]
             self._set_perms(cnx, eeid)
             cnx.commit()
             META_RTYPES.remove('custom_meta')
@@ -178,14 +177,14 @@
                                                     'S name N')]
             self.assertIn('subdiv', snames)
 
-
     def test_perms_synchronization_1(self):
         with self.admin_access.repo_cnx() as cnx:
             schema = self.repo.schema
             self.assertEqual(schema['CWUser'].get_groups('read'), set(('managers', 'users')))
             self.assertTrue(cnx.execute('Any X, Y WHERE X is CWEType, X name "CWUser", '
                                         'Y is CWGroup, Y name "users"')[0])
-            cnx.execute('DELETE X read_permission Y WHERE X is CWEType, X name "CWUser", Y name "users"')
+            cnx.execute('DELETE X read_permission Y '
+                        'WHERE X is CWEType, X name "CWUser", Y name "users"')
             self.assertEqual(schema['CWUser'].get_groups('read'), set(('managers', 'users', )))
             cnx.commit()
             self.assertEqual(schema['CWUser'].get_groups('read'), set(('managers',)))
@@ -228,7 +227,7 @@
             cnx.execute('DELETE X read_permission Y WHERE X eid %s' % eeid)
             cnx.execute('SET X final FALSE WHERE X eid %s' % eeid)
             cnx.execute('SET X read_permission Y WHERE X eid %s, Y eid in (%s, %s)'
-                         % (eeid, groupeids[0], groupeids[1]))
+                        % (eeid, groupeids[0], groupeids[1]))
             cnx.commit()
             cnx.execute('Any X WHERE X is CWEType, X name "CWEType"')
 
@@ -244,7 +243,7 @@
                 self.assertFalse(self.schema['state_of'].inlined)
                 self.assertFalse(self.index_exists(cnx, 'State', 'state_of'))
                 rset = cnx.execute('Any X, Y WHERE X state_of Y')
-                self.assertEqual(len(rset), 2) # user states
+                self.assertEqual(len(rset), 2)  # user states
             finally:
                 cnx.execute('SET X inlined TRUE WHERE X name "state_of"')
                 self.assertFalse(self.schema['state_of'].inlined)
@@ -293,8 +292,8 @@
     def test_required_change_1(self):
         with self.admin_access.repo_cnx() as cnx:
             cnx.execute('SET DEF cardinality "?1" '
-                         'WHERE DEF relation_type RT, DEF from_entity E,'
-                         'RT name "title", E name "Bookmark"')
+                        'WHERE DEF relation_type RT, DEF from_entity E,'
+                        'RT name "title", E name "Bookmark"')
             cnx.commit()
             # should now be able to add bookmark without title
             cnx.execute('INSERT Bookmark X: X path "/view"')
@@ -303,24 +302,25 @@
     def test_required_change_2(self):
         with self.admin_access.repo_cnx() as cnx:
             cnx.execute('SET DEF cardinality "11" '
-                         'WHERE DEF relation_type RT, DEF from_entity E,'
-                         'RT name "surname", E name "CWUser"')
+                        'WHERE DEF relation_type RT, DEF from_entity E,'
+                        'RT name "surname", E name "CWUser"')
             cnx.execute('SET U surname "Doe" WHERE U surname NULL')
             cnx.commit()
             # should not be able anymore to add cwuser without surname
             self.assertRaises(ValidationError, self.create_user, cnx, "toto")
             cnx.rollback()
             cnx.execute('SET DEF cardinality "?1" '
-                         'WHERE DEF relation_type RT, DEF from_entity E,'
-                         'RT name "surname", E name "CWUser"')
+                        'WHERE DEF relation_type RT, DEF from_entity E,'
+                        'RT name "surname", E name "CWUser"')
             cnx.commit()
 
     def test_add_attribute_to_base_class(self):
         with self.admin_access.repo_cnx() as cnx:
-            attreid = cnx.execute('INSERT CWAttribute X: X cardinality "11", X defaultval %(default)s, '
-                                   'X indexed TRUE, X relation_type RT, X from_entity E, X to_entity F '
-                                   'WHERE RT name "messageid", E name "BaseTransition", F name "String"',
-                                   {'default': Binary.zpickle('noname')})[0][0]
+            attreid = cnx.execute(
+                'INSERT CWAttribute X: X cardinality "11", X defaultval %(default)s, '
+                'X indexed TRUE, X relation_type RT, X from_entity E, X to_entity F '
+                'WHERE RT name "messageid", E name "BaseTransition", F name "String"',
+                {'default': Binary.zpickle('noname')})[0][0]
             assert cnx.execute('SET X read_permission Y WHERE X eid %(x)s, Y name "managers"',
                                {'x': attreid})
             cnx.commit()
@@ -357,12 +357,12 @@
             rset = cnx.execute('Any X WHERE X has_text "rick.roll"')
             self.assertIn(cnx.user.eid, [item[0] for item in rset])
             assert cnx.execute('SET R fulltext_container NULL '
-                                'WHERE R name "use_email"')
+                               'WHERE R name "use_email"')
             cnx.commit()
             rset = cnx.execute('Any X WHERE X has_text "rick.roll"')
             self.assertIn(target.eid, [item[0] for item in rset])
             assert cnx.execute('SET R fulltext_container "subject" '
-                                'WHERE R name "use_email"')
+                               'WHERE R name "use_email"')
             cnx.commit()
             rset = cnx.execute('Any X WHERE X has_text "rick.roll"')
             self.assertIn(cnx.user.eid, [item[0] for item in rset])
@@ -375,10 +375,10 @@
                 # bug in schema reloading, constraint's eid not restored
                 self.skipTest('start me alone')
             cnx.execute('SET X value %(v)s WHERE X eid %(x)s',
-                         {'x': cstr.eid, 'v': u"u'normal', u'auto', u'new'"})
+                        {'x': cstr.eid, 'v': u"u'normal', u'auto', u'new'"})
             cnx.execute('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, '
                         'EDEF constrained_by X WHERE CT name %(ct)s, EDEF eid %(x)s',
-                         {'ct': 'SizeConstraint', 'value': u'max=10', 'x': rdef.eid})
+                        {'ct': 'SizeConstraint', 'value': u'max=10', 'x': rdef.eid})
             cnx.commit()
             cstr = rdef.constraint_by_type('StaticVocabularyConstraint')
             self.assertEqual(cstr.values, (u'normal', u'auto', u'new'))
@@ -405,4 +405,5 @@
 
 
 if __name__ == '__main__':
-    unittest_main()
+    import unittest
+    unittest.main()
--- a/cubicweb/misc/migration/3.21.0_Any.py	Tue Jul 12 12:21:41 2016 +0200
+++ b/cubicweb/misc/migration/3.21.0_Any.py	Wed Jul 20 17:58:49 2016 +0200
@@ -162,8 +162,7 @@
     cstr = rdef.constraint_by_eid(cwconstraint.eid)
     if cstr.type() not in ('BoundaryConstraint', 'IntervalBoundConstraint', 'StaticVocabularyConstraint'):
         continue
-    cstrname, check = check_constraint(rdef.subject, rdef.object, rdef.rtype.type,
-            cstr, helper, prefix='cw_')
+    cstrname, check = check_constraint(rdef, cstr, helper, prefix='cw_')
     args = {'e': rdef.subject.type, 'c': cstrname, 'v': check}
     if repo.system_source.dbdriver == 'postgres':
         sql('ALTER TABLE cw_%(e)s DROP CONSTRAINT IF EXISTS %(c)s' % args, ask_confirm=False)
--- a/cubicweb/misc/migration/3.23.0_Any.py	Tue Jul 12 12:21:41 2016 +0200
+++ b/cubicweb/misc/migration/3.23.0_Any.py	Wed Jul 20 17:58:49 2016 +0200
@@ -54,8 +54,7 @@
                            'StaticVocabularyConstraint'):
         # These cannot be translate into backend CHECK.
         continue
-    cstrname, check = check_constraint(rdef.subject, rdef.object, rdef.rtype.type,
-                                       cstr, helper, prefix='cw_')
+    cstrname, check = check_constraint(rdef, cstr, helper, prefix='cw_')
     args = {'e': rdef.subject.type, 'c': cstrname, 'v': check}
     sql('ALTER TABLE cw_%(e)s ADD CONSTRAINT %(c)s CHECK(%(v)s)' % args)
 
--- a/cubicweb/schema.py	Tue Jul 12 12:21:41 2016 +0200
+++ b/cubicweb/schema.py	Wed Jul 20 17:58:49 2016 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
 # contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
 #
 # This file is part of CubicWeb.
@@ -22,6 +22,7 @@
 
 import re
 from os.path import join, basename
+from hashlib import md5
 from logging import getLogger
 from warnings import warn
 
@@ -31,16 +32,15 @@
 from logilab.common import tempattr
 from logilab.common.decorators import cached, clear_cache, monkeypatch, cachedproperty
 from logilab.common.logging_ext import set_log_methods
-from logilab.common.deprecation import deprecated, class_moved, moved
+from logilab.common.deprecation import deprecated
 from logilab.common.textutils import splitstrip
 from logilab.common.graph import get_cycles
 
 import yams
 from yams import BadSchemaDefinition, buildobjs as ybo
 from yams.schema import Schema, ERSchema, EntitySchema, RelationSchema, \
-     RelationDefinitionSchema, PermissionMixIn, role_name
-from yams.constraints import (BaseConstraint, FormatConstraint, BoundaryConstraint,
-                              IntervalBoundConstraint, StaticVocabularyConstraint,
+    RelationDefinitionSchema, PermissionMixIn, role_name
+from yams.constraints import (BaseConstraint, FormatConstraint,
                               cstr_json_dumps, cstr_json_loads)
 from yams.reader import (CONSTRAINTS, PyFileReader, SchemaLoader,
                          cleanup_sys_modules, fill_schema_from_namespace)
@@ -67,7 +67,7 @@
 META_RTYPES = set((
     'owned_by', 'created_by', 'is', 'is_instance_of', 'identity',
     'eid', 'creation_date', 'cw_source', 'modification_date', 'has_text', 'cwuri',
-    ))
+))
 WORKFLOW_RTYPES = set(('custom_workflow', 'in_state', 'wf_info_for'))
 WORKFLOW_DEF_RTYPES = set(('workflow_of', 'state_of', 'transition_of',
                            'initial_state', 'default_workflow',
@@ -97,7 +97,7 @@
     'constraint_of', 'relations',
     'read_permission', 'add_permission',
     'delete_permission', 'update_permission',
-    ))
+))
 
 WORKFLOW_TYPES = set(('Transition', 'State', 'TrInfo', 'Workflow',
                       'WorkflowTransition', 'BaseTransition',
@@ -116,11 +116,13 @@
 ybo.ETYPE_PROPERTIES += ('eid',)
 ybo.RTYPE_PROPERTIES += ('eid',)
 
+
 def build_schema_from_namespace(items):
     schema = CubicWebSchema('noname')
     fill_schema_from_namespace(schema, items, register_base_types=False)
     return schema
 
+
 # Bases for manipulating RQL in schema #########################################
 
 def guess_rrqlexpr_mainvars(expression):
@@ -137,6 +139,7 @@
                                   % expression)
     return mainvars
 
+
 def split_expression(rqlstring):
     for expr in rqlstring.split(','):
         for noparen1 in expr.split('('):
@@ -144,6 +147,7 @@
                 for word in noparen2.split():
                     yield word
 
+
 def normalize_expression(rqlstring):
     """normalize an rql expression to ease schema synchronization (avoid
     suppressing and reinserting an expression if only a space has been
@@ -162,35 +166,35 @@
     if len(formula_rqlst.children) != 1:
         raise BadSchemaDefinition('computed attribute %(attr)s on %(etype)s: '
                                   'can not use UNION in formula %(form)r' %
-                                  {'attr' : rdef.rtype,
-                                   'etype' : rdef.subject.type,
-                                   'form' : rdef.formula})
+                                  {'attr': rdef.rtype,
+                                   'etype': rdef.subject.type,
+                                   'form': rdef.formula})
     select = formula_rqlst.children[0]
     if len(select.selection) != 1:
         raise BadSchemaDefinition('computed attribute %(attr)s on %(etype)s: '
                                   'can only select one term in formula %(form)r' %
-                                  {'attr' : rdef.rtype,
-                                   'etype' : rdef.subject.type,
-                                   'form' : rdef.formula})
+                                  {'attr': rdef.rtype,
+                                   'etype': rdef.subject.type,
+                                   'form': rdef.formula})
     term = select.selection[0]
     types = set(term.get_type(sol) for sol in select.solutions)
     if len(types) != 1:
         raise BadSchemaDefinition('computed attribute %(attr)s on %(etype)s: '
                                   'multiple possible types (%(types)s) for formula %(form)r' %
-                                  {'attr' : rdef.rtype,
-                                   'etype' : rdef.subject.type,
-                                   'types' : list(types),
-                                   'form' : rdef.formula})
+                                  {'attr': rdef.rtype,
+                                   'etype': rdef.subject.type,
+                                   'types': list(types),
+                                   'form': rdef.formula})
     computed_type = types.pop()
     expected_type = rdef.object.type
     if computed_type != expected_type:
         raise BadSchemaDefinition('computed attribute %(attr)s on %(etype)s: '
                                   'computed attribute type (%(comp_type)s) mismatch with '
                                   'specified type (%(attr_type)s)' %
-                                  {'attr' : rdef.rtype,
-                                   'etype' : rdef.subject.type,
-                                   'comp_type' : computed_type,
-                                   'attr_type' : expected_type})
+                                  {'attr': rdef.rtype,
+                                   'etype': rdef.subject.type,
+                                   'comp_type': computed_type,
+                                   'attr_type': expected_type})
 
 
 class RQLExpression(object):
@@ -199,7 +203,7 @@
     """
     # these are overridden by set_log_methods below
     # only defining here to prevent pylint from complaining
-    info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
+    info = warning = error = critical = exception = debug = lambda msg, *a, **kw: None
     # to be defined in concrete classes
     predefined_variables = None
 
@@ -221,7 +225,7 @@
         :param mainvars: names of the variables being selected.
 
         """
-        self.eid = eid # eid of the entity representing this rql expression
+        self.eid = eid  # eid of the entity representing this rql expression
         assert mainvars, 'bad mainvars %s' % mainvars
         if isinstance(mainvars, string_types):
             mainvars = set(splitstrip(mainvars))
@@ -267,8 +271,10 @@
 
     def __deepcopy__(self, memo):
         return self.__class__(self.expression, self.mainvars)
+
     def __getstate__(self):
         return (self.expression, self.mainvars)
+
     def __setstate__(self, state):
         self.__init__(*state)
 
@@ -279,7 +285,8 @@
         defined = set(split_expression(self.expression))
         for varname in self.predefined_variables:
             if varname in defined:
-                select.add_eid_restriction(select.get_variable(varname), varname.lower(), 'Substitute')
+                select.add_eid_restriction(select.get_variable(varname), varname.lower(),
+                                           'Substitute')
         return select
 
     # permission rql expression specific stuff #################################
@@ -297,8 +304,8 @@
                     prefix, action, suffix = rel.r_type.split('_')
                 except ValueError:
                     continue
-                if prefix != 'has' or suffix != 'permission' or \
-                       not action in ('add', 'delete', 'update', 'read'):
+                if (prefix != 'has' or suffix != 'permission' or
+                        action not in ('add', 'delete', 'update', 'read')):
                     continue
                 if found is None:
                     found = []
@@ -398,7 +405,6 @@
                                     self.expression)
 
 
-
 # rql expressions for use in permission definition #############################
 
 class ERQLExpression(RQLExpression):
@@ -413,7 +419,7 @@
                 if creating:
                     return self._check(_cw, creating=True, **kwargs)
                 return False
-            assert creating == False
+            assert not creating
             return self._check(_cw, x=eid, **kwargs)
         return self._check(_cw, **kwargs)
 
@@ -433,11 +439,9 @@
 
     def check_permission_definitions(self):
         super(CubicWebRelationDefinitionSchema, self).check_permission_definitions()
-        schema = self.subject.schema
         for action, groups in self.permissions.items():
             for group_or_rqlexpr in groups:
-                if action == 'read' and \
-                       isinstance(group_or_rqlexpr, RQLExpression):
+                if action == 'read' and isinstance(group_or_rqlexpr, RQLExpression):
                     msg = "can't use rql expression for read permission of %s"
                     raise BadSchemaDefinition(msg % self)
                 if self.final and isinstance(group_or_rqlexpr, RRQLExpression):
@@ -447,6 +451,7 @@
                     msg = "can't use ERQLExpression on %s, use a RRQLExpression"
                     raise BadSchemaDefinition(msg % self)
 
+
 def vargraph(rqlst):
     """ builds an adjacency graph of variables from the rql syntax tree, e.g:
     Any O,S WHERE T subworkflow_exit S, T subworkflow WF, O state_of WF
@@ -462,7 +467,6 @@
         else:
             vargraph.setdefault(lhsvarname, []).append(rhsvarname)
             vargraph.setdefault(rhsvarname, []).append(lhsvarname)
-            #vargraph[(lhsvarname, rhsvarname)] = relation.r_type
     return vargraph
 
 
@@ -511,31 +515,32 @@
 
 
 PUB_SYSTEM_ENTITY_PERMS = {
-    'read':   ('managers', 'users', 'guests',),
-    'add':    ('managers',),
+    'read': ('managers', 'users', 'guests',),
+    'add': ('managers',),
     'delete': ('managers',),
     'update': ('managers',),
-    }
+}
 PUB_SYSTEM_REL_PERMS = {
-    'read':   ('managers', 'users', 'guests',),
-    'add':    ('managers',),
+    'read': ('managers', 'users', 'guests',),
+    'add': ('managers',),
     'delete': ('managers',),
-    }
+}
 PUB_SYSTEM_ATTR_PERMS = {
-    'read':   ('managers', 'users', 'guests',),
+    'read': ('managers', 'users', 'guests',),
     'add': ('managers',),
     'update': ('managers',),
-    }
+}
 RO_REL_PERMS = {
-    'read':   ('managers', 'users', 'guests',),
-    'add':    (),
+    'read': ('managers', 'users', 'guests',),
+    'add': (),
     'delete': (),
-    }
+}
 RO_ATTR_PERMS = {
-    'read':   ('managers', 'users', 'guests',),
+    'read': ('managers', 'users', 'guests',),
     'add': ybo.DEFAULT_ATTRPERMS['add'],
     'update': (),
-    }
+}
+
 
 # XXX same algorithm as in reorder_cubes and probably other place,
 # may probably extract a generic function
@@ -568,6 +573,7 @@
                         continue
     return eschemas
 
+
 def bw_normalize_etype(etype):
     if etype in ETYPE_NAME_MAP:
         msg = '%s has been renamed to %s, please update your code' % (
@@ -576,6 +582,7 @@
         etype = ETYPE_NAME_MAP[etype]
     return etype
 
+
 def display_name(req, key, form='', context=None):
     """return a internationalized string for the key (schema entity or relation
     name) in a given form
@@ -601,6 +608,7 @@
     return display_name(req, self.type, form, context)
 ERSchema.display_name = ERSchema_display_name
 
+
 @cached
 def get_groups(self, action):
     """return the groups authorized to perform <action> on entities of
@@ -613,13 +621,13 @@
     :return: names of the groups with the given permission
     """
     assert action in self.ACTIONS, action
-    #assert action in self._groups, '%s %s' % (self, action)
     try:
         return frozenset(g for g in self.permissions[action] if isinstance(g, string_types))
     except KeyError:
         return ()
 PermissionMixIn.get_groups = get_groups
 
+
 @cached
 def get_rqlexprs(self, action):
     """return the rql expressions representing queries to check the user is allowed
@@ -632,14 +640,13 @@
     :return: the rql expressions with the given permission
     """
     assert action in self.ACTIONS, action
-    #assert action in self._rqlexprs, '%s %s' % (self, action)
     try:
         return tuple(g for g in self.permissions[action] if not isinstance(g, string_types))
     except KeyError:
         return ()
 PermissionMixIn.get_rqlexprs = get_rqlexprs
 
-orig_set_action_permissions = PermissionMixIn.set_action_permissions
+
 def set_action_permissions(self, action, permissions):
     """set the groups and rql expressions allowing to perform <action> on
     entities of this type
@@ -653,8 +660,10 @@
     orig_set_action_permissions(self, action, tuple(permissions))
     clear_cache(self, 'get_rqlexprs')
     clear_cache(self, 'get_groups')
+orig_set_action_permissions = PermissionMixIn.set_action_permissions
 PermissionMixIn.set_action_permissions = set_action_permissions
 
+
 def has_local_role(self, action):
     """return true if the action *may* be granted locally (i.e. either rql
     expressions or the owners group are used in security definition)
@@ -670,6 +679,7 @@
     return False
 PermissionMixIn.has_local_role = has_local_role
 
+
 def may_have_permission(self, action, req):
     if action != 'read' and not (self.has_local_role('read') or
                                  self.has_perm(req, 'read')):
@@ -677,6 +687,7 @@
     return self.has_local_role(action) or self.has_perm(req, action)
 PermissionMixIn.may_have_permission = may_have_permission
 
+
 def has_perm(self, _cw, action, **kwargs):
     """return true if the action is granted globally or locally"""
     try:
@@ -712,8 +723,8 @@
     # NB: give _cw to user.owns since user is not be bound to a transaction on
     # the repository side
     if 'owners' in groups and (
-          kwargs.get('creating')
-          or ('eid' in kwargs and _cw.user.owns(kwargs['eid']))):
+            kwargs.get('creating')
+            or ('eid' in kwargs and _cw.user.owns(kwargs['eid']))):
         if DBG:
             print('check_perm: %r %r: user is owner or creation time' %
                   (action, _self_str))
@@ -872,7 +883,7 @@
             # avoid deleting the relation type accidentally...
             self.schema['has_text'].del_relation_def(self, self.schema['String'])
 
-    def schema_entity(self): # XXX @property for consistency with meta
+    def schema_entity(self):  # XXX @property for consistency with meta
         """return True if this entity type is used to build the schema"""
         return self.type in SCHEMA_TYPES
 
@@ -910,7 +921,7 @@
     def meta(self):
         return self.type in META_RTYPES
 
-    def schema_relation(self): # XXX @property for consistency with meta
+    def schema_relation(self):  # XXX @property for consistency with meta
         """return True if this relation type is used to build the schema"""
         return self.type in SCHEMA_TYPES
 
@@ -936,7 +947,7 @@
             else:
                 subjtype = objtype = None
         else:
-            assert not 'eid' in kwargs, kwargs
+            assert 'eid' not in kwargs, kwargs
             assert action in ('read', 'add', 'delete')
             if 'fromeid' in kwargs:
                 subjtype = _cw.entity_metas(kwargs['fromeid'])['type']
@@ -1000,6 +1011,7 @@
         rschema.final = False
 
     etype_name_re = r'[A-Z][A-Za-z0-9]*[a-z]+[A-Za-z0-9]*$'
+
     def add_entity_type(self, edef):
         edef.name = str(edef.name)
         edef.name = bw_normalize_etype(edef.name)
@@ -1055,7 +1067,7 @@
             try:
                 self._eid_index[rdef.eid] = rdefs
             except AttributeError:
-                pass # not a serialized schema
+                pass  # not a serialized schema
         return rdefs
 
     def del_relation_type(self, rtype):
@@ -1111,8 +1123,7 @@
             select.add_type_restriction(select.defined_vars['X'], str(rdef.subject))
             analyzer.visit(select)
             _check_valid_formula(rdef, rqlst)
-            rdef.formula_select = select # avoid later recomputation
-
+            rdef.formula_select = select  # avoid later recomputation
 
     def finalize_computed_relations(self):
         """Build relation definitions for computed relations
@@ -1145,6 +1156,16 @@
 
 # additional cw specific constraints ###########################################
 
+@monkeypatch(BaseConstraint)
+def name_for(self, rdef):
+    """Return a unique, size controlled, name for this constraint applied to given `rdef`.
+
+    This name may be used as name for the constraint in the database.
+    """
+    return 'cstr' + md5((rdef.subject.type + rdef.rtype.type + self.type() +
+                         (self.serialize() or '')).encode('ascii')).hexdigest()
+
+
 class BaseRQLConstraint(RRQLExpression, BaseConstraint):
     """base class for rql constraints"""
     distinct_query = None
@@ -1198,7 +1219,7 @@
     def repo_check(self, session, eidfrom, rtype, eidto):
         """raise ValidationError if the relation doesn't satisfy the constraint
         """
-        pass # this is a vocabulary constraint, not enforced
+        pass  # this is a vocabulary constraint, not enforced
 
 
 class RepoEnforcedRQLConstraintMixIn(object):
@@ -1293,6 +1314,7 @@
 
 from yams.buildobjs import _add_relation as yams_add_relation
 
+
 class workflowable_definition(ybo.metadefinition):
     """extends default EntityType's metaclass to add workflow relations
     (i.e. in_state, wf_info_for and custom_workflow). This is the default
@@ -1341,7 +1363,8 @@
 CONSTRAINTS['RQLConstraint'] = RQLConstraint
 CONSTRAINTS['RQLUniqueConstraint'] = RQLUniqueConstraint
 CONSTRAINTS['RQLVocabularyConstraint'] = RQLVocabularyConstraint
-CONSTRAINTS.pop('MultipleStaticVocabularyConstraint', None) # don't want this in cw yams schema
+# don't want MultipleStaticVocabularyConstraint in cw yams schema
+CONSTRAINTS.pop('MultipleStaticVocabularyConstraint', None)
 PyFileReader.context.update(CONSTRAINTS)
 
 
@@ -1362,7 +1385,7 @@
         # bootstraping, ignore cubes
         filepath = join(cubicweb.CW_SOFTWARE_ROOT, 'schemas', 'bootstrap.py')
         self.info('loading %s', filepath)
-        with tempattr(ybo, 'PACKAGE', 'cubicweb'): # though we don't care here
+        with tempattr(ybo, 'PACKAGE', 'cubicweb'):  # though we don't care here
             self.handle_file(filepath)
 
     def unhandled_file(self, filepath):
@@ -1371,7 +1394,8 @@
 
     # these are overridden by set_log_methods below
     # only defining here to prevent pylint from complaining
-    info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
+    info = warning = error = critical = exception = debug = lambda msg, *a, **kw: None
+
 
 class CubicWebSchemaLoader(BootstrapSchemaLoader):
     """cubicweb specific schema loader, automatically adding metadata to the
@@ -1412,7 +1436,7 @@
 
     # these are overridden by set_log_methods below
     # only defining here to prevent pylint from complaining
-    info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
+    info = warning = error = critical = exception = debug = lambda msg, *a, **kw: None
 
 
 set_log_methods(CubicWebSchemaLoader, getLogger('cubicweb.schemaloader'))
@@ -1424,6 +1448,7 @@
 MAY_USE_TEMPLATE_FORMAT = set(('managers',))
 NEED_PERM_FORMATS = [_('text/cubicweb-page-template')]
 
+
 @monkeypatch(FormatConstraint)
 def vocabulary(self, entity=None, form=None):
     cw = None
@@ -1432,11 +1457,11 @@
     elif form is not None:
         cw = form._cw
     if cw is not None:
-        if hasattr(cw, 'write_security'): # test it's a session and not a request
+        if hasattr(cw, 'write_security'):  # test it's a session and not a request
             # cw is a server session
-            hasperm = not cw.write_security or \
-                      not cw.is_hook_category_activated('integrity') or \
-                      cw.user.matching_groups(MAY_USE_TEMPLATE_FORMAT)
+            hasperm = (not cw.write_security or
+                       not cw.is_hook_category_activated('integrity') or
+                       cw.user.matching_groups(MAY_USE_TEMPLATE_FORMAT))
         else:
             hasperm = cw.user.matching_groups(MAY_USE_TEMPLATE_FORMAT)
         if hasperm:
@@ -1445,22 +1470,27 @@
 
 # XXX itou for some Statement methods
 from rql import stmts
-orig_get_etype = stmts.ScopeNode.get_etype
+
+
 def bw_get_etype(self, name):
     return orig_get_etype(self, bw_normalize_etype(name))
+orig_get_etype = stmts.ScopeNode.get_etype
 stmts.ScopeNode.get_etype = bw_get_etype
 
-orig_add_main_variable_delete = stmts.Delete.add_main_variable
+
 def bw_add_main_variable_delete(self, etype, vref):
     return orig_add_main_variable_delete(self, bw_normalize_etype(etype), vref)
+orig_add_main_variable_delete = stmts.Delete.add_main_variable
 stmts.Delete.add_main_variable = bw_add_main_variable_delete
 
-orig_add_main_variable_insert = stmts.Insert.add_main_variable
+
 def bw_add_main_variable_insert(self, etype, vref):
     return orig_add_main_variable_insert(self, bw_normalize_etype(etype), vref)
+orig_add_main_variable_insert = stmts.Insert.add_main_variable
 stmts.Insert.add_main_variable = bw_add_main_variable_insert
 
-orig_set_statement_type = stmts.Select.set_statement_type
+
 def bw_set_statement_type(self, etype):
     return orig_set_statement_type(self, bw_normalize_etype(etype))
+orig_set_statement_type = stmts.Select.set_statement_type
 stmts.Select.set_statement_type = bw_set_statement_type
--- a/cubicweb/server/__init__.py	Tue Jul 12 12:21:41 2016 +0200
+++ b/cubicweb/server/__init__.py	Wed Jul 20 17:58:49 2016 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
 # contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
 #
 # This file is part of CubicWeb.
@@ -24,9 +24,6 @@
 
 __docformat__ = "restructuredtext en"
 
-import sys
-from os.path import join, exists
-from glob import glob
 from contextlib import contextmanager
 
 from six import text_type, string_types
@@ -39,9 +36,9 @@
 
 from yams import BASE_GROUPS
 
-from cubicweb import CW_SOFTWARE_ROOT
 from cubicweb.appobject import AppObject
 
+
 class ShuttingDown(BaseException):
     """raised when trying to access some resources while the repository is
     shutting down. Inherit from BaseException so that `except Exception` won't
@@ -90,7 +87,7 @@
 #: more verbosity
 DBG_MORE = 128
 #: all level enabled
-DBG_ALL  = DBG_RQL + DBG_SQL + DBG_REPO + DBG_MS + DBG_HOOKS + DBG_OPS + DBG_SEC + DBG_MORE
+DBG_ALL = DBG_RQL + DBG_SQL + DBG_REPO + DBG_MS + DBG_HOOKS + DBG_OPS + DBG_SEC + DBG_MORE
 
 _SECURITY_ITEMS = []
 _SECURITY_CAPS = ['read', 'add', 'update', 'delete', 'transition']
@@ -98,6 +95,7 @@
 #: current debug mode
 DEBUG = 0
 
+
 @contextmanager
 def tunesecurity(items=(), capabilities=()):
     """Context manager to use in conjunction with DBG_SEC.
@@ -136,6 +134,7 @@
     _SECURITY_ITEMS[:] = olditems
     _SECURITY_CAPS[:] = oldactions
 
+
 def set_debug(debugmode):
     """change the repository debugging mode"""
     global DEBUG
@@ -148,6 +147,7 @@
     else:
         DEBUG |= debugmode
 
+
 class debugged(object):
     """Context manager and decorator to help debug the repository.
 
@@ -184,7 +184,6 @@
     def __call__(self, func):
         """decorate function"""
         def wrapped(*args, **kwargs):
-            _clevel = DEBUG
             set_debug(self.debugmode)
             try:
                 return func(*args, **kwargs)
@@ -192,6 +191,7 @@
                 set_debug(self._clevel)
         return wrapped
 
+
 # database initialization ######################################################
 
 def create_user(session, login, pwd, *groups):
@@ -203,6 +203,7 @@
                         {'u': user.eid, 'group': text_type(group)})
     return user
 
+
 def init_repository(config, interactive=True, drop=False, vreg=None,
                     init_config=None):
     """initialise a repository database by creating tables add filling them
@@ -261,9 +262,7 @@
         # they are used sometimes by generated sql. Keeping them empty is much
         # simpler than fixing this...
         schemasql = sqlschema(schema, driver)
-        #skip_entities=[str(e) for e in schema.entities()
-        #               if not repo.system_source.support_entity(str(e))])
-        failed = sqlexec(schemasql, execute, pbtitle=_title, delimiter=';;')
+        failed = sqlexec(schemasql, execute, pbtitle=_title)
         if failed:
             print('The following SQL statements failed. You should check your schema.')
             print(failed)
@@ -291,18 +290,18 @@
             cnx.create_entity('CWGroup', name=text_type(group))
         admin = create_user(cnx, login, pwd, u'managers')
         cnx.execute('SET X owned_by U WHERE X is IN (CWGroup,CWSource), U eid %(u)s',
-                        {'u': admin.eid})
+                    {'u': admin.eid})
         cnx.commit()
     repo.shutdown()
     # re-login using the admin user
-    config._cubes = None # avoid assertion error
+    config._cubes = None  # avoid assertion error
     repo = get_repository(config=config)
     # replace previous schema by the new repo's one. This is necessary so that we give the proper
     # schema to `initialize_schema` above since it will initialize .eid attribute of schema elements
     schema = repo.schema
     with connect(repo, login, password=pwd) as cnx:
         with cnx.security_enabled(False, False):
-            repo.system_source.eid = ssource.eid # redo this manually
+            repo.system_source.eid = ssource.eid  # redo this manually
             handler = config.migration_handler(schema, interactive=False,
                                                cnx=cnx, repo=repo)
             # serialize the schema
@@ -350,7 +349,7 @@
 
 
 # sqlite'stored procedures have to be registered at connection opening time
-from logilab.database import SQL_CONNECT_HOOKS
+from logilab.database import SQL_CONNECT_HOOKS  # noqa
 
 # add to this set relations which should have their add security checking done
 # *BEFORE* adding the actual relation (done after by default)
--- a/cubicweb/server/migractions.py	Tue Jul 12 12:21:41 2016 +0200
+++ b/cubicweb/server/migractions.py	Wed Jul 20 17:58:49 2016 +0200
@@ -1531,11 +1531,9 @@
         and a sql database
         """
         dbhelper = self.repo.system_source.dbhelper
-        tablesql = eschema2sql(dbhelper, self.repo.schema.eschema(etype),
-                               prefix=SQL_PREFIX)
-        for sql in tablesql.split(';'):
-            if sql.strip():
-                self.sqlexec(sql)
+        for sql in eschema2sql(dbhelper, self.repo.schema.eschema(etype),
+                               prefix=SQL_PREFIX):
+            self.sqlexec(sql)
         if commit:
             self.commit()
 
@@ -1544,10 +1542,8 @@
         This may be useful on accidental desync between the repository schema
         and a sql database
         """
-        tablesql = rschema2sql(self.repo.schema.rschema(rtype))
-        for sql in tablesql.split(';'):
-            if sql.strip():
-                self.sqlexec(sql)
+        for sql in rschema2sql(self.repo.schema.rschema(rtype)):
+            self.sqlexec(sql)
         if commit:
             self.commit()
 
--- a/cubicweb/server/schema2sql.py	Tue Jul 12 12:21:41 2016 +0200
+++ b/cubicweb/server/schema2sql.py	Wed Jul 20 17:58:49 2016 +0200
@@ -39,9 +39,9 @@
 def sql_create_index(self, table, column, unique=False):
     idx = self._index_name(table, column, unique)
     if unique:
-        return 'ALTER TABLE %s ADD CONSTRAINT %s UNIQUE(%s);' % (table, idx, column)
+        return 'ALTER TABLE %s ADD CONSTRAINT %s UNIQUE(%s)' % (table, idx, column)
     else:
-        return 'CREATE INDEX %s ON %s(%s);' % (idx, table, column)
+        return 'CREATE INDEX %s ON %s(%s)' % (idx, table, column)
 
 
 @monkeypatch(database._GenericAdvFuncHelper)
@@ -53,104 +53,71 @@
 
 
 def build_index_name(table, columns, prefix='idx_'):
+    """Return a predictable-but-size-constrained name for an index on `table(*columns)`, using an
+    md5 hash.
+    """
     return '%s%s' % (prefix, md5((table +
                                   ',' +
                                   ','.join(sorted(columns))).encode('ascii')).hexdigest())
 
 
 def rschema_has_table(rschema, skip_relations):
-    """Return True if the given schema should have a table in the database"""
+    """Return True if the given schema should have a table in the database."""
     return not (rschema.final or rschema.inlined or rschema.rule or rschema.type in skip_relations)
 
 
 def schema2sql(dbhelper, schema, skip_entities=(), skip_relations=(), prefix=''):
-    """write to the output stream a SQL schema to store the objects
-    corresponding to the given schema
+    """Yield SQL statements to create a database schema for the given Yams schema.
+
+    `prefix` may be a string that will be prepended to all table / column names (usually, 'cw_').
     """
-    output = []
-    w = output.append
     for etype in sorted(schema.entities()):
         eschema = schema.eschema(etype)
         if eschema.final or eschema.type in skip_entities:
             continue
-        w(eschema2sql(dbhelper, eschema, skip_relations, prefix=prefix))
+        for sql in eschema2sql(dbhelper, eschema, skip_relations, prefix):
+            yield sql
     for rtype in sorted(schema.relations()):
         rschema = schema.rschema(rtype)
         if rschema_has_table(rschema, skip_relations):
-            w(rschema2sql(rschema))
-    return '\n'.join(output)
+            for sql in rschema2sql(rschema):
+                yield sql
 
 
-def dropschema2sql(dbhelper, schema, skip_entities=(), skip_relations=(), prefix=''):
-    """write to the output stream a SQL schema to store the objects
-    corresponding to the given schema
+def unique_index_name(eschema, attrs):
+    """Return a predictable-but-size-constrained name for a multi-columns unique index on
+    given attributes of the entity schema (actually, the latter may be a schema or a string).
     """
+    # keep giving eschema instead of table name for bw compat
+    table = text_type(eschema)
+    # unique_index_name is used as name of CWUniqueConstraint, hence it should be unicode
+    return text_type(build_index_name(table, attrs, 'unique_'))
+
+
+def iter_unique_index_names(eschema):
+    """Yield (attrs, index name) where attrs is a list of entity type's attribute names that should
+    be unique together, and index name the unique index name.
+    """
+    for attrs in eschema._unique_together or ():
+        yield attrs, unique_index_name(eschema, attrs)
+
+
+def eschema2sql(dbhelper, eschema, skip_relations=(), prefix=''):
+    """Yield SQL statements to initialize database from an entity schema."""
+    table = prefix + eschema.type
     output = []
     w = output.append
-    for etype in sorted(schema.entities()):
-        eschema = schema.eschema(etype)
-        if eschema.final or eschema.type in skip_entities:
-            continue
-        stmts = dropeschema2sql(dbhelper, eschema, skip_relations, prefix=prefix)
-        for stmt in stmts:
-            w(stmt)
-    for rtype in sorted(schema.relations()):
-        rschema = schema.rschema(rtype)
-        if rschema_has_table(rschema, skip_relations):
-            w(droprschema2sql(rschema))
-    return '\n'.join(output)
-
-
-def eschema_attrs(eschema, skip_relations):
+    w('CREATE TABLE %s(' % (table))
     attrs = [attrdef for attrdef in eschema.attribute_definitions()
              if not attrdef[0].type in skip_relations]
     attrs += [(rschema, None)
               for rschema in eschema.subject_relations()
               if not rschema.final and rschema.inlined]
-    return attrs
-
-
-def unique_index_name(eschema, columns):
-    # keep giving eschema instead of table name for bw compat
-    table = text_type(eschema)
-    # unique_index_name is used as name of CWUniqueConstraint, hence it should be unicode
-    return text_type(build_index_name(table, columns, 'unique_'))
-
-
-def iter_unique_index_names(eschema):
-    for columns in eschema._unique_together or ():
-        yield columns, unique_index_name(eschema, columns)
-
-
-def dropeschema2sql(dbhelper, eschema, skip_relations=(), prefix=''):
-    """return sql to drop an entity type's table"""
-    # not necessary to drop indexes, that's implictly done when
-    # dropping the table, but we need to drop SQLServer views used to
-    # create multicol unique indices
-    statements = []
-    tablename = prefix + eschema.type
-    if eschema._unique_together is not None:
-        for columns, index_name in iter_unique_index_names(eschema):
-            cols = ['%s%s' % (prefix, col) for col in columns]
-            sqls = dbhelper.sqls_drop_multicol_unique_index(tablename, cols, index_name)
-            statements += sqls
-    statements += ['DROP TABLE %s;' % (tablename)]
-    return statements
-
-
-def eschema2sql(dbhelper, eschema, skip_relations=(), prefix=''):
-    """write an entity schema as SQL statements to stdout"""
-    output = []
-    w = output.append
-    table = prefix + eschema.type
-    w('CREATE TABLE %s(' % (table))
-    attrs = eschema_attrs(eschema, skip_relations)
     # XXX handle objectinline physical mode
     for i in range(len(attrs)):
         rschema, attrschema = attrs[i]
         if attrschema is not None:
-            sqltype = aschema2sql(dbhelper, eschema, rschema, attrschema,
-                                  indent=' ')
+            sqltype = aschema2sql(dbhelper, eschema, rschema, attrschema)
         else:  # inline relation
             sqltype = 'integer REFERENCES entities (eid)'
         if i == len(attrs) - 1:
@@ -160,32 +127,32 @@
     for rschema, aschema in attrs:
         if aschema is None:  # inline relation
             continue
-        attr = rschema.type
         rdef = rschema.rdef(eschema.type, aschema.type)
         for constraint in rdef.constraints:
-            cstrname, check = check_constraint(eschema, aschema, attr, constraint, dbhelper,
-                                               prefix=prefix)
+            cstrname, check = check_constraint(rdef, constraint, dbhelper, prefix=prefix)
             if cstrname is not None:
                 w(', CONSTRAINT %s CHECK(%s)' % (cstrname, check))
-    w(');')
+    w(')')
+    yield '\n'.join(output)
     # create indexes
     for i in range(len(attrs)):
         rschema, attrschema = attrs[i]
         if attrschema is None or eschema.rdef(rschema).indexed:
-            w(dbhelper.sql_create_index(table, prefix + rschema.type))
+            yield dbhelper.sql_create_index(table, prefix + rschema.type)
         if attrschema and any(isinstance(cstr, UniqueConstraint)
                               for cstr in eschema.rdef(rschema).constraints):
-            w(dbhelper.sql_create_index(table, prefix + rschema.type, unique=True))
-    for columns, index_name in iter_unique_index_names(eschema):
-        cols = ['%s%s' % (prefix, col) for col in columns]
-        sqls = dbhelper.sqls_create_multicol_unique_index(table, cols, index_name)
+            yield dbhelper.sql_create_index(table, prefix + rschema.type, unique=True)
+    for attrs, index_name in iter_unique_index_names(eschema):
+        columns = ['%s%s' % (prefix, attr) for attr in attrs]
+        sqls = dbhelper.sqls_create_multicol_unique_index(table, columns, index_name)
         for sql in sqls:
-            w(sql)
-    w('')
-    return '\n'.join(output)
+            yield sql.rstrip(';')  # remove trailing ';' for consistency
 
 
-def as_sql(value, dbhelper, prefix):
+def constraint_value_as_sql(value, dbhelper, prefix):
+    """Return the SQL value from a Yams constraint's value, handling special cases where it's a
+    `Attribute`, `TODAY` or `NOW` instance instead of a literal value.
+    """
     if isinstance(value, Attribute):
         return prefix + value.attr
     elif isinstance(value, TODAY):
@@ -197,20 +164,22 @@
         return value
 
 
-def check_constraint(eschema, aschema, attr, constraint, dbhelper, prefix=''):
-    # XXX should find a better name
-    cstrname = 'cstr' + md5((eschema.type + attr + constraint.type() +
-                             (constraint.serialize() or '')).encode('ascii')).hexdigest()
+def check_constraint(rdef, constraint, dbhelper, prefix=''):
+    """Return (constraint name, constraint SQL definition) for the given relation definition's
+    constraint. May be (None, None) if the constraint is not handled in the backend.
+    """
+    attr = rdef.rtype.type
+    cstrname = constraint.name_for(rdef)
     if constraint.type() == 'BoundaryConstraint':
-        value = as_sql(constraint.boundary, dbhelper, prefix)
+        value = constraint_value_as_sql(constraint.boundary, dbhelper, prefix)
         return cstrname, '%s%s %s %s' % (prefix, attr, constraint.operator, value)
     elif constraint.type() == 'IntervalBoundConstraint':
         condition = []
         if constraint.minvalue is not None:
-            value = as_sql(constraint.minvalue, dbhelper, prefix)
+            value = constraint_value_as_sql(constraint.minvalue, dbhelper, prefix)
             condition.append('%s%s >= %s' % (prefix, attr, value))
         if constraint.maxvalue is not None:
-            value = as_sql(constraint.maxvalue, dbhelper, prefix)
+            value = constraint_value_as_sql(constraint.maxvalue, dbhelper, prefix)
             condition.append('%s%s <= %s' % (prefix, attr, value))
         return cstrname, ' AND '.join(condition)
     elif constraint.type() == 'StaticVocabularyConstraint':
@@ -224,8 +193,8 @@
     return None, None
 
 
-def aschema2sql(dbhelper, eschema, rschema, aschema, creating=True, indent=''):
-    """write an attribute schema as SQL statements to stdout"""
+def aschema2sql(dbhelper, eschema, rschema, aschema, creating=True):
+    """Return string containing a SQL table's column definition from attribute schema."""
     attr = rschema.type
     rdef = rschema.rdef(eschema.type, aschema.type)
     sqltype = type_from_rdef(dbhelper, rdef)
@@ -253,7 +222,7 @@
 
 
 def type_from_rdef(dbhelper, rdef):
-    """return a sql type string corresponding to the relation definition"""
+    """Return a string containing SQL type name for the given relation definition."""
     constraints = list(rdef.constraints)
     sqltype = None
     if rdef.object.type == 'String':
@@ -269,6 +238,8 @@
 
 
 def sql_type(dbhelper, rdef):
+    """Return a string containing SQL type to use to store values of the given relation definition.
+    """
     sqltype = dbhelper.TYPE_MAPPING[rdef.object]
     if callable(sqltype):
         sqltype = sqltype(rdef)
@@ -283,56 +254,54 @@
 );
 
 CREATE INDEX %(from_idx)s ON %(table)s(eid_from);
-CREATE INDEX %(to_idx)s ON %(table)s(eid_to);"""
+CREATE INDEX %(to_idx)s ON %(table)s(eid_to)"""
 
 
 def rschema2sql(rschema):
+    """Yield SQL statements to create database table and indexes for a Yams relation schema."""
     assert not rschema.rule
     table = '%s_relation' % rschema.type
-    return _SQL_SCHEMA % {'table': table,
+    sqls = _SQL_SCHEMA % {'table': table,
                           'pkey_idx': build_index_name(table, ['eid_from', 'eid_to'], 'key_'),
                           'from_idx': build_index_name(table, ['eid_from'], 'idx_'),
                           'to_idx': build_index_name(table, ['eid_to'], 'idx_')}
-
-
-def droprschema2sql(rschema):
-    """return sql to drop a relation type's table"""
-    # not necessary to drop indexes, that's implictly done when dropping
-    # the table
-    return 'DROP TABLE %s_relation;' % rschema.type
+    for sql in sqls.split(';'):
+        yield sql.strip()
 
 
 def grant_schema(schema, user, set_owner=True, skip_entities=(), prefix=''):
-    """write to the output stream a SQL schema to store the objects
-    corresponding to the given schema
+    """Yield SQL statements to give all access (and ownership if `set_owner` is True) on the
+    database tables for the given Yams schema to `user`.
+
+    `prefix` may be a string that will be prepended to all table / column names (usually, 'cw_').
     """
-    output = []
-    w = output.append
     for etype in sorted(schema.entities()):
         eschema = schema.eschema(etype)
         if eschema.final or etype in skip_entities:
             continue
-        w(grant_eschema(eschema, user, set_owner, prefix=prefix))
+        for sql in grant_eschema(eschema, user, set_owner, prefix=prefix):
+            yield sql
     for rtype in sorted(schema.relations()):
         rschema = schema.rschema(rtype)
         if rschema_has_table(rschema, skip_relations=()):  # XXX skip_relations should be specified
-            w(grant_rschema(rschema, user, set_owner))
-    return '\n'.join(output)
+            for sql in grant_rschema(rschema, user, set_owner):
+                yield sql
 
 
 def grant_eschema(eschema, user, set_owner=True, prefix=''):
-    output = []
-    w = output.append
+    """Yield SQL statements to give all access (and ownership if `set_owner` is True) on the
+    database tables for the given Yams entity schema to `user`.
+    """
     etype = eschema.type
     if set_owner:
-        w('ALTER TABLE %s%s OWNER TO %s;' % (prefix, etype, user))
-    w('GRANT ALL ON %s%s TO %s;' % (prefix, etype, user))
-    return '\n'.join(output)
+        yield 'ALTER TABLE %s%s OWNER TO %s' % (prefix, etype, user)
+    yield 'GRANT ALL ON %s%s TO %s' % (prefix, etype, user)
 
 
 def grant_rschema(rschema, user, set_owner=True):
-    output = []
+    """Yield SQL statements to give all access (and ownership if `set_owner` is True) on the
+    database tables for the given Yams relation schema to `user`.
+    """
     if set_owner:
-        output.append('ALTER TABLE %s_relation OWNER TO %s;' % (rschema.type, user))
-    output.append('GRANT ALL ON %s_relation TO %s;' % (rschema.type, user))
-    return '\n'.join(output)
+        yield 'ALTER TABLE %s_relation OWNER TO %s' % (rschema.type, user)
+    yield 'GRANT ALL ON %s_relation TO %s' % (rschema.type, user)
--- a/cubicweb/server/sources/native.py	Tue Jul 12 12:21:41 2016 +0200
+++ b/cubicweb/server/sources/native.py	Wed Jul 20 17:58:49 2016 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
 # contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
 #
 # This file is part of CubicWeb.
@@ -70,6 +70,7 @@
 NONSYSTEM_ETYPES = set()
 NONSYSTEM_RELATIONS = set()
 
+
 class LogCursor(object):
     def __init__(self, cursor):
         self.cu = cursor
@@ -142,12 +143,13 @@
     """check linked entity has not been redirected for this relation"""
     card = rdef.role_cardinality(role)
     if card in '?1' and tentity.related(rdef.rtype, role):
-        raise _UndoException(tentity._cw._(
+        msg = tentity._cw._(
             "Can't restore %(role)s relation %(rtype)s to entity %(eid)s which "
             "is already linked using this relation.")
-                            % {'role': neg_role(role),
-                               'rtype': rdef.rtype,
-                               'eid': tentity.eid})
+        raise _UndoException(msg % {'role': neg_role(role),
+                                    'rtype': rdef.rtype,
+                                    'eid': tentity.eid})
+
 
 def _undo_rel_info(cnx, subj, rtype, obj):
     entities = []
@@ -155,26 +157,27 @@
         try:
             entities.append(cnx.entity_from_eid(eid))
         except UnknownEid:
-            raise _UndoException(cnx._(
+            msg = cnx._(
                 "Can't restore relation %(rtype)s, %(role)s entity %(eid)s"
                 " doesn't exist anymore.")
-                                % {'role': cnx._(role),
-                                   'rtype': cnx._(rtype),
-                                   'eid': eid})
+            raise _UndoException(msg % {'role': cnx._(role),
+                                        'rtype': cnx._(rtype),
+                                        'eid': eid})
     sentity, oentity = entities
     try:
         rschema = cnx.vreg.schema.rschema(rtype)
         rdef = rschema.rdefs[(sentity.cw_etype, oentity.cw_etype)]
     except KeyError:
-        raise _UndoException(cnx._(
+        msg = cnx._(
             "Can't restore relation %(rtype)s between %(subj)s and "
             "%(obj)s, that relation does not exists anymore in the "
             "schema.")
-                            % {'rtype': cnx._(rtype),
-                               'subj': subj,
-                               'obj': obj})
+        raise _UndoException(msg % {'rtype': cnx._(rtype),
+                                    'subj': subj,
+                                    'obj': obj})
     return sentity, oentity, rdef
 
+
 def _undo_has_later_transaction(cnx, eid):
     return cnx.system_sql('''\
 SELECT T.tx_uuid FROM transactions AS TREF, transactions AS T
@@ -270,56 +273,56 @@
     sqlgen_class = SQLGenerator
     options = (
         ('db-driver',
-         {'type' : 'string',
+         {'type': 'string',
           'default': 'postgres',
           # XXX use choice type
           'help': 'database driver (postgres, sqlite, sqlserver2005)',
           'group': 'native-source', 'level': 0,
           }),
         ('db-host',
-         {'type' : 'string',
+         {'type': 'string',
           'default': '',
           'help': 'database host',
           'group': 'native-source', 'level': 1,
           }),
         ('db-port',
-         {'type' : 'string',
+         {'type': 'string',
           'default': '',
           'help': 'database port',
           'group': 'native-source', 'level': 1,
           }),
         ('db-name',
-         {'type' : 'string',
+         {'type': 'string',
           'default': Method('default_instance_id'),
           'help': 'database name',
           'group': 'native-source', 'level': 0,
           }),
         ('db-namespace',
-         {'type' : 'string',
+         {'type': 'string',
           'default': '',
           'help': 'database namespace (schema) name',
           'group': 'native-source', 'level': 1,
           }),
         ('db-user',
-         {'type' : 'string',
+         {'type': 'string',
           'default': CubicWebNoAppConfiguration.mode == 'user' and getlogin() or 'cubicweb',
           'help': 'database user',
           'group': 'native-source', 'level': 0,
           }),
         ('db-password',
-         {'type' : 'password',
+         {'type': 'password',
           'default': '',
           'help': 'database password',
           'group': 'native-source', 'level': 0,
           }),
         ('db-encoding',
-         {'type' : 'string',
+         {'type': 'string',
           'default': 'utf8',
           'help': 'database encoding',
           'group': 'native-source', 'level': 1,
           }),
         ('db-extra-arguments',
-         {'type' : 'string',
+         {'type': 'string',
           'default': '',
           'help': 'set to "Trusted_Connection" if you are using SQLServer and '
                   'want trusted authentication for the database connection',
@@ -421,7 +424,6 @@
         else:
             raise ValueError('Unknown format %r' % format)
 
-
     def restore(self, backupfile, confirm, drop, format='native'):
         """method called to restore a backup of source's data"""
         if self.repo.config.init_cnxset_pool:
@@ -438,13 +440,12 @@
             if self.repo.config.init_cnxset_pool:
                 self.open_source_connections()
 
-
     def init(self, activated, source_entity):
         try:
             # test if 'asource' column exists
             query = self.dbhelper.sql_add_limit_offset('SELECT asource FROM entities', 1)
             source_entity._cw.system_sql(query)
-        except Exception as ex:
+        except Exception:
             self.eid_type_source = self.eid_type_source_pre_131
         super(NativeSQLSource, self).init(activated, source_entity)
         self.init_creating(source_entity._cw.cnxset)
@@ -499,7 +500,7 @@
         try:
             self._rql_sqlgen.schema = schema
         except AttributeError:
-            pass # __init__
+            pass  # __init__
         for authentifier in self.authentifiers:
             authentifier.set_schema(self.schema)
         clear_cache(self, 'need_fti_indexation')
@@ -508,17 +509,17 @@
         """return true if the given entity's type is handled by this adapter
         if write is true, return true only if it's a RW support
         """
-        return not etype in NONSYSTEM_ETYPES
+        return etype not in NONSYSTEM_ETYPES
 
     def support_relation(self, rtype, write=False):
         """return true if the given relation's type is handled by this adapter
         if write is true, return true only if it's a RW support
         """
         if write:
-            return not rtype in NONSYSTEM_RELATIONS
+            return rtype not in NONSYSTEM_RELATIONS
         # due to current multi-sources implementation, the system source
         # can't claim not supporting a relation
-        return True #not rtype == 'content_for'
+        return True  # not rtype == 'content_for'
 
     @statsd_timeit
     def authenticate(self, cnx, login, **kwargs):
@@ -596,7 +597,7 @@
                             to_restore = handler(entity, attr)
                             restore_values.append((entity, attr, to_restore))
         try:
-            yield # 2/ execute the source's instructions
+            yield  # 2/ execute the source's instructions
         finally:
             # 3/ restore original values
             for entity, attr, value in restore_values:
@@ -631,7 +632,7 @@
             if cnx.ertype_supports_undo(entity.cw_etype):
                 attrs = [SQL_PREFIX + r.type
                          for r in entity.e_schema.subject_relations()
-                         if (r.final or r.inlined) and not r in VIRTUAL_RTYPES]
+                         if (r.final or r.inlined) and r not in VIRTUAL_RTYPES]
                 changes = self._save_attrs(cnx, entity, attrs)
                 self._record_tx_action(cnx, 'tx_entity_actions', u'D',
                                        etype=text_type(entity.cw_etype), eid=entity.eid,
@@ -642,12 +643,12 @@
 
     def add_relation(self, cnx, subject, rtype, object, inlined=False):
         """add a relation to the source"""
-        self._add_relations(cnx,  rtype, [(subject, object)], inlined)
+        self._add_relations(cnx, rtype, [(subject, object)], inlined)
         if cnx.ertype_supports_undo(rtype):
             self._record_tx_action(cnx, 'tx_relation_actions', u'A',
                                    eid_from=subject, rtype=text_type(rtype), eid_to=object)
 
-    def add_relations(self, cnx,  rtype, subj_obj_list, inlined=False):
+    def add_relations(self, cnx, rtype, subj_obj_list, inlined=False):
         """add a relations to the source"""
         self._add_relations(cnx, rtype, subj_obj_list, inlined)
         if cnx.ertype_supports_undo(rtype):
@@ -662,7 +663,7 @@
             attrs = [{'eid_from': subject, 'eid_to': object}
                      for subject, object in subj_obj_list]
             sql.append((self.sqlgen.insert('%s_relation' % rtype, attrs[0]), attrs))
-        else: # used by data import
+        else:  # used by data import
             etypes = {}
             for subject, object in subj_obj_list:
                 etype = cnx.entity_metas(subject)['type']
@@ -674,7 +675,7 @@
                 attrs = [{'cw_eid': subject, SQL_PREFIX + rtype: object}
                          for subject, object in subj_obj_list]
                 sql.append((self.sqlgen.update(SQL_PREFIX + etype, attrs[0],
-                                     ['cw_eid']),
+                                               ['cw_eid']),
                             attrs))
         for statement, attrs in sql:
             self.doexecmany(cnx, statement, attrs)
@@ -694,7 +695,7 @@
             column = SQL_PREFIX + rtype
             sql = 'UPDATE %s SET %s=NULL WHERE %seid=%%(eid)s' % (table, column,
                                                                   SQL_PREFIX)
-            attrs = {'eid' : subject}
+            attrs = {'eid': subject}
         else:
             attrs = {'eid_from': subject, 'eid_to': object}
             sql = self.sqlgen.delete('%s_relation' % rtype, attrs)
@@ -716,7 +717,7 @@
                 # during test we get those message when trying to alter sqlite
                 # db schema
                 self.info("sql: %r\n args: %s\ndbms message: %r",
-                              query, args, ex.args[0])
+                          query, args, ex.args[0])
             if rollback:
                 try:
                     cnx.cnxset.rollback()
@@ -847,7 +848,7 @@
             self.exception('failed to query entities table for eid %s', eid)
         raise UnknownEid(eid)
 
-    def eid_type_source(self, cnx, eid): # pylint: disable=E0202
+    def eid_type_source(self, cnx, eid):  # pylint: disable=E0202
         """return a tuple (type, extid, source) for the entity with id <eid>"""
         sql = 'SELECT type, extid, asource FROM entities WHERE eid=%s' % eid
         res = self._eid_type_source(cnx, eid, sql)
@@ -916,15 +917,18 @@
         # insert core relations: is, is_instance_of and cw_source
 
         if entity.e_schema.eid is not None:  # else schema has not yet been serialized
-            self._handle_is_relation_sql(cnx, 'INSERT INTO is_relation(eid_from,eid_to) VALUES (%s,%s)',
-                                         (entity.eid, entity.e_schema.eid))
+            self._handle_is_relation_sql(
+                cnx, 'INSERT INTO is_relation(eid_from,eid_to) VALUES (%s,%s)',
+                (entity.eid, entity.e_schema.eid))
             for eschema in entity.e_schema.ancestors() + [entity.e_schema]:
-                self._handle_is_relation_sql(cnx,
-                                             'INSERT INTO is_instance_of_relation(eid_from,eid_to) VALUES (%s,%s)',
-                                             (entity.eid, eschema.eid))
+                self._handle_is_relation_sql(
+                    cnx,
+                    'INSERT INTO is_instance_of_relation(eid_from,eid_to) VALUES (%s,%s)',
+                    (entity.eid, eschema.eid))
         if source.eid is not None:  # else the source has not yet been inserted
-            self._handle_is_relation_sql(cnx, 'INSERT INTO cw_source_relation(eid_from,eid_to) VALUES (%s,%s)',
-                                         (entity.eid, source.eid))
+            self._handle_is_relation_sql(
+                cnx, 'INSERT INTO cw_source_relation(eid_from,eid_to) VALUES (%s,%s)',
+                (entity.eid, source.eid))
         # now we can update the full text index
         if self.need_fti_indexation(entity.cw_etype):
             self.index_entity(cnx, entity=entity)
@@ -969,9 +973,9 @@
         if actionfilters:
             # we will need subqueries to filter transactions according to
             # actions done
-            tearestr = {} # filters on the tx_entity_actions table
-            trarestr = {} # filters on the tx_relation_actions table
-            genrestr = {} # generic filters, appliyable to both table
+            tearestr = {}  # filters on the tx_entity_actions table
+            trarestr = {}  # filters on the tx_relation_actions table
+            genrestr = {}  # generic filters, applicable to both tables
             # unless public explicitly set to false, we only consider public
             # actions
             if actionfilters.pop('public', True):
@@ -982,7 +986,7 @@
                     # filtering on etype implies filtering on entity actions
                     # only, and with no eid specified
                     assert actionfilters.get('action', 'C') in 'CUD'
-                    assert not 'eid' in actionfilters
+                    assert 'eid' not in actionfilters
                     tearestr['etype'] = text_type(val)
                 elif key == 'eid':
                     # eid filter may apply to 'eid' of tx_entity_actions or to
@@ -1046,8 +1050,8 @@
                                   'etype', 'eid', 'changes'))
         with cnx.ensure_cnx_set:
             cu = self.doexec(cnx, sql, restr)
-            actions = [tx.EntityAction(a,p,o,et,e,c and pickle.loads(self.binary_to_str(c)))
-                       for a,p,o,et,e,c in cu.fetchall()]
+            actions = [tx.EntityAction(a, p, o, et, e, c and pickle.loads(self.binary_to_str(c)))
+                       for a, p, o, et, e, c in cu.fetchall()]
         sql = self.sqlgen.select('tx_relation_actions', restr,
                                  ('txa_action', 'txa_public', 'txa_order',
                                   'rtype', 'eid_from', 'eid_to'))
@@ -1146,12 +1150,12 @@
         for column, value in changes.items():
             rtype = column[len(SQL_PREFIX):]
             if rtype == "eid":
-                continue # XXX should even `eid` be stored in action changes?
+                continue  # XXX should even `eid` be stored in action changes?
             try:
                 rschema = getrschema[rtype]
             except KeyError:
                 err(cnx._("can't restore relation %(rtype)s of entity %(eid)s, "
-                              "this relation does not exist in the schema anymore.")
+                          "this relation does not exist in the schema anymore.")
                     % {'rtype': rtype, 'eid': eid})
             if not rschema.final:
                 if not rschema.inlined:
@@ -1160,11 +1164,11 @@
                 elif value is not None:
                     # not a deletion: we must put something in edited
                     try:
-                        entity._cw.entity_from_eid(value) # check target exists
+                        entity._cw.entity_from_eid(value)  # check target exists
                         edited[rtype] = value
                     except UnknownEid:
                         err(cnx._("can't restore entity %(eid)s of type %(eschema)s, "
-                                      "target of %(rtype)s (eid %(value)s) does not exist any longer")
+                                  "target of %(rtype)s (eid %(value)s) does not exist any longer")
                             % locals())
                         changes[column] = None
             elif eschema.destination(rtype) in ('Bytes', 'Password'):
@@ -1183,7 +1187,6 @@
         err = errors.append
         eid = action.eid
         etype = action.etype
-        _ = cnx._
         # get an entity instance
         try:
             entity = self.repo.vreg['etypes'].etype_class(etype)(cnx)
@@ -1239,8 +1242,7 @@
         # we should find an efficient way to do this (keeping current veolidf
         # massive deletion performance)
         if _undo_has_later_transaction(cnx, eid):
-            msg = cnx._('some later transaction(s) touch entity, undo them '
-                            'first')
+            msg = cnx._('some later transaction(s) touch entity, undo them first')
             raise ValidationError(eid, {None: msg})
         etype = action.etype
         # get an entity instance
@@ -1277,7 +1279,7 @@
             entity = cnx.entity_from_eid(action.eid)
         except UnknownEid:
             err(cnx._("can't restore state of entity %s, it has been "
-                          "deleted inbetween") % action.eid)
+                      "deleted inbetween") % action.eid)
             return errors
         self._reedit_entity(entity, action.changes, err)
         entity.cw_edited.check()
@@ -1346,10 +1348,9 @@
         try:
             for entity in entities:
                 cursor_unindex_object(entity.eid, cursor)
-        except Exception: # let KeyboardInterrupt / SystemExit propagate
+        except Exception:  # let KeyboardInterrupt / SystemExit propagate
             self.exception('error while unindexing %s', entity)
 
-
     def fti_index_entities(self, cnx, entities):
         """add text content of created/modified entities to the full text index
         """
@@ -1362,7 +1363,7 @@
                 cursor_index_object(entity.eid,
                                     entity.cw_adapt_to('IFTIndexable'),
                                     cursor)
-        except Exception: # let KeyboardInterrupt / SystemExit propagate
+        except Exception:  # let KeyboardInterrupt / SystemExit propagate
             self.exception('error while indexing %s', entity)
 
 
@@ -1391,14 +1392,15 @@
         source.fti_unindex_entities(cnx, to_reindex)
         source.fti_index_entities(cnx, to_reindex)
 
+
 def sql_schema(driver):
+    """Yield SQL statements to create system tables in the database."""
     helper = get_db_helper(driver)
     typemap = helper.TYPE_MAPPING
-    schema = """
-/* Create the repository's system database */
-
-%s
-
+    # XXX should return a list of sql statements rather than ';' joined statements
+    for sql in helper.sql_create_numrange('entities_id_seq').split(';'):
+        yield sql
+    for sql in ("""
 CREATE TABLE entities (
   eid INTEGER PRIMARY KEY NOT NULL,
   type VARCHAR(64) NOT NULL,
@@ -1447,48 +1449,36 @@
 CREATE INDEX tx_relation_actions_txa_public_idx ON tx_relation_actions(txa_public);;
 CREATE INDEX tx_relation_actions_eid_from_idx ON tx_relation_actions(eid_from);;
 CREATE INDEX tx_relation_actions_eid_to_idx ON tx_relation_actions(eid_to);;
-CREATE INDEX tx_relation_actions_tx_uuid_idx ON tx_relation_actions(tx_uuid);;
-""" % (helper.sql_create_numrange('entities_id_seq').replace(';', ';;'),
-       typemap['Datetime'],
-       typemap['Boolean'], typemap['Bytes'], typemap['Boolean'])
+CREATE INDEX tx_relation_actions_tx_uuid_idx ON tx_relation_actions(tx_uuid)
+""" % (typemap['Datetime'],
+       typemap['Boolean'], typemap['Bytes'], typemap['Boolean'])).split(';'):
+        yield sql
     if helper.backend_name == 'sqlite':
         # sqlite support the ON DELETE CASCADE syntax but do nothing
-        schema += '''
+        yield '''
 CREATE TRIGGER fkd_transactions
 BEFORE DELETE ON transactions
 FOR EACH ROW BEGIN
     DELETE FROM tx_entity_actions WHERE tx_uuid=OLD.tx_uuid;
     DELETE FROM tx_relation_actions WHERE tx_uuid=OLD.tx_uuid;
-END;;
+END;
 '''
     # define a multi-columns index on a single index to please sqlserver, which doesn't like several
     # null entries in a UNIQUE column
-    schema += ';;'.join(helper.sqls_create_multicol_unique_index('entities', ['extid'], 'entities_extid_idx'))
-    schema += ';;\n'
-    return schema
-
-
-def sql_drop_schema(driver):
-    helper = get_db_helper(driver)
-    return """
-%s;
-%s
-DROP TABLE entities;
-DROP TABLE tx_entity_actions;
-DROP TABLE tx_relation_actions;
-DROP TABLE transactions;
-""" % (';'.join(helper.sqls_drop_multicol_unique_index('entities', ['extid'])),
-       helper.sql_drop_numrange('entities_id_seq'))
+    for sql in helper.sqls_create_multicol_unique_index('entities', ['extid'],
+                                                        'entities_extid_idx'):
+        yield sql
 
 
 def grant_schema(user, set_owner=True):
-    result = ''
+    """Yield SQL statements to give all access (and ownership if `set_owner` is True) on the
+    database system tables to `user`.
+    """
     for table in ('entities', 'entities_id_seq',
                   'transactions', 'tx_entity_actions', 'tx_relation_actions'):
         if set_owner:
-            result = 'ALTER TABLE %s OWNER TO %s;\n' % (table, user)
-        result += 'GRANT ALL ON %s TO %s;\n' % (table, user)
-    return result
+            yield 'ALTER TABLE %s OWNER TO %s;' % (table, user)
+        yield 'GRANT ALL ON %s TO %s;' % (table, user)
 
 
 class BaseAuthentifier(object):
@@ -1500,6 +1490,7 @@
         """set the instance'schema"""
         pass
 
+
 class LoginPasswordAuthentifier(BaseAuthentifier):
     passwd_rql = 'Any P WHERE X is CWUser, X login %(login)s, X upassword P'
     auth_rql = (u'Any X WHERE X is CWUser, X login %(login)s, X upassword %(pwd)s, '
@@ -1508,7 +1499,7 @@
 
     def set_schema(self, schema):
         """set the instance'schema"""
-        if 'CWUser' in schema: # probably an empty schema if not true...
+        if 'CWUser' in schema:  # probably an empty schema if not true...
             # rql syntax trees used to authenticate users
             self._passwd_rqlst = self.source.compile_rql(self.passwd_rql, self._sols)
             self._auth_rqlst = self.source.compile_rql(self.auth_rql, self._sols)
@@ -1520,7 +1511,7 @@
         two queries are needed since passwords are stored crypted, so we have
         to fetch the salt first
         """
-        args = {'login': login, 'pwd' : None}
+        args = {'login': login, 'pwd': None}
         if password is not None:
             rset = self.source.syntax_tree_search(cnx, self._passwd_rqlst, args)
             try:
@@ -1541,15 +1532,15 @@
             # before 3.14.7), update with a fresh one
             if pwd is not None and pwd.getvalue():
                 verify, newhash = verify_and_update(password, pwd.getvalue())
-                if not verify: # should not happen, but...
+                if not verify:  # should not happen, but...
                     raise AuthenticationError('bad password')
                 if newhash:
-                    cnx.system_sql("UPDATE %s SET %s=%%(newhash)s WHERE %s=%%(login)s" % (
-                                        SQL_PREFIX + 'CWUser',
-                                        SQL_PREFIX + 'upassword',
-                                        SQL_PREFIX + 'login'),
-                                       {'newhash': self.source._binary(newhash.encode('ascii')),
-                                        'login': login})
+                    cnx.system_sql("UPDATE %s SET %s=%%(newhash)s WHERE %s=%%(login)s"
+                                   % (SQL_PREFIX + 'CWUser',
+                                      SQL_PREFIX + 'upassword',
+                                      SQL_PREFIX + 'login'),
+                                   {'newhash': self.source._binary(newhash.encode('ascii')),
+                                    'login': login})
                     cnx.commit()
             return user
         except IndexError:
@@ -1560,11 +1551,11 @@
     def authenticate(self, cnx, login, **authinfo):
         # email_auth flag prevent from infinite recursion (call to
         # repo.check_auth_info at the end of this method may lead us here again)
-        if not '@' in login or authinfo.pop('email_auth', None):
+        if '@' not in login or authinfo.pop('email_auth', None):
             raise AuthenticationError('not an email')
         rset = cnx.execute('Any L WHERE U login L, U primary_email M, '
-                               'M address %(login)s', {'login': login},
-                               build_descr=False)
+                           'M address %(login)s', {'login': login},
+                           build_descr=False)
         if rset.rowcount != 1:
             raise AuthenticationError('unexisting email')
         login = rset.rows[0][0]
@@ -1649,7 +1640,7 @@
             eschema = self.schema.eschema(etype)
             if eschema.final:
                 continue
-            etype_tables.append('%s%s'%(prefix, etype))
+            etype_tables.append('%s%s' % (prefix, etype))
         for rtype in self.schema.relations():
             rschema = self.schema.rschema(rtype)
             if rschema.final or rschema.inlined or rschema in VIRTUAL_RTYPES:
@@ -1701,7 +1692,7 @@
                 serialized = self._serialize(table, columns, rows)
                 archive.writestr('tables/%s.%04d' % (table, i), serialized)
                 self.logger.debug('wrote rows %d to %d (out of %d) to %s.%04d',
-                                  start, start+len(rows)-1,
+                                  start, start + len(rows) - 1,
                                   rowcount,
                                   table, i)
         else:
@@ -1807,7 +1798,6 @@
             self.cnx.commit()
         self.logger.info('inserted %d rows', row_count)
 
-
     def _parse_versions(self, version_str):
         versions = set()
         for line in version_str.splitlines():
--- a/cubicweb/server/sqlutils.py	Tue Jul 12 12:21:41 2016 +0200
+++ b/cubicweb/server/sqlutils.py	Wed Jul 20 17:58:49 2016 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2015 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
 # contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
 #
 # This file is part of CubicWeb.
@@ -115,69 +115,45 @@
 def sqlgrants(schema, driver, user,
               text_index=True, set_owner=True,
               skip_relations=(), skip_entities=()):
-    """return sql to give all access privileges to the given user on the system
-    schema
+    """Return a list of SQL statements to give all access privileges to the given user on the
+    database.
     """
     from cubicweb.server.schema2sql import grant_schema
     from cubicweb.server.sources import native
-    output = []
-    w = output.append
-    w(native.grant_schema(user, set_owner))
-    w('')
+    stmts = list(native.grant_schema(user, set_owner))
     if text_index:
         dbhelper = db.get_db_helper(driver)
-        w(dbhelper.sql_grant_user_on_fti(user))
-        w('')
-    w(grant_schema(schema, user, set_owner, skip_entities=skip_entities, prefix=SQL_PREFIX))
-    return '\n'.join(output)
+        # XXX should return a list of sql statements rather than ';' joined statements
+        stmts += dbhelper.sql_grant_user_on_fti(user).split(';')
+    stmts += grant_schema(schema, user, set_owner, skip_entities=skip_entities, prefix=SQL_PREFIX)
+    return stmts
 
 
 def sqlschema(schema, driver, text_index=True,
               user=None, set_owner=False,
               skip_relations=PURE_VIRTUAL_RTYPES, skip_entities=()):
-    """return the system sql schema, according to the given parameters"""
+    """Return the database SQL schema as a list of SQL statements, according to the given parameters.
+    """
     from cubicweb.server.schema2sql import schema2sql
     from cubicweb.server.sources import native
     if set_owner:
         assert user, 'user is argument required when set_owner is true'
-    output = []
-    w = output.append
-    w(native.sql_schema(driver))
-    w('')
+    stmts = list(native.sql_schema(driver))
     dbhelper = db.get_db_helper(driver)
     if text_index:
-        w(dbhelper.sql_init_fti().replace(';', ';;'))
-        w('')
-    w(schema2sql(dbhelper, schema, prefix=SQL_PREFIX,
-                 skip_entities=skip_entities,
-                 skip_relations=skip_relations).replace(';', ';;'))
+        stmts += dbhelper.sql_init_fti().split(';')  # XXX
+    stmts += schema2sql(dbhelper, schema, prefix=SQL_PREFIX,
+                        skip_entities=skip_entities,
+                        skip_relations=skip_relations)
     if dbhelper.users_support and user:
-        w('')
-        w(sqlgrants(schema, driver, user, text_index, set_owner,
-                    skip_relations, skip_entities).replace(';', ';;'))
-    return '\n'.join(output)
-
-
-def sqldropschema(schema, driver, text_index=True,
-                  skip_relations=PURE_VIRTUAL_RTYPES, skip_entities=()):
-    """return the sql to drop the schema, according to the given parameters"""
-    from cubicweb.server.schema2sql import dropschema2sql
-    from cubicweb.server.sources import native
-    output = []
-    w = output.append
-    if text_index:
-        dbhelper = db.get_db_helper(driver)
-        w(dbhelper.sql_drop_fti())
-        w('')
-    w(dropschema2sql(dbhelper, schema, prefix=SQL_PREFIX,
-                     skip_entities=skip_entities,
-                     skip_relations=skip_relations))
-    w('')
-    w(native.sql_drop_schema(driver))
-    return '\n'.join(output)
+        stmts += sqlgrants(schema, driver, user, text_index, set_owner,
+                           skip_relations, skip_entities)
+    return stmts
 
 
 _SQL_DROP_ALL_USER_TABLES_FILTER_FUNCTION = re.compile('^(?!(sql|pg)_)').match
+
+
 def sql_drop_all_user_tables(driver_or_helper, sqlcursor):
     """Return ths sql to drop all tables found in the database system."""
     if not getattr(driver_or_helper, 'list_tables', None):
@@ -185,14 +161,16 @@
     else:
         dbhelper = driver_or_helper
 
-    cmds = [dbhelper.sql_drop_sequence('entities_id_seq')]
+    stmts = [dbhelper.sql_drop_sequence('entities_id_seq')]
     # for mssql, we need to drop views before tables
     if hasattr(dbhelper, 'list_views'):
-        cmds += ['DROP VIEW %s;' % name
-                 for name in filter(_SQL_DROP_ALL_USER_TABLES_FILTER_FUNCTION, dbhelper.list_views(sqlcursor))]
-    cmds += ['DROP TABLE %s;' % name
-             for name in filter(_SQL_DROP_ALL_USER_TABLES_FILTER_FUNCTION, dbhelper.list_tables(sqlcursor))]
-    return '\n'.join(cmds)
+        stmts += ['DROP VIEW %s;' % name
+                  for name in filter(_SQL_DROP_ALL_USER_TABLES_FILTER_FUNCTION,
+                                     dbhelper.list_views(sqlcursor))]
+    stmts += ['DROP TABLE %s;' % name
+              for name in filter(_SQL_DROP_ALL_USER_TABLES_FILTER_FUNCTION,
+                                 dbhelper.list_tables(sqlcursor))]
+    return stmts
 
 
 class ConnectionWrapper(object):
@@ -225,7 +203,7 @@
 
     def close(self, i_know_what_i_do=False):
         """close all connections in the set"""
-        if i_know_what_i_do is not True: # unexpected closing safety belt
+        if i_know_what_i_do is not True:  # unexpected closing safety belt
             raise RuntimeError('connections set shouldn\'t be closed')
         try:
             self.cu.close()
@@ -242,7 +220,7 @@
 
     def cnxset_freed(self):
         """connections set is being freed from a session"""
-        pass # no nothing by default
+        pass  # do nothing by default
 
     def reconnect(self):
         """reopen a connection for this source or all sources if none specified
@@ -293,6 +271,7 @@
             self._cnx = self._source.get_connection()
             self._cu = self._cnx.cursor()
         return self._cnx
+
     @cnx.setter
     def cnx(self, value):
         self._cnx = value
@@ -303,6 +282,7 @@
             self._cnx = self._source.get_connection()
             self._cu = self._cnx.cursor()
         return self._cu
+
     @cu.setter
     def cu(self, value):
         self._cu = value
@@ -460,7 +440,7 @@
                     # than add_entity (native) as this behavior
                     # may also be used for update.
                     value = converters[atype](value)
-                elif atype == 'Password': # XXX could be done using a TYPE_CONVERTERS callback
+                elif atype == 'Password':  # XXX could be done using a TYPE_CONVERTERS callback
                     # if value is a Binary instance, this mean we got it
                     # from a query result and so it is already encrypted
                     if isinstance(value, Binary):
@@ -470,13 +450,13 @@
                     value = self._binary(value)
                 elif isinstance(value, Binary):
                     value = self._binary(value.getvalue())
-            attrs[SQL_PREFIX+str(attr)] = value
-        attrs[SQL_PREFIX+'eid'] = entity.eid
+            attrs[SQL_PREFIX + str(attr)] = value
+        attrs[SQL_PREFIX + 'eid'] = entity.eid
         return attrs
 
     # these are overridden by set_log_methods below
     # only defining here to prevent pylint from complaining
-    info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
+    info = warning = error = critical = exception = debug = lambda msg, *a, **kw: None
 
 set_log_methods(SQLAdapterMixIn, getLogger('cubicweb.sqladapter'))
 
@@ -536,9 +516,11 @@
     class group_concat(object):
         def __init__(self):
             self.values = set()
+
         def step(self, value):
             if value is not None:
                 self.values.add(value)
+
         def finalize(self):
             return ', '.join(text_type(v) for v in self.values)
 
@@ -562,11 +544,12 @@
     cnx.create_function("TEXT_LIMIT_SIZE", 2, limit_size2)
 
     from logilab.common.date import strptime
+
     def weekday(ustr):
         try:
             dt = strptime(ustr, '%Y-%m-%d %H:%M:%S')
         except:
-            dt =  strptime(ustr, '%Y-%m-%d')
+            dt = strptime(ustr, '%Y-%m-%d')
         # expect sunday to be 1, saturday 7 while weekday method return 0 for
         # monday
         return (dt.weekday() + 1) % 7
--- a/cubicweb/server/test/data-migractions/migratedapp/schema.py	Tue Jul 12 12:21:41 2016 +0200
+++ b/cubicweb/server/test/data-migractions/migratedapp/schema.py	Wed Jul 20 17:58:49 2016 +0200
@@ -87,7 +87,7 @@
     mydate = Date(default='TODAY')
     oldstyledefaultdate = Date(default='2013/01/01')
     newstyledefaultdate = Date(default=dt.date(2013, 1, 1))
-    shortpara = String(maxsize=64, default='hop')
+    shortpara = String(maxsize=11, default='hop', vocabulary=['hop', 'hop hop', 'hop hop hop'])
     ecrit_par = SubjectRelation('Personne', constraints=[RQLConstraint('S concerne A, O concerne A')])
     attachment = SubjectRelation('File')
 
@@ -119,7 +119,7 @@
     nom    = String(fulltextindexed=True, required=True, maxsize=64)
     prenom = String(fulltextindexed=True, maxsize=64)
     civility   = String(maxsize=1, default='M', fulltextindexed=True)
-    promo  = String(vocabulary=('bon','pasbon'))
+    promo  = String(vocabulary=('bon','pasbon', 'pasbondutout'))
     titre  = String(fulltextindexed=True, maxsize=128)
     adel   = String(maxsize=128)
     ass    = String(maxsize=128)
--- a/cubicweb/server/test/data-schema2sql/schema/schema.py	Tue Jul 12 12:21:41 2016 +0200
+++ b/cubicweb/server/test/data-schema2sql/schema/schema.py	Wed Jul 20 17:58:49 2016 +0200
@@ -1,4 +1,4 @@
-# copyright 2004-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2004-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
 # contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
 #
 # This file is part of yams.
@@ -38,7 +38,7 @@
     nom    = String(maxsize=64, fulltextindexed=True, required=True)
     prenom = String(maxsize=64, fulltextindexed=True)
     sexe   = String(maxsize=1, default='M')
-    promo  = String(vocabulary=('bon','pasbon'))
+    promo  = String(vocabulary=('bon','pasbon','pas;bon;;du;;;tout;;;;'))
     titre  = String(maxsize=128, fulltextindexed=True)
     adel   = String(maxsize=128)
     ass    = String(maxsize=128)
--- a/cubicweb/server/test/unittest_migractions.py	Tue Jul 12 12:21:41 2016 +0200
+++ b/cubicweb/server/test/unittest_migractions.py	Wed Jul 20 17:58:49 2016 +0200
@@ -22,6 +22,7 @@
 from datetime import date
 from contextlib import contextmanager
 import tempfile
+from hashlib import md5
 
 from logilab.common.testlib import unittest_main, Tags, tag, with_tempdir
 from logilab.common import tempattr
@@ -113,6 +114,14 @@
         assert result, 'no table %s' % tablename
         return dict((x[0], (x[1], x[2])) for x in result)
 
+    def table_constraints(self, mh, tablename):
+        result = mh.sqlexec(
+            "SELECT DISTINCT constraint_name FROM information_schema.constraint_column_usage "
+            "WHERE LOWER(table_name) = '%(table)s' AND constraint_name LIKE 'cstr%%'"
+            % {'table': tablename.lower()})
+        assert result, 'no table %s' % tablename
+        return set(x[0] for x in result)
+
 
 class MigrationCommandsTC(MigrationTC):
 
@@ -183,11 +192,14 @@
             self.assertEqual(self.schema['shortpara'].objects(), ('String', ))
             # test created column is actually a varchar(64)
             fields = self.table_schema(mh, '%sNote' % SQL_PREFIX)
-            self.assertEqual(fields['%sshortpara' % SQL_PREFIX], ('character varying', 64))
+            self.assertEqual(fields['%sshortpara' % SQL_PREFIX], ('character varying', 11))
             # test default value set on existing entities
             self.assertEqual(cnx.execute('Note X').get_entity(0, 0).shortpara, 'hop')
             # test default value set for next entities
-            self.assertEqual(cnx.create_entity('Note', shortpara=u'hophop').shortpara, u'hophop')
+            self.assertEqual(cnx.create_entity('Note', shortpara=u'hop hop').shortpara, u'hop hop')
+            # serialized constraint added
+            constraints = self.table_constraints(mh, 'cw_Personne')
+            self.assertEqual(len(constraints), 1, constraints)
 
     def test_add_attribute_unique(self):
         with self.mh() as (cnx, mh):
@@ -585,6 +597,13 @@
             relations = [r.name for r in rset.get_entity(0, 0).relations]
             self.assertCountEqual(relations, ('nom', 'prenom', 'datenaiss'))
 
+            # serialized constraint changed
+            constraints = self.table_constraints(mh, 'cw_Personne')
+            self.assertEqual(len(constraints), 1, constraints)
+            rdef = migrschema['promo'].rdefs['Personne', 'String']
+            cstr = rdef.constraint_by_type('StaticVocabularyConstraint')
+            self.assertIn(cstr.name_for(rdef), constraints)
+
     def _erqlexpr_rset(self, cnx, action, ertype):
         rql = 'RQLExpression X WHERE ET is CWEType, ET %s_permission X, ET name %%(name)s' % action
         return cnx.execute(rql, {'name': ertype})
--- a/cubicweb/server/test/unittest_schema2sql.py	Tue Jul 12 12:21:41 2016 +0200
+++ b/cubicweb/server/test/unittest_schema2sql.py	Wed Jul 20 17:58:49 2016 +0200
@@ -1,4 +1,4 @@
-# copyright 2004-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2004-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
 # contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
 #
 # This file is part of CubicWeb.
@@ -24,6 +24,7 @@
 from logilab.database import get_db_helper
 
 from yams.reader import SchemaLoader
+
 from cubicweb.server import schema2sql
 
 schema2sql.SET_DEFAULT = True
@@ -33,19 +34,17 @@
 schema = SchemaLoader().load([DATADIR])
 
 
-EXPECTED_DATA_NO_DROP = """
-CREATE TABLE Affaire(
+EXPECTED_DATA_NO_DROP = [
+    """CREATE TABLE Affaire(
  sujet varchar(128),
  ref varchar(12),
  inline_rel integer REFERENCES entities (eid)
-);
-CREATE INDEX idx_444e29ba3bd1f6c7ea89008613345d7b ON Affaire(inline_rel);
-
-CREATE TABLE Company(
+)""",
+    "CREATE INDEX idx_444e29ba3bd1f6c7ea89008613345d7b ON Affaire(inline_rel)",
+    """CREATE TABLE Company(
  name text
-);
-
-CREATE TABLE Datetest(
+)""",
+    """CREATE TABLE Datetest(
  dt1 timestamp,
  dt2 timestamp,
  d1 date,
@@ -53,42 +52,36 @@
  t1 time,
  t2 time
 , CONSTRAINT cstrf6a3dad792ba13c2cddcf61a2b737c00 CHECK(d1 <= CAST(clock_timestamp() AS DATE))
-);
-
-CREATE TABLE Division(
+)""",
+    """CREATE TABLE Division(
  name text
-);
-
-CREATE TABLE EPermission(
+)""",
+    """CREATE TABLE EPermission(
  name varchar(100) NOT NULL
-);
-CREATE INDEX idx_86fb596553c6f1ebc159422169f76c32 ON EPermission(name);
-
-CREATE TABLE Eetype(
+)""",
+    "CREATE INDEX idx_86fb596553c6f1ebc159422169f76c32 ON EPermission(name)",
+    """CREATE TABLE Eetype(
  name varchar(64) NOT NULL,
  description text,
  meta boolean,
  final boolean,
  initial_state integer REFERENCES entities (eid)
-);
-CREATE INDEX idx_f1f29b77c85f57921df19d2c29044d2d ON Eetype(name);
-ALTER TABLE Eetype ADD CONSTRAINT key_f1f29b77c85f57921df19d2c29044d2d UNIQUE(name);
-CREATE INDEX idx_27be7c0b18181bbdc76f3a54296dd81f ON Eetype(initial_state);
-
-CREATE TABLE Employee(
-);
-
-CREATE TABLE Note(
+)""",
+    "CREATE INDEX idx_f1f29b77c85f57921df19d2c29044d2d ON Eetype(name)",
+    "ALTER TABLE Eetype ADD CONSTRAINT key_f1f29b77c85f57921df19d2c29044d2d UNIQUE(name)",
+    "CREATE INDEX idx_27be7c0b18181bbdc76f3a54296dd81f ON Eetype(initial_state)",
+    """CREATE TABLE Employee(
+)""",
+    """CREATE TABLE Note(
  date varchar(10),
  type varchar(1),
  para varchar(512)
-);
-
-CREATE TABLE Person(
+)""",
+    """CREATE TABLE Person(
  nom varchar(64) NOT NULL,
  prenom varchar(64),
  sexe varchar(1) DEFAULT 'M',
- promo varchar(6),
+ promo varchar(22),
  titre varchar(128),
  adel varchar(128),
  ass varchar(128),
@@ -98,15 +91,14 @@
  datenaiss date,
  test boolean,
  salary float
-, CONSTRAINT cstr151c2116c0c09de13fded0619d5b4aac CHECK(promo IN ('bon', 'pasbon'))
-);
-CREATE UNIQUE INDEX unique_e6c2d219772dbf1715597f7d9a6b3892 ON Person(nom,prenom);
-
-CREATE TABLE Salaried(
+, CONSTRAINT cstrf5ac746b90a5fdd00fbe037ec9cf18eb CHECK(promo IN ('bon', 'pasbon', 'pas;bon;;du;;;tout;;;;'))
+)""",
+    "CREATE UNIQUE INDEX unique_e6c2d219772dbf1715597f7d9a6b3892 ON Person(nom,prenom)",
+    """CREATE TABLE Salaried(
  nom varchar(64) NOT NULL,
  prenom varchar(64),
  sexe varchar(1) DEFAULT 'M',
- promo varchar(6),
+ promo varchar(22),
  titre varchar(128),
  adel varchar(128),
  ass varchar(128),
@@ -116,11 +108,10 @@
  datenaiss date,
  test boolean,
  salary float
-, CONSTRAINT cstr069569cf1791dba1a2726197c53aeb44 CHECK(promo IN ('bon', 'pasbon'))
-);
-CREATE UNIQUE INDEX unique_98da0f9de8588baa8966f0b1a6f850a3 ON Salaried(nom,prenom);
-
-CREATE TABLE Societe(
+, CONSTRAINT cstrb73206eeba9fe96a05105a9db62a1509 CHECK(promo IN ('bon', 'pasbon', 'pas;bon;;du;;;tout;;;;'))
+)""",
+    "CREATE UNIQUE INDEX unique_98da0f9de8588baa8966f0b1a6f850a3 ON Salaried(nom,prenom)",
+    """CREATE TABLE Societe(
  nom varchar(64),
  web varchar(128),
  tel integer,
@@ -132,25 +123,21 @@
  cp varchar(12),
  ville varchar(32)
 , CONSTRAINT cstra0a1deaa997dcd5f9b83a77654d7c287 CHECK(fax <= tel)
-);
-ALTER TABLE Societe ADD CONSTRAINT key_abace82c402eba4a37ac54a7872607af UNIQUE(tel);
-
-CREATE TABLE State(
+)""",
+    "ALTER TABLE Societe ADD CONSTRAINT key_abace82c402eba4a37ac54a7872607af UNIQUE(tel)",
+    """CREATE TABLE State(
  eid integer PRIMARY KEY REFERENCES entities (eid),
  name varchar(256) NOT NULL,
  description text
-);
-CREATE INDEX idx_fba3802ef9056558bb9c06b5c6ba9aab ON State(name);
-
-CREATE TABLE Subcompany(
+)""",
+    "CREATE INDEX idx_fba3802ef9056558bb9c06b5c6ba9aab ON State(name)",
+    """CREATE TABLE Subcompany(
  name text
-);
-
-CREATE TABLE Subdivision(
+)""",
+    """CREATE TABLE Subdivision(
  name text
-);
-
-CREATE TABLE pkginfo(
+)""",
+    """CREATE TABLE pkginfo(
  modname varchar(30) NOT NULL,
  version varchar(10) DEFAULT '0.1' NOT NULL,
  copyright text NOT NULL,
@@ -163,125 +150,100 @@
  debian_handler varchar(6)
 , CONSTRAINT cstrbffed5ce7306d65a0db51182febd4a7b CHECK(license IN ('GPL', 'ZPL'))
 , CONSTRAINT cstr2238b33d09bf7c441e0888be354c2444 CHECK(debian_handler IN ('machin', 'bidule'))
-);
-
-
-CREATE TABLE concerne_relation (
+)""",
+    """CREATE TABLE concerne_relation (
   eid_from INTEGER NOT NULL REFERENCES entities (eid),
   eid_to INTEGER NOT NULL REFERENCES entities (eid),
   CONSTRAINT key_19e70eabae35becb48c247bc4a688170 PRIMARY KEY(eid_from, eid_to)
-);
-
-CREATE INDEX idx_5ee7db9477832d6e0e847d9d9cd39f5f ON concerne_relation(eid_from);
-CREATE INDEX idx_07f609872b384bb1e598cc355686a53c ON concerne_relation(eid_to);
-
-CREATE TABLE division_of_relation (
+)""",
+    "CREATE INDEX idx_5ee7db9477832d6e0e847d9d9cd39f5f ON concerne_relation(eid_from)",
+    "CREATE INDEX idx_07f609872b384bb1e598cc355686a53c ON concerne_relation(eid_to)",
+    """CREATE TABLE division_of_relation (
   eid_from INTEGER NOT NULL REFERENCES entities (eid),
   eid_to INTEGER NOT NULL REFERENCES entities (eid),
   CONSTRAINT key_ca129a4cfa4c185c7783654e9e97da5a PRIMARY KEY(eid_from, eid_to)
-);
-
-CREATE INDEX idx_78da9d594180fecb68ef1eba0c17a975 ON division_of_relation(eid_from);
-CREATE INDEX idx_0e6bd09d8d25129781928848e2f6d8d5 ON division_of_relation(eid_to);
-
-CREATE TABLE evaluee_relation (
+)""",
+    "CREATE INDEX idx_78da9d594180fecb68ef1eba0c17a975 ON division_of_relation(eid_from)",
+    "CREATE INDEX idx_0e6bd09d8d25129781928848e2f6d8d5 ON division_of_relation(eid_to)",
+    """CREATE TABLE evaluee_relation (
   eid_from INTEGER NOT NULL REFERENCES entities (eid),
   eid_to INTEGER NOT NULL REFERENCES entities (eid),
   CONSTRAINT key_61aa7ea90ed7e43818c9865a3a7eb046 PRIMARY KEY(eid_from, eid_to)
-);
-
-CREATE INDEX idx_69358dbe47990b4f8cf22af55b064dc5 ON evaluee_relation(eid_from);
-CREATE INDEX idx_634663371244297334ff655a26d6cce3 ON evaluee_relation(eid_to);
-
-CREATE TABLE next_state_relation (
+)""",
+    "CREATE INDEX idx_69358dbe47990b4f8cf22af55b064dc5 ON evaluee_relation(eid_from)",
+    "CREATE INDEX idx_634663371244297334ff655a26d6cce3 ON evaluee_relation(eid_to)",
+    """CREATE TABLE next_state_relation (
   eid_from INTEGER NOT NULL REFERENCES entities (eid),
   eid_to INTEGER NOT NULL REFERENCES entities (eid),
   CONSTRAINT key_24a1275472da1ccc1031f6c463cdaa95 PRIMARY KEY(eid_from, eid_to)
-);
-
-CREATE INDEX idx_e5c1a2ddc41a057eaaf6bdf9f5c6b587 ON next_state_relation(eid_from);
-CREATE INDEX idx_a3cf3cb065213186cf825e13037df826 ON next_state_relation(eid_to);
-
-CREATE TABLE obj_wildcard_relation (
+)""",
+    "CREATE INDEX idx_e5c1a2ddc41a057eaaf6bdf9f5c6b587 ON next_state_relation(eid_from)",
+    "CREATE INDEX idx_a3cf3cb065213186cf825e13037df826 ON next_state_relation(eid_to)",
+    """CREATE TABLE obj_wildcard_relation (
   eid_from INTEGER NOT NULL REFERENCES entities (eid),
   eid_to INTEGER NOT NULL REFERENCES entities (eid),
   CONSTRAINT key_d252c56177735139c85aee463cd65703 PRIMARY KEY(eid_from, eid_to)
-);
-
-CREATE INDEX idx_efbd9bd98c44bdfe2add479ab6704017 ON obj_wildcard_relation(eid_from);
-CREATE INDEX idx_e8c168c66f9d6057ce14e644b8436808 ON obj_wildcard_relation(eid_to);
-
-CREATE TABLE require_permission_relation (
+)""",
+    "CREATE INDEX idx_efbd9bd98c44bdfe2add479ab6704017 ON obj_wildcard_relation(eid_from)",
+    "CREATE INDEX idx_e8c168c66f9d6057ce14e644b8436808 ON obj_wildcard_relation(eid_to)",
+    """CREATE TABLE require_permission_relation (
   eid_from INTEGER NOT NULL REFERENCES entities (eid),
   eid_to INTEGER NOT NULL REFERENCES entities (eid),
   CONSTRAINT key_24f38c4edaf84fdcc0f0d093fec3d5c7 PRIMARY KEY(eid_from, eid_to)
-);
-
-CREATE INDEX idx_193987ddfd7c66bf43ded029ea363605 ON require_permission_relation(eid_from);
-CREATE INDEX idx_f6dd784ff5161c4461a753591fe1de94 ON require_permission_relation(eid_to);
-
-CREATE TABLE state_of_relation (
+)""",
+    "CREATE INDEX idx_193987ddfd7c66bf43ded029ea363605 ON require_permission_relation(eid_from)",
+    "CREATE INDEX idx_f6dd784ff5161c4461a753591fe1de94 ON require_permission_relation(eid_to)",
+    """CREATE TABLE state_of_relation (
   eid_from INTEGER NOT NULL REFERENCES entities (eid),
   eid_to INTEGER NOT NULL REFERENCES entities (eid),
   CONSTRAINT key_be6983bc3072230d2e22f7631a0c9e25 PRIMARY KEY(eid_from, eid_to)
-);
-
-CREATE INDEX idx_5f17c14443de03bd1ef79750c89c2390 ON state_of_relation(eid_from);
-CREATE INDEX idx_0ee453927e090f6eec01c412278dea9b ON state_of_relation(eid_to);
-
-CREATE TABLE subcompany_of_relation (
+)""",
+    "CREATE INDEX idx_5f17c14443de03bd1ef79750c89c2390 ON state_of_relation(eid_from)",
+    "CREATE INDEX idx_0ee453927e090f6eec01c412278dea9b ON state_of_relation(eid_to)",
+    """CREATE TABLE subcompany_of_relation (
   eid_from INTEGER NOT NULL REFERENCES entities (eid),
   eid_to INTEGER NOT NULL REFERENCES entities (eid),
   CONSTRAINT key_25bee50df3b495a40a02aa39f832377f PRIMARY KEY(eid_from, eid_to)
-);
-
-CREATE INDEX idx_1e6ee813030fec8d4439fc186ce752b0 ON subcompany_of_relation(eid_from);
-CREATE INDEX idx_259f9ba242f4cb80b9b2f2f9a754fca7 ON subcompany_of_relation(eid_to);
-
-CREATE TABLE subdivision_of_relation (
+)""",
+    "CREATE INDEX idx_1e6ee813030fec8d4439fc186ce752b0 ON subcompany_of_relation(eid_from)",
+    "CREATE INDEX idx_259f9ba242f4cb80b9b2f2f9a754fca7 ON subcompany_of_relation(eid_to)",
+    """CREATE TABLE subdivision_of_relation (
   eid_from INTEGER NOT NULL REFERENCES entities (eid),
   eid_to INTEGER NOT NULL REFERENCES entities (eid),
   CONSTRAINT key_4d6f7368345676ebb66758ab71f60aef PRIMARY KEY(eid_from, eid_to)
-);
-
-CREATE INDEX idx_a90a958166c767b50a7294e93858c1a8 ON subdivision_of_relation(eid_from);
-CREATE INDEX idx_0360028629649b26da96044a12735ad4 ON subdivision_of_relation(eid_to);
-
-CREATE TABLE subj_wildcard_relation (
+)""",
+    "CREATE INDEX idx_a90a958166c767b50a7294e93858c1a8 ON subdivision_of_relation(eid_from)",
+    "CREATE INDEX idx_0360028629649b26da96044a12735ad4 ON subdivision_of_relation(eid_to)",
+    """CREATE TABLE subj_wildcard_relation (
   eid_from INTEGER NOT NULL REFERENCES entities (eid),
   eid_to INTEGER NOT NULL REFERENCES entities (eid),
   CONSTRAINT key_712ea3ec0bc1976bddc93ceba0acff06 PRIMARY KEY(eid_from, eid_to)
-);
-
-CREATE INDEX idx_4dbfa4a0d44aaa0f0816560fa8b81c22 ON subj_wildcard_relation(eid_from);
-CREATE INDEX idx_09aa23f8a8b63189d05a63f8d49c7bc0 ON subj_wildcard_relation(eid_to);
-
-CREATE TABLE sym_rel_relation (
+)""",
+    "CREATE INDEX idx_4dbfa4a0d44aaa0f0816560fa8b81c22 ON subj_wildcard_relation(eid_from)",
+    "CREATE INDEX idx_09aa23f8a8b63189d05a63f8d49c7bc0 ON subj_wildcard_relation(eid_to)",
+    """CREATE TABLE sym_rel_relation (
   eid_from INTEGER NOT NULL REFERENCES entities (eid),
   eid_to INTEGER NOT NULL REFERENCES entities (eid),
   CONSTRAINT key_c787b80522205c42402530580b0d307b PRIMARY KEY(eid_from, eid_to)
-);
-
-CREATE INDEX idx_a46ed54f98cc4d91f0df5375d3ef73cb ON sym_rel_relation(eid_from);
-CREATE INDEX idx_0faa43abe25fc83e9400a3b96daed2b2 ON sym_rel_relation(eid_to);
-
-CREATE TABLE travaille_relation (
+)""",
+    "CREATE INDEX idx_a46ed54f98cc4d91f0df5375d3ef73cb ON sym_rel_relation(eid_from)",
+    "CREATE INDEX idx_0faa43abe25fc83e9400a3b96daed2b2 ON sym_rel_relation(eid_to)",
+    """CREATE TABLE travaille_relation (
   eid_from INTEGER NOT NULL REFERENCES entities (eid),
   eid_to INTEGER NOT NULL REFERENCES entities (eid),
   CONSTRAINT key_d7b209a1f84d9cae74a98626ef0aba0b PRIMARY KEY(eid_from, eid_to)
-);
-
-CREATE INDEX idx_b00e86c772e6577ad7a7901dd0b257b2 ON travaille_relation(eid_from);
-CREATE INDEX idx_970c052363294a9871a4824c9588e220 ON travaille_relation(eid_to);
-"""
+)""",
+    "CREATE INDEX idx_b00e86c772e6577ad7a7901dd0b257b2 ON travaille_relation(eid_from)",
+    "CREATE INDEX idx_970c052363294a9871a4824c9588e220 ON travaille_relation(eid_to)",
+]
 
 
 class SQLSchemaTC(TestCase):
 
     def test_known_values(self):
         dbhelper = get_db_helper('postgres')
-        output = schema2sql.schema2sql(dbhelper, schema, skip_relations=('works_for',))
-        self.assertMultiLineEqual(EXPECTED_DATA_NO_DROP.strip(), output.strip())
+        output = list(schema2sql.schema2sql(dbhelper, schema, skip_relations=('works_for',)))
+        self.assertEqual(output, EXPECTED_DATA_NO_DROP)
 
 
 if __name__ == '__main__':
--- a/cubicweb/server/test/unittest_schemaserial.py	Tue Jul 12 12:21:41 2016 +0200
+++ b/cubicweb/server/test/unittest_schemaserial.py	Wed Jul 20 17:58:49 2016 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
 # contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
 #
 # This file is part of CubicWeb.
@@ -58,6 +58,7 @@
                'FormatConstraint': 'FormatConstraint_eid',
                }
 
+
 class Schema2RQLTC(TestCase):
 
     def test_eschema2rql1(self):
@@ -71,8 +72,8 @@
 
     def test_eschema2rql2(self):
         self.assertListEqual([
-                ('INSERT CWEType X: X description %(description)s,X final %(final)s,X name %(name)s',
-                 {'description': u'', 'final': True, 'name': u'String'})],
+            ('INSERT CWEType X: X description %(description)s,X final %(final)s,X name %(name)s',
+             {'description': u'', 'final': True, 'name': u'String'})],
                              list(eschema2rql(schema.eschema('String'))))
 
     def test_eschema2rql_specialization(self):
@@ -87,7 +88,7 @@
         expected = [('INSERT CWEType X: X description %(description)s,X final %(final)s,'
                      'X name %(name)s',
                      {'description': u'',
-                     'name': u'BabarTestType', 'final': True},)]
+                      'name': u'BabarTestType', 'final': True},)]
         got = list(eschema2rql(schema.eschema('BabarTestType')))
         self.assertListEqual(expected, got)
 
@@ -99,7 +100,7 @@
              {'description': u'link a relation definition to its relation type',
               'symmetric': False,
               'name': u'relation_type',
-              'final' : False,
+              'final': False,
               'fulltext_container': None,
               'inlined': True}),
 
@@ -124,7 +125,8 @@
               'ordernum': 1, 'cardinality': u'1*'}),
             ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X '
              'WHERE CT eid %(ct)s, EDEF eid %(x)s',
-             {'x': None, 'ct': u'RQLConstraint_eid', 'value': u'{"expression": "O final FALSE", "mainvars": ["O"], "msg": null}'}),
+             {'x': None, 'ct': u'RQLConstraint_eid',
+              'value': u'{"expression": "O final FALSE", "mainvars": ["O"], "msg": null}'}),
         ],
                              list(rschema2rql(schema.rschema('relation_type'), cstrtypemap)))
 
@@ -184,13 +186,13 @@
             ('INSERT CWRelation X: X cardinality %(cardinality)s,X composite %(composite)s,'
              'X description %(description)s,X ordernum %(ordernum)s,X relation_type ER,'
              'X from_entity SE,X to_entity OE WHERE SE eid %(se)s,ER eid %(rt)s,OE eid %(oe)s',
-            {'cardinality': u'**',
-             'composite': None,
-             'description': u'groups allowed to add entities/relations of this type',
-             'oe': None,
-             'ordernum': 9999,
-             'rt': None,
-             'se': None}),
+             {'cardinality': u'**',
+              'composite': None,
+              'description': u'groups allowed to add entities/relations of this type',
+              'oe': None,
+              'ordernum': 9999,
+              'rt': None,
+              'se': None}),
             ('INSERT CWRelation X: X cardinality %(cardinality)s,X composite %(composite)s,'
              'X description %(description)s,X ordernum %(ordernum)s,X relation_type ER,'
              'X from_entity SE,X to_entity OE WHERE SE eid %(se)s,ER eid %(rt)s,OE eid %(oe)s',
@@ -387,10 +389,10 @@
 class Perms2RQLTC(TestCase):
     GROUP_MAPPING = {
         'managers': 0,
-        'users':  1,
+        'users': 1,
         'guests': 2,
         'owners': 3,
-        }
+    }
 
     def test_eperms2rql1(self):
         self.assertListEqual([('SET X read_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 0}),
@@ -422,9 +424,6 @@
                               for rql, kwargs in erperms2rql(schema.rschema('name').rdef('CWEType', 'String'),
                                                              self.GROUP_MAPPING)])
 
-    #def test_perms2rql(self):
-    #    self.assertListEqual(perms2rql(schema, self.GROUP_MAPPING),
-    #                         ['INSERT CWEType X: X name 'Societe', X final FALSE'])
 
 class ComputedAttributeAndRelationTC(CubicWebTC):
     appid = 'data-cwep002'
@@ -442,6 +441,7 @@
         self.assertEqual('Any SUM(SA) GROUPBY X WHERE P works_for X, P salary SA',
                          schema['total_salary'].rdefs['Company', 'Int'].formula)
 
+
 if __name__ == '__main__':
     from unittest import main
     main()
--- a/cubicweb/skeleton/DISTNAME.spec.tmpl	Tue Jul 12 12:21:41 2016 +0200
+++ b/cubicweb/skeleton/DISTNAME.spec.tmpl	Wed Jul 20 17:58:49 2016 +0200
@@ -34,7 +34,7 @@
 %%endif
 
 %%install
-NO_SETUPTOOLS=1 %%{__python} setup.py --quiet install --no-compile --prefix=%%{_prefix} --root="$RPM_BUILD_ROOT"
+%%{__python} setup.py --quiet install --no-compile --prefix=%%{_prefix} --root="$RPM_BUILD_ROOT"
 # remove generated .egg-info file
 rm -rf $RPM_BUILD_ROOT/usr/lib/python*
 
--- a/cubicweb/skeleton/debian/rules	Tue Jul 12 12:21:41 2016 +0200
+++ b/cubicweb/skeleton/debian/rules	Wed Jul 20 17:58:49 2016 +0200
@@ -1,7 +1,5 @@
 #!/usr/bin/make -f
 
-export NO_SETUPTOOLS=1
-
 %:
 	dh $@ --with python2
 
--- a/cubicweb/skeleton/setup.py	Tue Jul 12 12:21:41 2016 +0200
+++ b/cubicweb/skeleton/setup.py	Wed Jul 20 17:58:49 2016 +0200
@@ -27,16 +27,8 @@
 import shutil
 from os.path import exists, join, dirname
 
-try:
-    if os.environ.get('NO_SETUPTOOLS'):
-        raise ImportError()  # do as there is no setuptools
-    from setuptools import setup
-    from setuptools.command import install_lib
-    USE_SETUPTOOLS = True
-except ImportError:
-    from distutils.core import setup
-    from distutils.command import install_lib
-    USE_SETUPTOOLS = False
+from setuptools import setup
+from setuptools.command import install_lib
 from distutils.command import install_data
 
 
@@ -68,14 +60,11 @@
 ext_modules = pkginfo.get('ext_modules', None)
 dependency_links = pkginfo.get('dependency_links', ())
 
-if USE_SETUPTOOLS:
-    requires = {}
-    for entry in ("__depends__",):  # "__recommends__"):
-        requires.update(pkginfo.get(entry, {}))
-    install_requires = [("%s %s" % (d, v and v or "")).strip()
-                        for d, v in requires.items()]
-else:
-    install_requires = []
+requires = {}
+for entry in ("__depends__",):  # "__recommends__"):
+    requires.update(pkginfo.get(entry, {}))
+install_requires = [("%s %s" % (d, v and v or "")).strip()
+                    for d, v in requires.items()]
 
 BASE_BLACKLIST = ('CVS', '.svn', '.hg', '.git', 'debian', 'dist', 'build')
 IGNORED_EXTENSIONS = ('.pyc', '.pyo', '.elc', '~')
@@ -147,50 +136,47 @@
 
 # re-enable copying data files in sys.prefix
 old_install_data = install_data.install_data
-if USE_SETUPTOOLS:
-    # overwrite InstallData to use sys.prefix instead of the egg directory
-    class MyInstallData(old_install_data):
-        """A class that manages data files installation"""
-        def run(self):
-            _old_install_dir = self.install_dir
-            if self.install_dir.endswith('egg'):
-                self.install_dir = sys.prefix
-            old_install_data.run(self)
-            self.install_dir = _old_install_dir
-    try:
-        # only if easy_install available
-        import setuptools.command.easy_install  # noqa
-        # monkey patch: Crack SandboxViolation verification
-        from setuptools.sandbox import DirectorySandbox as DS
-        old_ok = DS._ok
+# overwrite InstallData to use sys.prefix instead of the egg directory
+class MyInstallData(old_install_data):
+    """A class that manages data files installation"""
+    def run(self):
+        _old_install_dir = self.install_dir
+        if self.install_dir.endswith('egg'):
+            self.install_dir = sys.prefix
+        old_install_data.run(self)
+        self.install_dir = _old_install_dir
+try:
+    # only if easy_install available
+    import setuptools.command.easy_install  # noqa
+    # monkey patch: Crack SandboxViolation verification
+    from setuptools.sandbox import DirectorySandbox as DS
+    old_ok = DS._ok
 
-        def _ok(self, path):
-            """Return True if ``path`` can be written during installation."""
-            out = old_ok(self, path)  # here for side effect from setuptools
-            realpath = os.path.normcase(os.path.realpath(path))
-            allowed_path = os.path.normcase(sys.prefix)
-            if realpath.startswith(allowed_path):
-                out = True
-            return out
-        DS._ok = _ok
-    except ImportError:
-        pass
+    def _ok(self, path):
+        """Return True if ``path`` can be written during installation."""
+        out = old_ok(self, path)  # here for side effect from setuptools
+        realpath = os.path.normcase(os.path.realpath(path))
+        allowed_path = os.path.normcase(sys.prefix)
+        if realpath.startswith(allowed_path):
+            out = True
+        return out
+    DS._ok = _ok
+except ImportError:
+    pass
 
 
 def install(**kwargs):
     """setup entry point"""
-    if USE_SETUPTOOLS:
-        if '--force-manifest' in sys.argv:
-            sys.argv.remove('--force-manifest')
+    if '--force-manifest' in sys.argv:
+        sys.argv.remove('--force-manifest')
     # install-layout option was introduced in 2.5.3-1~exp1
     elif sys.version_info < (2, 5, 4) and '--install-layout=deb' in sys.argv:
         sys.argv.remove('--install-layout=deb')
     cmdclass = {'install_lib': MyInstallLib}
-    if USE_SETUPTOOLS:
-        kwargs['install_requires'] = install_requires
-        kwargs['dependency_links'] = dependency_links
-        kwargs['zip_safe'] = False
-        cmdclass['install_data'] = MyInstallData
+    kwargs['install_requires'] = install_requires
+    kwargs['dependency_links'] = dependency_links
+    kwargs['zip_safe'] = False
+    cmdclass['install_data'] = MyInstallData
 
     return setup(name=distname,
                  version=version,
--- a/debian/changelog	Tue Jul 12 12:21:41 2016 +0200
+++ b/debian/changelog	Wed Jul 20 17:58:49 2016 +0200
@@ -1,9 +1,3 @@
-cubicweb (3.23.2-1) UNRELEASED; urgency=medium
-
-  * new upstream release 
-
- -- David Douard <david.douard@logilab.fr>  Tue, 12 Jul 2016 11:34:48 +0200
-
 cubicweb (3.23.1-1) unstable; urgency=medium
 
   * new upstream release.
--- a/debian/rules	Tue Jul 12 12:21:41 2016 +0200
+++ b/debian/rules	Wed Jul 20 17:58:49 2016 +0200
@@ -8,7 +8,7 @@
 build: build-stamp
 build-stamp:
 	dh_testdir
-	NO_SETUPTOOLS=1 python setup.py build
+	python setup.py build
 	# cubicweb.foo needs to be importable by sphinx, so create a cubicweb symlink to the source dir
 	mkdir -p debian/pythonpath
 	ln -sf $(CURDIR)/cubicweb debian/pythonpath
@@ -34,7 +34,7 @@
 	dh_clean
 	dh_installdirs
 
-	NO_SETUPTOOLS=1 python setup.py -q install --no-compile --prefix=debian/tmp/usr
+	python setup.py -q install --no-compile --prefix=debian/tmp/usr
 
 	# Put all the python library and data in cubicweb-common
 	# and scripts in cubicweb-server
--- a/setup.py	Tue Jul 12 12:21:41 2016 +0200
+++ b/setup.py	Wed Jul 20 17:58:49 2016 +0200
@@ -27,16 +27,8 @@
 import shutil
 from os.path import dirname, exists, isdir, join
 
-try:
-    if os.environ.get('NO_SETUPTOOLS'):
-        raise ImportError() # do as there is no setuptools
-    from setuptools import setup
-    from setuptools.command import install_lib
-    USE_SETUPTOOLS = True
-except ImportError:
-    from distutils.core import setup
-    from distutils.command import install_lib
-    USE_SETUPTOOLS = False
+from setuptools import setup
+from setuptools.command import install_lib
 from distutils.command import install_data
 
 here = dirname(__file__)
@@ -58,14 +50,11 @@
     long_description = f.read()
 
 # import optional features
-if USE_SETUPTOOLS:
-    requires = {}
-    for entry in ("__depends__",): # "__recommends__"):
-        requires.update(__pkginfo__.get(entry, {}))
-    install_requires = [("%s %s" % (d, v and v or "")).strip()
-                       for d, v in requires.items()]
-else:
-    install_requires = []
+requires = {}
+for entry in ("__depends__",): # "__recommends__"):
+    requires.update(__pkginfo__.get(entry, {}))
+install_requires = [("%s %s" % (d, v and v or "")).strip()
+                   for d, v in requires.items()]
 
 distname = __pkginfo__.get('distname', modname)
 scripts = __pkginfo__.get('scripts', ())
@@ -179,46 +168,43 @@
         ini.close()
 
 # re-enable copying data files in sys.prefix
-if USE_SETUPTOOLS:
-    # overwrite MyInstallData to use sys.prefix instead of the egg directory
-    MyInstallMoreData = MyInstallData
-    class MyInstallData(MyInstallMoreData): # pylint: disable=E0102
-        """A class that manages data files installation"""
-        def run(self):
-            _old_install_dir = self.install_dir
-            if self.install_dir.endswith('egg'):
-                self.install_dir = sys.prefix
-            MyInstallMoreData.run(self)
-            self.install_dir = _old_install_dir
-    try:
-        import setuptools.command.easy_install # only if easy_install available
-        # monkey patch: Crack SandboxViolation verification
-        from setuptools.sandbox import DirectorySandbox as DS
-        old_ok = DS._ok
-        def _ok(self, path):
-            """Return True if ``path`` can be written during installation."""
-            out = old_ok(self, path) # here for side effect from setuptools
-            realpath = os.path.normcase(os.path.realpath(path))
-            allowed_path = os.path.normcase(sys.prefix)
-            if realpath.startswith(allowed_path):
-                out = True
-            return out
-        DS._ok = _ok
-    except ImportError:
-        pass
+# overwrite MyInstallData to use sys.prefix instead of the egg directory
+MyInstallMoreData = MyInstallData
+class MyInstallData(MyInstallMoreData): # pylint: disable=E0102
+    """A class that manages data files installation"""
+    def run(self):
+        _old_install_dir = self.install_dir
+        if self.install_dir.endswith('egg'):
+            self.install_dir = sys.prefix
+        MyInstallMoreData.run(self)
+        self.install_dir = _old_install_dir
+try:
+    import setuptools.command.easy_install # only if easy_install available
+    # monkey patch: Crack SandboxViolation verification
+    from setuptools.sandbox import DirectorySandbox as DS
+    old_ok = DS._ok
+    def _ok(self, path):
+        """Return True if ``path`` can be written during installation."""
+        out = old_ok(self, path) # here for side effect from setuptools
+        realpath = os.path.normcase(os.path.realpath(path))
+        allowed_path = os.path.normcase(sys.prefix)
+        if realpath.startswith(allowed_path):
+            out = True
+        return out
+    DS._ok = _ok
+except ImportError:
+    pass
 
 def install(**kwargs):
     """setup entry point"""
-    if USE_SETUPTOOLS:
-        if '--force-manifest' in sys.argv:
-            sys.argv.remove('--force-manifest')
+    if '--force-manifest' in sys.argv:
+        sys.argv.remove('--force-manifest')
     # install-layout option was introduced in 2.5.3-1~exp1
     elif sys.version_info < (2, 5, 4) and '--install-layout=deb' in sys.argv:
         sys.argv.remove('--install-layout=deb')
     packages = [modname] + get_packages(join(here, modname), modname)
-    if USE_SETUPTOOLS:
-        kwargs['install_requires'] = install_requires
-        kwargs['zip_safe'] = False
+    kwargs['install_requires'] = install_requires
+    kwargs['zip_safe'] = False
     kwargs['packages'] = packages
     kwargs['package_data'] = package_data
     return setup(name=distname, version=version, license=license, url=web,