--- a/cubicweb/hooks/syncschema.py Fri Apr 15 10:20:34 2016 +0200
+++ b/cubicweb/hooks/syncschema.py Mon Jun 20 15:04:14 2016 +0200
@@ -30,8 +30,7 @@
from copy import copy
from hashlib import md5
-from yams.schema import (BASE_TYPES, BadSchemaDefinition,
- RelationSchema, RelationDefinitionSchema)
+from yams.schema import BASE_TYPES, BadSchemaDefinition, RelationDefinitionSchema
from yams import buildobjs as ybo, convert_default_value
from logilab.common.decorators import clear_cache
@@ -58,6 +57,7 @@
constraints.append(cstr)
return constraints
+
def group_mapping(cw):
try:
return cw.transaction_data['groupmap']
@@ -65,6 +65,7 @@
cw.transaction_data['groupmap'] = gmap = ss.group_mapping(cw)
return gmap
+
def add_inline_relation_column(cnx, etype, rtype):
"""add necessary column and index for an inlined relation"""
attrkey = '%s.%s' % (etype, rtype)
@@ -123,7 +124,7 @@
raise validation_error(entity, errors)
-class _MockEntity(object): # XXX use a named tuple with python 2.6
+class _MockEntity(object): # XXX use a named tuple with python 2.6
def __init__(self, eid):
self.eid = eid
@@ -140,12 +141,12 @@
class DropTable(hook.Operation):
"""actually remove a database from the instance's schema"""
- table = None # make pylint happy
+ table = None # make pylint happy
+
def precommit_event(self):
- dropped = self.cnx.transaction_data.setdefault('droppedtables',
- set())
+ dropped = self.cnx.transaction_data.setdefault('droppedtables', set())
if self.table in dropped:
- return # already processed
+ return # already processed
dropped.add(self.table)
self.cnx.system_sql('DROP TABLE %s' % self.table)
self.info('dropped table %s', self.table)
@@ -286,7 +287,8 @@
class CWETypeRenameOp(MemSchemaOperation):
"""this operation updates physical storage accordingly"""
- oldname = newname = None # make pylint happy
+
+ oldname = newname = None # make pylint happy
def rename(self, oldname, newname):
self.cnx.vreg.schema.rename_entity_type(oldname, newname)
@@ -313,13 +315,14 @@
class CWRTypeUpdateOp(MemSchemaOperation):
"""actually update some properties of a relation definition"""
- rschema = entity = values = None # make pylint happy
+
+ rschema = entity = values = None # make pylint happy
oldvalues = None
def precommit_event(self):
rschema = self.rschema
if rschema.final:
- return # watched changes to final relation type are unexpected
+ return # watched changes to final relation type are unexpected
cnx = self.cnx
if 'fulltext_container' in self.values:
op = UpdateFTIndexOp.get_instance(cnx)
@@ -497,7 +500,7 @@
if extra_unique_index or entity.indexed:
try:
syssource.create_index(cnx, table, column,
- unique=extra_unique_index)
+ unique=extra_unique_index)
except Exception as ex:
self.error('error while creating index for %s.%s: %s',
table, column, ex)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.22.3_Any.py Mon Jun 20 15:04:14 2016 +0200
@@ -0,0 +1,11 @@
+from yams.constraints import UniqueConstraint
+
+for rschema in schema.relations():
+ if rschema.rule or not rschema.final:
+ continue
+ for rdef in rschema.rdefs.values():
+ if (rdef.object != 'String'
+ and any(isinstance(cstr, UniqueConstraint) for cstr in rdef.constraints)):
+ table = 'cw_{0}'.format(rdef.subject)
+ column = 'cw_{0}'.format(rdef.rtype)
+ repo.system_source.create_index(cnx, table, column, unique=True)
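
The new migration script backfills unique indexes for non-String attributes that already
carry a UniqueConstraint: on existing databases the column is there but, until the
schema2sql.py change below, the constraint was ignored for non-String types, so only the
index is missing. As a minimal sketch, assuming an attribute like the test schema's
Societe.tel declared with unique=True, one loop iteration boils down to the call below
(cw_ prefixes as in the script itself):

    # hypothetical expansion of a single iteration for a unique Int attribute
    table = 'cw_{0}'.format('Societe')   # rdef.subject -> entity table name
    column = 'cw_{0}'.format('tel')      # rdef.rtype   -> attribute column name
    repo.system_source.create_index(cnx, table, column, unique=True)
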
--- a/cubicweb/server/schema2sql.py Fri Apr 15 10:20:34 2016 +0200
+++ b/cubicweb/server/schema2sql.py Mon Jun 20 15:04:14 2016 +0200
@@ -1,4 +1,4 @@
-# copyright 2004-2015 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2004-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of cubicweb.
@@ -31,6 +31,7 @@
# SET_DEFAULT to True
SET_DEFAULT = False
+
def rschema_has_table(rschema, skip_relations):
"""Return True if the given schema should have a table in the database"""
return not (rschema.final or rschema.inlined or rschema.rule or rschema.type in skip_relations)
@@ -82,15 +83,18 @@
if not rschema.final and rschema.inlined]
return attrs
+
def unique_index_name(eschema, columns):
return u'unique_%s' % md5((eschema.type +
- ',' +
- ','.join(sorted(columns))).encode('ascii')).hexdigest()
+ ',' +
+ ','.join(sorted(columns))).encode('ascii')).hexdigest()
+
def iter_unique_index_names(eschema):
for columns in eschema._unique_together or ():
yield columns, unique_index_name(eschema, columns)
+
def dropeschema2sql(dbhelper, eschema, skip_relations=(), prefix=''):
"""return sql to drop an entity type's table"""
    # not necessary to drop indexes, that's implicitly done when
@@ -100,7 +104,7 @@
tablename = prefix + eschema.type
if eschema._unique_together is not None:
for columns, index_name in iter_unique_index_names(eschema):
- cols = ['%s%s' % (prefix, col) for col in columns]
+ cols = ['%s%s' % (prefix, col) for col in columns]
sqls = dbhelper.sqls_drop_multicol_unique_index(tablename, cols, index_name)
statements += sqls
statements += ['DROP TABLE %s;' % (tablename)]
@@ -120,7 +124,7 @@
if attrschema is not None:
sqltype = aschema2sql(dbhelper, eschema, rschema, attrschema,
indent=' ')
- else: # inline relation
+ else: # inline relation
sqltype = 'integer REFERENCES entities (eid)'
if i == len(attrs) - 1:
w(' %s%s %s' % (prefix, rschema.type, sqltype))
@@ -132,7 +136,8 @@
attr = rschema.type
rdef = rschema.rdef(eschema.type, aschema.type)
for constraint in rdef.constraints:
- cstrname, check = check_constraint(eschema, aschema, attr, constraint, dbhelper, prefix=prefix)
+ cstrname, check = check_constraint(eschema, aschema, attr, constraint, dbhelper,
+ prefix=prefix)
if cstrname is not None:
w(', CONSTRAINT %s CHECK(%s)' % (cstrname, check))
w(');')
@@ -142,13 +147,14 @@
if attrschema is None or eschema.rdef(rschema).indexed:
w(dbhelper.sql_create_index(table, prefix + rschema.type))
for columns, index_name in iter_unique_index_names(eschema):
- cols = ['%s%s' % (prefix, col) for col in columns]
+ cols = ['%s%s' % (prefix, col) for col in columns]
sqls = dbhelper.sqls_create_multicol_unique_index(table, cols, index_name)
for sql in sqls:
w(sql)
w('')
return '\n'.join(output)
+
def as_sql(value, dbhelper, prefix):
if isinstance(value, Attribute):
return prefix + value.attr
@@ -160,6 +166,7 @@
# XXX more quoting for literals?
return value
+
def check_constraint(eschema, aschema, attr, constraint, dbhelper, prefix=''):
# XXX should find a better name
cstrname = 'cstr' + md5((eschema.type + attr + constraint.type() +
@@ -186,6 +193,7 @@
return cstrname, '%s%s IN (%s)' % (prefix, attr, values)
return None, None
+
def aschema2sql(dbhelper, eschema, rschema, aschema, creating=True, indent=''):
"""write an attribute schema as SQL statements to stdout"""
attr = rschema.type
@@ -218,15 +226,15 @@
"""return a sql type string corresponding to the relation definition"""
constraints = list(rdef.constraints)
unique, sqltype = False, None
- if rdef.object.type == 'String':
- for constraint in constraints:
- if isinstance(constraint, SizeConstraint):
- if constraint.max is not None:
- size_constrained_string = dbhelper.TYPE_MAPPING.get(
- 'SizeConstrainedString', 'varchar(%s)')
- sqltype = size_constrained_string % constraint.max
- elif isinstance(constraint, UniqueConstraint):
- unique = True
+ for constraint in constraints:
+ if isinstance(constraint, UniqueConstraint):
+ unique = True
+ elif (isinstance(constraint, SizeConstraint)
+ and rdef.object.type == 'String'
+ and constraint.max is not None):
+ size_constrained_string = dbhelper.TYPE_MAPPING.get(
+ 'SizeConstrainedString', 'varchar(%s)')
+ sqltype = size_constrained_string % constraint.max
if sqltype is None:
sqltype = sql_type(dbhelper, rdef)
if creating and unique:
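
Net effect of the schema2sql.py change: a UniqueConstraint now yields a UNIQUE column for
any attribute type, while the varchar(n) sizing from SizeConstraint still applies to String
attributes only. Previously the whole constraint scan was guarded by the String check, so
unique=True on e.g. an Int attribute was silently dropped from the generated DDL. A minimal
standalone sketch of the reordered scan (the helper name is illustrative, not the module's):

    from yams.constraints import SizeConstraint, UniqueConstraint

    def scan_constraints(rdef):
        """illustrative restatement of the constraint loop above"""
        unique, sqltype = False, None
        for constraint in rdef.constraints:
            if isinstance(constraint, UniqueConstraint):
                unique = True            # now honoured for any attribute type
            elif (isinstance(constraint, SizeConstraint)
                  and rdef.object.type == 'String'
                  and constraint.max is not None):
                sqltype = 'varchar(%s)' % constraint.max   # still String-only
        return unique, sqltype

The test schema and expected-SQL updates below exercise exactly this: tel = Int(unique=True)
now produces "tel integer UNIQUE" instead of plain "tel integer".
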
--- a/cubicweb/server/test/data-schema2sql/schema/schema.py Fri Apr 15 10:20:34 2016 +0200
+++ b/cubicweb/server/test/data-schema2sql/schema/schema.py Mon Jun 20 15:04:14 2016 +0200
@@ -64,7 +64,7 @@
class Societe(EntityType):
nom = String(maxsize=64, fulltextindexed=True)
web = String(maxsize=128)
- tel = Int()
+ tel = Int(unique=True)
fax = Int(constraints=[BoundaryConstraint('<=', Attribute('tel'))])
rncs = String(maxsize=32)
ad1 = String(maxsize=128)
--- a/cubicweb/server/test/unittest_migractions.py Fri Apr 15 10:20:34 2016 +0200
+++ b/cubicweb/server/test/unittest_migractions.py Mon Jun 20 15:04:14 2016 +0200
@@ -17,8 +17,8 @@
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""unit tests for module cubicweb.server.migractions"""
+import os.path as osp
from datetime import date
-import os, os.path as osp
from contextlib import contextmanager
import tempfile
@@ -39,13 +39,13 @@
HERE = osp.dirname(osp.abspath(__file__))
+migrschema = None
def setUpModule():
startpgcluster(__file__)
-migrschema = None
def tearDownModule(*args):
global migrschema
del migrschema
--- a/cubicweb/server/test/unittest_schema2sql.py Fri Apr 15 10:20:34 2016 +0200
+++ b/cubicweb/server/test/unittest_schema2sql.py Mon Jun 20 15:04:14 2016 +0200
@@ -122,7 +122,7 @@
CREATE TABLE Societe(
nom varchar(64),
web varchar(128),
- tel integer,
+ tel integer UNIQUE,
fax integer,
rncs varchar(32),
ad1 varchar(128),
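
As a hypothetical post-upgrade sanity check (PostgreSQL only, not part of this changeset),
the index added by the migration can be listed from a repository shell where cnx is
available, assuming an application schema with an entity type like Societe above:

    # list indexes on the entity table; the cw_tel index should show up as UNIQUE
    rows = cnx.system_sql(
        "SELECT indexname, indexdef FROM pg_indexes "
        "WHERE tablename = 'cw_societe'").fetchall()
    for name, definition in rows:
        print('%s: %s' % (name, definition))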