[server, hooks] allow callable in dbh.TYPE_MAPPING
author	Sylvain Thénault <sylvain.thenault@logilab.fr>
date	Wed, 09 Dec 2015 17:44:18 +0100
changeset 11005 f8417bd135ed
parent 11004 14ba505fb652
child 11006 096adb786873
[server, hooks] allow callable in dbh.TYPE_MAPPING

This is necessary for cubes introducing new custom final types whose generated SQL may depend on some extra configuration; it avoids the ugly and buggy hack currently done by the postgis cube. Closes #7569998
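
In practice, a cube can now map a final type to a callable taking the relation definition, and the new sql_type() helper resolves it. Below is a minimal, self-contained sketch of that dispatch; FakeDbHelper, FakeRdef and geometry_sqltype are illustrative stand-ins only (in CubicWeb the helper comes from logilab.database and rdef.object is a yams schema object, not a plain string):

from collections import namedtuple

# stand-ins for the real dbhelper and relation definition objects
FakeDbHelper = namedtuple('FakeDbHelper', ['TYPE_MAPPING'])
FakeRdef = namedtuple('FakeRdef', ['object', 'geom_type', 'srid'])


def geometry_sqltype(rdef):
    # hypothetical callable entry: build the column type from rdef attributes
    return 'geometry(%s, %s)' % (rdef.geom_type, rdef.srid)


def sql_type(dbhelper, rdef):
    # same dispatch as the new server.schema2sql.sql_type helper:
    # TYPE_MAPPING values may be plain SQL type strings or callables
    sqltype = dbhelper.TYPE_MAPPING[rdef.object]
    if callable(sqltype):
        sqltype = sqltype(rdef)
    return sqltype


dbhelper = FakeDbHelper(TYPE_MAPPING={'String': 'text',
                                      'Geometry': geometry_sqltype})

print(sql_type(dbhelper, FakeRdef('String', None, None)))       # text
print(sql_type(dbhelper, FakeRdef('Geometry', 'POINT', 4326)))  # geometry(POINT, 4326)
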
hooks/syncschema.py
server/migractions.py
server/schema2sql.py
server/sources/native.py
server/test/data-migractions/cubes/fakegis/site_cubicweb.py
server/test/unittest_migractions.py
--- a/hooks/syncschema.py	Wed Dec 09 17:44:17 2015 +0100
+++ b/hooks/syncschema.py	Wed Dec 09 17:44:18 2015 +0100
@@ -440,12 +440,15 @@
             # probably buggy)
             rdef = self.cnx.vreg.schema.rschema(rdefdef.name).rdefs[rdefdef.subject, rdefdef.object]
             assert rdef.infered
+        else:
+            rdef = self.cnx.vreg.schema.rschema(rdefdef.name).rdefs[rdefdef.subject, rdefdef.object]
+
         self.cnx.execute('SET X ordernum Y+1 '
                          'WHERE X from_entity SE, SE eid %(se)s, X ordernum Y, '
                          'X ordernum >= %(order)s, NOT X eid %(x)s',
                          {'x': entity.eid, 'se': fromentity.eid,
                           'order': entity.ordernum or 0})
-        return rdefdef
+        return rdefdef, rdef
 
     def precommit_event(self):
         cnx = self.cnx
@@ -465,11 +468,10 @@
         if hasattr(entity, 'formula'):
             props['formula'] = entity.formula
         # update the in-memory schema first
-        rdefdef = self.init_rdef(**props)
+        rdefdef, rdef = self.init_rdef(**props)
         # then make necessary changes to the system source database
         syssource = cnx.repo.system_source
-        attrtype = y2sql.type_from_constraints(
-            syssource.dbhelper, rdefdef.object, rdefdef.constraints)
+        attrtype = y2sql.type_from_rdef(syssource.dbhelper, rdef)
         # XXX should be moved somehow into lgdb: sqlite doesn't support to
         # add a new column with UNIQUE, it should be added after the ALTER TABLE
         # using ADD INDEX
@@ -549,7 +551,7 @@
         cnx = self.cnx
         entity = self.entity
         # update the in-memory schema first
-        rdefdef = self.init_rdef(composite=entity.composite)
+        rdefdef, rdef = self.init_rdef(composite=entity.composite)
         # then make necessary changes to the system source database
         schema = cnx.vreg.schema
         rtype = rdefdef.name
--- a/server/migractions.py	Wed Dec 09 17:44:17 2015 +0100
+++ b/server/migractions.py	Wed Dec 09 17:44:18 2015 +0100
@@ -60,7 +60,7 @@
 from cubicweb import repoapi
 from cubicweb.migration import MigrationHelper, yes
 from cubicweb.server import hook, schemaserial as ss
-from cubicweb.server.schema2sql import eschema2sql, rschema2sql, unique_index_name
+from cubicweb.server.schema2sql import eschema2sql, rschema2sql, unique_index_name, sql_type
 from cubicweb.server.utils import manager_userpasswd
 from cubicweb.server.sqlutils import sqlexec, SQL_PREFIX
 
@@ -1508,8 +1508,10 @@
                "WHERE cw_eid=%s") % (newtype, rdef.eid)
         self.sqlexec(sql, ask_confirm=False)
         dbhelper = self.repo.system_source.dbhelper
-        sqltype = dbhelper.TYPE_MAPPING[newtype]
+        newrdef = self.fs_schema.rschema(attr).rdef(etype, newtype)
+        sqltype = sql_type(dbhelper, newrdef)
         cursor = self.cnx.cnxset.cu
+        # consider former cardinality by design, since cardinality change is not handled here
         allownull = rdef.cardinality[0] != '1'
         dbhelper.change_col_type(cursor, 'cw_%s' % etype, 'cw_%s' % attr, sqltype, allownull)
         if commit:
--- a/server/schema2sql.py	Wed Dec 09 17:44:17 2015 +0100
+++ b/server/schema2sql.py	Wed Dec 09 17:44:18 2015 +0100
@@ -190,8 +190,7 @@
     """write an attribute schema as SQL statements to stdout"""
     attr = rschema.type
     rdef = rschema.rdef(eschema.type, aschema.type)
-    sqltype = type_from_constraints(dbhelper, aschema.type, rdef.constraints,
-                                    creating)
+    sqltype = type_from_rdef(dbhelper, rdef, creating)
     if SET_DEFAULT:
         default = eschema.default(attr)
         if default is not None:
@@ -215,11 +214,11 @@
     return sqltype
 
 
-def type_from_constraints(dbhelper, etype, constraints, creating=True):
-    """return a sql type string corresponding to the constraints"""
-    constraints = list(constraints)
+def type_from_rdef(dbhelper, rdef, creating=True):
+    """return a sql type string corresponding to the relation definition"""
+    constraints = list(rdef.constraints)
     unique, sqltype = False, None
-    if etype == 'String':
+    if rdef.object.type == 'String':
         for constraint in constraints:
             if isinstance(constraint, SizeConstraint):
                 if constraint.max is not None:
@@ -229,12 +228,19 @@
             elif isinstance(constraint, UniqueConstraint):
                 unique = True
     if sqltype is None:
-        sqltype = dbhelper.TYPE_MAPPING[etype]
+        sqltype = sql_type(dbhelper, rdef)
     if creating and unique:
         sqltype += ' UNIQUE'
     return sqltype
 
 
+def sql_type(dbhelper, rdef):
+    sqltype = dbhelper.TYPE_MAPPING[rdef.object]
+    if callable(sqltype):
+        sqltype = sqltype(rdef)
+    return sqltype
+
+
 _SQL_SCHEMA = """
 CREATE TABLE %(table)s (
   eid_from INTEGER NOT NULL REFERENCES entities (eid),
--- a/server/sources/native.py	Wed Dec 09 17:44:17 2015 +0100
+++ b/server/sources/native.py	Wed Dec 09 17:44:18 2015 +0100
@@ -119,12 +119,9 @@
     """return backend type and a boolean flag if NULL values should be allowed
     for a given relation definition
     """
-    if rdef.object.final:
-        ttype = rdef.object
-    else:
-        ttype = 'Int' # eid type
-    coltype = y2sql.type_from_constraints(dbhelper, ttype,
-                                          rdef.constraints, creating=False)
+    if not rdef.object.final:
+        return dbhelper.TYPE_MAPPING['Int']
+    coltype = y2sql.type_from_rdef(dbhelper, rdef, creating=False)
     allownull = rdef.cardinality[0] != '1'
     return coltype, allownull
 
--- a/server/test/data-migractions/cubes/fakegis/site_cubicweb.py	Wed Dec 09 17:44:17 2015 +0100
+++ b/server/test/data-migractions/cubes/fakegis/site_cubicweb.py	Wed Dec 09 17:44:18 2015 +0100
@@ -7,7 +7,23 @@
 
 # Add the datatype to the helper mapping
 pghelper = get_db_helper('postgres')
-pghelper.TYPE_MAPPING['Geometry'] = 'geometry'
+
+
+def pg_geometry_sqltype(rdef):
+    """Return a PostgreSQL column type corresponding to rdef's geom_type, srid
+    and coord_dimension.
+    """
+    target_geom_type = rdef.geom_type
+    if rdef.coord_dimension >= 3:  # XXX: handle 2D+M
+        target_geom_type += 'Z'
+    if rdef.coord_dimension == 4:
+        target_geom_type += 'M'
+    assert target_geom_type
+    assert rdef.srid
+    return 'geometry(%s, %s)' % (target_geom_type, rdef.srid)
+
+
+pghelper.TYPE_MAPPING['Geometry'] = pg_geometry_sqltype
 
 
 # Add a converter for Geometry
--- a/server/test/unittest_migractions.py	Wed Dec 09 17:44:17 2015 +0100
+++ b/server/test/unittest_migractions.py	Wed Dec 09 17:44:18 2015 +0100
@@ -298,6 +298,9 @@
             self.assertEqual(rdef.geom_type, 'GEOMETRYCOLLECTION')
             self.assertEqual(rdef.coord_dimension, 2)
             self.assertEqual(rdef.srid, 4326)
+            #self.assertEqual(mh.sqlexec('SELECT pg_typeof("cw_geometry") FROM cw_Location'), '')
+            fields = self.table_schema(mh, '%sLocation' % SQL_PREFIX)
+            self.assertEqual(fields['%sgeometry' % SQL_PREFIX], ('USER-DEFINED', None)) # XXX
 
     def test_add_drop_entity_type(self):
         with self.mh() as (cnx, mh):