backport stable
author Sylvain Thénault <sylvain.thenault@logilab.fr>
date Thu, 25 Mar 2010 14:26:13 +0100
changeset 5016 b3b0b808a0ed
parent 5004 4cc020ee70e2 (current diff)
parent 5015 55e2602545cd (diff)
child 5017 b2cc2a51706f
backport stable
schemas/_regproc_bss.postgres.sql
server/sources/rql2sql.py
server/test/unittest_rql2sql.py
web/facet.py
--- a/cwconfig.py	Wed Mar 24 18:04:59 2010 +0100
+++ b/cwconfig.py	Thu Mar 25 14:26:13 2010 +0100
@@ -90,7 +90,8 @@
 from logilab.common.configuration import (Configuration, Method,
                                           ConfigurationMixIn, merge_options)
 
-from cubicweb import CW_SOFTWARE_ROOT, CW_MIGRATION_MAP, ConfigurationError
+from cubicweb import (CW_SOFTWARE_ROOT, CW_MIGRATION_MAP,
+                      ConfigurationError, Binary)
 from cubicweb.toolsutils import env_path, create_dir
 
 CONFIGURATIONS = []
@@ -1050,7 +1051,24 @@
 
 
     class FSPATH(FunctionDescr):
-        supported_backends = ('postgres', 'sqlite',)
-        rtype = 'Bytes'
+        """return path of some bytes attribute stored using the Bytes
+        File-System Storage (bfss)
+        """
+        rtype = 'Bytes' # XXX return a String? potential problem with fs encoding
+
+        def update_cb_stack(self, stack):
+            assert len(stack) == 1
+            stack[0] = self.source_execute
+
+        def as_sql(self, backend, args):
+            raise NotImplementedError('source only callback')
+
+        def source_execute(self, source, value):
+            fpath = source.binary_to_str(value)
+            try:
+                return Binary(fpath)
+            except OSError, ex:
+                self.critical("can't open %s: %s", fpath, ex)
+                return None
 
     register_function(FSPATH)
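
FSPATH is now resolved on the source side instead of being compiled to a SQL
function: the column is fetched as stored, and source_execute() wraps the
file path in a Binary during result processing. A minimal usage sketch,
mirroring the updated tests in server/test/unittest_storage.py below:

    # fspath() now takes the mapped attribute directly; the result cell is
    # a Binary wrapping the file-system path (see test_bfss_sqlite_fspath)
    rset = session.execute('Any fspath(D) WHERE F eid %(f)s, F data D',
                           {'f': f1.eid})
    filepath = rset[0][0].getvalue()
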
--- a/entity.py	Wed Mar 24 18:04:59 2010 +0100
+++ b/entity.py	Thu Mar 25 14:26:13 2010 +0100
@@ -286,6 +286,18 @@
             self.edited_attributes.add(attr)
             self.skip_security_attributes.add(attr)
 
+    def pop(self, attr, default=_marker):
+        """override pop to update self.edited_attributes on cleanup of
+        undesired changes introduced in the entity's dict. See `__delitem__`
+        """
+        if default is _marker:
+            value = super(Entity, self).pop(attr)
+        else:
+            value = super(Entity, self).pop(attr, default)
+        if hasattr(self, 'edited_attributes') and attr in self.edited_attributes:
+            self.edited_attributes.remove(attr)
+        return value
+
     def rql_set_value(self, attr, value):
         """call by rql execution plan when some attribute is modified
 
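
The pop() override keeps edited_attributes in sync with the entity's dict,
so popping an attribute set earlier in the transaction also cancels its
pending write. An illustrative sketch (names taken from the hunk above):

    entity['name'] = u'foo'    # __setitem__ adds 'name' to edited_attributes
    entity.pop('name')         # removes the value and the pending write
    assert 'name' not in entity.edited_attributes
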
--- a/hooks/integrity.py	Wed Mar 24 18:04:59 2010 +0100
+++ b/hooks/integrity.py	Thu Mar 25 14:26:13 2010 +0100
@@ -228,45 +228,6 @@
                     raise ValidationError(entity.eid, {attr: msg % val})
 
 
-class _DelayedDeleteOp(hook.Operation):
-    """delete the object of composite relation except if the relation
-    has actually been redirected to another composite
-    """
-
-    def precommit_event(self):
-        session = self.session
-        # don't do anything if the entity is being created or deleted
-        if not (session.deleted_in_transaction(self.eid) or
-                session.added_in_transaction(self.eid)):
-            etype = session.describe(self.eid)[0]
-            session.execute('DELETE %s X WHERE X eid %%(x)s, NOT %s'
-                            % (etype, self.relation),
-                            {'x': self.eid}, 'x')
-
-
-class DeleteCompositeOrphanHook(IntegrityHook):
-    """delete the composed of a composite relation when this relation is deleted
-    """
-    __regid__ = 'deletecomposite'
-    events = ('before_delete_relation',)
-
-    def __call__(self):
-        # if the relation is being delete, don't delete composite's components
-        # automatically
-        pendingrdefs = self._cw.transaction_data.get('pendingrdefs', ())
-        if (self._cw.describe(self.eidfrom)[0], self.rtype,
-            self._cw.describe(self.eidto)[0]) in pendingrdefs:
-            return
-        composite = self._cw.schema_rproperty(self.rtype, self.eidfrom, self.eidto,
-                                                 'composite')
-        if composite == 'subject':
-            _DelayedDeleteOp(self._cw, eid=self.eidto,
-                             relation='Y %s X' % self.rtype)
-        elif composite == 'object':
-            _DelayedDeleteOp(self._cw, eid=self.eidfrom,
-                             relation='X %s Y' % self.rtype)
-
-
 class DontRemoveOwnersGroupHook(IntegrityHook):
     """delete the composed of a composite relation when this relation is deleted
     """
@@ -314,3 +275,46 @@
         user = self.entity
         if 'login' in user.edited_attributes and user.login:
             user.login = user.login.strip()
+
+
+# 'active' integrity hooks: you usually don't want to deactivate them, as they
+# are not really integrity checks but maintain consistency on changes
+
+class _DelayedDeleteOp(hook.Operation):
+    """delete the object of composite relation except if the relation
+    has actually been redirected to another composite
+    """
+
+    def precommit_event(self):
+        session = self.session
+        # don't do anything if the entity is being created or deleted
+        if not (session.deleted_in_transaction(self.eid) or
+                session.added_in_transaction(self.eid)):
+            etype = session.describe(self.eid)[0]
+            session.execute('DELETE %s X WHERE X eid %%(x)s, NOT %s'
+                            % (etype, self.relation),
+                            {'x': self.eid}, 'x')
+
+
+class DeleteCompositeOrphanHook(hook.Hook):
+    """delete the composed of a composite relation when this relation is deleted
+    """
+    __regid__ = 'deletecomposite'
+    events = ('before_delete_relation',)
+    category = 'activeintegrity'
+
+    def __call__(self):
+        # if the relation is being deleted, don't delete the composite's
+        # components automatically
+        pendingrdefs = self._cw.transaction_data.get('pendingrdefs', ())
+        if (self._cw.describe(self.eidfrom)[0], self.rtype,
+            self._cw.describe(self.eidto)[0]) in pendingrdefs:
+            return
+        composite = self._cw.schema_rproperty(self.rtype, self.eidfrom, self.eidto,
+                                              'composite')
+        if composite == 'subject':
+            _DelayedDeleteOp(self._cw, eid=self.eidto,
+                             relation='Y %s X' % self.rtype)
+        elif composite == 'object':
+            _DelayedDeleteOp(self._cw, eid=self.eidfrom,
+                             relation='X %s Y' % self.rtype)
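
Moving DeleteCompositeOrphanHook from IntegrityHook to a plain hook.Hook in
the 'activeintegrity' category means composite cleanup keeps running even
when integrity checks are deactivated, e.g. during migrations. A sketch of
the intent; the deny_all_hooks_but context manager is assumed here for
illustration:

    # integrity hooks may be disabled for a session, but composite cleanup
    # still happens since it lives in the 'activeintegrity' category
    with session.deny_all_hooks_but('activeintegrity'):
        session.execute('DELETE CWUser X WHERE X login "tmp"')
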
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/misc/migration/3.7.2_Any.py	Thu Mar 25 14:26:13 2010 +0100
@@ -0,0 +1,2 @@
+sql('DROP FUNCTION IF EXISTS _fsopen(bytea)')
+sql('DROP FUNCTION IF EXISTS fspath(bigint, text, text)')
--- a/schemas/_regproc_bss.postgres.sql	Wed Mar 24 18:04:59 2010 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,38 +0,0 @@
-/* -*- sql -*-
-
-   postgres specific registered procedures for the Bytes File System storage,
-   require the plpythonu language installed
-
-*/
-
-
-CREATE OR REPLACE FUNCTION _fsopen(bytea) RETURNS bytea AS $$
-    fpath = args[0]
-    if fpath:
-        try:
-            data = file(fpath, 'rb').read()
-            #/* XXX due to plpython bug we have to replace some characters... */
-            return data.replace("\\", r"\134").replace("\000", r"\000").replace("'", r"\047") #'
-        except Exception, ex:
-            plpy.warning('failed to get content for %s: %s', fpath, ex)
-    return None
-$$ LANGUAGE plpythonu
-/* WITH(ISCACHABLE) XXX does postgres handle caching of large data nicely */
-;;
-
-/* fspath(eid, entity type, attribute) */
-CREATE OR REPLACE FUNCTION fspath(bigint, text, text) RETURNS bytea AS $$
-    pkey = 'plan%s%s' % (args[1], args[2])
-    try:
-        plan = SD[pkey]
-    except KeyError:
-        #/* then prepare and cache plan to get versioned file information from a
-        # version content eid */
-        plan = plpy.prepare(
-            'SELECT X.cw_%s FROM cw_%s as X WHERE X.cw_eid=$1' % (args[2], args[1]),
-            ['bigint'])
-        SD[pkey] = plan
-    return plpy.execute(plan, [args[0]])[0]['cw_' + args[2]]
-$$ LANGUAGE plpythonu
-/* WITH(ISCACHABLE) XXX does postgres handle caching of large data nicely */
-;;
--- a/server/session.py	Wed Mar 24 18:04:59 2010 +0100
+++ b/server/session.py	Thu Mar 25 14:26:13 2010 +0100
@@ -591,10 +591,7 @@
             ecache[entity.eid] = entity
 
     def entity_cache(self, eid):
-        try:
-            return self.transaction_data['ecache'][eid]
-        except:
-            raise
+        return self.transaction_data['ecache'][eid]
 
     def cached_entities(self):
         return self.transaction_data.get('ecache', {}).values()
--- a/server/sources/extlite.py	Wed Mar 24 18:04:59 2010 +0100
+++ b/server/sources/extlite.py	Thu Mar 25 14:26:13 2010 +0100
@@ -187,9 +187,10 @@
         if self._need_sql_create:
             return []
         assert dbg_st_search(self.uri, union, varmap, args, cachekey)
-        sql, query_args = self.rqlsqlgen.generate(union, args)
-        args = self.sqladapter.merge_args(args, query_args)
-        results = self.sqladapter.process_result(self.doexec(session, sql, args))
+        sql, qargs, cbs = self.rqlsqlgen.generate(union, args)
+        args = self.sqladapter.merge_args(args, qargs)
+        cursor = self.doexec(session, sql, args)
+        results = self.sqladapter.process_result(cursor, cbs)
         assert dbg_results(results)
         return results
 
--- a/server/sources/native.py	Wed Mar 24 18:04:59 2010 +0100
+++ b/server/sources/native.py	Thu Mar 25 14:26:13 2010 +0100
@@ -264,8 +264,9 @@
     def init(self):
         self.init_creating()
 
-    def map_attribute(self, etype, attr, cb):
-        self._rql_sqlgen.attr_map['%s.%s' % (etype, attr)] = cb
+    # XXX deprecate [un]map_attribute?
+    def map_attribute(self, etype, attr, cb, sourcedb=True):
+        self._rql_sqlgen.attr_map['%s.%s' % (etype, attr)] = (cb, sourcedb)
 
     def unmap_attribute(self, etype, attr):
         self._rql_sqlgen.attr_map.pop('%s.%s' % (etype, attr), None)
@@ -273,7 +274,8 @@
     def set_storage(self, etype, attr, storage):
         storage_dict = self._storages.setdefault(etype, {})
         storage_dict[attr] = storage
-        self.map_attribute(etype, attr, storage.sqlgen_callback)
+        self.map_attribute(etype, attr,
+                           storage.callback, storage.is_source_callback)
 
     def unset_storage(self, etype, attr):
         self._storages[etype].pop(attr)
@@ -348,17 +350,17 @@
         if cachekey is None:
             self.no_cache += 1
             # generate sql query if we are able to do so (not supported types...)
-            sql, query_args = self._rql_sqlgen.generate(union, args, varmap)
+            sql, qargs, cbs = self._rql_sqlgen.generate(union, args, varmap)
         else:
             # sql may be cached
             try:
-                sql, query_args = self._cache[cachekey]
+                sql, qargs, cbs = self._cache[cachekey]
                 self.cache_hit += 1
             except KeyError:
                 self.cache_miss += 1
-                sql, query_args = self._rql_sqlgen.generate(union, args, varmap)
-                self._cache[cachekey] = sql, query_args
-        args = self.merge_args(args, query_args)
+                sql, qargs, cbs = self._rql_sqlgen.generate(union, args, varmap)
+                self._cache[cachekey] = sql, qargs, cbs
+        args = self.merge_args(args, qargs)
         assert isinstance(sql, basestring), repr(sql)
         try:
             cursor = self.doexec(session, sql, args)
@@ -367,7 +369,7 @@
             self.info("request failed '%s' ... retry with a new cursor", sql)
             session.pool.reconnect(self)
             cursor = self.doexec(session, sql, args)
-        results = self.process_result(cursor)
+        results = self.process_result(cursor, cbs)
         assert dbg_results(results)
         return results
 
@@ -381,9 +383,9 @@
             self.uri, union, varmap, args,
             prefix='ON THE FLY temp data insertion into %s from' % table)
         # generate sql queries if we are able to do so
-        sql, query_args = self._rql_sqlgen.generate(union, args, varmap)
+        sql, qargs, cbs = self._rql_sqlgen.generate(union, args, varmap)
         query = 'INSERT INTO %s %s' % (table, sql.encode(self._dbencoding))
-        self.doexec(session, query, self.merge_args(args, query_args))
+        self.doexec(session, query, self.merge_args(args, qargs))
 
     def manual_insert(self, results, table, session):
         """insert given result into a temporary table on the system source"""
@@ -428,10 +430,14 @@
         orig_values = {}
         etype = entity.__regid__
         for attr, storage in self._storages.get(etype, {}).items():
-            if attr in entity.edited_attributes:
-                orig_values[attr] = entity[attr]
-                handler = getattr(storage, 'entity_%s' % event)
-                handler(entity, attr)
+            try:
+                if attr in entity.edited_attributes:
+                    orig_values[attr] = entity[attr]
+                    handler = getattr(storage, 'entity_%s' % event)
+                    handler(entity, attr)
+            except AttributeError:
+                assert event == 'deleted'
+                getattr(storage, 'entity_deleted')(entity, attr)
         yield # 2/ execute the source's instructions
         # 3/ restore original values
         for attr, value in orig_values.items():
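
The sql generator now returns a triple: cbs maps the index of a selected
column to a stack of callbacks to run on each fetched cell. The resulting
contract, as exercised in both native.py and extlite.py above:

    # qargs are merged into the query arguments as before; cbs is passed
    # through to process_result() and may be empty when no source-mapped
    # attribute is selected
    sql, qargs, cbs = self._rql_sqlgen.generate(union, args, varmap)
    cursor = self.doexec(session, sql, self.merge_args(args, qargs))
    results = self.process_result(cursor, cbs)
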
--- a/server/sources/rql2sql.py	Wed Mar 24 18:04:59 2010 +0100
+++ b/server/sources/rql2sql.py	Thu Mar 25 14:26:13 2010 +0100
@@ -33,16 +33,30 @@
 
 import threading
 
+from logilab.database import FunctionDescr, SQL_FUNCTIONS_REGISTRY
+
 from rql import BadRQLQuery, CoercionError
 from rql.stmts import Union, Select
 from rql.nodes import (SortTerm, VariableRef, Constant, Function, Not,
                        Variable, ColumnAlias, Relation, SubQuery, Exists)
 
+from cubicweb import QueryError
 from cubicweb.server.sqlutils import SQL_PREFIX
 from cubicweb.server.utils import cleanup_solutions
 
 ColumnAlias._q_invariant = False # avoid to check for ColumnAlias / Variable
 
+FunctionDescr.source_execute = None
+
+def default_update_cb_stack(self, stack):
+    stack.append(self.source_execute)
+FunctionDescr.update_cb_stack = default_update_cb_stack
+
+LENGTH = SQL_FUNCTIONS_REGISTRY.get_function('LENGTH')
+def length_source_execute(source, value):
+    return len(value.getvalue())
+LENGTH.source_execute = length_source_execute
+
 def _new_var(select, varname):
     newvar = select.get_variable(varname)
     if not 'relations' in newvar.stinfo:
@@ -252,14 +266,44 @@
                         selectedidx.append(vref.name)
                         rqlst.selection.append(vref)
 
-# IGenerator implementation for RQL->SQL ######################################
+def iter_mapped_var_sels(stmt, variable):
+    # variable is a Variable or ColumnAlias node mapped to a source side
+    # callback
+    if not (len(variable.stinfo['rhsrelations']) <= 1 and # < 1 on column alias
+            variable.stinfo['selected']):
+        raise QueryError("can't use %s as a restriction variable"
+                         % variable.name)
+    for selectidx in variable.stinfo['selected']:
+        vrefs = stmt.selection[selectidx].get_nodes(VariableRef)
+        if len(vrefs) != 1:
+            raise QueryError()
+        yield selectidx, vrefs[0]
 
+def update_source_cb_stack(state, stmt, node, stack):
+    while True:
+        node = node.parent
+        if node is stmt:
+            break
+        if not isinstance(node, Function):
+            raise QueryError()
+        func = SQL_FUNCTIONS_REGISTRY.get_function(node.name)
+        if func.source_execute is None:
+            raise QueryError('%s can not be called on mapped attribute'
+                             % node.name)
+        state.source_cb_funcs.add(node)
+        func.update_cb_stack(stack)
+
+
+# IGenerator implementation for RQL->SQL #######################################
 
 class StateInfo(object):
     def __init__(self, existssols, unstablevars):
         self.existssols = existssols
         self.unstablevars = unstablevars
         self.subtables = {}
+        self.needs_source_cb = None
+        self.subquery_source_cb = None
+        self.source_cb_funcs = set()
 
     def reset(self, solution):
         """reset some visit variables"""
@@ -276,6 +320,17 @@
         self.restrictions = []
         self._restr_stack = []
         self.ignore_varmap = False
+        self._needs_source_cb = {}
+
+    def merge_source_cbs(self, needs_source_cb):
+        if self.needs_source_cb is None:
+            self.needs_source_cb = needs_source_cb
+        elif needs_source_cb != self.needs_source_cb:
+            raise QueryError('query fetches some source mapped attribute, some not')
+
+    def finalize_source_cbs(self):
+        if self.subquery_source_cb is not None:
+            self.needs_source_cb.update(self.subquery_source_cb)
 
     def add_restriction(self, restr):
         if restr:
@@ -332,16 +387,16 @@
     protected by a lock
     """
 
-    def __init__(self, schema, dbms_helper, attrmap=None):
+    def __init__(self, schema, dbhelper, attrmap=None):
         self.schema = schema
-        self.dbms_helper = dbms_helper
-        self.dbencoding = dbms_helper.dbencoding
-        self.keyword_map = {'NOW' : self.dbms_helper.sql_current_timestamp,
-                            'TODAY': self.dbms_helper.sql_current_date,
+        self.dbhelper = dbhelper
+        self.dbencoding = dbhelper.dbencoding
+        self.keyword_map = {'NOW' : self.dbhelper.sql_current_timestamp,
+                            'TODAY': self.dbhelper.sql_current_date,
                             }
-        if not self.dbms_helper.union_parentheses_support:
+        if not self.dbhelper.union_parentheses_support:
             self.union_sql = self.noparen_union_sql
-        if self.dbms_helper.fti_need_distinct:
+        if self.dbhelper.fti_need_distinct:
             self.__union_sql = self.union_sql
             self.union_sql = self.has_text_need_distinct_union_sql
         self._lock = threading.Lock()
@@ -373,7 +428,7 @@
             # union query for each rqlst / solution
             sql = self.union_sql(union)
             # we are done
-            return sql, self._query_attrs
+            return sql, self._query_attrs, self._state.needs_source_cb
         finally:
             self._lock.release()
 
@@ -391,9 +446,10 @@
         return '\nUNION ALL\n'.join(sqls)
 
     def noparen_union_sql(self, union, needalias=False):
-        # needed for sqlite backend which doesn't like parentheses around
-        # union query. This may cause bug in some condition (sort in one of
-        # the subquery) but will work in most case
+        # needed for the sqlite backend, which doesn't like parentheses around
+        # a union query. This may cause bugs in some conditions (sort in one of
+        # the subqueries) but will work in most cases
+        #
         # see http://www.sqlite.org/cvstrac/tktview?tn=3074
         sqls = (self.select_sql(select, needalias)
                 for i, select in enumerate(union.children))
@@ -435,6 +491,9 @@
         else:
             existssols, unstable = {}, ()
         state = StateInfo(existssols, unstable)
+        if self._state is not None:
+            # state from a previous unioned select
+            state.merge_source_cbs(self._state.needs_source_cb)
         # treat subqueries
         self._subqueries_sql(select, state)
         # generate sql for this select node
@@ -490,6 +549,7 @@
                 if fneedwrap:
                     selection = ['T1.C%s' % i for i in xrange(len(origselection))]
                     sql = 'SELECT %s FROM (%s) AS T1' % (','.join(selection), sql)
+            state.finalize_source_cbs()
         finally:
             select.selection = origselection
         # limit / offset
@@ -504,13 +564,24 @@
     def _subqueries_sql(self, select, state):
         for i, subquery in enumerate(select.with_):
             sql = self.union_sql(subquery.query, needalias=True)
-            tablealias = '_T%s' % i
+            tablealias = '_T%s' % i # XXX nested subqueries
             sql = '(%s) AS %s' % (sql, tablealias)
             state.subtables[tablealias] = (0, sql)
+            latest_state = self._state
             for vref in subquery.aliases:
                 alias = vref.variable
                 alias._q_sqltable = tablealias
                 alias._q_sql = '%s.C%s' % (tablealias, alias.colnum)
+                try:
+                    stack = latest_state.needs_source_cb[alias.colnum]
+                    if state.subquery_source_cb is None:
+                        state.subquery_source_cb = {}
+                    for selectidx, vref in iter_mapped_var_sels(select, alias):
+                        stack = stack[:]
+                        update_source_cb_stack(state, select, vref, stack)
+                        state.subquery_source_cb[selectidx] = stack
+                except KeyError:
+                    continue
 
     def _solutions_sql(self, select, solutions, distinct, needalias):
         sqls = []
@@ -522,17 +593,18 @@
             sql = [self._selection_sql(select.selection, distinct, needalias)]
             if self._state.restrictions:
                 sql.append('WHERE %s' % ' AND '.join(self._state.restrictions))
+            self._state.merge_source_cbs(self._state._needs_source_cb)
             # add required tables
             assert len(self._state.actual_tables) == 1, self._state.actual_tables
             tables = self._state.actual_tables[-1]
             if tables:
                 # sort for test predictability
                 sql.insert(1, 'FROM %s' % ', '.join(sorted(tables)))
-            elif self._state.restrictions and self.dbms_helper.needs_from_clause:
+            elif self._state.restrictions and self.dbhelper.needs_from_clause:
                 sql.insert(1, 'FROM (SELECT 1) AS _T')
             sqls.append('\n'.join(sql))
         if select.need_intersect:
-            #if distinct or not self.dbms_helper.intersect_all_support:
+            #if distinct or not self.dbhelper.intersect_all_support:
             return '\nINTERSECT\n'.join(sqls)
             #else:
             #    return '\nINTERSECT ALL\n'.join(sqls)
@@ -894,7 +966,13 @@
             except KeyError:
                 mapkey = '%s.%s' % (self._state.solution[lhs.name], rel.r_type)
                 if mapkey in self.attr_map:
-                    lhssql = self.attr_map[mapkey](self, lhs.variable, rel)
+                    cb, sourcecb = self.attr_map[mapkey]
+                    if sourcecb:
+                        # callback is a source callback, we can't use this
+                        # attribute in restriction
+                        raise QueryError("can't use %s (%s) in restriction"
+                                         % (mapkey, rel.as_string()))
+                    lhssql = cb(self, lhs.variable, rel)
                 elif rel.r_type == 'eid':
                     lhssql = lhs.variable._q_sql
                 else:
@@ -943,7 +1021,7 @@
             not_ = True
         else:
             not_ = False
-        return self.dbms_helper.fti_restriction_sql(alias, const.eval(self._args),
+        return self.dbhelper.fti_restriction_sql(alias, const.eval(self._args),
                                                     jointo, not_) + restriction
 
     def visit_comparison(self, cmp):
@@ -956,7 +1034,7 @@
             rhs = cmp.children[0]
         operator = cmp.operator
         if operator in ('IS', 'LIKE', 'ILIKE'):
-            if operator == 'ILIKE' and not self.dbms_helper.ilike_support:
+            if operator == 'ILIKE' and not self.dbhelper.ilike_support:
                 operator = ' LIKE '
             else:
                 operator = ' %s ' % operator
@@ -986,9 +1064,13 @@
 
     def visit_function(self, func):
         """generate SQL name for a function"""
-        # func_sql_call will check function is supported by the backend
-        return self.dbms_helper.func_as_sql(func.name,
-                                            [c.accept(self) for c in func.children])
+        args = [c.accept(self) for c in func.children]
+        if func in self._state.source_cb_funcs:
+            # function executed as a callback on the source
+            assert len(args) == 1
+            return args[0]
+        # func_as_sql will check function is supported by the backend
+        return self.dbhelper.func_as_sql(func.name, args)
 
     def visit_constant(self, constant):
         """generate SQL name for a constant"""
@@ -1003,7 +1085,7 @@
                 rel._q_needcast = value
             return self.keyword_map[value]()
         if constant.type == 'Boolean':
-            value = self.dbms_helper.boolean_value(value)
+            value = self.dbhelper.boolean_value(value)
         if constant.type == 'Substitute':
             _id = constant.value
             if isinstance(_id, unicode):
@@ -1065,7 +1147,7 @@
                     self._state.add_restriction(restr)
             elif principal.r_type == 'has_text':
                 sql = '%s.%s' % (self._fti_table(principal),
-                                 self.dbms_helper.fti_uid_attr)
+                                 self.dbhelper.fti_uid_attr)
             elif principal in variable.stinfo['rhsrelations']:
                 if self.schema.rschema(principal.r_type).inlined:
                     sql = self._linked_var_sql(variable)
@@ -1155,12 +1237,20 @@
         if isinstance(linkedvar, ColumnAlias):
             raise BadRQLQuery('variable %s should be selected by the subquery'
                               % variable.name)
-        mapkey = '%s.%s' % (self._state.solution[linkedvar.name], rel.r_type)
-        if mapkey in self.attr_map:
-            return self.attr_map[mapkey](self, linkedvar, rel)
         try:
             sql = self._varmap['%s.%s' % (linkedvar.name, rel.r_type)]
         except KeyError:
+            mapkey = '%s.%s' % (self._state.solution[linkedvar.name], rel.r_type)
+            if mapkey in self.attr_map:
+                cb, sourcecb = self.attr_map[mapkey]
+                if not sourcecb:
+                    return cb(self, linkedvar, rel)
+                # attribute mapped at the source level (bfss for instance)
+                stmt = rel.stmt
+                for selectidx, vref in iter_mapped_var_sels(stmt, variable):
+                    stack = [cb]
+                    update_source_cb_stack(self._state, stmt, vref, stack)
+                    self._state._needs_source_cb[selectidx] = stack
             linkedvar.accept(self)
             sql = '%s.%s%s' % (linkedvar._q_sqltable, SQL_PREFIX, rel.r_type)
         return sql
@@ -1267,7 +1357,7 @@
             except AttributeError:
                 pass
         self._state.done.add(relation)
-        alias = self.alias_and_add_table(self.dbms_helper.fti_table)
+        alias = self.alias_and_add_table(self.dbhelper.fti_table)
         relation._q_sqltable = alias
         return alias
 
--- a/server/sources/storages.py	Wed Mar 24 18:04:59 2010 +0100
+++ b/server/sources/storages.py	Thu Mar 25 14:26:13 2010 +0100
@@ -11,10 +11,31 @@
     repo.system_source.unset_storage(etype, attr)
 
 class Storage(object):
-    """abstract storage"""
-    def sqlgen_callback(self, generator, relation, linkedvar):
-        """sql generator callback when some attribute with a custom storage is
-        accessed
+    """abstract storage
+
+    * If `is_source_callback` is true (the default), the callback will be run
+      during query result processing of the fetched attribute's value and
+      should have the following prototype::
+
+        callback(self, source, value)
+
+      where `value` is the value actually stored in the backend. None values
+      will be skipped (i.e. the callback won't be called on them).
+
+    * If `is_source_callback` is false, the callback will be run during sql
+      generation when some attribute with a custom storage is accessed and
+      should have the following prototype::
+
+        callback(self, generator, relation, linkedvar)
+
+      where `generator` is the sql generator, `relation` the current rql syntax
+      tree relation and `linkedvar` the principal syntax tree variable holding
+      the attribute.
+    """
+    is_source_callback = True
+
+    def callback(self, *args):
+        """see docstring for prototype, which vary according to is_source_callback
         """
         raise NotImplementedError()
 
@@ -38,14 +59,16 @@
     def __init__(self, defaultdir):
         self.default_directory = defaultdir
 
-    def sqlgen_callback(self, generator, linkedvar, relation):
+    def callback(self, source, value):
         """sql generator callback when some attribute with a custom storage is
         accessed
         """
-        linkedvar.accept(generator)
-        return '_fsopen(%s.cw_%s)' % (
-            linkedvar._q_sql.split('.', 1)[0], # table name
-            relation.r_type) # attribute name
+        fpath = source.binary_to_str(value)
+        try:
+            return Binary(file(fpath, 'rb').read())
+        except (IOError, OSError), ex:
+            source.critical("can't open %s: %s", fpath, ex)
+            return None
 
     def entity_added(self, entity, attr):
         """an entity using this storage for attr has been added"""
@@ -87,7 +110,8 @@
         cu = sysource.doexec(entity._cw,
                              'SELECT cw_%s FROM cw_%s WHERE cw_eid=%s' % (
                                  attr, entity.__regid__, entity.eid))
-        return sysource._process_value(cu.fetchone()[0], [None, dbmod.BINARY],
+        BINARY = sysource.dbhelper.dbapi_module.BINARY
+        return sysource._process_value(cu.fetchone()[0], [None, BINARY],
                                        binarywrap=str)
 
 
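
Typical setup, matching what the storage tests below do: store File.data on
the file system, transparently for RQL queries (the directory is
illustrative):

    from cubicweb.server.sources import storages

    # BytesFileSystemStorage writes one file per entity attribute under
    # the given directory
    storage = storages.BytesFileSystemStorage('/var/lib/cubicweb/bfss')
    storages.set_attribute_storage(repo, 'File', 'data', storage)
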
--- a/server/sqlutils.py	Wed Mar 24 18:04:59 2010 +0100
+++ b/server/sqlutils.py	Thu Mar 25 14:26:13 2010 +0100
@@ -188,9 +188,17 @@
             return newargs
         return query_args
 
-    def process_result(self, cursor):
+    def process_result(self, cursor, column_callbacks=None):
         """return a list of CubicWeb compliant values from data in the given cursor
         """
+        # use two different implementations to avoid paying the price of
+        # callback lookup for each *cell* in results when there is nothing to
+        # look up
+        if not column_callbacks:
+            return self._process_result(cursor)
+        return self._cb_process_result(cursor, column_callbacks)
+
+    def _process_result(self, cursor, column_callbacks=None):
         # begin bind to locals for optimization
         descr = cursor.description
         encoding = self._dbencoding
@@ -208,6 +216,30 @@
             results[i] = result
         return results
 
+    def _cb_process_result(self, cursor, column_callbacks):
+        # begin bind to locals for optimization
+        descr = cursor.description
+        encoding = self._dbencoding
+        process_value = self._process_value
+        binary = Binary
+        # /end
+        results = cursor.fetchall()
+        for i, line in enumerate(results):
+            result = []
+            for col, value in enumerate(line):
+                if value is None:
+                    result.append(value)
+                    continue
+                cbstack = column_callbacks.get(col, None)
+                if cbstack is None:
+                    value = process_value(value, descr[col], encoding, binary)
+                else:
+                    for cb in cbstack:
+                        value = cb(self, value)
+                result.append(value)
+            results[i] = result
+        return results
+
     def preprocess_entity(self, entity):
         """return a dictionary to use as extra argument to cursor.execute
         to insert/update an entity into a SQL database
@@ -277,28 +309,5 @@
     import yams.constraints
     yams.constraints.patch_sqlite_decimal()
 
-    def fspath(eid, etype, attr):
-        try:
-            cu = cnx.cursor()
-            cu.execute('SELECT X.cw_%s FROM cw_%s as X '
-                       'WHERE X.cw_eid=%%(eid)s' % (attr, etype),
-                       {'eid': eid})
-            return cu.fetchone()[0]
-        except:
-            import traceback
-            traceback.print_exc()
-            raise
-    cnx.create_function('fspath', 3, fspath)
-
-    def _fsopen(fspath):
-        if fspath:
-            try:
-                return buffer(file(fspath).read())
-            except:
-                import traceback
-                traceback.print_exc()
-                raise
-    cnx.create_function('_fsopen', 1, _fsopen)
-
 sqlite_hooks = SQL_CONNECT_HOOKS.setdefault('sqlite', [])
 sqlite_hooks.append(init_sqlite_connexion)
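
A self-contained illustration of the per-cell callback principle used by
_cb_process_result above (simplified, without the _process_value fast path):

    def process_row(sqladapter, row, column_callbacks):
        # apply each column's callback stack to non-None cells
        result = []
        for col, value in enumerate(row):
            if value is not None:
                for cb in column_callbacks.get(col, ()):
                    value = cb(sqladapter, value)
            result.append(value)
        return result
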
--- a/server/test/unittest_rql2sql.py	Wed Mar 24 18:04:59 2010 +0100
+++ b/server/test/unittest_rql2sql.py	Thu Mar 25 14:26:13 2010 +0100
@@ -1102,8 +1102,8 @@
     #capture = True
     def setUp(self):
         RQLGeneratorTC.setUp(self)
-        dbms_helper = get_db_helper('postgres')
-        self.o = SQLGenerator(schema, dbms_helper)
+        dbhelper = get_db_helper('postgres')
+        self.o = SQLGenerator(schema, dbhelper)
 
     def _norm_sql(self, sql):
         return sql.strip()
@@ -1113,8 +1113,8 @@
             args = {'text': 'hip hop momo'}
         try:
             union = self._prepare(rql)
-            r, nargs = self.o.generate(union, args,
-                                      varmap=varmap)
+            r, nargs, cbs = self.o.generate(union, args,
+                                            varmap=varmap)
             args.update(nargs)
             self.assertLinesEquals((r % args).strip(), self._norm_sql(sql), striplines=True)
         except Exception, ex:
@@ -1135,7 +1135,7 @@
     def _checkall(self, rql, sql):
         try:
             rqlst = self._prepare(rql)
-            r, args = self.o.generate(rqlst)
+            r, args, cbs = self.o.generate(rqlst)
             self.assertEqual((r.strip(), args), sql)
         except Exception, ex:
             print rql
@@ -1197,7 +1197,7 @@
 
     def test_is_null_transform(self):
         union = self._prepare('Any X WHERE X login %(login)s')
-        r, args = self.o.generate(union, {'login': None})
+        r, args, cbs = self.o.generate(union, {'login': None})
         self.assertLinesEquals((r % args).strip(),
                                '''SELECT _X.cw_eid
 FROM cw_CWUser AS _X
@@ -1386,11 +1386,11 @@
                     '''SELECT COUNT(1)
 WHERE EXISTS(SELECT 1 FROM owned_by_relation AS rel_owned_by0, cw_Affaire AS _P WHERE rel_owned_by0.eid_from=_P.cw_eid AND rel_owned_by0.eid_to=1 UNION SELECT 1 FROM owned_by_relation AS rel_owned_by1, cw_Note AS _P WHERE rel_owned_by1.eid_from=_P.cw_eid AND rel_owned_by1.eid_to=1)''')
 
-    def test_attr_map(self):
+    def test_attr_map_sqlcb(self):
         def generate_ref(gen, linkedvar, rel):
             linkedvar.accept(gen)
             return 'VERSION_DATA(%s)' % linkedvar._q_sql
-        self.o.attr_map['Affaire.ref'] = generate_ref
+        self.o.attr_map['Affaire.ref'] = (generate_ref, False)
         try:
             self._check('Any R WHERE X ref R',
                         '''SELECT VERSION_DATA(_X.cw_eid)
@@ -1402,13 +1402,24 @@
         finally:
             self.o.attr_map.clear()
 
+    def test_attr_map_sourcecb(self):
+        cb = lambda x,y: None
+        self.o.attr_map['Affaire.ref'] = (cb, True)
+        try:
+            union = self._prepare('Any R WHERE X ref R')
+            r, nargs, cbs = self.o.generate(union, args={})
+            self.assertLinesEquals(r.strip(), 'SELECT _X.cw_ref\nFROM cw_Affaire AS _X')
+            self.assertEquals(cbs, {0: [cb]})
+        finally:
+            self.o.attr_map.clear()
+
 
 class SqliteSQLGeneratorTC(PostgresSQLGeneratorTC):
 
     def setUp(self):
         RQLGeneratorTC.setUp(self)
-        dbms_helper = get_db_helper('sqlite')
-        self.o = SQLGenerator(schema, dbms_helper)
+        dbhelper = get_db_helper('sqlite')
+        self.o = SQLGenerator(schema, dbhelper)
 
     def _norm_sql(self, sql):
         return sql.strip().replace(' ILIKE ', ' LIKE ').replace('\nINTERSECT ALL\n', '\nINTERSECT\n')
@@ -1515,8 +1526,8 @@
 
     def setUp(self):
         RQLGeneratorTC.setUp(self)
-        dbms_helper = get_db_helper('mysql')
-        self.o = SQLGenerator(schema, dbms_helper)
+        dbhelper = get_db_helper('mysql')
+        self.o = SQLGenerator(schema, dbhelper)
 
     def _norm_sql(self, sql):
         sql = sql.strip().replace(' ILIKE ', ' LIKE ').replace('TRUE', '1').replace('FALSE', '0')
--- a/server/test/unittest_storage.py	Wed Mar 24 18:04:59 2010 +0100
+++ b/server/test/unittest_storage.py	Thu Mar 25 14:26:13 2010 +0100
@@ -13,7 +13,7 @@
 import shutil
 import tempfile
 
-from cubicweb import Binary
+from cubicweb import Binary, QueryError
 from cubicweb.selectors import implements
 from cubicweb.server.sources import storages
 from cubicweb.server.hook import Hook, Operation
@@ -51,41 +51,114 @@
         shutil.rmtree(self.tempdir)
 
 
-    def create_file(self, content):
+    def create_file(self, content='the-data'):
         req = self.request()
         return req.create_entity('File', data=Binary(content),
                                  data_format=u'text/plain', data_name=u'foo')
 
-    def test_bfs_storage(self):
-        f1 = self.create_file(content='the-data')
+    def test_bfss_storage(self):
+        f1 = self.create_file()
         expected_filepath = osp.join(self.tempdir, '%s_data' % f1.eid)
         self.failUnless(osp.isfile(expected_filepath))
         self.assertEquals(file(expected_filepath).read(), 'the-data')
+        self.rollback()
+        self.failIf(osp.isfile(expected_filepath))
+        f1 = self.create_file()
+        self.commit()
+        self.assertEquals(file(expected_filepath).read(), 'the-data')
+        f1.set_attributes(data=Binary('the new data'))
+        self.rollback()
+        self.assertEquals(file(expected_filepath).read(), 'the-data')
+        f1.delete()
+        self.failUnless(osp.isfile(expected_filepath))
+        self.rollback()
+        self.failUnless(osp.isfile(expected_filepath))
+        f1.delete()
+        self.commit()
+        self.failIf(osp.isfile(expected_filepath))
 
-    def test_sqlite_fspath(self):
-        f1 = self.create_file(content='the-data')
+    def test_bfss_sqlite_fspath(self):
+        f1 = self.create_file()
         expected_filepath = osp.join(self.tempdir, '%s_data' % f1.eid)
-        fspath = self.execute('Any fspath(F, "File", "data") WHERE F eid %(f)s',
+        fspath = self.execute('Any fspath(D) WHERE F eid %(f)s, F data D',
                               {'f': f1.eid})[0][0]
         self.assertEquals(fspath.getvalue(), expected_filepath)
 
-    def test_fs_importing_doesnt_touch_path(self):
+    def test_bfss_fs_importing_doesnt_touch_path(self):
         self.session.transaction_data['fs_importing'] = True
         f1 = self.session.create_entity('File', data=Binary('/the/path'),
                                         data_format=u'text/plain', data_name=u'foo')
-        fspath = self.execute('Any fspath(F, "File", "data") WHERE F eid %(f)s',
+        fspath = self.execute('Any fspath(D) WHERE F eid %(f)s, F data D',
                               {'f': f1.eid})[0][0]
         self.assertEquals(fspath.getvalue(), '/the/path')
 
-    def test_storage_transparency(self):
+    def test_source_storage_transparency(self):
         self.vreg._loadedmods[__name__] = {}
         self.vreg.register(DummyBeforeHook)
         self.vreg.register(DummyAfterHook)
         try:
-            self.create_file(content='the-data')
+            self.create_file()
         finally:
             self.vreg.unregister(DummyBeforeHook)
             self.vreg.unregister(DummyAfterHook)
 
+    def test_source_mapped_attribute_error_cases(self):
+        ex = self.assertRaises(QueryError, self.execute,
+                               'Any X WHERE X data ~= "hop", X is File')
+        self.assertEquals(str(ex), 'can\'t use File.data (X data ILIKE "hop") in restriction')
+        ex = self.assertRaises(QueryError, self.execute,
+                               'Any X, Y WHERE X data D, Y data D, '
+                               'NOT X identity Y, X is File, Y is File')
+        self.assertEquals(str(ex), "can't use D as a restriction variable")
+        # query returning mix of mapped / regular attributes (only file.data
+        # mapped, not image.data for instance)
+        ex = self.assertRaises(QueryError, self.execute,
+                               'Any X WITH X BEING ('
+                               ' (Any NULL)'
+                               '  UNION '
+                               ' (Any D WHERE X data D, X is File)'
+                               ')')
+        self.assertEquals(str(ex), 'query fetches some source mapped attribute, some not')
+        ex = self.assertRaises(QueryError, self.execute,
+                               '(Any D WHERE X data D, X is File)'
+                               ' UNION '
+                               '(Any D WHERE X data D, X is Image)')
+        self.assertEquals(str(ex), 'query fetches some source mapped attribute, some not')
+        ex = self.assertRaises(QueryError,
+                               self.execute, 'Any D WHERE X data D')
+        self.assertEquals(str(ex), 'query fetches some source mapped attribute, some not')
+
+    def test_source_mapped_attribute_advanced(self):
+        f1 = self.create_file()
+        rset = self.execute('Any X,D WITH D,X BEING ('
+                            ' (Any D, X WHERE X eid %(x)s, X data D)'
+                            '  UNION '
+                            ' (Any D, X WHERE X eid %(x)s, X data D)'
+                            ')', {'x': f1.eid}, 'x')
+        self.assertEquals(len(rset), 2)
+        self.assertEquals(rset[0][0], f1.eid)
+        self.assertEquals(rset[1][0], f1.eid)
+        self.assertEquals(rset[0][1].getvalue(), 'the-data')
+        self.assertEquals(rset[1][1].getvalue(), 'the-data')
+        rset = self.execute('Any X,LENGTH(D) WHERE X eid %(x)s, X data D',
+                            {'x': f1.eid}, 'x')
+        self.assertEquals(len(rset), 1)
+        self.assertEquals(rset[0][0], f1.eid)
+        self.assertEquals(rset[0][1], len('the-data'))
+        rset = self.execute('Any X,LENGTH(D) WITH D,X BEING ('
+                            ' (Any D, X WHERE X eid %(x)s, X data D)'
+                            '  UNION '
+                            ' (Any D, X WHERE X eid %(x)s, X data D)'
+                            ')', {'x': f1.eid}, 'x')
+        self.assertEquals(len(rset), 2)
+        self.assertEquals(rset[0][0], f1.eid)
+        self.assertEquals(rset[1][0], f1.eid)
+        self.assertEquals(rset[0][1], len('the-data'))
+        self.assertEquals(rset[1][1], len('the-data'))
+        ex = self.assertRaises(QueryError, self.execute,
+                               'Any X,UPPER(D) WHERE X eid %(x)s, X data D',
+                               {'x': f1.eid}, 'x')
+        self.assertEquals(str(ex), 'UPPER can not be called on mapped attribute')
+
 if __name__ == '__main__':
     unittest_main()
--- a/web/facet.py	Wed Mar 24 18:04:59 2010 +0100
+++ b/web/facet.py	Thu Mar 25 14:26:13 2010 +0100
@@ -341,10 +341,11 @@
 
 class RelationFacet(VocabularyFacet):
     __select__ = partial_relation_possible() & match_context_prop()
-    # class attributes to configure the rel ation facet
+    # class attributes to configure the relation facet
     rtype = None
     role = 'subject'
     target_attr = 'eid'
+    target_type = None
     # set this to a stored procedure name if you want to sort on the result
     # of this function applied to the value instead of on the direct value
     sortfunc = None
@@ -368,8 +369,11 @@
             sort = self.sortasc
         try:
             mainvar = self.filtered_variable
-            insert_attr_select_relation(rqlst, mainvar, self.rtype, self.role,
-                                        self.target_attr, self.sortfunc, sort)
+            var = insert_attr_select_relation(
+                rqlst, mainvar, self.rtype, self.role, self.target_attr,
+                self.sortfunc, sort)
+            if self.target_type is not None:
+                rqlst.add_type_restriction(var, self.target_type)
             try:
                 rset = self.rqlexec(rqlst.as_string(), self.cw_rset.args, self.cw_rset.cachekey)
             except:
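
The new target_type attribute lets a RelationFacet restrict its vocabulary
to a single target entity type when the relation accepts several. A usage
sketch with illustrative schema names (not from this changeset):

    class VersionFacet(RelationFacet):
        __regid__ = 'version-facet'   # illustrative
        rtype = 'done_in'
        role = 'subject'
        target_attr = 'num'
        target_type = 'Version'       # keep only Version targets
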
--- a/web/webconfig.py	Wed Mar 24 18:04:59 2010 +0100
+++ b/web/webconfig.py	Thu Mar 25 14:26:13 2010 +0100
@@ -303,11 +303,13 @@
         baseurl = self['base-url'] or self.default_base_url()
         if baseurl and baseurl[-1] != '/':
             baseurl += '/'
-        self.global_set_option('base-url', baseurl)
+        if not self.repairing:
+            self.global_set_option('base-url', baseurl)
         httpsurl = self['https-url']
         if httpsurl and httpsurl[-1] != '/':
             httpsurl += '/'
-            self.global_set_option('https-url', httpsurl)
+            if not self.repairing:
+                self.global_set_option('https-url', httpsurl)
 
     def _build_ext_resources(self):
         libresourcesfile = join(self.shared_dir(), 'data', 'external_resources')