--- a/.hgtags Wed Mar 24 08:40:00 2010 +0100
+++ b/.hgtags Wed Mar 24 10:23:57 2010 +0100
@@ -107,5 +107,9 @@
0a16f07112b90fb61d2e905855fece77e5a7e39c cubicweb-debian-version-3.6.1-2
bfebe3d14d5390492925fc294dfdafad890a7104 cubicweb-version-3.6.2
f3b4bb9121a0e7ee5961310ff79e61c890948a77 cubicweb-debian-version-3.6.2-1
+270aba1e6fa21dac6b070e7815e6d1291f9c87cd cubicweb-version-3.7.0
+0c9ff7e496ce344b7e6bf5c9dd2847daf9034e5e cubicweb-debian-version-3.7.0-1
+6b0832bbd1daf27c2ce445af5b5222e1e522fb90 cubicweb-version-3.7.1
+9194740f070e64da5a89f6a9a31050a8401ebf0c cubicweb-debian-version-3.7.1-1
9c342fa4f1b73e06917d7dc675949baff442108b cubicweb-version-3.6.3
f9fce56d6a0c2bc6c4b497b66039a8bbbbdc8074 cubicweb-debian-version-3.6.3-1
--- a/__pkginfo__.py Wed Mar 24 08:40:00 2010 +0100
+++ b/__pkginfo__.py Wed Mar 24 10:23:57 2010 +0100
@@ -7,7 +7,7 @@
distname = "cubicweb"
modname = "cubicweb"
-numversion = (3, 6, 3)
+numversion = (3, 7, 1)
version = '.'.join(str(num) for num in numversion)
license = 'LGPL'
@@ -30,7 +30,7 @@
web = 'http://www.cubicweb.org'
ftp = 'ftp://ftp.logilab.org/pub/cubicweb'
-pyversions = ['2.4', '2.5']
+pyversions = ['2.5', '2.6']
classifiers = [
'Environment :: Web Environment',
--- a/cwconfig.py Wed Mar 24 08:40:00 2010 +0100
+++ b/cwconfig.py Wed Mar 24 10:23:57 2010 +0100
@@ -1002,7 +1002,7 @@
_EXT_REGISTERED = False
def register_stored_procedures():
- from logilab.common.adbh import FunctionDescr
+ from logilab.database import FunctionDescr
from rql.utils import register_function, iter_funcnode_variables
global _EXT_REGISTERED
@@ -1014,8 +1014,7 @@
supported_backends = ('postgres', 'sqlite',)
rtype = 'String'
- @classmethod
- def st_description(cls, funcnode, mainindex, tr):
+ def st_description(self, funcnode, mainindex, tr):
return ', '.join(sorted(term.get_description(mainindex, tr)
for term in iter_funcnode_variables(funcnode)))
@@ -1027,6 +1026,7 @@
register_function(CONCAT_STRINGS) # XXX bw compat
+
class GROUP_CONCAT(CONCAT_STRINGS):
supported_backends = ('mysql', 'postgres', 'sqlite',)
@@ -1037,8 +1037,7 @@
supported_backends = ('postgres', 'sqlite',)
rtype = 'String'
- @classmethod
- def st_description(cls, funcnode, mainindex, tr):
+ def st_description(self, funcnode, mainindex, tr):
return funcnode.children[0].get_description(mainindex, tr)
register_function(LIMIT_SIZE)
@@ -1050,7 +1049,6 @@
register_function(TEXT_LIMIT_SIZE)
-
class FSPATH(FunctionDescr):
supported_backends = ('postgres', 'sqlite',)
rtype = 'Bytes'
--- a/cwvreg.py Wed Mar 24 08:40:00 2010 +0100
+++ b/cwvreg.py Wed Mar 24 10:23:57 2010 +0100
@@ -312,9 +312,7 @@
"""set instance'schema and load application objects"""
self._set_schema(schema)
# now we can load application's web objects
- searchpath = self.config.vregistry_path()
- self.reset(searchpath, force_reload=False)
- self.register_objects(searchpath, force_reload=False)
+ self._reload(self.config.vregistry_path(), force_reload=False)
# map lowered entity type names to their actual name
self.case_insensitive_etypes = {}
for eschema in self.schema.entities():
@@ -323,6 +321,14 @@
clear_cache(eschema, 'ordered_relations')
clear_cache(eschema, 'meta_attributes')
+ def _reload(self, path, force_reload):
+ CW_EVENT_MANAGER.emit('before-registry-reload')
+ # modification detected, reset and reload
+ self.reset(path, force_reload)
+ super(CubicWebVRegistry, self).register_objects(
+ path, force_reload, self.config.extrapath)
+ CW_EVENT_MANAGER.emit('after-registry-reload')
+
def _set_schema(self, schema):
"""set instance'schema"""
self.schema = schema
@@ -363,12 +369,7 @@
super(CubicWebVRegistry, self).register_objects(
path, force_reload, self.config.extrapath)
except RegistryOutOfDate:
- CW_EVENT_MANAGER.emit('before-registry-reload')
- # modification detected, reset and reload
- self.reset(path, force_reload)
- super(CubicWebVRegistry, self).register_objects(
- path, force_reload, self.config.extrapath)
- CW_EVENT_MANAGER.emit('after-registry-reload')
+ self._reload(path, force_reload)
def initialization_completed(self):
"""cw specific code once vreg initialization is completed:
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/dataimport.py Wed Mar 24 10:23:57 2010 +0100
@@ -0,0 +1,695 @@
+# -*- coding: utf-8 -*-
+"""This module provides tools to import tabular data.
+
+:organization: Logilab
+:copyright: 2001-2010 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
+:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
+:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
+
+
+Example of use (run this with `cubicweb-ctl shell instance import-script.py`):
+
+.. sourcecode:: python
+
+  from cubicweb.dataimport import *
+ # define data generators
+ GENERATORS = []
+
+ USERS = [('Prenom', 'firstname', ()),
+ ('Nom', 'surname', ()),
+ ('Identifiant', 'login', ()),
+ ]
+
+ def gen_users(ctl):
+ for row in ctl.get_data('utilisateurs'):
+ entity = mk_entity(row, USERS)
+ entity['upassword'] = u'motdepasse'
+ ctl.check('login', entity['login'], None)
+ ctl.store.add('CWUser', entity)
+ email = {'address': row['email']}
+ ctl.store.add('EmailAddress', email)
+ ctl.store.relate(entity['eid'], 'use_email', email['eid'])
+ ctl.store.rql('SET U in_group G WHERE G name "users", U eid %(x)s', {'x':entity['eid']})
+
+ CHK = [('login', check_doubles, 'Utilisateurs Login',
+ 'Deux utilisateurs ne devraient pas avoir le même login.'),
+ ]
+
+ GENERATORS.append( (gen_users, CHK) )
+
+ # create controller
+ ctl = CWImportController(RQLObjectStore(cnx))
+ ctl.askerror = 1
+ ctl.generators = GENERATORS
+  ctl.data['utilisateurs'] = lazytable(ucsvreader(open('users.csv')))
+ # run
+ ctl.run()
+
+.. BUG file with one column are not parsable
+.. TODO rollback() invocation is not possible yet
+"""
+__docformat__ = "restructuredtext en"
+
+import sys
+import csv
+import traceback
+import os.path as osp
+from StringIO import StringIO
+from copy import copy
+
+from logilab.common import shellutils
+from logilab.common.date import strptime
+from logilab.common.decorators import cached
+from logilab.common.deprecation import deprecated
+
+
+def ucsvreader_pb(filepath, encoding='utf-8', separator=',', quote='"',
+ skipfirst=False, withpb=True):
+ """same as ucsvreader but a progress bar is displayed as we iter on rows"""
+ if not osp.exists(filepath):
+ raise Exception("file doesn't exists: %s" % filepath)
+ rowcount = int(shellutils.Execute('wc -l "%s"' % filepath).out.strip().split()[0])
+ if skipfirst:
+ rowcount -= 1
+ if withpb:
+ pb = shellutils.ProgressBar(rowcount, 50)
+ for urow in ucsvreader(file(filepath), encoding, separator, quote, skipfirst):
+ yield urow
+ if withpb:
+ pb.update()
+ print ' %s rows imported' % rowcount
+
+def ucsvreader(stream, encoding='utf-8', separator=',', quote='"',
+ skipfirst=False):
+ """A csv reader that accepts files with any encoding and outputs unicode
+ strings
+ """
+ it = iter(csv.reader(stream, delimiter=separator, quotechar=quote))
+ if skipfirst:
+ it.next()
+ for row in it:
+ yield [item.decode(encoding) for item in row]
+
+def commit_every(nbit, store, it):
+ for i, x in enumerate(it):
+ yield x
+        if nbit is not None and not (i + 1) % nbit:
+ store.commit()
+ if nbit is not None:
+ store.commit()
+
+def lazytable(reader):
+ """The first row is taken to be the header of the table and
+ used to output a dict for each row of data.
+
+    >>> data = lazytable(ucsvreader(open(filename)))
+ """
+ header = reader.next()
+ for row in reader:
+ yield dict(zip(header, row))
+
+def mk_entity(row, map):
+ """Return a dict made from sanitized mapped values.
+
+ ValueError can be raised on unexpected values found in checkers
+
+ >>> row = {'myname': u'dupont'}
+ >>> map = [('myname', u'name', (call_transform_method('title'),))]
+ >>> mk_entity(row, map)
+ {'name': u'Dupont'}
+ >>> row = {'myname': u'dupont', 'optname': u''}
+ >>> map = [('myname', u'name', (call_transform_method('title'),)),
+ ... ('optname', u'MARKER', (optional,))]
+ >>> mk_entity(row, map)
+ {'name': u'Dupont', 'optname': None}
+ """
+ res = {}
+ assert isinstance(row, dict)
+ assert isinstance(map, list)
+ for src, dest, funcs in map:
+ res[dest] = row[src]
+ try:
+ for func in funcs:
+ res[dest] = func(res[dest])
+ if res[dest] is None:
+ break
+ except ValueError, err:
+ raise ValueError('error with %r field: %s' % (src, err))
+ return res
+
+
+# user interactions ############################################################
+
+def tell(msg):
+ print msg
+
+def confirm(question):
+ """A confirm function that asks for yes/no/abort and exits on abort."""
+ answer = shellutils.ASK.ask(question, ('Y', 'n', 'abort'), 'Y')
+ if answer == 'abort':
+ sys.exit(1)
+ return answer == 'Y'
+
+
+class catch_error(object):
+ """Helper for @contextmanager decorator."""
+
+ def __init__(self, ctl, key='unexpected error', msg=None):
+ self.ctl = ctl
+ self.key = key
+ self.msg = msg
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ if type is not None:
+ if issubclass(type, (KeyboardInterrupt, SystemExit)):
+ return # re-raise
+ if self.ctl.catcherrors:
+ self.ctl.record_error(self.key, None, type, value, traceback)
+ return True # silent
+
+
+# base sanitizing/coercing functions ###########################################
+
+def optional(value):
+ """checker to filter optional field
+
+ If value is undefined (ex: empty string), return None that will
+ break the checkers validation chain
+
+ General use is to add 'optional' check in first condition to avoid
+ ValueError by further checkers
+
+ >>> MAPPER = [(u'value', 'value', (optional, int))]
+ >>> row = {'value': u'XXX'}
+ >>> mk_entity(row, MAPPER)
+ {'value': None}
+ >>> row = {'value': u'100'}
+ >>> mk_entity(row, MAPPER)
+ {'value': 100}
+ """
+ if value:
+ return value
+ return None
+
+def required(value):
+    """raise ValueError if value is empty
+
+ This check should be often found in last position in the chain.
+ """
+ if value:
+ return value
+ raise ValueError("required")
+
+def todatetime(format='%d/%m/%Y'):
+ """return a transformation function to turn string input value into a
+ `datetime.datetime` instance, using given format.
+
+ Follow it by `todate` or `totime` functions from `logilab.common.date` if
+ you want a `date`/`time` instance instead of `datetime`.
+ """
+ def coerce(value):
+ return strptime(value, format)
+ return coerce
+
+def call_transform_method(methodname, *args, **kwargs):
+ """return value returned by calling the given method on input"""
+ def coerce(value):
+ return getattr(value, methodname)(*args, **kwargs)
+ return coerce
+
+def call_check_method(methodname, *args, **kwargs):
+ """check value returned by calling the given method on input is true,
+ else raise ValueError
+ """
+ def check(value):
+ if getattr(value, methodname)(*args, **kwargs):
+ return value
+ raise ValueError('%s not verified on %r' % (methodname, value))
+ return check
+
+# base integrity checking functions ############################################
+
+def check_doubles(buckets):
+ """Extract the keys that have more than one item in their bucket."""
+ return [(k, len(v)) for k, v in buckets.items() if len(v) > 1]
+
+def check_doubles_not_none(buckets):
+ """Extract the keys that have more than one item in their bucket."""
+ return [(k, len(v)) for k, v in buckets.items()
+ if k is not None and len(v) > 1]
+
+
+# object stores #################################################################
+
+class ObjectStore(object):
+ """Store objects in memory for *faster* validation (development mode)
+
+ But it will not enforce the constraints of the schema and hence will miss some problems
+
+ >>> store = ObjectStore()
+ >>> user = {'login': 'johndoe'}
+ >>> store.add('CWUser', user)
+ >>> group = {'name': 'unknown'}
+ >>> store.add('CWUser', group)
+ >>> store.relate(user['eid'], 'in_group', group['eid'])
+ """
+ def __init__(self):
+ self.items = []
+ self.eids = {}
+ self.types = {}
+ self.relations = set()
+ self.indexes = {}
+ self._rql = None
+ self._commit = None
+
+ def _put(self, type, item):
+ self.items.append(item)
+ return len(self.items) - 1
+
+ def add(self, type, item):
+        assert isinstance(item, dict), 'item is not a dict but a %s' % item.__class__
+ eid = item['eid'] = self._put(type, item)
+ self.eids[eid] = item
+ self.types.setdefault(type, []).append(eid)
+
+ def relate(self, eid_from, rtype, eid_to, inlined=False):
+ """Add new relation"""
+ relation = eid_from, rtype, eid_to
+ self.relations.add(relation)
+ return relation
+
+ def commit(self):
+ """this commit method do nothing by default
+
+ This is voluntary to use the frequent autocommit feature in CubicWeb
+ when you are using hooks or another
+
+ If you want override commit method, please set it by the
+ constructor
+ """
+ pass
+
+ def rql(self, *args):
+ if self._rql is not None:
+ return self._rql(*args)
+
+ @property
+ def nb_inserted_entities(self):
+ return len(self.eids)
+ @property
+ def nb_inserted_types(self):
+ return len(self.types)
+ @property
+ def nb_inserted_relations(self):
+ return len(self.relations)
+
+ @deprecated("[3.7] index support will disappear")
+ def build_index(self, name, type, func=None, can_be_empty=False):
+ """build internal index for further search"""
+ index = {}
+ if func is None or not callable(func):
+ func = lambda x: x['eid']
+ for eid in self.types[type]:
+ index.setdefault(func(self.eids[eid]), []).append(eid)
+ if not can_be_empty:
+ assert index, "new index '%s' cannot be empty" % name
+ self.indexes[name] = index
+
+ @deprecated("[3.7] index support will disappear")
+ def build_rqlindex(self, name, type, key, rql, rql_params=False,
+ func=None, can_be_empty=False):
+ """build an index by rql query
+
+ rql should return eid in first column
+ ctl.store.build_index('index_name', 'users', 'login', 'Any U WHERE U is CWUser')
+ """
+ self.types[type] = []
+ rset = self.rql(rql, rql_params or {})
+ if not can_be_empty:
+ assert rset, "new index type '%s' cannot be empty (0 record found)" % type
+ for entity in rset.entities():
+ getattr(entity, key) # autopopulate entity with key attribute
+ self.eids[entity.eid] = dict(entity)
+ if entity.eid not in self.types[type]:
+ self.types[type].append(entity.eid)
+
+ # Build index with specified key
+ func = lambda x: x[key]
+ self.build_index(name, type, func, can_be_empty=can_be_empty)
+
+ @deprecated("[3.7] index support will disappear")
+ def fetch(self, name, key, unique=False, decorator=None):
+ """index fetcher method
+
+ decorator is a callable method or an iterator of callable methods (usually a lambda function)
+ decorator=lambda x: x[:1] (first value is returned)
+ decorator=lambda x: x.lower (lowercased value is returned)
+
+ decorator is handy when you want to improve index keys but without
+ changing the original field
+
+ Same check functions can be reused here.
+ """
+ eids = self.indexes[name].get(key, [])
+ if decorator is not None:
+ if not hasattr(decorator, '__iter__'):
+ decorator = (decorator,)
+ for f in decorator:
+ eids = f(eids)
+ if unique:
+            assert len(eids) == 1, u'expected a single value for key "%s" in index "%s". Got %i' % (key, name, len(eids))
+ eids = eids[0]
+ return eids
+
+ @deprecated("[3.7] index support will disappear")
+ def find(self, type, key, value):
+ for idx in self.types[type]:
+ item = self.items[idx]
+ if item[key] == value:
+ yield item
+
+ @deprecated("[3.7] checkpoint() deprecated. use commit() instead")
+ def checkpoint(self):
+ self.commit()
+
+
+class RQLObjectStore(ObjectStore):
+ """ObjectStore that works with an actual RQL repository (production mode)"""
+ _rql = None # bw compat
+
+ def __init__(self, session=None, commit=None):
+ ObjectStore.__init__(self)
+ if session is not None:
+ if not hasattr(session, 'set_pool'):
+ # connection
+ cnx = session
+ session = session.request()
+ session.set_pool = lambda : None
+ commit = commit or cnx.commit
+ else:
+ session.set_pool()
+ self.session = session
+ self._commit = commit or session.commit
+ elif commit is not None:
+ self._commit = commit
+ # XXX .session
+
+ @deprecated("[3.7] checkpoint() deprecated. use commit() instead")
+ def checkpoint(self):
+ self.commit()
+
+ def commit(self):
+ self._commit()
+ self.session.set_pool()
+
+ def rql(self, *args):
+ if self._rql is not None:
+ return self._rql(*args)
+ return self.session.execute(*args)
+
+ def create_entity(self, *args, **kwargs):
+ entity = self.session.create_entity(*args, **kwargs)
+ self.eids[entity.eid] = entity
+ self.types.setdefault(args[0], []).append(entity.eid)
+ return entity
+
+ def _put(self, type, item):
+ query = ('INSERT %s X: ' % type) + ', '.join('X %s %%(%s)s' % (k, k)
+ for k in item)
+ return self.rql(query, item)[0][0]
+
+ def relate(self, eid_from, rtype, eid_to, inlined=False):
+ eid_from, rtype, eid_to = super(RQLObjectStore, self).relate(
+ eid_from, rtype, eid_to)
+ self.rql('SET X %s Y WHERE X eid %%(x)s, Y eid %%(y)s' % rtype,
+ {'x': int(eid_from), 'y': int(eid_to)}, ('x', 'y'))
+
+
+# the import controller ########################################################
+
+class CWImportController(object):
+ """Controller of the data import process.
+
+ >>> ctl = CWImportController(store)
+ >>> ctl.generators = list_of_data_generators
+ >>> ctl.data = dict_of_data_tables
+ >>> ctl.run()
+ """
+
+ def __init__(self, store, askerror=0, catcherrors=None, tell=tell,
+ commitevery=50):
+ self.store = store
+ self.generators = None
+ self.data = {}
+ self.errors = None
+ self.askerror = askerror
+ if catcherrors is None:
+ catcherrors = askerror
+ self.catcherrors = catcherrors
+ self.commitevery = commitevery # set to None to do a single commit
+ self._tell = tell
+
+ def check(self, type, key, value):
+ self._checks.setdefault(type, {}).setdefault(key, []).append(value)
+
+ def check_map(self, entity, key, map, default):
+ try:
+ entity[key] = map[entity[key]]
+ except KeyError:
+ self.check(key, entity[key], None)
+ entity[key] = default
+
+ def record_error(self, key, msg=None, type=None, value=None, tb=None):
+ tmp = StringIO()
+ if type is None:
+ traceback.print_exc(file=tmp)
+ else:
+ traceback.print_exception(type, value, tb, file=tmp)
+ print tmp.getvalue()
+        # use a list to avoid counting <nb lines> errors instead of one
+ errorlog = self.errors.setdefault(key, [])
+ if msg is None:
+ errorlog.append(tmp.getvalue().splitlines())
+ else:
+ errorlog.append( (msg, tmp.getvalue().splitlines()) )
+
+ def run(self):
+ self.errors = {}
+ for func, checks in self.generators:
+ self._checks = {}
+ func_name = func.__name__
+ self.tell("Run import function '%s'..." % func_name)
+ try:
+ func(self)
+ except:
+ if self.catcherrors:
+ self.record_error(func_name, 'While calling %s' % func.__name__)
+ else:
+ self._print_stats()
+ raise
+ for key, func, title, help in checks:
+ buckets = self._checks.get(key)
+ if buckets:
+ err = func(buckets)
+ if err:
+ self.errors[title] = (help, err)
+ self.store.commit()
+ self._print_stats()
+ if self.errors:
+ if self.askerror == 2 or (self.askerror and confirm('Display errors ?')):
+ from pprint import pformat
+ for errkey, error in self.errors.items():
+ self.tell("\n%s (%s): %d\n" % (error[0], errkey, len(error[1])))
+ self.tell(pformat(sorted(error[1])))
+
+ def _print_stats(self):
+ nberrors = sum(len(err[1]) for err in self.errors.values())
+ self.tell('\nImport statistics: %i entities, %i types, %i relations and %i errors'
+ % (self.store.nb_inserted_entities,
+ self.store.nb_inserted_types,
+ self.store.nb_inserted_relations,
+ nberrors))
+
+ def get_data(self, key):
+ return self.data.get(key)
+
+ def index(self, name, key, value, unique=False):
+ """create a new index
+
+        If unique is set to True, only the first occurrence will be kept, not the following ones
+ """
+ if unique:
+ try:
+ if value in self.store.indexes[name][key]:
+ return
+ except KeyError:
+                # we're sure that this one is the first occurrence; so continue...
+ pass
+ self.store.indexes.setdefault(name, {}).setdefault(key, []).append(value)
+
+ def tell(self, msg):
+ self._tell(msg)
+
+ def iter_and_commit(self, datakey):
+ """iter rows, triggering commit every self.commitevery iterations"""
+ return commit_every(self.commitevery, self.store, self.get_data(datakey))
+
+
+
+from datetime import datetime
+from cubicweb.schema import META_RTYPES, VIRTUAL_RTYPES
+
+
+class NoHookRQLObjectStore(RQLObjectStore):
+ """ObjectStore that works with an actual RQL repository (production mode)"""
+ _rql = None # bw compat
+
+ def __init__(self, session, metagen=None, baseurl=None):
+ super(NoHookRQLObjectStore, self).__init__(session)
+ self.source = session.repo.system_source
+ self.rschema = session.repo.schema.rschema
+ self.add_relation = self.source.add_relation
+ if metagen is None:
+ metagen = MetaGenerator(session, baseurl)
+ self.metagen = metagen
+ self._nb_inserted_entities = 0
+ self._nb_inserted_types = 0
+ self._nb_inserted_relations = 0
+ self.rql = session.unsafe_execute
+ # disable undoing
+ session.undo_actions = frozenset()
+
+ def create_entity(self, etype, **kwargs):
+ for k, v in kwargs.iteritems():
+ kwargs[k] = getattr(v, 'eid', v)
+ entity, rels = self.metagen.base_etype_dicts(etype)
+ entity = copy(entity)
+ entity._related_cache = {}
+ self.metagen.init_entity(entity)
+ entity.update(kwargs)
+ session = self.session
+ self.source.add_entity(session, entity)
+ self.source.add_info(session, entity, self.source, complete=False)
+ for rtype, targeteids in rels.iteritems():
+ # targeteids may be a single eid or a list of eids
+ inlined = self.rschema(rtype).inlined
+ try:
+ for targeteid in targeteids:
+ self.add_relation(session, entity.eid, rtype, targeteid,
+ inlined)
+ except TypeError:
+ self.add_relation(session, entity.eid, rtype, targeteids,
+ inlined)
+ self._nb_inserted_entities += 1
+ return entity
+
+ def relate(self, eid_from, rtype, eid_to):
+ assert not rtype.startswith('reverse_')
+ self.add_relation(self.session, eid_from, rtype, eid_to,
+ self.rschema(rtype).inlined)
+ self._nb_inserted_relations += 1
+
+ @property
+ def nb_inserted_entities(self):
+ return self._nb_inserted_entities
+ @property
+ def nb_inserted_types(self):
+ return self._nb_inserted_types
+ @property
+ def nb_inserted_relations(self):
+ return self._nb_inserted_relations
+
+ def _put(self, type, item):
+ raise RuntimeError('use create entity')
+
+
+class MetaGenerator(object):
+ def __init__(self, session, baseurl=None):
+ self.session = session
+ self.source = session.repo.system_source
+ self.time = datetime.now()
+ if baseurl is None:
+ config = session.vreg.config
+ baseurl = config['base-url'] or config.default_base_url()
+ if not baseurl[-1] == '/':
+ baseurl += '/'
+ self.baseurl = baseurl
+ # attributes/relations shared by all entities of the same type
+ self.etype_attrs = []
+ self.etype_rels = []
+ # attributes/relations specific to each entity
+ self.entity_attrs = ['eid', 'cwuri']
+ #self.entity_rels = [] XXX not handled (YAGNI?)
+ schema = session.vreg.schema
+ rschema = schema.rschema
+ for rtype in META_RTYPES:
+ if rtype in ('eid', 'cwuri') or rtype in VIRTUAL_RTYPES:
+ continue
+ if rschema(rtype).final:
+ self.etype_attrs.append(rtype)
+ else:
+ self.etype_rels.append(rtype)
+ if not schema._eid_index:
+ # test schema loaded from the fs
+ self.gen_is = self.test_gen_is
+ self.gen_is_instance_of = self.test_gen_is_instanceof
+
+ @cached
+ def base_etype_dicts(self, etype):
+ entity = self.session.vreg['etypes'].etype_class(etype)(self.session)
+ # entity are "surface" copied, avoid shared dict between copies
+ del entity.cw_extra_kwargs
+ for attr in self.etype_attrs:
+ entity[attr] = self.generate(entity, attr)
+ rels = {}
+ for rel in self.etype_rels:
+ rels[rel] = self.generate(entity, rel)
+ return entity, rels
+
+ def init_entity(self, entity):
+ for attr in self.entity_attrs:
+ entity[attr] = self.generate(entity, attr)
+ entity.eid = entity['eid']
+
+ def generate(self, entity, rtype):
+ return getattr(self, 'gen_%s' % rtype)(entity)
+
+ def gen_eid(self, entity):
+ return self.source.create_eid(self.session)
+
+ def gen_cwuri(self, entity):
+ return u'%seid/%s' % (self.baseurl, entity['eid'])
+
+ def gen_creation_date(self, entity):
+ return self.time
+ def gen_modification_date(self, entity):
+ return self.time
+
+ def gen_is(self, entity):
+ return entity.e_schema.eid
+ def gen_is_instance_of(self, entity):
+ eids = []
+ for etype in entity.e_schema.ancestors() + [entity.e_schema]:
+ eids.append(entity.e_schema.eid)
+ return eids
+
+ def gen_created_by(self, entity):
+ return self.session.user.eid
+ def gen_owned_by(self, entity):
+ return self.session.user.eid
+
+ # implementations of gen_is / gen_is_instance_of to use during test where
+ # schema has been loaded from the fs (hence entity type schema eids are not
+ # known)
+ def test_gen_is(self, entity):
+ from cubicweb.hooks.metadata import eschema_eid
+ return eschema_eid(self.session, entity.e_schema)
+ def test_gen_is_instanceof(self, entity):
+ from cubicweb.hooks.metadata import eschema_eid
+ eids = []
+ for eschema in entity.e_schema.ancestors() + [entity.e_schema]:
+ eids.append(eschema_eid(self.session, eschema))
+ return eids
--- a/dbapi.py Wed Mar 24 08:40:00 2010 +0100
+++ b/dbapi.py Wed Mar 24 10:23:57 2010 +0100
@@ -57,6 +57,7 @@
etypescls = cwvreg.VRegistry.REGISTRY_FACTORY['etypes']
etypescls.etype_class = etypescls.orig_etype_class
+
class ConnectionProperties(object):
def __init__(self, cnxtype=None, lang=None, close=True, log=False):
self.cnxtype = cnxtype or 'pyro'
@@ -203,11 +204,6 @@
self.pgettext = lambda x, y: y
self.debug('request default language: %s', self.lang)
- def decorate_rset(self, rset):
- rset.vreg = self.vreg
- rset.req = self
- return rset
-
def describe(self, eid):
"""return a tuple (type, sourceuri, extid) for the entity with id <eid>"""
return self.cnx.describe(eid)
@@ -242,7 +238,7 @@
def get_session_data(self, key, default=None, pop=False):
"""return value associated to `key` in session data"""
if self.cnx is None:
- return None # before the connection has been established
+ return default # before the connection has been established
return self.cnx.get_session_data(key, default, pop)
def set_session_data(self, key, value):
@@ -398,14 +394,20 @@
def check(self):
"""raise `BadSessionId` if the connection is no more valid"""
+ if self._closed is not None:
+ raise ProgrammingError('Closed connection')
self._repo.check_session(self.sessionid)
def set_session_props(self, **props):
"""raise `BadSessionId` if the connection is no more valid"""
+ if self._closed is not None:
+ raise ProgrammingError('Closed connection')
self._repo.set_session_props(self.sessionid, props)
def get_shared_data(self, key, default=None, pop=False):
"""return value associated to `key` in shared data"""
+ if self._closed is not None:
+ raise ProgrammingError('Closed connection')
return self._repo.get_shared_data(self.sessionid, key, default, pop)
def set_shared_data(self, key, value, querydata=False):
@@ -416,6 +418,8 @@
transaction, and won't be available through the connexion, only on the
repository side.
"""
+ if self._closed is not None:
+ raise ProgrammingError('Closed connection')
return self._repo.set_shared_data(self.sessionid, key, value, querydata)
def get_schema(self):
@@ -501,6 +505,8 @@
def user(self, req=None, props=None):
"""return the User object associated to this connection"""
# cnx validity is checked by the call to .user_info
+ if self._closed is not None:
+ raise ProgrammingError('Closed connection')
eid, login, groups, properties = self._repo.user_info(self.sessionid,
props)
if req is None:
@@ -521,6 +527,8 @@
pass
def describe(self, eid):
+ if self._closed is not None:
+ raise ProgrammingError('Closed connection')
return self._repo.describe(self.sessionid, eid)
def close(self):
@@ -535,19 +543,20 @@
if self._closed:
raise ProgrammingError('Connection is already closed')
self._repo.close(self.sessionid)
+ del self._repo # necessary for proper garbage collection
self._closed = 1
def commit(self):
- """Commit any pending transaction to the database. Note that if the
- database supports an auto-commit feature, this must be initially off. An
- interface method may be provided to turn it back on.
+ """Commit pending transaction for this connection to the repository.
- Database modules that do not support transactions should implement this
- method with void functionality.
+        may raise `Unauthorized` or `ValidationError` if we attempted to do
+        something we're not allowed to for security or integrity reasons.
+
+ If the transaction is undoable, a transaction id will be returned.
"""
if not self._closed is None:
raise ProgrammingError('Connection is already closed')
- self._repo.commit(self.sessionid)
+ return self._repo.commit(self.sessionid)
def rollback(self):
"""This method is optional since not all databases provide transaction
@@ -574,6 +583,73 @@
req = self.request()
return self.cursor_class(self, self._repo, req=req)
+ # undo support ############################################################
+
+ def undoable_transactions(self, ueid=None, req=None, **actionfilters):
+ """Return a list of undoable transaction objects by the connection's
+ user, ordered by descendant transaction time.
+
+ Managers may filter according to user (eid) who has done the transaction
+ using the `ueid` argument. Others will only see their own transactions.
+
+ Additional filtering capabilities is provided by using the following
+ named arguments:
+
+ * `etype` to get only transactions creating/updating/deleting entities
+ of the given type
+
+ * `eid` to get only transactions applied to entity of the given eid
+
+ * `action` to get only transactions doing the given action (action in
+ 'C', 'U', 'D', 'A', 'R'). If `etype`, action can only be 'C', 'U' or
+ 'D'.
+
+ * `public`: when additional filtering is provided, their are by default
+ only searched in 'public' actions, unless a `public` argument is given
+ and set to false.
+ """
+ txinfos = self._repo.undoable_transactions(self.sessionid, ueid,
+ **actionfilters)
+ if req is None:
+ req = self.request()
+ for txinfo in txinfos:
+ txinfo.req = req
+ return txinfos
+
+ def transaction_info(self, txuuid, req=None):
+ """Return transaction object for the given uid.
+
+ raise `NoSuchTransaction` if not found or if session's user is not
+ allowed (eg not in managers group and the transaction doesn't belong to
+ him).
+ """
+ txinfo = self._repo.transaction_info(self.sessionid, txuuid)
+ if req is None:
+ req = self.request()
+ txinfo.req = req
+ return txinfo
+
+ def transaction_actions(self, txuuid, public=True):
+        """Return an ordered list of actions performed during that transaction.
+
+ If public is true, return only 'public' actions, eg not ones triggered
+ under the cover by hooks, else return all actions.
+
+ raise `NoSuchTransaction` if the transaction is not found or if
+ session's user is not allowed (eg not in managers group and the
+ transaction doesn't belong to him).
+ """
+ return self._repo.transaction_actions(self.sessionid, txuuid, public)
+
+ def undo_transaction(self, txuuid):
+ """Undo the given transaction. Return potential restoration errors.
+
+ raise `NoSuchTransaction` if not found or if session's user is not
+ allowed (eg not in managers group and the transaction doesn't belong to
+ him).
+ """
+ return self._repo.undo_transaction(self.sessionid, txuuid)
+
# cursor object ###############################################################
@@ -646,11 +722,11 @@
Return values are not defined by the DB-API, but this here it returns a
ResultSet object.
"""
- self._res = res = self._repo.execute(self._sessid, operation,
- parameters, eid_key, build_descr)
- self.req.decorate_rset(res)
+ self._res = rset = self._repo.execute(self._sessid, operation,
+ parameters, eid_key, build_descr)
+ rset.req = self.req
self._index = 0
- return res
+ return rset
def executemany(self, operation, seq_of_parameters):
--- a/debian/changelog Wed Mar 24 08:40:00 2010 +0100
+++ b/debian/changelog Wed Mar 24 10:23:57 2010 +0100
@@ -1,4 +1,20 @@
-cubicweb (3.6.3-1) unstable; urgency=low
+cubicweb (3.7.1-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Fri, 19 Mar 2010 14:47:23 +0100
+
+cubicweb (3.7.0-1) unstable; urgency=low
+
+ * remove postgresql-contrib from cubicweb dependency (using tsearch
+ which is included with postgres >= 8.3)
+ * add postgresql-client | mysql-client to cubicweb-server dependencies using two
+ new cubicweb-[postgresql|mysql]-support virtual packages (necessary for
+ dump/restore of database)
+
+ -- Sylvain Thénault <sylvain.thenault@logilab.fr> Tue, 16 Mar 2010 17:55:37 +0100
+
+cubicweb (3.6.3-1) unstable; urgency=low
* remove postgresql-contrib from cubicweb dependency (using tsearch
which is included with postgres >= 8.3)
--- a/debian/control Wed Mar 24 08:40:00 2010 +0100
+++ b/debian/control Wed Mar 24 10:23:57 2010 +0100
@@ -7,10 +7,10 @@
Adrien Di Mascio <Adrien.DiMascio@logilab.fr>,
Aurélien Campéas <aurelien.campeas@logilab.fr>,
Nicolas Chauvat <nicolas.chauvat@logilab.fr>
-Build-Depends: debhelper (>= 5), python-dev (>=2.4), python-central (>= 0.5)
+Build-Depends: debhelper (>= 5), python-dev (>=2.5), python-central (>= 0.5)
Standards-Version: 3.8.0
Homepage: http://www.cubicweb.org
-XS-Python-Version: >= 2.4, << 2.6
+XS-Python-Version: >= 2.5, << 2.6
Package: cubicweb
Architecture: all
@@ -33,8 +33,7 @@
Conflicts: cubicweb-multisources
Replaces: cubicweb-multisources
Provides: cubicweb-multisources
-# postgresql/mysql -client packages for backup/restore of non local database
-Depends: ${python:Depends}, cubicweb-common (= ${source:Version}), cubicweb-ctl (= ${source:Version}), python-indexer (>= 0.6.1), cubicweb-postgresql-support | cubicweb-mysql-support | python-pysqlite2
+Depends: ${python:Depends}, cubicweb-common (= ${source:Version}), cubicweb-ctl (= ${source:Version}), python-logilab-database, cubicweb-postgresql-support | cubicweb-mysql-support | python-pysqlite2
Recommends: pyro, cubicweb-documentation (= ${source:Version})
Description: server part of the CubicWeb framework
CubicWeb is a semantic web application framework.
@@ -98,7 +97,7 @@
Package: cubicweb-common
Architecture: all
XB-Python-Version: ${python:Versions}
-Depends: ${python:Depends}, graphviz, gettext, python-logilab-mtconverter (>= 0.6.0), python-logilab-common (>= 0.48.1), python-yams (>= 0.28.0), python-rql (>= 0.24.0), python-lxml
+Depends: ${python:Depends}, graphviz, gettext, python-logilab-mtconverter (>= 0.6.0), python-logilab-common (>= 0.49.0), python-yams (>= 0.28.1), python-rql (>= 0.25.0), python-lxml
Recommends: python-simpletal (>= 4.0), python-crypto
Conflicts: cubicweb-core
Replaces: cubicweb-core
--- a/devtools/__init__.py Wed Mar 24 08:40:00 2010 +0100
+++ b/devtools/__init__.py Wed Mar 24 10:23:57 2010 +0100
@@ -106,8 +106,6 @@
self.init_log(log_threshold, force=True)
# need this, usually triggered by cubicweb-ctl
self.load_cwctl_plugins()
- self.global_set_option('anonymous-user', 'anon')
- self.global_set_option('anonymous-password', 'anon')
anonymous_user = TwistedConfiguration.anonymous_user.im_func
@@ -123,6 +121,8 @@
super(TestServerConfiguration, self).load_configuration()
self.global_set_option('anonymous-user', 'anon')
self.global_set_option('anonymous-password', 'anon')
+ # no undo support in tests
+ self.global_set_option('undo-support', '')
def main_config_file(self):
"""return instance's control configuration file"""
--- a/devtools/dataimport.py Wed Mar 24 08:40:00 2010 +0100
+++ b/devtools/dataimport.py Wed Mar 24 10:23:57 2010 +0100
@@ -1,752 +1,4 @@
-# -*- coding: utf-8 -*-
-"""This module provides tools to import tabular data.
-
-:organization: Logilab
-:copyright: 2001-2010 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
-:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
-:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
-
-
-Example of use (run this with `cubicweb-ctl shell instance import-script.py`):
-
-.. sourcecode:: python
-
- from cubicweb.devtools.dataimport import *
- # define data generators
- GENERATORS = []
-
- USERS = [('Prenom', 'firstname', ()),
- ('Nom', 'surname', ()),
- ('Identifiant', 'login', ()),
- ]
-
- def gen_users(ctl):
- for row in ctl.get_data('utilisateurs'):
- entity = mk_entity(row, USERS)
- entity['upassword'] = u'motdepasse'
- ctl.check('login', entity['login'], None)
- ctl.store.add('CWUser', entity)
- email = {'address': row['email']}
- ctl.store.add('EmailAddress', email)
- ctl.store.relate(entity['eid'], 'use_email', email['eid'])
- ctl.store.rql('SET U in_group G WHERE G name "users", U eid %(x)s', {'x':entity['eid']})
-
- CHK = [('login', check_doubles, 'Utilisateurs Login',
- 'Deux utilisateurs ne devraient pas avoir le même login.'),
- ]
-
- GENERATORS.append( (gen_users, CHK) )
-
- # create controller
- ctl = CWImportController(RQLObjectStore())
- ctl.askerror = 1
- ctl.generators = GENERATORS
- ctl.store._checkpoint = checkpoint
- ctl.store._rql = rql
- ctl.data['utilisateurs'] = lazytable(utf8csvreader(open('users.csv')))
- # run
- ctl.run()
- sys.exit(0)
-
-
-.. BUG fichier à une colonne pose un problème de parsing
-.. TODO rollback()
-"""
-__docformat__ = "restructuredtext en"
-
-import sys
-import csv
-import traceback
-import os.path as osp
-from StringIO import StringIO
-from copy import copy
-
-from logilab.common import shellutils
-from logilab.common.date import strptime
-from logilab.common.decorators import cached
-from logilab.common.deprecation import deprecated
-
-
-def ucsvreader_pb(filepath, encoding='utf-8', separator=',', quote='"',
- skipfirst=False, withpb=True):
- """same as ucsvreader but a progress bar is displayed as we iter on rows"""
- if not osp.exists(filepath):
- raise Exception("file doesn't exists: %s" % filepath)
- rowcount = int(shellutils.Execute('wc -l "%s"' % filepath).out.strip().split()[0])
- if skipfirst:
- rowcount -= 1
- if withpb:
- pb = shellutils.ProgressBar(rowcount, 50)
- for urow in ucsvreader(file(filepath), encoding, separator, quote, skipfirst):
- yield urow
- if withpb:
- pb.update()
- print ' %s rows imported' % rowcount
-
-def ucsvreader(stream, encoding='utf-8', separator=',', quote='"',
- skipfirst=False):
- """A csv reader that accepts files with any encoding and outputs unicode
- strings
- """
- it = iter(csv.reader(stream, delimiter=separator, quotechar=quote))
- if skipfirst:
- it.next()
- for row in it:
- yield [item.decode(encoding) for item in row]
-
-def commit_every(nbit, store, it):
- for i, x in enumerate(it):
- yield x
- if nbit is not None and i % nbit:
- store.checkpoint()
- if nbit is not None:
- store.checkpoint()
-
-def lazytable(reader):
- """The first row is taken to be the header of the table and
- used to output a dict for each row of data.
-
- >>> data = lazytable(utf8csvreader(open(filename)))
- """
- header = reader.next()
- for row in reader:
- yield dict(zip(header, row))
-
-def mk_entity(row, map):
- """Return a dict made from sanitized mapped values.
-
- ValidationError can be raised on unexpected values found in checkers
-
- >>> row = {'myname': u'dupont'}
- >>> map = [('myname', u'name', (capitalize_if_unicase,))]
- >>> mk_entity(row, map)
- {'name': u'Dupont'}
- >>> row = {'myname': u'dupont', 'optname': u''}
- >>> map = [('myname', u'name', (capitalize_if_unicase,)),
- ... ('optname', u'MARKER', (optional,))]
- >>> mk_entity(row, map)
- {'name': u'Dupont'}
- """
- res = {}
- assert isinstance(row, dict)
- assert isinstance(map, list)
- for src, dest, funcs in map:
- assert not (required in funcs and optional in funcs), \
- "optional and required checks are exclusive"
- res[dest] = row[src]
- try:
- for func in funcs:
- res[dest] = func(res[dest])
- if res[dest] is None:
- break
- except ValueError, err:
- raise ValueError('error with %r field: %s' % (src, err))
- return res
-
-
-# user interactions ############################################################
-
-def tell(msg):
- print msg
-
-def confirm(question):
- """A confirm function that asks for yes/no/abort and exits on abort."""
- answer = shellutils.ASK.ask(question, ('Y', 'n', 'abort'), 'Y')
- if answer == 'abort':
- sys.exit(1)
- return answer == 'Y'
-
-
-class catch_error(object):
- """Helper for @contextmanager decorator."""
-
- def __init__(self, ctl, key='unexpected error', msg=None):
- self.ctl = ctl
- self.key = key
- self.msg = msg
-
- def __enter__(self):
- return self
-
- def __exit__(self, type, value, traceback):
- if type is not None:
- if issubclass(type, (KeyboardInterrupt, SystemExit)):
- return # re-raise
- if self.ctl.catcherrors:
- self.ctl.record_error(self.key, None, type, value, traceback)
- return True # silent
-
-
-# base sanitizing/coercing functions ###########################################
-
-def optional(value):
- """validation error will not been raised if you add this checker in chain"""
- if value:
- return value
- return None
-
-def required(value):
- """raise ValueError is value is empty
-
- This check should be often found in last position in the chain.
- """
- if value:
- return value
- raise ValueError("required")
-
-def todatetime(format='%d/%m/%Y'):
- """return a transformation function to turn string input value into a
- `datetime.datetime` instance, using given format.
-
- Follow it by `todate` or `totime` functions from `logilab.common.date` if
- you want a `date`/`time` instance instead of `datetime`.
- """
- def coerce(value):
- return strptime(value, format)
- return coerce
-
-def call_transform_method(methodname, *args, **kwargs):
- """return value returned by calling the given method on input"""
- def coerce(value):
- return getattr(value, methodname)(*args, **kwargs)
- return coerce
-
-def call_check_method(methodname, *args, **kwargs):
- """check value returned by calling the given method on input is true,
- else raise ValueError
- """
- def check(value):
- if getattr(value, methodname)(*args, **kwargs):
- return value
- raise ValueError('%s not verified on %r' % (methodname, value))
- return check
-
-# base integrity checking functions ############################################
-
-def check_doubles(buckets):
- """Extract the keys that have more than one item in their bucket."""
- return [(k, len(v)) for k, v in buckets.items() if len(v) > 1]
-
-def check_doubles_not_none(buckets):
- """Extract the keys that have more than one item in their bucket."""
- return [(k, len(v)) for k, v in buckets.items()
- if k is not None and len(v) > 1]
-
-
-# object stores #################################################################
-
-class ObjectStore(object):
- """Store objects in memory for *faster* validation (development mode)
-
- But it will not enforce the constraints of the schema and hence will miss some problems
-
- >>> store = ObjectStore()
- >>> user = {'login': 'johndoe'}
- >>> store.add('CWUser', user)
- >>> group = {'name': 'unknown'}
- >>> store.add('CWUser', group)
- >>> store.relate(user['eid'], 'in_group', group['eid'])
- """
- def __init__(self):
- self.items = []
- self.eids = {}
- self.types = {}
- self.relations = set()
- self.indexes = {}
- self._rql = None
- self._checkpoint = None
-
- def _put(self, type, item):
- self.items.append(item)
- return len(self.items) - 1
-
- def add(self, type, item):
- assert isinstance(item, dict), 'item is not a dict but a %s' % type(item)
- eid = item['eid'] = self._put(type, item)
- self.eids[eid] = item
- self.types.setdefault(type, []).append(eid)
-
- def relate(self, eid_from, rtype, eid_to, inlined=False):
- """Add new relation (reverse type support is available)
-
- >>> 1,2 = eid_from, eid_to
- >>> self.relate(eid_from, 'in_group', eid_to)
- 1, 'in_group', 2
- >>> self.relate(eid_from, 'reverse_in_group', eid_to)
- 2, 'in_group', 1
- """
- if rtype.startswith('reverse_'):
- eid_from, eid_to = eid_to, eid_from
- rtype = rtype[8:]
- relation = eid_from, rtype, eid_to
- self.relations.add(relation)
- return relation
-
- def build_index(self, name, type, func=None):
- index = {}
- if func is None or not callable(func):
- func = lambda x: x['eid']
- for eid in self.types[type]:
- index.setdefault(func(self.eids[eid]), []).append(eid)
- assert index, "new index '%s' cannot be empty" % name
- self.indexes[name] = index
-
- def build_rqlindex(self, name, type, key, rql, rql_params=False, func=None):
- """build an index by rql query
-
- rql should return eid in first column
- ctl.store.build_index('index_name', 'users', 'login', 'Any U WHERE U is CWUser')
- """
- rset = self.rql(rql, rql_params or {})
- for entity in rset.entities():
- getattr(entity, key) # autopopulate entity with key attribute
- self.eids[entity.eid] = dict(entity)
- if entity.eid not in self.types.setdefault(type, []):
- self.types[type].append(entity.eid)
- assert self.types[type], "new index type '%s' cannot be empty (0 record found)" % type
-
- # Build index with specified key
- func = lambda x: x[key]
- self.build_index(name, type, func)
-
- def fetch(self, name, key, unique=False, decorator=None):
- """
- decorator is a callable method or an iterator of callable methods (usually a lambda function)
- decorator=lambda x: x[:1] (first value is returned)
-
- We can use validation check function available in _entity
- """
- eids = self.indexes[name].get(key, [])
- if decorator is not None:
- if not hasattr(decorator, '__iter__'):
- decorator = (decorator,)
- for f in decorator:
- eids = f(eids)
- if unique:
- assert len(eids) == 1, u'expected a single one value for key "%s" in index "%s". Got %i' % (key, name, len(eids))
- eids = eids[0] # FIXME maybe it's better to keep an iterator here ?
- return eids
-
- def find(self, type, key, value):
- for idx in self.types[type]:
- item = self.items[idx]
- if item[key] == value:
- yield item
-
- def rql(self, *args):
- if self._rql is not None:
- return self._rql(*args)
-
- def checkpoint(self):
- pass
-
- @property
- def nb_inserted_entities(self):
- return len(self.eids)
- @property
- def nb_inserted_types(self):
- return len(self.types)
- @property
- def nb_inserted_relations(self):
- return len(self.relations)
-
- @deprecated('[3.6] get_many() deprecated. Use fetch() instead')
- def get_many(self, name, key):
- return self.fetch(name, key, unique=False)
-
- @deprecated('[3.6] get_one() deprecated. Use fetch(..., unique=True) instead')
- def get_one(self, name, key):
- return self.fetch(name, key, unique=True)
-
-
-class RQLObjectStore(ObjectStore):
- """ObjectStore that works with an actual RQL repository (production mode)"""
- _rql = None # bw compat
-
- def __init__(self, session=None, checkpoint=None):
- ObjectStore.__init__(self)
- if session is not None:
- if not hasattr(session, 'set_pool'):
- # connection
- cnx = session
- session = session.request()
- session.set_pool = lambda : None
- checkpoint = checkpoint or cnx.commit
- else:
- session.set_pool()
- self.session = session
- self._checkpoint = checkpoint or session.commit
- elif checkpoint is not None:
- self._checkpoint = checkpoint
- # XXX .session
-
- def checkpoint(self):
- self._checkpoint()
- self.session.set_pool()
-
- def rql(self, *args):
- if self._rql is not None:
- return self._rql(*args)
- return self.session.execute(*args)
-
- def create_entity(self, *args, **kwargs):
- entity = self.session.create_entity(*args, **kwargs)
- self.eids[entity.eid] = entity
- self.types.setdefault(args[0], []).append(entity.eid)
- return entity
-
- def _put(self, type, item):
- query = ('INSERT %s X: ' % type) + ', '.join('X %s %%(%s)s' % (k, k)
- for k in item)
- return self.rql(query, item)[0][0]
-
- def relate(self, eid_from, rtype, eid_to, inlined=False):
- # if reverse relation is found, eids are exchanged
- eid_from, rtype, eid_to = super(RQLObjectStore, self).relate(
- eid_from, rtype, eid_to)
- self.rql('SET X %s Y WHERE X eid %%(x)s, Y eid %%(y)s' % rtype,
- {'x': int(eid_from), 'y': int(eid_to)}, ('x', 'y'))
-
-
-# the import controller ########################################################
-
-class CWImportController(object):
- """Controller of the data import process.
-
- >>> ctl = CWImportController(store)
- >>> ctl.generators = list_of_data_generators
- >>> ctl.data = dict_of_data_tables
- >>> ctl.run()
- """
-
- def __init__(self, store, askerror=0, catcherrors=None, tell=tell,
- commitevery=50):
- self.store = store
- self.generators = None
- self.data = {}
- self.errors = None
- self.askerror = askerror
- if catcherrors is None:
- catcherrors = askerror
- self.catcherrors = catcherrors
- self.commitevery = commitevery # set to None to do a single commit
- self._tell = tell
-
- def check(self, type, key, value):
- self._checks.setdefault(type, {}).setdefault(key, []).append(value)
-
- def check_map(self, entity, key, map, default):
- try:
- entity[key] = map[entity[key]]
- except KeyError:
- self.check(key, entity[key], None)
- entity[key] = default
-
- def record_error(self, key, msg=None, type=None, value=None, tb=None):
- tmp = StringIO()
- if type is None:
- traceback.print_exc(file=tmp)
- else:
- traceback.print_exception(type, value, tb, file=tmp)
- print tmp.getvalue()
- # use a list to avoid counting a <nb lines> errors instead of one
- errorlog = self.errors.setdefault(key, [])
- if msg is None:
- errorlog.append(tmp.getvalue().splitlines())
- else:
- errorlog.append( (msg, tmp.getvalue().splitlines()) )
-
- def run(self):
- self.errors = {}
- for func, checks in self.generators:
- self._checks = {}
- func_name = func.__name__[4:] # XXX
- self.tell("Import '%s'..." % func_name)
- try:
- func(self)
- except:
- if self.catcherrors:
- self.record_error(func_name, 'While calling %s' % func.__name__)
- else:
- raise
- for key, func, title, help in checks:
- buckets = self._checks.get(key)
- if buckets:
- err = func(buckets)
- if err:
- self.errors[title] = (help, err)
- self.store.checkpoint()
- nberrors = sum(len(err[1]) for err in self.errors.values())
- self.tell('\nImport completed: %i entities, %i types, %i relations and %i errors'
- % (self.store.nb_inserted_entities,
- self.store.nb_inserted_types,
- self.store.nb_inserted_relations,
- nberrors))
- if self.errors:
- if self.askerror == 2 or (self.askerror and confirm('Display errors ?')):
- from pprint import pformat
- for errkey, error in self.errors.items():
- self.tell("\n%s (%s): %d\n" % (error[0], errkey, len(error[1])))
- self.tell(pformat(sorted(error[1])))
-
- def get_data(self, key):
- return self.data.get(key)
-
- def index(self, name, key, value, unique=False):
- """create a new index
-
- If unique is set to True, only first occurence will be kept not the following ones
- """
- if unique:
- try:
- if value in self.store.indexes[name][key]:
- return
- except KeyError:
- # we're sure that one is the first occurence; so continue...
- pass
- self.store.indexes.setdefault(name, {}).setdefault(key, []).append(value)
-
- def tell(self, msg):
- self._tell(msg)
-
- def iter_and_commit(self, datakey):
- """iter rows, triggering commit every self.commitevery iterations"""
- return commit_every(self.commitevery, self.store, self.get_data(datakey))
-
-
-
-from datetime import datetime
-from cubicweb.schema import META_RTYPES, VIRTUAL_RTYPES
-
-
-class NoHookRQLObjectStore(RQLObjectStore):
- """ObjectStore that works with an actual RQL repository (production mode)"""
- _rql = None # bw compat
-
- def __init__(self, session, metagen=None, baseurl=None):
- super(NoHookRQLObjectStore, self).__init__(session)
- self.source = session.repo.system_source
- self.rschema = session.repo.schema.rschema
- self.add_relation = self.source.add_relation
- if metagen is None:
- metagen = MetaGenerator(session, baseurl)
- self.metagen = metagen
- self._nb_inserted_entities = 0
- self._nb_inserted_types = 0
- self._nb_inserted_relations = 0
- self.rql = session.unsafe_execute
-
- def create_entity(self, etype, **kwargs):
- for k, v in kwargs.iteritems():
- kwargs[k] = getattr(v, 'eid', v)
- entity, rels = self.metagen.base_etype_dicts(etype)
- entity = copy(entity)
- entity._related_cache = {}
- self.metagen.init_entity(entity)
- entity.update(kwargs)
- session = self.session
- self.source.add_entity(session, entity)
- self.source.add_info(session, entity, self.source, complete=False)
- for rtype, targeteids in rels.iteritems():
- # targeteids may be a single eid or a list of eids
- inlined = self.rschema(rtype).inlined
- try:
- for targeteid in targeteids:
- self.add_relation(session, entity.eid, rtype, targeteid,
- inlined)
- except TypeError:
- self.add_relation(session, entity.eid, rtype, targeteids,
- inlined)
- self._nb_inserted_entities += 1
- return entity
-
- def relate(self, eid_from, rtype, eid_to):
- assert not rtype.startswith('reverse_')
- self.add_relation(self.session, eid_from, rtype, eid_to,
- self.rschema(rtype).inlined)
- self._nb_inserted_relations += 1
-
- @property
- def nb_inserted_entities(self):
- return self._nb_inserted_entities
- @property
- def nb_inserted_types(self):
- return self._nb_inserted_types
- @property
- def nb_inserted_relations(self):
- return self._nb_inserted_relations
-
- def _put(self, type, item):
- raise RuntimeError('use create entity')
-
-
-class MetaGenerator(object):
- def __init__(self, session, baseurl=None):
- self.session = session
- self.source = session.repo.system_source
- self.time = datetime.now()
- if baseurl is None:
- config = session.vreg.config
- baseurl = config['base-url'] or config.default_base_url()
- if not baseurl[-1] == '/':
- baseurl += '/'
- self.baseurl = baseurl
- # attributes/relations shared by all entities of the same type
- self.etype_attrs = []
- self.etype_rels = []
- # attributes/relations specific to each entity
- self.entity_attrs = ['eid', 'cwuri']
- #self.entity_rels = [] XXX not handled (YAGNI?)
- schema = session.vreg.schema
- rschema = schema.rschema
- for rtype in META_RTYPES:
- if rtype in ('eid', 'cwuri') or rtype in VIRTUAL_RTYPES:
- continue
- if rschema(rtype).final:
- self.etype_attrs.append(rtype)
- else:
- self.etype_rels.append(rtype)
- if not schema._eid_index:
- # test schema loaded from the fs
- self.gen_is = self.test_gen_is
- self.gen_is_instance_of = self.test_gen_is_instanceof
-
- @cached
- def base_etype_dicts(self, etype):
- entity = self.session.vreg['etypes'].etype_class(etype)(self.session)
- # entity are "surface" copied, avoid shared dict between copies
- del entity.cw_extra_kwargs
- for attr in self.etype_attrs:
- entity[attr] = self.generate(entity, attr)
- rels = {}
- for rel in self.etype_rels:
- rels[rel] = self.generate(entity, rel)
- return entity, rels
-
- def init_entity(self, entity):
- for attr in self.entity_attrs:
- entity[attr] = self.generate(entity, attr)
- entity.eid = entity['eid']
-
- def generate(self, entity, rtype):
- return getattr(self, 'gen_%s' % rtype)(entity)
-
- def gen_eid(self, entity):
- return self.source.create_eid(self.session)
-
- def gen_cwuri(self, entity):
- return u'%seid/%s' % (self.baseurl, entity['eid'])
-
- def gen_creation_date(self, entity):
- return self.time
- def gen_modification_date(self, entity):
- return self.time
-
- def gen_is(self, entity):
- return entity.e_schema.eid
- def gen_is_instance_of(self, entity):
- eids = []
- for etype in entity.e_schema.ancestors() + [entity.e_schema]:
- eids.append(entity.e_schema.eid)
- return eids
-
- def gen_created_by(self, entity):
- return self.session.user.eid
- def gen_owned_by(self, entity):
- return self.session.user.eid
-
- # implementations of gen_is / gen_is_instance_of to use during test where
- # schema has been loaded from the fs (hence entity type schema eids are not
- # known)
- def test_gen_is(self, entity):
- from cubicweb.hooks.metadata import eschema_eid
- return eschema_eid(self.session, entity.e_schema)
- def test_gen_is_instanceof(self, entity):
- from cubicweb.hooks.metadata import eschema_eid
- eids = []
- for eschema in entity.e_schema.ancestors() + [entity.e_schema]:
- eids.append(eschema_eid(self.session, eschema))
- return eids
-
-
-################################################################################
-
-utf8csvreader = deprecated('[3.6] use ucsvreader instead')(ucsvreader)
-
-@deprecated('[3.6] use required')
-def nonempty(value):
- return required(value)
-
-@deprecated("[3.6] use call_check_method('isdigit')")
-def alldigits(txt):
- if txt.isdigit():
- return txt
- else:
- return u''
-
-@deprecated("[3.7] too specific, will move away, copy me")
-def capitalize_if_unicase(txt):
- if txt.isupper() or txt.islower():
- return txt.capitalize()
- return txt
-
-@deprecated("[3.7] too specific, will move away, copy me")
-def yesno(value):
- """simple heuristic that returns boolean value
-
- >>> yesno("Yes")
- True
- >>> yesno("oui")
- True
- >>> yesno("1")
- True
- >>> yesno("11")
- True
- >>> yesno("")
- False
- >>> yesno("Non")
- False
- >>> yesno("blablabla")
- False
- """
- if value:
- return value.lower()[0] in 'yo1'
- return False
-
-@deprecated("[3.7] use call_check_method('isalpha')")
-def isalpha(value):
- if value.isalpha():
- return value
- raise ValueError("not all characters in the string alphabetic")
-
-@deprecated("[3.7] use call_transform_method('upper')")
-def uppercase(txt):
- return txt.upper()
-
-@deprecated("[3.7] use call_transform_method('lower')")
-def lowercase(txt):
- return txt.lower()
-
-@deprecated("[3.7] use call_transform_method('replace', ' ', '')")
-def no_space(txt):
- return txt.replace(' ','')
-
-@deprecated("[3.7] use call_transform_method('replace', u'\xa0', '')")
-def no_uspace(txt):
- return txt.replace(u'\xa0','')
-
-@deprecated("[3.7] use call_transform_method('replace', '-', '')")
-def no_dash(txt):
- return txt.replace('-','')
-
-@deprecated("[3.7] use call_transform_method('strip')")
-def strip(txt):
- return txt.strip()
-
-@deprecated("[3.7] use call_transform_method('replace', ',', '.'), float")
-def decimal(value):
- return comma_float(value)
-
-@deprecated('[3.7] use int builtin')
-def integer(value):
- return int(value)
+# pylint: disable-msg=W0614,W0401
+from warnings import warn
+warn('moved to cubicweb.dataimport', DeprecationWarning, stacklevel=2)
+from cubicweb.dataimport import *
--- a/devtools/fake.py Wed Mar 24 08:40:00 2010 +0100
+++ b/devtools/fake.py Wed Mar 24 10:23:57 2010 +0100
@@ -7,9 +7,7 @@
"""
__docformat__ = "restructuredtext en"
-from logilab.common.adbh import get_adv_func_helper
-
-from indexer import get_indexer
+from logilab.database import get_db_helper
from cubicweb.req import RequestSessionBase
from cubicweb.cwvreg import CubicWebVRegistry
@@ -118,17 +116,6 @@
def validate_cache(self):
pass
- # session compatibility (in some test are using this class to test server
- # side views...)
- def actual_session(self):
- """return the original parent session if any, else self"""
- return self
-
- def unsafe_execute(self, *args, **kwargs):
- """return the original parent session if any, else self"""
- kwargs.pop('propagate', None)
- return self.execute(*args, **kwargs)
-
class FakeUser(object):
login = 'toto'
@@ -138,18 +125,19 @@
class FakeSession(RequestSessionBase):
+ read_security = write_security = True
+ set_read_security = set_write_security = lambda *args, **kwargs: None
+
def __init__(self, repo=None, user=None):
self.repo = repo
self.vreg = getattr(self.repo, 'vreg', CubicWebVRegistry(FakeConfig(), initlog=False))
self.pool = FakePool()
self.user = user or FakeUser()
self.is_internal_session = False
- self.is_super_session = self.user.eid == -1
self.transaction_data = {}
- def execute(self, *args):
+ def execute(self, *args, **kwargs):
pass
- unsafe_execute = execute
def commit(self, *args):
self.transaction_data.clear()
@@ -158,11 +146,6 @@
def system_sql(self, sql, args=None):
pass
- def decorate_rset(self, rset, propagate=False):
- rset.vreg = self.vreg
- rset.req = self
- return rset
-
def set_entity_cache(self, entity):
pass
@@ -200,12 +183,7 @@
class FakeSource(object):
- dbhelper = get_adv_func_helper('sqlite')
- indexer = get_indexer('sqlite', 'UTF8')
- dbhelper.fti_uid_attr = indexer.uid_attr
- dbhelper.fti_table = indexer.table
- dbhelper.fti_restriction_sql = indexer.restriction_sql
- dbhelper.fti_need_distinct_query = indexer.need_distinct
+ dbhelper = get_db_helper('sqlite')
def __init__(self, uri):
self.uri = uri
--- a/devtools/repotest.py Wed Mar 24 08:40:00 2010 +0100
+++ b/devtools/repotest.py Wed Mar 24 10:23:57 2010 +0100
@@ -95,6 +95,31 @@
def __iter__(self):
return iter(sorted(self.origdict, key=self.sortkey))
+def schema_eids_idx(schema):
+ """return a dictionary mapping schema types to their eids so we can reread
+ it from the fs instead of the db (too costly) between tests
+ """
+ schema_eids = {}
+ for x in schema.entities():
+ schema_eids[x] = x.eid
+ for x in schema.relations():
+ schema_eids[x] = x.eid
+ for rdef in x.rdefs.itervalues():
+ schema_eids[(rdef.subject, rdef.rtype, rdef.object)] = rdef.eid
+ return schema_eids
+
+def restore_schema_eids_idx(schema, schema_eids):
+ """rebuild schema eid index"""
+ for x in schema.entities():
+ x.eid = schema_eids[x]
+ schema._eid_index[x.eid] = x
+ for x in schema.relations():
+ x.eid = schema_eids[x]
+ schema._eid_index[x.eid] = x
+ for rdef in x.rdefs.itervalues():
+ rdef.eid = schema_eids[(rdef.subject, rdef.rtype, rdef.object)]
+ schema._eid_index[rdef.eid] = rdef
+
from logilab.common.testlib import TestCase
from rql import RQLHelper
@@ -150,17 +175,23 @@
self.pool = self.session.set_pool()
self.maxeid = self.get_max_eid()
do_monkey_patch()
+ self._dumb_sessions = []
def get_max_eid(self):
- return self.session.unsafe_execute('Any MAX(X)')[0][0]
+ return self.session.execute('Any MAX(X)')[0][0]
def cleanup(self):
- self.session.unsafe_execute('DELETE Any X WHERE X eid > %s' % self.maxeid)
+ self.session.set_pool()
+ self.session.execute('DELETE Any X WHERE X eid > %s' % self.maxeid)
def tearDown(self):
undo_monkey_patch()
self.session.rollback()
self.cleanup()
self.commit()
+ # properly close dumb sessions
+ for session in self._dumb_sessions:
+ session.rollback()
+ session.close()
self.repo._free_pool(self.pool)
assert self.session.user.eid != -1
@@ -198,6 +229,8 @@
u._groups = set(groups)
s = Session(u, self.repo)
s._threaddata.pool = self.pool
+ # register session to ensure it gets closed
+ self._dumb_sessions.append(s)
return s
def execute(self, rql, args=None, eid_key=None, build_descr=True):
@@ -223,6 +256,7 @@
self.sources = self.o._repo.sources
self.system = self.sources[-1]
do_monkey_patch()
+ self._dumb_sessions = [] # by hi-jacked parent setup
def add_source(self, sourcecls, uri):
self.sources.append(sourcecls(self.repo, self.o.schema,
@@ -237,6 +271,9 @@
del self.repo.sources_by_uri[source.uri]
self.newsources -= 1
undo_monkey_patch()
+ for session in self._dumb_sessions:
+ session._threaddata.pool = None
+ session.close()
def _prepare_plan(self, rql, kwargs=None):
rqlst = self.o.parse(rql, annotate=True)
--- a/devtools/test/unittest_testlib.py Wed Mar 24 08:40:00 2010 +0100
+++ b/devtools/test/unittest_testlib.py Wed Mar 24 10:23:57 2010 +0100
@@ -9,12 +9,12 @@
from cStringIO import StringIO
from unittest import TestSuite
-
-from logilab.common.testlib import (TestCase, unittest_main,
+from logilab.common.testlib import (TestCase, unittest_main,
SkipAwareTextTestRunner)
from cubicweb.devtools import htmlparser
from cubicweb.devtools.testlib import CubicWebTC
+from cubicweb.pytestconf import clean_repo_test_cls
class WebTestTC(TestCase):
@@ -37,7 +37,7 @@
self.assertEquals(result.testsRun, 2)
self.assertEquals(len(result.errors), 0)
self.assertEquals(len(result.failures), 1)
-
+ clean_repo_test_cls(MyWebTest)
HTML_PAGE = u"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
--- a/devtools/testlib.py Wed Mar 24 08:40:00 2010 +0100
+++ b/devtools/testlib.py Wed Mar 24 10:23:57 2010 +0100
@@ -207,6 +207,7 @@
def _build_repo(cls):
cls.repo, cls.cnx = devtools.init_test_database(config=cls.config)
cls.init_config(cls.config)
+ cls.repo.hm.call_hooks('server_startup', repo=cls.repo)
cls.vreg = cls.repo.vreg
cls._orig_cnx = cls.cnx
cls.config.repository = lambda x=None: cls.repo
@@ -228,7 +229,9 @@
@property
def session(self):
"""return current server side session (using default manager account)"""
- return self.repo._sessions[self.cnx.sessionid]
+ session = self.repo._sessions[self.cnx.sessionid]
+ session.set_pool()
+ return session
@property
def adminsession(self):
@@ -319,7 +322,10 @@
@nocoverage
def commit(self):
- self.cnx.commit()
+ try:
+ return self.cnx.commit()
+ finally:
+ self.session.set_pool() # ensure pool still set after commit
@nocoverage
def rollback(self):
@@ -327,6 +333,8 @@
self.cnx.rollback()
except ProgrammingError:
pass
+ finally:
+ self.session.set_pool() # ensure pool still set after rollback
# # server side db api #######################################################
--- a/doc/book/en/development/devweb/js.rst Wed Mar 24 08:40:00 2010 +0100
+++ b/doc/book/en/development/devweb/js.rst Wed Mar 24 10:23:57 2010 +0100
@@ -40,6 +40,21 @@
snippet inline in the html headers. This is quite useful for setting
up early jQuery(document).ready(...) initialisations.
+CubicWeb javascript events
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+* ``server-response``: this event is triggered on HTTP responses (both
+ standard and ajax). The two following extra parameters are passed
+ to callbacks :
+
+ - ``ajax``: a boolean that says if the response was issued by an
+ ajax request
+
+ - ``node``: the DOM node returned by the server in case of an
+ ajax request, otherwise the document itself for standard HTTP
+ requests.
+
+
Overview of what's available
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
--- a/doc/tools/generate_modules.py Wed Mar 24 08:40:00 2010 +0100
+++ b/doc/tools/generate_modules.py Wed Mar 24 10:23:57 2010 +0100
@@ -16,7 +16,7 @@
cw_gen = ModuleGenerator('cubicweb', '../..')
cw_gen.generate("../book/en/annexes/api_cubicweb.rst",
EXCLUDE_DIRS + ('cwdesklets', 'misc', 'skel', 'skeleton'))
- for modname in ('indexer', 'logilab', 'rql', 'yams'):
+ for modname in ('logilab', 'rql', 'yams'):
cw_gen = ModuleGenerator(modname, '../../../' + modname)
cw_gen.generate("../book/en/annexes/api_%s.rst" % modname,
EXCLUDE_DIRS + ('tools',))
--- a/entities/authobjs.py Wed Mar 24 08:40:00 2010 +0100
+++ b/entities/authobjs.py Wed Mar 24 10:23:57 2010 +0100
@@ -93,15 +93,10 @@
return self.groups == frozenset(('guests', ))
def owns(self, eid):
- if hasattr(self._cw, 'unsafe_execute'):
- # use unsafe_execute on the repository side, in case
- # session's user doesn't have access to CWUser
- execute = self._cw.unsafe_execute
- else:
- execute = self._cw.execute
try:
- return execute('Any X WHERE X eid %(x)s, X owned_by U, U eid %(u)s',
- {'x': eid, 'u': self.eid}, 'x')
+ return self._cw.execute(
+ 'Any X WHERE X eid %(x)s, X owned_by U, U eid %(u)s',
+ {'x': eid, 'u': self.eid}, 'x')
except Unauthorized:
return False
owns = cached(owns, keyarg=1)
--- a/entities/test/unittest_wfobjs.py Wed Mar 24 08:40:00 2010 +0100
+++ b/entities/test/unittest_wfobjs.py Wed Mar 24 10:23:57 2010 +0100
@@ -1,5 +1,7 @@
+from __future__ import with_statement
from cubicweb.devtools.testlib import CubicWebTC
from cubicweb import ValidationError
+from cubicweb.server.session import security_enabled
def add_wf(self, etype, name=None, default=False):
if name is None:
@@ -126,10 +128,11 @@
wf = add_wf(self, 'CWUser')
s = wf.add_state(u'foo', initial=True)
self.commit()
- ex = self.assertRaises(ValidationError, self.session.unsafe_execute,
+ with security_enabled(self.session, write=False):
+ ex = self.assertRaises(ValidationError, self.session.execute,
'SET X in_state S WHERE X eid %(x)s, S eid %(s)s',
{'x': self.user().eid, 's': s.eid}, 'x')
- self.assertEquals(ex.errors, {'in_state': "state doesn't belong to entity's workflow. "
+ self.assertEquals(ex.errors, {'in_state': "state doesn't belong to entity's workflow. "
"You may want to set a custom workflow for this entity first."})
def test_fire_transition(self):
@@ -505,7 +508,7 @@
{'wf': self.wf.eid})
self.commit()
- # XXX currently, we've to rely on hooks to set initial state, or to use unsafe_execute
+ # XXX currently, we have to rely on hooks to set initial state, or to use execute
# def test_initial_state(self):
# cnx = self.login('stduser')
# cu = cnx.cursor()
--- a/entities/wfobjs.py Wed Mar 24 08:40:00 2010 +0100
+++ b/entities/wfobjs.py Wed Mar 24 10:23:57 2010 +0100
@@ -158,7 +158,7 @@
todelstate = self.state_by_name(todelstate)
if not hasattr(replacement, 'eid'):
replacement = self.state_by_name(replacement)
- execute = self._cw.unsafe_execute
+ execute = self._cw.execute
execute('SET X in_state S WHERE S eid %(s)s', {'s': todelstate.eid}, 's')
execute('SET X from_state NS WHERE X to_state OS, OS eid %(os)s, NS eid %(ns)s',
{'os': todelstate.eid, 'ns': replacement.eid}, 's')
--- a/entity.py Wed Mar 24 08:40:00 2010 +0100
+++ b/entity.py Wed Mar 24 10:23:57 2010 +0100
@@ -20,6 +20,7 @@
from cubicweb.rset import ResultSet
from cubicweb.selectors import yes
from cubicweb.appobject import AppObject
+from cubicweb.req import _check_cw_unsafe
from cubicweb.schema import RQLVocabularyConstraint, RQLConstraint
from cubicweb.rqlrewrite import RQLRewriter
@@ -59,7 +60,7 @@
:cvar skip_copy_for: a list of relations that should be skipped when copying
this kind of entity. Note that some relations such
as composite relations or relations that have '?1' as object
- cardinality are always skipped.
+ cardinality are always skipped.
"""
__registry__ = 'etypes'
__select__ = yes()
@@ -224,6 +225,47 @@
def __cmp__(self, other):
raise NotImplementedError('comparison not implemented for %s' % self.__class__)
+ def __getitem__(self, key):
+ if key == 'eid':
+ warn('[3.7] entity["eid"] is deprecated, use entity.eid instead',
+ DeprecationWarning, stacklevel=2)
+ return self.eid
+ return super(Entity, self).__getitem__(key)
+
+ def __setitem__(self, attr, value):
+ """override __setitem__ to update self.edited_attributes.
+
+ Typically, a before_update_hook could do::
+
+ entity['generated_attr'] = generated_value
+
+ and this way, edited_attributes will be updated accordingly
+ """
+ if attr == 'eid':
+ warn('[3.7] entity["eid"] = value is deprecated, use entity.eid = value instead',
+ DeprecationWarning, stacklevel=2)
+ self.eid = value
+ else:
+ super(Entity, self).__setitem__(attr, value)
+ if hasattr(self, 'edited_attributes'):
+ self.edited_attributes.add(attr)
+ self.skip_security_attributes.add(attr)
+
+ def setdefault(self, attr, default):
+ """override setdefault to update self.edited_attributes"""
+ super(Entity, self).setdefault(attr, default)
+ if hasattr(self, 'edited_attributes'):
+ self.edited_attributes.add(attr)
+ self.skip_security_attributes.add(attr)
+
+ def rql_set_value(self, attr, value):
+ """call by rql execution plan when some attribute is modified
+
+ don't use dict api in such case since we don't want attribute to be
+ added to skip_security_attributes.
+ """
+ super(Entity, self).__setitem__(attr, value)
+
def pre_add_hook(self):
"""hook called by the repository before doing anything to add the entity
(before_add entity hooks have not been called yet). This give the
@@ -234,7 +276,7 @@
return self
def set_eid(self, eid):
- self.eid = self['eid'] = eid
+ self.eid = eid
def has_eid(self):
"""return True if the entity has an attributed eid (False
@@ -440,7 +482,8 @@
"""returns a resultset containing `self` information"""
rset = ResultSet([(self.eid,)], 'Any X WHERE X eid %(x)s',
{'x': self.eid}, [(self.__regid__,)])
- return self._cw.decorate_rset(rset)
+ rset.req = self._cw
+ return rset
def to_complete_relations(self):
"""by default complete final relations to when calling .complete()"""
@@ -459,7 +502,7 @@
all(matching_groups(e.get_groups('read')) for e in targets):
yield rschema, 'subject'
- def to_complete_attributes(self, skip_bytes=True):
+ def to_complete_attributes(self, skip_bytes=True, skip_pwd=True):
for rschema, attrschema in self.e_schema.attribute_definitions():
# skip binary data by default
if skip_bytes and attrschema.type == 'Bytes':
@@ -470,13 +513,13 @@
# password retreival is blocked at the repository server level
rdef = rschema.rdef(self.e_schema, attrschema)
if not self._cw.user.matching_groups(rdef.get_groups('read')) \
- or attrschema.type == 'Password':
+ or (attrschema.type == 'Password' and skip_pwd):
self[attr] = None
continue
yield attr
_cw_completed = False
- def complete(self, attributes=None, skip_bytes=True):
+ def complete(self, attributes=None, skip_bytes=True, skip_pwd=True):
"""complete this entity by adding missing attributes (i.e. query the
repository to fill the entity)
@@ -493,7 +536,7 @@
V = varmaker.next()
rql = ['WHERE %s eid %%(x)s' % V]
selected = []
- for attr in (attributes or self.to_complete_attributes(skip_bytes)):
+ for attr in (attributes or self.to_complete_attributes(skip_bytes, skip_pwd)):
# if attribute already in entity, nothing to do
if self.has_key(attr):
continue
@@ -531,8 +574,8 @@
# if some outer join are included to fetch inlined relations
rql = 'Any %s,%s %s' % (V, ','.join(var for attr, var in selected),
','.join(rql))
- execute = getattr(self._cw, 'unsafe_execute', self._cw.execute)
- rset = execute(rql, {'x': self.eid}, 'x', build_descr=False)[0]
+ rset = self._cw.execute(rql, {'x': self.eid}, 'x',
+ build_descr=False)[0]
# handle attributes
for i in xrange(1, lastattr):
self[str(selected[i-1][0])] = rset[i]
@@ -542,7 +585,7 @@
value = rset[i]
if value is None:
rrset = ResultSet([], rql, {'x': self.eid})
- self._cw.decorate_rset(rrset)
+ rrset.req = self._cw
else:
rrset = self._cw.eid_rset(value)
self.set_related_cache(rtype, role, rrset)
@@ -560,11 +603,8 @@
if not self.is_saved():
return None
rql = "Any A WHERE X eid %%(x)s, X %s A" % name
- # XXX should we really use unsafe_execute here? I think so (syt),
- # see #344874
- execute = getattr(self._cw, 'unsafe_execute', self._cw.execute)
try:
- rset = execute(rql, {'x': self.eid}, 'x')
+ rset = self._cw.execute(rql, {'x': self.eid}, 'x')
except Unauthorized:
self[name] = value = None
else:
@@ -595,10 +635,7 @@
pass
assert self.has_eid()
rql = self.related_rql(rtype, role)
- # XXX should we really use unsafe_execute here? I think so (syt),
- # see #344874
- execute = getattr(self._cw, 'unsafe_execute', self._cw.execute)
- rset = execute(rql, {'x': self.eid}, 'x')
+ rset = self._cw.execute(rql, {'x': self.eid}, 'x')
self.set_related_cache(rtype, role, rset)
return self.related(rtype, role, limit, entities)
@@ -785,10 +822,6 @@
haseid = 'eid' in self
self._cw_completed = False
self.clear()
- # set eid if it was in, else we may get nasty error while editing this
- # entity if it's bound to a repo session
- if haseid:
- self['eid'] = self.eid
# clear relations cache
for rschema, _, role in self.e_schema.relation_definitions():
self.clear_related_cache(rschema.type, role)
@@ -800,8 +833,9 @@
# raw edition utilities ###################################################
- def set_attributes(self, _cw_unsafe=False, **kwargs):
+ def set_attributes(self, **kwargs):
assert kwargs
+ _check_cw_unsafe(kwargs)
relations = []
for key in kwargs:
relations.append('X %s %%(%s)s' % (key, key))
@@ -809,25 +843,18 @@
self.update(kwargs)
# and now update the database
kwargs['x'] = self.eid
- if _cw_unsafe:
- self._cw.unsafe_execute(
- 'SET %s WHERE X eid %%(x)s' % ','.join(relations), kwargs, 'x')
- else:
- self._cw.execute('SET %s WHERE X eid %%(x)s' % ','.join(relations),
- kwargs, 'x')
+ self._cw.execute('SET %s WHERE X eid %%(x)s' % ','.join(relations),
+ kwargs, 'x')
- def set_relations(self, _cw_unsafe=False, **kwargs):
+ def set_relations(self, **kwargs):
"""add relations to the given object. To set a relation where this entity
is the object of the relation, use 'reverse_'<relation> as argument name.
Values may be an entity, a list of entity, or None (meaning that all
relations of the given type from or to this object should be deleted).
"""
- if _cw_unsafe:
- execute = self._cw.unsafe_execute
- else:
- execute = self._cw.execute
# XXX update cache
+ _check_cw_unsafe(kwargs)
for attr, values in kwargs.iteritems():
if attr.startswith('reverse_'):
restr = 'Y %s X' % attr[len('reverse_'):]
@@ -839,24 +866,30 @@
continue
if not isinstance(values, (tuple, list, set, frozenset)):
values = (values,)
- execute('SET %s WHERE X eid %%(x)s, Y eid IN (%s)' % (
+ self._cw.execute('SET %s WHERE X eid %%(x)s, Y eid IN (%s)' % (
restr, ','.join(str(r.eid) for r in values)),
- {'x': self.eid}, 'x')
+ {'x': self.eid}, 'x')
- def delete(self):
+ def delete(self, **kwargs):
assert self.has_eid(), self.eid
self._cw.execute('DELETE %s X WHERE X eid %%(x)s' % self.e_schema,
- {'x': self.eid})
+ {'x': self.eid}, **kwargs)
# server side utilities ###################################################
+ @property
+ def skip_security_attributes(self):
+ try:
+ return self._skip_security_attributes
+ except:
+ self._skip_security_attributes = set()
+ return self._skip_security_attributes
+
def set_defaults(self):
"""set default values according to the schema"""
- self._default_set = set()
for attr, value in self.e_schema.defaults():
if not self.has_key(attr):
self[str(attr)] = value
- self._default_set.add(attr)
def check(self, creation=False):
"""check this entity against its schema. Only final relation
@@ -868,7 +901,15 @@
_ = unicode
else:
_ = self._cw._
- self.e_schema.check(self, creation=creation, _=_)
+ if creation or not hasattr(self, 'edited_attributes'):
+ # on creations, we want to check all relations, especially
+ # required attributes
+ relations = None
+ else:
+ relations = [self._cw.vreg.schema.rschema(rtype)
+ for rtype in self.edited_attributes]
+ self.e_schema.check(self, creation=creation, _=_,
+ relations=relations)
def fti_containers(self, _done=None):
if _done is None:
@@ -894,12 +935,12 @@
"""used by the full text indexer to get words to index
this method should only be used on the repository side since it depends
- on the indexer package
+ on the logilab.database package
:rtype: list
:return: the list of indexable word of this entity
"""
- from indexer.query_objects import tokenize
+ from logilab.database.fti import tokenize
# take care to cases where we're modyfying the schema
pending = self._cw.transaction_data.setdefault('pendingrdefs', set())
words = []
@@ -942,8 +983,6 @@
def __set__(self, eobj, value):
eobj[self._attrname] = value
- if hasattr(eobj, 'edited_attributes'):
- eobj.edited_attributes.add(self._attrname)
class Relation(object):
"""descriptor that controls schema relation access"""
--- a/etwist/server.py Wed Mar 24 08:40:00 2010 +0100
+++ b/etwist/server.py Wed Mar 24 10:23:57 2010 +0100
@@ -11,7 +11,6 @@
import os
import select
import errno
-import hotshot
from time import mktime
from datetime import date, timedelta
from urlparse import urlsplit, urlunsplit
@@ -113,8 +112,6 @@
if config.repo_method == 'inmemory':
reactor.addSystemEventTrigger('before', 'shutdown',
self.shutdown_event)
- # monkey patch start_looping_task to get proper reactor integration
- #self.appli.repo.__class__.start_looping_tasks = start_looping_tasks
if config.pyro_enabled():
# if pyro is enabled, we have to register to the pyro name
# server, create a pyro daemon, and create a task to handle pyro
@@ -337,32 +334,131 @@
set_log_methods(CubicWebRootResource, getLogger('cubicweb.twisted'))
+listiterator = type(iter([]))
-def _gc_debug():
+def _gc_debug(all=True):
import gc
from pprint import pprint
from cubicweb.appobject import AppObject
gc.collect()
count = 0
acount = 0
+ fcount = 0
+ rcount = 0
+ ccount = 0
+ scount = 0
ocount = {}
+ from rql.stmts import Union
+ from cubicweb.schema import CubicWebSchema
+ from cubicweb.rset import ResultSet
+ from cubicweb.dbapi import Connection, Cursor
+ from cubicweb.req import RequestSessionBase
+ from cubicweb.server.repository import Repository
+ from cubicweb.server.sources.native import NativeSQLSource
+ from cubicweb.server.session import Session
+ from cubicweb.devtools.testlib import CubicWebTC
+ from logilab.common.testlib import TestSuite
+ from optparse import Values
+ import types, weakref
for obj in gc.get_objects():
- if isinstance(obj, CubicWebTwistedRequestAdapter):
+ if isinstance(obj, RequestSessionBase):
count += 1
+ if isinstance(obj, Session):
+ print ' session', obj, referrers(obj, True)
elif isinstance(obj, AppObject):
acount += 1
- else:
+ elif isinstance(obj, ResultSet):
+ rcount += 1
+ #print ' rset', obj, referrers(obj)
+ elif isinstance(obj, Repository):
+ print ' REPO', obj, referrers(obj, True)
+ #elif isinstance(obj, NativeSQLSource):
+ # print ' SOURCe', obj, referrers(obj)
+ elif isinstance(obj, CubicWebTC):
+ print ' TC', obj, referrers(obj)
+ elif isinstance(obj, TestSuite):
+ print ' SUITE', obj, referrers(obj)
+ #elif isinstance(obj, Values):
+ # print ' values', '%#x' % id(obj), referrers(obj, True)
+ elif isinstance(obj, Connection):
+ ccount += 1
+ #print ' cnx', obj, referrers(obj)
+ #elif isinstance(obj, Cursor):
+ # ccount += 1
+ # print ' cursor', obj, referrers(obj)
+ elif isinstance(obj, file):
+ fcount += 1
+ # print ' open file', file.name, file.fileno
+ elif isinstance(obj, CubicWebSchema):
+ scount += 1
+ print ' schema', obj, referrers(obj)
+ elif not isinstance(obj, (type, tuple, dict, list, set, frozenset,
+ weakref.ref, weakref.WeakKeyDictionary,
+ listiterator,
+ property, classmethod,
+ types.ModuleType, types.MemberDescriptorType,
+ types.FunctionType, types.MethodType)):
try:
ocount[obj.__class__] += 1
except KeyError:
ocount[obj.__class__] = 1
except AttributeError:
pass
- print 'IN MEM REQUESTS', count
- print 'IN MEM APPOBJECTS', acount
- ocount = sorted(ocount.items(), key=lambda x: x[1], reverse=True)[:20]
- pprint(ocount)
- print 'UNREACHABLE', gc.garbage
+ if count:
+ print ' NB REQUESTS/SESSIONS', count
+ if acount:
+ print ' NB APPOBJECTS', acount
+ if ccount:
+ print ' NB CONNECTIONS', ccount
+ if rcount:
+ print ' NB RSETS', rcount
+ if scount:
+ print ' NB SCHEMAS', scount
+ if fcount:
+ print ' NB FILES', fcount
+ if all:
+ ocount = sorted(ocount.items(), key=lambda x: x[1], reverse=True)[:20]
+ pprint(ocount)
+ if gc.garbage:
+ print 'UNREACHABLE', gc.garbage
+
+def referrers(obj, showobj=False):
+ try:
+ return sorted(set((type(x), showobj and x or getattr(x, '__name__', '%#x' % id(x)))
+ for x in _referrers(obj)))
+ except TypeError:
+ s = set()
+ unhashable = []
+ for x in _referrers(obj):
+ try:
+ s.add(x)
+ except TypeError:
+ unhashable.append(x)
+ return sorted(s) + unhashable
+
+def _referrers(obj, seen=None, level=0):
+ import gc, types
+ from cubicweb.schema import CubicWebRelationSchema, CubicWebEntitySchema
+ interesting = []
+ if seen is None:
+ seen = set()
+ for x in gc.get_referrers(obj):
+ if id(x) in seen:
+ continue
+ seen.add(id(x))
+ if isinstance(x, types.FrameType):
+ continue
+ if isinstance(x, (CubicWebRelationSchema, CubicWebEntitySchema)):
+ continue
+ if isinstance(x, (list, tuple, set, dict, listiterator)):
+ if level >= 5:
+ pass
+ #interesting.append(x)
+ else:
+ interesting += _referrers(x, seen, level+1)
+ else:
+ interesting.append(x)
+ return interesting
def run(config, debug):
# create the site
@@ -397,7 +493,7 @@
root_resource.start_service()
logger.info('instance started on %s', root_resource.base_url)
if config['profile']:
- prof = hotshot.Profile(config['profile'])
- prof.runcall(reactor.run)
+ import cProfile
+ cProfile.runctx('reactor.run()', globals(), locals(), config['profile'])
else:
reactor.run()
--- a/ext/html4zope.py Wed Mar 24 08:40:00 2010 +0100
+++ b/ext/html4zope.py Wed Mar 24 10:23:57 2010 +0100
@@ -24,12 +24,13 @@
__docformat__ = 'reStructuredText'
+import os
+
from logilab.mtconverter import xml_escape
from docutils import nodes
from docutils.writers.html4css1 import Writer as CSS1Writer
from docutils.writers.html4css1 import HTMLTranslator as CSS1HTMLTranslator
-import os
default_level = int(os.environ.get('STX_DEFAULT_LEVEL', 3))
--- a/ext/rest.py Wed Mar 24 08:40:00 2010 +0100
+++ b/ext/rest.py Wed Mar 24 10:23:57 2010 +0100
@@ -25,7 +25,7 @@
from os.path import join
from docutils import statemachine, nodes, utils, io
-from docutils.core import publish_string
+from docutils.core import Publisher
from docutils.parsers.rst import Parser, states, directives
from docutils.parsers.rst.roles import register_canonical_role, set_classes
@@ -92,14 +92,15 @@
in `docutils.parsers.rst.directives.misc`
"""
context = state.document.settings.context
+ cw = context._cw
source = state_machine.input_lines.source(
lineno - state_machine.input_offset - 1)
#source_dir = os.path.dirname(os.path.abspath(source))
fid = arguments[0]
- for lang in chain((context._cw.lang, context.vreg.property_value('ui.language')),
- context.config.available_languages()):
+ for lang in chain((cw.lang, cw.vreg.property_value('ui.language')),
+ cw.vreg.config.available_languages()):
rid = '%s_%s.rst' % (fid, lang)
- resourcedir = context.config.locate_doc_file(rid)
+ resourcedir = cw.vreg.config.locate_doc_file(rid)
if resourcedir:
break
else:
@@ -196,6 +197,15 @@
self.finish_parse()
+# XXX docutils keep a ref on context, can't find a correct way to remove it
+class CWReSTPublisher(Publisher):
+ def __init__(self, context, settings, **kwargs):
+ Publisher.__init__(self, **kwargs)
+ self.set_components('standalone', 'restructuredtext', 'pseudoxml')
+ self.process_programmatic_settings(None, settings, None)
+ self.settings.context = context
+
+
def rest_publish(context, data):
"""publish a string formatted as ReStructured Text to HTML
@@ -218,7 +228,7 @@
# remove unprintable characters unauthorized in xml
data = data.translate(ESC_CAR_TABLE)
settings = {'input_encoding': encoding, 'output_encoding': 'unicode',
- 'warning_stream': StringIO(), 'context': context,
+ 'warning_stream': StringIO(),
# dunno what's the max, severe is 4, and we never want a crash
# (though try/except may be a better option...)
'halt_level': 10,
@@ -233,9 +243,17 @@
else:
base_url = None
try:
- return publish_string(writer=Writer(base_url=base_url),
- parser=CubicWebReSTParser(), source=data,
- settings_overrides=settings)
+ pub = CWReSTPublisher(context, settings,
+ parser=CubicWebReSTParser(),
+ writer=Writer(base_url=base_url),
+ source_class=io.StringInput,
+ destination_class=io.StringOutput)
+ pub.set_source(data)
+ pub.set_destination()
+ res = pub.publish(enable_exit_status=None)
+ # necessary for proper garbage collection, else a ref is kept somewhere in docutils...
+ del pub.settings.context
+ return res
except Exception:
LOGGER.exception('error while publishing ReST text')
if not isinstance(data, unicode):
--- a/goa/appobjects/dbmgmt.py Wed Mar 24 08:40:00 2010 +0100
+++ b/goa/appobjects/dbmgmt.py Wed Mar 24 10:23:57 2010 +0100
@@ -172,7 +172,7 @@
skip_etypes = ('CWGroup', 'CWUser')
def call(self):
- # XXX should use unsafe_execute with all hooks deactivated
+ # XXX should use unsafe execute with all hooks deactivated
# XXX step by catching datastore errors?
for eschema in self.schema.entities():
if eschema.final or eschema in self.skip_etypes:
--- a/goa/db.py Wed Mar 24 08:40:00 2010 +0100
+++ b/goa/db.py Wed Mar 24 10:23:57 2010 +0100
@@ -86,7 +86,7 @@
entity = vreg.etype_class(eschema.type)(req, rset, i, j)
rset._get_entity_cache_ = {(i, j): entity}
rset.rowcount = len(rows)
- req.decorate_rset(rset)
+ rset.req = req
return rset
--- a/goa/dbinit.py Wed Mar 24 08:40:00 2010 +0100
+++ b/goa/dbinit.py Wed Mar 24 10:23:57 2010 +0100
@@ -84,7 +84,7 @@
Put(gaeentity)
def init_persistent_schema(ssession, schema):
- execute = ssession.unsafe_execute
+ execute = ssession.execute
rql = ('INSERT CWEType X: X name %(name)s, X description %(descr)s,'
'X final FALSE')
eschema = schema.eschema('CWEType')
@@ -96,7 +96,7 @@
'descr': unicode(eschema.description)})
def insert_versions(ssession, config):
- execute = ssession.unsafe_execute
+ execute = ssession.execute
# insert versions
execute('INSERT CWProperty X: X pkey %(pk)s, X value%(v)s',
{'pk': u'system.version.cubicweb',
--- a/goa/gaesource.py Wed Mar 24 08:40:00 2010 +0100
+++ b/goa/gaesource.py Wed Mar 24 10:23:57 2010 +0100
@@ -255,10 +255,11 @@
if asession.user.eid == entity.eid:
asession.user.update(dict(gaeentity))
- def delete_entity(self, session, etype, eid):
+ def delete_entity(self, session, entity):
"""delete an entity from the source"""
# do not delay delete_entity as other modifications to ensure
# consistency
+ eid = entity.eid
key = Key(eid)
Delete(key)
session.clear_datastore_cache(key)
--- a/hooks/__init__.py Wed Mar 24 08:40:00 2010 +0100
+++ b/hooks/__init__.py Wed Mar 24 10:23:57 2010 +0100
@@ -1,1 +1,36 @@
-"""core hooks"""
+"""core hooks
+
+:organization: Logilab
+:copyright: 2009-2010 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
+:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
+:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
+"""
+__docformat__ = "restructuredtext en"
+
+from datetime import timedelta, datetime
+from cubicweb.server import hook
+
+class ServerStartupHook(hook.Hook):
+ """task to cleanup expirated auth cookie entities"""
+ __regid__ = 'cw_cleanup_transactions'
+ events = ('server_startup',)
+
+ def __call__(self):
+ # XXX use named args and inner functions to avoid referencing globals
+ # which may cause reloading pb
+ lifetime = timedelta(days=self.repo.config['keep-transaction-lifetime'])
+ def cleanup_old_transactions(repo=self.repo, lifetime=lifetime):
+ mindate = datetime.now() - lifetime
+ session = repo.internal_session()
+ try:
+ session.system_sql(
+ 'DELETE FROM transaction WHERE tx_time < %(time)s',
+ {'time': mindate})
+ # cleanup deleted entities
+ session.system_sql(
+ 'DELETE FROM deleted_entities WHERE dtime < %(time)s',
+ {'time': mindate})
+ session.commit()
+ finally:
+ session.close()
+ self.repo.looping_task(60*60*24, cleanup_old_transactions, self.repo)
--- a/hooks/email.py Wed Mar 24 08:40:00 2010 +0100
+++ b/hooks/email.py Wed Mar 24 10:23:57 2010 +0100
@@ -26,7 +26,7 @@
def precommit_event(self):
if self.condition():
- self.session.unsafe_execute(
+ self.session.execute(
'SET X %s Y WHERE X eid %%(x)s, Y eid %%(y)s' % self.rtype,
{'x': self.entity.eid, 'y': self.email.eid}, 'x')
--- a/hooks/integrity.py Wed Mar 24 08:40:00 2010 +0100
+++ b/hooks/integrity.py Wed Mar 24 10:23:57 2010 +0100
@@ -35,13 +35,12 @@
RQLUniqueConstraint in two different transactions, as explained in
http://intranet.logilab.fr/jpl/ticket/36564
"""
- asession = session.actual_session()
- if 'uniquecstrholder' in asession.transaction_data:
+ if 'uniquecstrholder' in session.transaction_data:
return
_UNIQUE_CONSTRAINTS_LOCK.acquire()
- asession.transaction_data['uniquecstrholder'] = True
+ session.transaction_data['uniquecstrholder'] = True
# register operation responsible to release the lock on commit/rollback
- _ReleaseUniqueConstraintsOperation(asession)
+ _ReleaseUniqueConstraintsOperation(session)
def _release_unique_cstr_lock(session):
if 'uniquecstrholder' in session.transaction_data:
@@ -69,7 +68,7 @@
return
if self.rtype in self.session.transaction_data.get('pendingrtypes', ()):
return
- if self.session.unsafe_execute(*self._rql()).rowcount < 1:
+ if self.session.execute(*self._rql()).rowcount < 1:
etype = self.session.describe(self.eid)[0]
_ = self.session._
msg = _('at least one relation %(rtype)s is required on %(etype)s (%(eid)s)')
@@ -99,12 +98,8 @@
__abstract__ = True
category = 'integrity'
-class UserIntegrityHook(IntegrityHook):
- __abstract__ = True
- __select__ = IntegrityHook.__select__ & hook.regular_session()
-
-class CheckCardinalityHook(UserIntegrityHook):
+class CheckCardinalityHook(IntegrityHook):
"""check cardinalities are satisfied"""
__regid__ = 'checkcard'
events = ('after_add_entity', 'before_delete_relation')
@@ -176,7 +171,7 @@
pass
-class CheckConstraintHook(UserIntegrityHook):
+class CheckConstraintHook(IntegrityHook):
"""check the relation satisfy its constraints
this is delayed to a precommit time operation since other relation which
@@ -194,7 +189,7 @@
rdef=(self.eidfrom, self.rtype, self.eidto))
-class CheckAttributeConstraintHook(UserIntegrityHook):
+class CheckAttributeConstraintHook(IntegrityHook):
"""check the attribute relation satisfy its constraints
this is delayed to a precommit time operation since other relation which
@@ -214,7 +209,7 @@
rdef=(self.entity.eid, attr, None))
-class CheckUniqueHook(UserIntegrityHook):
+class CheckUniqueHook(IntegrityHook):
__regid__ = 'checkunique'
events = ('before_add_entity', 'before_update_entity')
@@ -227,7 +222,7 @@
if val is None:
continue
rql = '%s X WHERE X %s %%(val)s' % (entity.e_schema, attr)
- rset = self._cw.unsafe_execute(rql, {'val': val})
+ rset = self._cw.execute(rql, {'val': val})
if rset and rset[0][0] != entity.eid:
msg = self._cw._('the value "%s" is already used, use another one')
raise ValidationError(entity.eid, {attr: msg % val})
@@ -244,9 +239,9 @@
if not (session.deleted_in_transaction(self.eid) or
session.added_in_transaction(self.eid)):
etype = session.describe(self.eid)[0]
- session.unsafe_execute('DELETE %s X WHERE X eid %%(x)s, NOT %s'
- % (etype, self.relation),
- {'x': self.eid}, 'x')
+ session.execute('DELETE %s X WHERE X eid %%(x)s, NOT %s'
+ % (etype, self.relation),
+ {'x': self.eid}, 'x')
class DeleteCompositeOrphanHook(IntegrityHook):
@@ -290,7 +285,7 @@
self.entity['name'] = newname
-class TidyHtmlFields(UserIntegrityHook):
+class TidyHtmlFields(IntegrityHook):
"""tidy HTML in rich text strings"""
__regid__ = 'htmltidy'
events = ('before_add_entity', 'before_update_entity')
--- a/hooks/metadata.py Wed Mar 24 08:40:00 2010 +0100
+++ b/hooks/metadata.py Wed Mar 24 10:23:57 2010 +0100
@@ -19,7 +19,7 @@
# eschema.eid is None if schema has been readen from the filesystem, not
# from the database (eg during tests)
if eschema.eid is None:
- eschema.eid = session.unsafe_execute(
+ eschema.eid = session.execute(
'Any X WHERE X is CWEType, X name %(name)s',
{'name': str(eschema)})[0][0]
return eschema.eid
@@ -103,18 +103,17 @@
events = ('after_add_entity',)
def __call__(self):
- asession = self._cw.actual_session()
- if not asession.is_internal_session:
- self._cw.add_relation(self.entity.eid, 'owned_by', asession.user.eid)
- _SetCreatorOp(asession, entity=self.entity)
+ if not self._cw.is_internal_session:
+ self._cw.add_relation(self.entity.eid, 'owned_by', self._cw.user.eid)
+ _SetCreatorOp(self._cw, entity=self.entity)
class _SyncOwnersOp(hook.Operation):
def precommit_event(self):
- self.session.unsafe_execute('SET X owned_by U WHERE C owned_by U, C eid %(c)s,'
- 'NOT EXISTS(X owned_by U, X eid %(x)s)',
- {'c': self.compositeeid, 'x': self.composedeid},
- ('c', 'x'))
+ self.session.execute('SET X owned_by U WHERE C owned_by U, C eid %(c)s,'
+ 'NOT EXISTS(X owned_by U, X eid %(x)s)',
+ {'c': self.compositeeid, 'x': self.composedeid},
+ ('c', 'x'))
class SyncCompositeOwner(MetaDataHook):
--- a/hooks/notification.py Wed Mar 24 08:40:00 2010 +0100
+++ b/hooks/notification.py Wed Mar 24 10:23:57 2010 +0100
@@ -103,20 +103,19 @@
class EntityUpdateHook(NotificationHook):
__regid__ = 'notifentityupdated'
__abstract__ = True # do not register by default
-
+ __select__ = NotificationHook.__select__ & hook.from_dbapi_query()
events = ('before_update_entity',)
skip_attrs = set()
def __call__(self):
session = self._cw
- if self.entity.eid in session.transaction_data.get('neweids', ()):
+ if session.added_in_transaction(self.entity.eid):
return # entity is being created
- if session.is_super_session:
- return # ignore changes triggered by hooks
# then compute changes
changes = session.transaction_data.setdefault('changes', {})
thisentitychanges = changes.setdefault(self.entity.eid, set())
- attrs = [k for k in self.entity.edited_attributes if not k in self.skip_attrs]
+ attrs = [k for k in self.entity.edited_attributes
+ if not k in self.skip_attrs]
if not attrs:
return
rqlsel, rqlrestr = [], ['X eid %(x)s']
@@ -125,7 +124,7 @@
rqlsel.append(var)
rqlrestr.append('X %s %s' % (attr, var))
rql = 'Any %s WHERE %s' % (','.join(rqlsel), ','.join(rqlrestr))
- rset = session.unsafe_execute(rql, {'x': self.entity.eid}, 'x')
+ rset = session.execute(rql, {'x': self.entity.eid}, 'x')
for i, attr in enumerate(attrs):
oldvalue = rset[0][i]
newvalue = self.entity[attr]
@@ -139,13 +138,11 @@
class SomethingChangedHook(NotificationHook):
__regid__ = 'supervising'
+ __select__ = NotificationHook.__select__ & hook.from_dbapi_query()
events = ('before_add_relation', 'before_delete_relation',
'after_add_entity', 'before_update_entity')
def __call__(self):
- # XXX use proper selectors
- if self._cw.is_super_session or self._cw.repo.config.repairing:
- return # ignore changes triggered by hooks or maintainance shell
dest = self._cw.vreg.config['supervising-addrs']
if not dest: # no supervisors, don't do this for nothing...
return
--- a/hooks/security.py Wed Mar 24 08:40:00 2010 +0100
+++ b/hooks/security.py Wed Mar 24 10:23:57 2010 +0100
@@ -9,23 +9,27 @@
__docformat__ = "restructuredtext en"
from cubicweb import Unauthorized
+from cubicweb.selectors import objectify_selector, lltrace
from cubicweb.server import BEFORE_ADD_RELATIONS, ON_COMMIT_ADD_RELATIONS, hook
def check_entity_attributes(session, entity, editedattrs=None):
eid = entity.eid
eschema = entity.e_schema
- # ._default_set is only there on entity creation to indicate unspecified
- # attributes which has been set to a default value defined in the schema
- defaults = getattr(entity, '_default_set', ())
+ # .skip_security_attributes is there to bypass security for attributes
+ # set by hooks by modifying the entity's dictionnary
+ dontcheck = entity.skip_security_attributes
if editedattrs is None:
try:
editedattrs = entity.edited_attributes
except AttributeError:
- editedattrs = entity
+ editedattrs = entity # XXX unexpected
for attr in editedattrs:
- if attr in defaults:
+ try:
+ dontcheck.remove(attr)
continue
+ except KeyError:
+ pass
rdef = eschema.rdef(attr)
if rdef.final: # non final relation are checked by other hooks
# add/delete should be equivalent (XXX: unify them into 'update' ?)
@@ -53,10 +57,17 @@
pass
+@objectify_selector
+@lltrace
+def write_security_enabled(cls, req, **kwargs):
+ if req is None or not req.write_security:
+ return 0
+ return 1
+
class SecurityHook(hook.Hook):
__abstract__ = True
category = 'security'
- __select__ = hook.Hook.__select__ & hook.regular_session()
+ __select__ = hook.Hook.__select__ & write_security_enabled()
class AfterAddEntitySecurityHook(SecurityHook):
--- a/hooks/storages.py Wed Mar 24 08:40:00 2010 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-"""hooks to handle attributes mapped to a custom storage
-"""
-from cubicweb.server.hook import Hook
-from cubicweb.server.sources.storages import ETYPE_ATTR_STORAGE
-
-
-class BFSSHook(Hook):
- """abstract class for bytes file-system storage hooks"""
- __abstract__ = True
- category = 'bfss'
-
-
-class PreAddEntityHook(BFSSHook):
- """"""
- __regid__ = 'bfss_add_entity'
- events = ('before_add_entity', )
-
- def __call__(self):
- etype = self.entity.__regid__
- for attr in ETYPE_ATTR_STORAGE.get(etype, ()):
- ETYPE_ATTR_STORAGE[etype][attr].entity_added(self.entity, attr)
-
-class PreUpdateEntityHook(BFSSHook):
- """"""
- __regid__ = 'bfss_update_entity'
- events = ('before_update_entity', )
-
- def __call__(self):
- etype = self.entity.__regid__
- for attr in ETYPE_ATTR_STORAGE.get(etype, ()):
- ETYPE_ATTR_STORAGE[etype][attr].entity_updated(self.entity, attr)
-
-class PreDeleteEntityHook(BFSSHook):
- """"""
- __regid__ = 'bfss_delete_entity'
- events = ('before_delete_entity', )
-
- def __call__(self):
- etype = self.entity.__regid__
- for attr in ETYPE_ATTR_STORAGE.get(etype, ()):
- ETYPE_ATTR_STORAGE[etype][attr].entity_deleted(self.entity, attr)
--- a/hooks/syncschema.py Wed Mar 24 08:40:00 2010 +0100
+++ b/hooks/syncschema.py Wed Mar 24 10:23:57 2010 +0100
@@ -12,11 +12,12 @@
"""
__docformat__ = "restructuredtext en"
+from copy import copy
from yams.schema import BASE_TYPES, RelationSchema, RelationDefinitionSchema
-from yams.buildobjs import EntityType, RelationType, RelationDefinition
-from yams.schema2sql import eschema2sql, rschema2sql, type_from_constraints
+from yams import buildobjs as ybo, schema2sql as y2sql
from logilab.common.decorators import clear_cache
+from logilab.common.testlib import mock_object
from cubicweb import ValidationError
from cubicweb.selectors import implements
@@ -255,7 +256,7 @@
# need to create the relation if it has not been already done by
# another event of the same transaction
if not rschema.type in session.transaction_data.get('createdtables', ()):
- tablesql = rschema2sql(rschema)
+ tablesql = y2sql.rschema2sql(rschema)
# create the necessary table
for sql in tablesql.split(';'):
if sql.strip():
@@ -323,13 +324,13 @@
rtype = entity.rtype.name
obj = str(entity.otype.name)
constraints = get_constraints(self.session, entity)
- rdef = RelationDefinition(subj, rtype, obj,
- description=entity.description,
- cardinality=entity.cardinality,
- constraints=constraints,
- order=entity.ordernum,
- eid=entity.eid,
- **kwargs)
+ rdef = ybo.RelationDefinition(subj, rtype, obj,
+ description=entity.description,
+ cardinality=entity.cardinality,
+ constraints=constraints,
+ order=entity.ordernum,
+ eid=entity.eid,
+ **kwargs)
MemSchemaRDefAdd(self.session, rdef)
return rdef
@@ -347,8 +348,8 @@
'internationalizable': entity.internationalizable}
rdef = self.init_rdef(**props)
sysource = session.pool.source('system')
- attrtype = type_from_constraints(sysource.dbhelper, rdef.object,
- rdef.constraints)
+ attrtype = y2sql.type_from_constraints(
+ sysource.dbhelper, rdef.object, rdef.constraints)
# XXX should be moved somehow into lgc.adbh: sqlite doesn't support to
# add a new column with UNIQUE, it should be added after the ALTER TABLE
# using ADD INDEX
@@ -379,12 +380,13 @@
self.error('error while creating index for %s.%s: %s',
table, column, ex)
# final relations are not infered, propagate
+ schema = session.vreg.schema
try:
- eschema = session.vreg.schema.eschema(rdef.subject)
+ eschema = schema.eschema(rdef.subject)
except KeyError:
return # entity type currently being added
# propagate attribute to children classes
- rschema = session.vreg.schema.rschema(rdef.name)
+ rschema = schema.rschema(rdef.name)
# if relation type has been inserted in the same transaction, its final
# attribute is still set to False, so we've to ensure it's False
rschema.final = True
@@ -394,15 +396,19 @@
'cardinality': rdef.cardinality,
'constraints': rdef.constraints,
'permissions': rdef.get_permissions(),
- 'order': rdef.order})
+ 'order': rdef.order,
+ 'infered': False, 'eid': None
+ })
+ cstrtypemap = ss.cstrtype_mapping(session)
groupmap = group_mapping(session)
+ object = schema.eschema(rdef.object)
for specialization in eschema.specialized_by(False):
if (specialization, rdef.object) in rschema.rdefs:
continue
- sperdef = RelationDefinitionSchema(specialization, rschema, rdef.object, props)
- for rql, args in ss.rdef2rql(rschema, str(specialization),
- rdef.object, sperdef, groupmap=groupmap):
- session.execute(rql, args)
+ sperdef = RelationDefinitionSchema(specialization, rschema,
+ object, props)
+ ss.execschemarql(session.execute, sperdef,
+ ss.rdef2rql(sperdef, cstrtypemap, groupmap))
# set default value, using sql for performance and to avoid
# modification_date update
if default:
@@ -451,13 +457,13 @@
rtype in session.transaction_data.get('createdtables', ())):
try:
rschema = schema.rschema(rtype)
- tablesql = rschema2sql(rschema)
+ tablesql = y2sql.rschema2sql(rschema)
except KeyError:
# fake we add it to the schema now to get a correctly
# initialized schema but remove it before doing anything
# more dangerous...
rschema = schema.add_relation_type(rdef)
- tablesql = rschema2sql(rschema)
+ tablesql = y2sql.rschema2sql(rschema)
schema.del_relation_type(rtype)
# create the necessary table
for sql in tablesql.split(';'):
@@ -490,11 +496,11 @@
return
atype = self.rschema.objects(etype)[0]
constraints = self.rschema.rdef(etype, atype).constraints
- coltype = type_from_constraints(adbh, atype, constraints,
- creating=False)
+ coltype = y2sql.type_from_constraints(adbh, atype, constraints,
+ creating=False)
# XXX check self.values['cardinality'][0] actually changed?
- sql = adbh.sql_set_null_allowed(table, column, coltype,
- self.values['cardinality'][0] != '1')
+ notnull = self.values['cardinality'][0] != '1'
+ sql = adbh.sql_set_null_allowed(table, column, coltype, notnull)
session.system_sql(sql)
if 'fulltextindexed' in self.values:
UpdateFTIndexOp(session)
@@ -527,8 +533,8 @@
oldcstr is None or oldcstr.max != newcstr.max):
adbh = self.session.pool.source('system').dbhelper
card = rtype.rdef(subjtype, objtype).cardinality
- coltype = type_from_constraints(adbh, objtype, [newcstr],
- creating=False)
+ coltype = y2sql.type_from_constraints(adbh, objtype, [newcstr],
+ creating=False)
sql = adbh.sql_change_col_type(table, column, coltype, card != '1')
try:
session.system_sql(sql, rollback_on_failure=False)
@@ -796,7 +802,7 @@
if name in CORE_ETYPES:
raise ValidationError(self.entity.eid, {None: self._cw._('can\'t be deleted')})
# delete every entities of this type
- self._cw.unsafe_execute('DELETE %s X' % name)
+ self._cw.execute('DELETE %s X' % name)
DropTable(self._cw, table=SQL_PREFIX + name)
MemSchemaCWETypeDel(self._cw, name)
@@ -828,23 +834,26 @@
return
schema = self._cw.vreg.schema
name = entity['name']
- etype = EntityType(name=name, description=entity.get('description'),
- meta=entity.get('meta')) # don't care about final
+ etype = ybo.EntityType(name=name, description=entity.get('description'),
+ meta=entity.get('meta')) # don't care about final
# fake we add it to the schema now to get a correctly initialized schema
# but remove it before doing anything more dangerous...
schema = self._cw.vreg.schema
eschema = schema.add_entity_type(etype)
# generate table sql and rql to add metadata
- tablesql = eschema2sql(self._cw.pool.source('system').dbhelper, eschema,
- prefix=SQL_PREFIX)
- relrqls = []
+ tablesql = y2sql.eschema2sql(self._cw.pool.source('system').dbhelper,
+ eschema, prefix=SQL_PREFIX)
+ rdefrqls = []
+ gmap = group_mapping(self._cw)
+ cmap = ss.cstrtype_mapping(self._cw)
for rtype in (META_RTYPES - VIRTUAL_RTYPES):
rschema = schema[rtype]
sampletype = rschema.subjects()[0]
desttype = rschema.objects()[0]
- props = rschema.rdef(sampletype, desttype)
- relrqls += list(ss.rdef2rql(rschema, name, desttype, props,
- groupmap=group_mapping(self._cw)))
+ rdef = copy(rschema.rdef(sampletype, desttype))
+ rdef.subject = mock_object(eid=entity.eid)
+ mock = mock_object(eid=None)
+ rdefrqls.append( (mock, tuple(ss.rdef2rql(rdef, cmap, gmap))) )
# now remove it !
schema.del_entity_type(name)
# create the necessary table
@@ -857,8 +866,8 @@
etype.eid = entity.eid
MemSchemaCWETypeAdd(self._cw, etype)
# add meta relations
- for rql, kwargs in relrqls:
- self._cw.execute(rql, kwargs)
+ for rdef, relrqls in rdefrqls:
+ ss.execschemarql(self._cw.execute, rdef, relrqls)
class BeforeUpdateCWETypeHook(DelCWETypeHook):
@@ -915,12 +924,12 @@
def __call__(self):
entity = self.entity
- rtype = RelationType(name=entity.name,
- description=entity.get('description'),
- meta=entity.get('meta', False),
- inlined=entity.get('inlined', False),
- symmetric=entity.get('symmetric', False),
- eid=entity.eid)
+ rtype = ybo.RelationType(name=entity.name,
+ description=entity.get('description'),
+ meta=entity.get('meta', False),
+ inlined=entity.get('inlined', False),
+ symmetric=entity.get('symmetric', False),
+ eid=entity.eid)
MemSchemaCWRTypeAdd(self._cw, rtype)
@@ -974,7 +983,7 @@
if not (subjschema.eid in pendings or objschema.eid in pendings):
session.execute('DELETE X %s Y WHERE X is %s, Y is %s'
% (rschema, subjschema, objschema))
- execute = session.unsafe_execute
+ execute = session.execute
rset = execute('Any COUNT(X) WHERE X is %s, X relation_type R,'
'R eid %%(x)s' % rdeftype, {'x': self.eidto})
lastrel = rset[0][0] == 0
--- a/hooks/test/unittest_syncschema.py Wed Mar 24 08:40:00 2010 +0100
+++ b/hooks/test/unittest_syncschema.py Wed Mar 24 10:23:57 2010 +0100
@@ -3,9 +3,11 @@
from cubicweb import ValidationError
from cubicweb.devtools.testlib import CubicWebTC
from cubicweb.server.sqlutils import SQL_PREFIX
-
+from cubicweb.devtools.repotest import schema_eids_idx, restore_schema_eids_idx
-SCHEMA_EIDS = {}
+def teardown_module(*args):
+ del SchemaModificationHooksTC.schema_eids
+
class SchemaModificationHooksTC(CubicWebTC):
reset_schema = True
@@ -15,29 +17,12 @@
# we have to read schema from the database to get eid for schema entities
config._cubes = None
cls.repo.fill_schema()
- # remember them so we can reread it from the fs instead of the db (too
- # costly) between tests
- for x in cls.repo.schema.entities():
- SCHEMA_EIDS[x] = x.eid
- for x in cls.repo.schema.relations():
- SCHEMA_EIDS[x] = x.eid
- for rdef in x.rdefs.itervalues():
- SCHEMA_EIDS[(rdef.subject, rdef.rtype, rdef.object)] = rdef.eid
+ cls.schema_eids = schema_eids_idx(cls.repo.schema)
@classmethod
def _refresh_repo(cls):
super(SchemaModificationHooksTC, cls)._refresh_repo()
- # rebuild schema eid index
- schema = cls.repo.schema
- for x in schema.entities():
- x.eid = SCHEMA_EIDS[x]
- schema._eid_index[x.eid] = x
- for x in cls.repo.schema.relations():
- x.eid = SCHEMA_EIDS[x]
- schema._eid_index[x.eid] = x
- for rdef in x.rdefs.itervalues():
- rdef.eid = SCHEMA_EIDS[(rdef.subject, rdef.rtype, rdef.object)]
- schema._eid_index[rdef.eid] = rdef
+ restore_schema_eids_idx(cls.repo.schema, cls.schema_eids)
def index_exists(self, etype, attr, unique=False):
self.session.set_pool()
--- a/hooks/workflow.py Wed Mar 24 08:40:00 2010 +0100
+++ b/hooks/workflow.py Wed Mar 24 10:23:57 2010 +0100
@@ -19,8 +19,8 @@
nocheck = session.transaction_data.setdefault('skip-security', set())
nocheck.add((x, 'in_state', oldstate))
nocheck.add((x, 'in_state', newstate))
- # delete previous state first in case we're using a super session,
- # unless in_state isn't stored in the system source
+ # delete previous state first unless in_state isn't stored in the system
+ # source
fromsource = session.describe(x)[1]
if fromsource == 'system' or \
not session.repo.sources_by_uri[fromsource].support_relation('in_state'):
@@ -42,9 +42,7 @@
and entity.current_workflow:
state = entity.current_workflow.initial
if state:
- # use super session to by-pass security checks
- session.super_session.add_relation(entity.eid, 'in_state',
- state.eid)
+ session.add_relation(entity.eid, 'in_state', state.eid)
class _FireAutotransitionOp(hook.Operation):
@@ -122,14 +120,7 @@
msg = session._('exiting from subworkflow %s')
msg %= session._(forentity.current_workflow.name)
session.transaction_data[(forentity.eid, 'subwfentrytr')] = True
- # XXX iirk
- req = forentity._cw
- forentity._cw = session.super_session
- try:
- trinfo = forentity.change_state(tostate, msg, u'text/plain',
- tr=wftr)
- finally:
- forentity._cw = req
+ forentity.change_state(tostate, msg, u'text/plain', tr=wftr)
# hooks ########################################################################
@@ -195,7 +186,8 @@
raise ValidationError(entity.eid, {None: msg})
# True if we are coming back from subworkflow
swtr = session.transaction_data.pop((forentity.eid, 'subwfentrytr'), None)
- cowpowers = session.is_super_session or 'managers' in session.user.groups
+ cowpowers = ('managers' in session.user.groups
+ or not session.write_security)
# no investigate the requested state change...
try:
treid = entity['by_transition']
@@ -266,7 +258,7 @@
class CheckInStateChangeAllowed(WorkflowHook):
- """check state apply, in case of direct in_state change using unsafe_execute
+ """check state apply, in case of direct in_state change using unsafe execute
"""
__regid__ = 'wfcheckinstate'
__select__ = WorkflowHook.__select__ & hook.match_rtype('in_state')
@@ -307,8 +299,7 @@
return
entity = self._cw.entity_from_eid(self.eidfrom)
try:
- entity.set_attributes(modification_date=datetime.now(),
- _cw_unsafe=True)
+ entity.set_attributes(modification_date=datetime.now())
except RepositoryError, ex:
# usually occurs if entity is coming from a read-only source
# (eg ldap user)
--- a/i18n/en.po Wed Mar 24 08:40:00 2010 +0100
+++ b/i18n/en.po Wed Mar 24 10:23:57 2010 +0100
@@ -308,6 +308,30 @@
msgid "CWUser_plural"
msgstr "Users"
+#, python-format
+msgid ""
+"Can't restore %(role)s relation %(rtype)s to entity %(eid)s which is already "
+"linked using this relation."
+msgstr ""
+
+#, python-format
+msgid ""
+"Can't restore relation %(rtype)s between %(subj)s and %(obj)s, that relation "
+"does not exists anymore in the schema."
+msgstr ""
+
+#, python-format
+msgid ""
+"Can't restore relation %(rtype)s of entity %(eid)s, this relation does not "
+"exists anymore in the schema."
+msgstr ""
+
+#, python-format
+msgid ""
+"Can't restore relation %(rtype)s, %(role)s entity %(eid)s doesn't exist "
+"anymore."
+msgstr ""
+
msgid "Date"
msgstr "Date"
@@ -2107,9 +2131,15 @@
msgid "entity created"
msgstr ""
+msgid "entity creation"
+msgstr ""
+
msgid "entity deleted"
msgstr ""
+msgid "entity deletion"
+msgstr ""
+
msgid "entity edited"
msgstr ""
@@ -2130,6 +2160,9 @@
msgid "entity types which may use this workflow"
msgstr ""
+msgid "entity update"
+msgstr ""
+
msgid "error while embedding page"
msgstr ""
@@ -3099,6 +3132,12 @@
msgid "relation %(relname)s of %(ent)s"
msgstr ""
+msgid "relation add"
+msgstr ""
+
+msgid "relation removal"
+msgstr ""
+
msgid "relation_type"
msgstr "relation type"
@@ -3313,6 +3352,9 @@
msgid "site-wide property can't be set for user"
msgstr ""
+msgid "some errors occured:"
+msgstr ""
+
msgid "sorry, the server is unable to handle this query"
msgstr ""
@@ -3584,6 +3626,9 @@
msgid "toggle check boxes"
msgstr ""
+msgid "transaction undoed"
+msgstr ""
+
#, python-format
msgid "transition %(tr)s isn't allowed from %(st)s"
msgstr ""
@@ -3682,6 +3727,9 @@
msgid "unauthorized value"
msgstr ""
+msgid "undo"
+msgstr ""
+
msgid "unique identifier used to connect to the application"
msgstr ""
--- a/i18n/es.po Wed Mar 24 08:40:00 2010 +0100
+++ b/i18n/es.po Wed Mar 24 10:23:57 2010 +0100
@@ -316,6 +316,30 @@
msgid "CWUser_plural"
msgstr "Usuarios"
+#, python-format
+msgid ""
+"Can't restore %(role)s relation %(rtype)s to entity %(eid)s which is already "
+"linked using this relation."
+msgstr ""
+
+#, python-format
+msgid ""
+"Can't restore relation %(rtype)s between %(subj)s and %(obj)s, that relation "
+"does not exists anymore in the schema."
+msgstr ""
+
+#, python-format
+msgid ""
+"Can't restore relation %(rtype)s of entity %(eid)s, this relation does not "
+"exists anymore in the schema."
+msgstr ""
+
+#, python-format
+msgid ""
+"Can't restore relation %(rtype)s, %(role)s entity %(eid)s doesn't exist "
+"anymore."
+msgstr ""
+
msgid "Date"
msgstr "Fecha"
@@ -2152,9 +2176,15 @@
msgid "entity created"
msgstr "entidad creada"
+msgid "entity creation"
+msgstr ""
+
msgid "entity deleted"
msgstr "Entidad eliminada"
+msgid "entity deletion"
+msgstr ""
+
msgid "entity edited"
msgstr "entidad modificada"
@@ -2177,6 +2207,9 @@
msgid "entity types which may use this workflow"
msgstr ""
+msgid "entity update"
+msgstr ""
+
msgid "error while embedding page"
msgstr "Error durante la inclusión de la página"
@@ -3172,6 +3205,12 @@
msgid "relation %(relname)s of %(ent)s"
msgstr "relación %(relname)s de %(ent)s"
+msgid "relation add"
+msgstr ""
+
+msgid "relation removal"
+msgstr ""
+
msgid "relation_type"
msgstr "tipo de relación"
@@ -3394,6 +3433,9 @@
msgstr ""
"una propiedad especifica para el sitio no puede establecerse para el usuario"
+msgid "some errors occured:"
+msgstr ""
+
msgid "sorry, the server is unable to handle this query"
msgstr "lo sentimos, el servidor no puede manejar esta consulta"
@@ -3665,6 +3707,9 @@
msgid "toggle check boxes"
msgstr "cambiar valor"
+msgid "transaction undoed"
+msgstr ""
+
#, python-format
msgid "transition %(tr)s isn't allowed from %(st)s"
msgstr ""
@@ -3763,6 +3808,9 @@
msgid "unauthorized value"
msgstr "valor no permitido"
+msgid "undo"
+msgstr ""
+
msgid "unique identifier used to connect to the application"
msgstr "identificador unico utilizado para conectar a la aplicación"
--- a/i18n/fr.po Wed Mar 24 08:40:00 2010 +0100
+++ b/i18n/fr.po Wed Mar 24 10:23:57 2010 +0100
@@ -315,6 +315,37 @@
msgid "CWUser_plural"
msgstr "Utilisateurs"
+#, python-format
+msgid ""
+"Can't restore %(role)s relation %(rtype)s to entity %(eid)s which is already "
+"linked using this relation."
+msgstr ""
+"Ne peut restaurer la relation %(role)s %(rtype)s vers l'entité %(eid)s qui est "
+"déja lié à une autre entité par cette relation."
+
+#, python-format
+msgid ""
+"Can't restore relation %(rtype)s between %(subj)s and %(obj)s, that relation "
+"does not exists anymore in the schema."
+msgstr ""
+"Ne peut restaurer la relation %(rtype)s entre %(subj)s et %(obj)s, "
+"cette relation n'existe plus dans le schéma."
+
+#, python-format
+msgid ""
+"Can't restore relation %(rtype)s of entity %(eid)s, this relation does not "
+"exists anymore in the schema."
+msgstr ""
+"Ne peut restaurer la relation %(rtype)s de l'entité %(eid)s, cette relation"
+"n'existe plus dans le schéma"
+
+#, python-format
+msgid ""
+"Can't restore relation %(rtype)s, %(role)s entity %(eid)s doesn't exist "
+"anymore."
+msgstr ""
+"Ne peut restaurer la relation %(rtype)s, l'entité %(role)s %(eid)s n'existe plus."
+
msgid "Date"
msgstr "Date"
@@ -2175,9 +2206,15 @@
msgid "entity created"
msgstr "entité créée"
+msgid "entity creation"
+msgstr "création d'entité"
+
msgid "entity deleted"
msgstr "entité supprimée"
+msgid "entity deletion"
+msgstr "suppression d'entité"
+
msgid "entity edited"
msgstr "entité éditée"
@@ -2199,6 +2236,9 @@
msgid "entity types which may use this workflow"
msgstr "types d'entité pouvant utiliser ce workflow"
+msgid "entity update"
+msgstr "mise à jour d'entité"
+
msgid "error while embedding page"
msgstr "erreur pendant l'inclusion de la page"
@@ -3196,6 +3236,12 @@
msgid "relation %(relname)s of %(ent)s"
msgstr "relation %(relname)s de %(ent)s"
+msgid "relation add"
+msgstr "ajout de relation"
+
+msgid "relation removal"
+msgstr "suppression de relation"
+
msgid "relation_type"
msgstr "type de relation"
@@ -3418,6 +3464,9 @@
msgid "site-wide property can't be set for user"
msgstr "une propriété spécifique au site ne peut être propre à un utilisateur"
+msgid "some errors occured:"
+msgstr "des erreurs sont survenues"
+
msgid "sorry, the server is unable to handle this query"
msgstr "désolé, le serveur ne peut traiter cette requête"
@@ -3694,6 +3743,9 @@
msgid "toggle check boxes"
msgstr "inverser les cases à cocher"
+msgid "transaction undoed"
+msgstr "transaction annulées"
+
#, python-format
msgid "transition %(tr)s isn't allowed from %(st)s"
msgstr "la transition %(tr)s n'est pas autorisée depuis l'état %(st)s"
@@ -3792,6 +3844,9 @@
msgid "unauthorized value"
msgstr "valeur non autorisée"
+msgid "undo"
+msgstr "annuler"
+
msgid "unique identifier used to connect to the application"
msgstr "identifiant unique utilisé pour se connecter à l'application"
--- a/mail.py Wed Mar 24 08:40:00 2010 +0100
+++ b/mail.py Wed Mar 24 10:23:57 2010 +0100
@@ -215,16 +215,9 @@
"""return a list of either 2-uple (email, language) or user entity to
who this email should be sent
"""
- # use super_session when available, we don't want to consider security
- # when selecting recipients_finder
- try:
- req = self._cw.super_session
- except AttributeError:
- req = self._cw
- finder = self._cw.vreg['components'].select('recipients_finder', req,
- rset=self.cw_rset,
- row=self.cw_row or 0,
- col=self.cw_col or 0)
+ finder = self._cw.vreg['components'].select(
+ 'recipients_finder', self._cw, rset=self.cw_rset,
+ row=self.cw_row or 0, col=self.cw_col or 0)
return finder.recipients()
def send_now(self, recipients, msg):
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/misc/migration/3.7.0_Any.py Wed Mar 24 10:23:57 2010 +0100
@@ -0,0 +1,40 @@
+typemap = repo.system_source.dbhelper.TYPE_MAPPING
+sqls = """
+CREATE TABLE transactions (
+ tx_uuid CHAR(32) PRIMARY KEY NOT NULL,
+ tx_user INTEGER NOT NULL,
+ tx_time %s NOT NULL
+);;
+CREATE INDEX transactions_tx_user_idx ON transactions(tx_user);;
+
+CREATE TABLE tx_entity_actions (
+ tx_uuid CHAR(32) REFERENCES transactions(tx_uuid) ON DELETE CASCADE,
+ txa_action CHAR(1) NOT NULL,
+ txa_public %s NOT NULL,
+ txa_order INTEGER,
+ eid INTEGER NOT NULL,
+ etype VARCHAR(64) NOT NULL,
+ changes %s
+);;
+CREATE INDEX tx_entity_actions_txa_action_idx ON tx_entity_actions(txa_action);;
+CREATE INDEX tx_entity_actions_txa_public_idx ON tx_entity_actions(txa_public);;
+CREATE INDEX tx_entity_actions_eid_idx ON tx_entity_actions(eid);;
+CREATE INDEX tx_entity_actions_etype_idx ON tx_entity_actions(etype);;
+
+CREATE TABLE tx_relation_actions (
+ tx_uuid CHAR(32) REFERENCES transactions(tx_uuid) ON DELETE CASCADE,
+ txa_action CHAR(1) NOT NULL,
+ txa_public %s NOT NULL,
+ txa_order INTEGER,
+ eid_from INTEGER NOT NULL,
+ eid_to INTEGER NOT NULL,
+ rtype VARCHAR(256) NOT NULL
+);;
+CREATE INDEX tx_relation_actions_txa_action_idx ON tx_relation_actions(txa_action);;
+CREATE INDEX tx_relation_actions_txa_public_idx ON tx_relation_actions(txa_public);;
+CREATE INDEX tx_relation_actions_eid_from_idx ON tx_relation_actions(eid_from);;
+CREATE INDEX tx_relation_actions_eid_to_idx ON tx_relation_actions(eid_to)
+""" % (typemap['Datetime'],
+ typemap['Boolean'], typemap['Bytes'], typemap['Boolean'])
+for statement in sqls.split(';;'):
+ sql(statement)
--- a/misc/migration/bootstrapmigration_repository.py Wed Mar 24 08:40:00 2010 +0100
+++ b/misc/migration/bootstrapmigration_repository.py Wed Mar 24 10:23:57 2010 +0100
@@ -7,89 +7,93 @@
:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
"""
+from __future__ import with_statement
+
+from cubicweb.server.session import hooks_control
+from cubicweb.server import schemaserial as ss
applcubicwebversion, cubicwebversion = versions_map['cubicweb']
-from cubicweb.server import schemaserial as ss
def _add_relation_definition_no_perms(subjtype, rtype, objtype):
rschema = fsschema.rschema(rtype)
- for query, args in ss.rdef2rql(rschema, subjtype, objtype, groupmap=None):
- rql(query, args, ask_confirm=False)
+ rdef = rschema.rdefs[(subjtype, objtype)]
+ rdef.rtype = schema.rschema(rtype)
+ rdef.subject = schema.eschema(subjtype)
+ rdef.object = schema.eschema(objtype)
+ ss.execschemarql(rql, rdef, ss.rdef2rql(rdef, CSTRMAP, groupmap=None))
commit(ask_confirm=False)
if applcubicwebversion == (3, 6, 0) and cubicwebversion >= (3, 6, 0):
+ CSTRMAP = dict(rql('Any T, X WHERE X is CWConstraintType, X name T',
+ ask_confirm=False))
_add_relation_definition_no_perms('CWAttribute', 'update_permission', 'CWGroup')
_add_relation_definition_no_perms('CWAttribute', 'update_permission', 'RQLExpression')
- session.set_pool()
- session.unsafe_execute('SET X update_permission Y WHERE X is CWAttribute, X add_permission Y')
+ rql('SET X update_permission Y WHERE X is CWAttribute, X add_permission Y')
drop_relation_definition('CWAttribute', 'add_permission', 'CWGroup')
drop_relation_definition('CWAttribute', 'add_permission', 'RQLExpression')
drop_relation_definition('CWAttribute', 'delete_permission', 'CWGroup')
drop_relation_definition('CWAttribute', 'delete_permission', 'RQLExpression')
elif applcubicwebversion < (3, 6, 0) and cubicwebversion >= (3, 6, 0):
+ CSTRMAP = dict(rql('Any T, X WHERE X is CWConstraintType, X name T',
+ ask_confirm=False))
session.set_pool()
- session.execute = session.unsafe_execute
permsdict = ss.deserialize_ertype_permissions(session)
- config.disabled_hooks_categories.add('integrity')
- for rschema in repo.schema.relations():
- rpermsdict = permsdict.get(rschema.eid, {})
- for rdef in rschema.rdefs.values():
- for action in rdef.ACTIONS:
- actperms = []
- for something in rpermsdict.get(action == 'update' and 'add' or action, ()):
- if isinstance(something, tuple):
- actperms.append(rdef.rql_expression(*something))
- else: # group name
- actperms.append(something)
- rdef.set_action_permissions(action, actperms)
- for action in ('read', 'add', 'delete'):
- _add_relation_definition_no_perms('CWRelation', '%s_permission' % action, 'CWGroup')
- _add_relation_definition_no_perms('CWRelation', '%s_permission' % action, 'RQLExpression')
- for action in ('read', 'update'):
- _add_relation_definition_no_perms('CWAttribute', '%s_permission' % action, 'CWGroup')
- _add_relation_definition_no_perms('CWAttribute', '%s_permission' % action, 'RQLExpression')
- for action in ('read', 'add', 'delete'):
- rql('SET X %s_permission Y WHERE X is CWRelation, '
- 'RT %s_permission Y, X relation_type RT, Y is CWGroup' % (action, action))
+ with hooks_control(session, session.HOOKS_ALLOW_ALL, 'integrity'):
+ for rschema in repo.schema.relations():
+ rpermsdict = permsdict.get(rschema.eid, {})
+ for rdef in rschema.rdefs.values():
+ for action in rdef.ACTIONS:
+ actperms = []
+ for something in rpermsdict.get(action == 'update' and 'add' or action, ()):
+ if isinstance(something, tuple):
+ actperms.append(rdef.rql_expression(*something))
+ else: # group name
+ actperms.append(something)
+ rdef.set_action_permissions(action, actperms)
+ for action in ('read', 'add', 'delete'):
+ _add_relation_definition_no_perms('CWRelation', '%s_permission' % action, 'CWGroup')
+ _add_relation_definition_no_perms('CWRelation', '%s_permission' % action, 'RQLExpression')
+ for action in ('read', 'update'):
+ _add_relation_definition_no_perms('CWAttribute', '%s_permission' % action, 'CWGroup')
+ _add_relation_definition_no_perms('CWAttribute', '%s_permission' % action, 'RQLExpression')
+ for action in ('read', 'add', 'delete'):
+ rql('SET X %s_permission Y WHERE X is CWRelation, '
+ 'RT %s_permission Y, X relation_type RT, Y is CWGroup' % (action, action))
+ rql('INSERT RQLExpression Y: Y exprtype YET, Y mainvars YMV, Y expression YEX, '
+ 'X %s_permission Y WHERE X is CWRelation, '
+ 'X relation_type RT, RT %s_permission Y2, Y2 exprtype YET, '
+ 'Y2 mainvars YMV, Y2 expression YEX' % (action, action))
+ rql('SET X read_permission Y WHERE X is CWAttribute, '
+ 'RT read_permission Y, X relation_type RT, Y is CWGroup')
rql('INSERT RQLExpression Y: Y exprtype YET, Y mainvars YMV, Y expression YEX, '
- 'X %s_permission Y WHERE X is CWRelation, '
- 'X relation_type RT, RT %s_permission Y2, Y2 exprtype YET, '
- 'Y2 mainvars YMV, Y2 expression YEX' % (action, action))
- rql('SET X read_permission Y WHERE X is CWAttribute, '
- 'RT read_permission Y, X relation_type RT, Y is CWGroup')
- rql('INSERT RQLExpression Y: Y exprtype YET, Y mainvars YMV, Y expression YEX, '
- 'X read_permission Y WHERE X is CWAttribute, '
- 'X relation_type RT, RT read_permission Y2, Y2 exprtype YET, '
- 'Y2 mainvars YMV, Y2 expression YEX')
- rql('SET X update_permission Y WHERE X is CWAttribute, '
- 'RT add_permission Y, X relation_type RT, Y is CWGroup')
- rql('INSERT RQLExpression Y: Y exprtype YET, Y mainvars YMV, Y expression YEX, '
- 'X update_permission Y WHERE X is CWAttribute, '
- 'X relation_type RT, RT add_permission Y2, Y2 exprtype YET, '
- 'Y2 mainvars YMV, Y2 expression YEX')
- for action in ('read', 'add', 'delete'):
- drop_relation_definition('CWRType', '%s_permission' % action, 'CWGroup', commit=False)
- drop_relation_definition('CWRType', '%s_permission' % action, 'RQLExpression')
- config.disabled_hooks_categories.remove('integrity')
+ 'X read_permission Y WHERE X is CWAttribute, '
+ 'X relation_type RT, RT read_permission Y2, Y2 exprtype YET, '
+ 'Y2 mainvars YMV, Y2 expression YEX')
+ rql('SET X update_permission Y WHERE X is CWAttribute, '
+ 'RT add_permission Y, X relation_type RT, Y is CWGroup')
+ rql('INSERT RQLExpression Y: Y exprtype YET, Y mainvars YMV, Y expression YEX, '
+ 'X update_permission Y WHERE X is CWAttribute, '
+ 'X relation_type RT, RT add_permission Y2, Y2 exprtype YET, '
+ 'Y2 mainvars YMV, Y2 expression YEX')
+ for action in ('read', 'add', 'delete'):
+ drop_relation_definition('CWRType', '%s_permission' % action, 'CWGroup', commit=False)
+ drop_relation_definition('CWRType', '%s_permission' % action, 'RQLExpression')
if applcubicwebversion < (3, 4, 0) and cubicwebversion >= (3, 4, 0):
- session.set_shared_data('do-not-insert-cwuri', True)
- deactivate_verification_hooks()
- add_relation_type('cwuri')
- base_url = session.base_url()
- # use an internal session since some entity might forbid modifications to admin
- isession = repo.internal_session()
- for eid, in rql('Any X', ask_confirm=False):
- type, source, extid = session.describe(eid)
- if source == 'system':
- isession.execute('SET X cwuri %(u)s WHERE X eid %(x)s',
- {'x': eid, 'u': base_url + u'eid/%s' % eid})
- isession.commit()
- reactivate_verification_hooks()
- session.set_shared_data('do-not-insert-cwuri', False)
+ with hooks_control(session, session.HOOKS_ALLOW_ALL, 'integrity'):
+ session.set_shared_data('do-not-insert-cwuri', True)
+ add_relation_type('cwuri')
+ base_url = session.base_url()
+ for eid, in rql('Any X', ask_confirm=False):
+ type, source, extid = session.describe(eid)
+ if source == 'system':
+ rql('SET X cwuri %(u)s WHERE X eid %(x)s',
+ {'x': eid, 'u': base_url + u'eid/%s' % eid})
+        session.commit()
+ session.set_shared_data('do-not-insert-cwuri', False)
if applcubicwebversion < (3, 5, 0) and cubicwebversion >= (3, 5, 0):
# check that migration is not doomed
--- a/misc/migration/postcreate.py Wed Mar 24 08:40:00 2010 +0100
+++ b/misc/migration/postcreate.py Wed Mar 24 10:23:57 2010 +0100
@@ -42,8 +42,8 @@
# need this since we already have at least one user in the database (the default admin)
for user in rql('Any X WHERE X is CWUser').entities():
- session.unsafe_execute('SET X in_state S WHERE X eid %(x)s, S eid %(s)s',
- {'x': user.eid, 's': activated.eid}, 'x')
+ rql('SET X in_state S WHERE X eid %(x)s, S eid %(s)s',
+ {'x': user.eid, 's': activated.eid}, 'x')
# on interactive mode, ask for level 0 persistent options
if interactive_mode:
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/pytestconf.py Wed Mar 24 10:23:57 2010 +0100
@@ -0,0 +1,34 @@
+"""pytest configuration file: we need this to properly remove resources
+cached on test classes, at least until we have proper support for teardown_class
+"""
+import sys
+from os.path import split, splitext
+from logilab.common.pytest import PyTester
+
+from cubicweb.etwist.server import _gc_debug
+
+class CustomPyTester(PyTester):
+ def testfile(self, filename, batchmode=False):
+ try:
+ return super(CustomPyTester, self).testfile(filename, batchmode)
+ finally:
+ modname = splitext(split(filename)[1])[0]
+ try:
+ module = sys.modules[modname]
+ except KeyError:
+ # error during test module import
+ return
+ for cls in vars(module).values():
+ if getattr(cls, '__module__', None) != modname:
+ continue
+ clean_repo_test_cls(cls)
+ #_gc_debug()
+
+def clean_repo_test_cls(cls):
+ if 'repo' in cls.__dict__:
+ if not cls.repo._shutting_down:
+ cls.repo.shutdown()
+ del cls.repo
+ for clsattr in ('cnx', '_orig_cnx', 'config', '_config', 'vreg', 'schema'):
+ if clsattr in cls.__dict__:
+ delattr(cls, clsattr)
--- a/req.py Wed Mar 24 08:40:00 2010 +0100
+++ b/req.py Wed Mar 24 10:23:57 2010 +0100
@@ -7,6 +7,7 @@
"""
__docformat__ = "restructuredtext en"
+from warnings import warn
from urlparse import urlsplit, urlunsplit
from urllib import quote as urlquote, unquote as urlunquote
from datetime import time, datetime, timedelta
@@ -23,6 +24,12 @@
CACHE_REGISTRY = {}
+def _check_cw_unsafe(kwargs):
+ if kwargs.pop('_cw_unsafe', False):
+ warn('[3.7] _cw_unsafe argument is deprecated, now unsafe by '
+ 'default, control it using cw_[read|write]_security.',
+ DeprecationWarning, stacklevel=3)
+
class Cache(dict):
def __init__(self):
super(Cache, self).__init__()
@@ -71,7 +78,8 @@
def get_entity(row, col=0, etype=etype, req=self, rset=rset):
return req.vreg.etype_class(etype)(req, rset, row, col)
rset.get_entity = get_entity
- return self.decorate_rset(rset)
+ rset.req = self
+ return rset
def eid_rset(self, eid, etype=None):
"""return a result set for the given eid without doing actual query
@@ -83,14 +91,17 @@
etype = self.describe(eid)[0]
rset = ResultSet([(eid,)], 'Any X WHERE X eid %(x)s', {'x': eid},
[(etype,)])
- return self.decorate_rset(rset)
+ rset.req = self
+ return rset
def empty_rset(self):
"""return a result set for the given eid without doing actual query
(we have the eid, we can suppose it exists and user has access to the
entity)
"""
- return self.decorate_rset(ResultSet([], 'Any X WHERE X eid -1'))
+ rset = ResultSet([], 'Any X WHERE X eid -1')
+ rset.req = self
+ return rset
def entity_from_eid(self, eid, etype=None):
"""return an entity instance for the given eid. No query is done"""
@@ -111,19 +122,18 @@
# XXX move to CWEntityManager or even better as factory method (unclear
# where yet...)
- def create_entity(self, etype, _cw_unsafe=False, **kwargs):
+ def create_entity(self, etype, **kwargs):
"""add a new entity of the given type
Example (in a shell session):
- c = create_entity('Company', name=u'Logilab')
- create_entity('Person', works_for=c, firstname=u'John', lastname=u'Doe')
+ >>> c = create_entity('Company', name=u'Logilab')
+ >>> create_entity('Person', firstname=u'John', lastname=u'Doe',
+ ... works_for=c)
"""
- if _cw_unsafe:
- execute = self.unsafe_execute
- else:
- execute = self.execute
+ _check_cw_unsafe(kwargs)
+ execute = self.execute
rql = 'INSERT %s X' % etype
relations = []
restrictions = set()
@@ -163,7 +173,7 @@
restr = 'X %s Y' % attr
execute('SET %s WHERE X eid %%(x)s, Y eid IN (%s)' % (
restr, ','.join(str(r.eid) for r in values)),
- {'x': created.eid}, 'x')
+ {'x': created.eid}, 'x', build_descr=False)
return created
def ensure_ro_rql(self, rql):
@@ -301,7 +311,7 @@
userinfo['name'] = "cubicweb"
userinfo['email'] = ""
return userinfo
- user = self.actual_session().user
+ user = self.user
userinfo['login'] = user.login
userinfo['name'] = user.name()
userinfo['email'] = user.get_email()
@@ -402,10 +412,6 @@
"""return the root url of the instance"""
raise NotImplementedError
- def decorate_rset(self, rset):
- """add vreg/req (at least) attributes to the given result set """
- raise NotImplementedError
-
def describe(self, eid):
"""return a tuple (type, sourceuri, extid) for the entity with id <eid>"""
raise NotImplementedError
--- a/rqlrewrite.py Wed Mar 24 08:40:00 2010 +0100
+++ b/rqlrewrite.py Wed Mar 24 10:23:57 2010 +0100
@@ -11,7 +11,7 @@
__docformat__ = "restructuredtext en"
from rql import nodes as n, stmts, TypeResolverException
-
+from yams import BadSchemaDefinition
from logilab.common.graph import has_path
from cubicweb import Unauthorized, typed_eid
@@ -185,7 +185,17 @@
vi['const'] = typed_eid(selectvar) # XXX gae
vi['rhs_rels'] = vi['lhs_rels'] = {}
except ValueError:
- vi['stinfo'] = sti = self.select.defined_vars[selectvar].stinfo
+ try:
+ vi['stinfo'] = sti = self.select.defined_vars[selectvar].stinfo
+ except KeyError:
+ # variable has been moved to a newly inserted subquery
+ # we should insert snippet in that subquery
+ subquery = self.select.aliases[selectvar].query
+ assert len(subquery.children) == 1
+ subselect = subquery.children[0]
+ RQLRewriter(self.session).rewrite(subselect, [(varmap, rqlexprs)],
+ subselect.solutions, self.kwargs)
+ continue
if varexistsmap is None:
vi['rhs_rels'] = dict( (r.r_type, r) for r in sti['rhsrelations'])
vi['lhs_rels'] = dict( (r.r_type, r) for r in sti['relations']
@@ -294,21 +304,40 @@
"""introduce the given snippet in a subquery"""
subselect = stmts.Select()
selectvar = varmap[0]
- subselect.append_selected(n.VariableRef(
- subselect.get_variable(selectvar)))
+ subselectvar = subselect.get_variable(selectvar)
+ subselect.append_selected(n.VariableRef(subselectvar))
+ snippetrqlst = n.Exists(transformedsnippet.copy(subselect))
aliases = [selectvar]
- subselect.add_restriction(transformedsnippet.copy(subselect))
stinfo = self.varinfo['stinfo']
+ need_null_test = False
for rel in stinfo['relations']:
rschema = self.schema.rschema(rel.r_type)
if rschema.final or (rschema.inlined and
- not rel in stinfo['rhsrelations']):
- self.select.remove_node(rel)
- rel.children[0].name = selectvar
+ not rel in stinfo['rhsrelations']):
+ rel.children[0].name = selectvar # XXX explain why
subselect.add_restriction(rel.copy(subselect))
for vref in rel.children[1].iget_nodes(n.VariableRef):
+ if isinstance(vref.variable, n.ColumnAlias):
+ # XXX could probably be handled by generating the subquery
+ # into the detected subquery
+ raise BadSchemaDefinition(
+                        "can't insert security because of the usage of two "
+                        "inlined relations in this query. You should "
+                        "probably at least uninline %s" % rel.r_type)
subselect.append_selected(vref.copy(subselect))
aliases.append(vref.name)
+ self.select.remove_node(rel)
+ # when some inlined relation has to be copied in the subquery,
+ # we need to test that either value is NULL or that the snippet
+ # condition is satisfied
+ if rschema.inlined and rel.optional:
+ need_null_test = True
+ if need_null_test:
+ snippetrqlst = n.Or(
+ n.make_relation(subselectvar, 'is', (None, None), n.Constant,
+ operator='IS'),
+ snippetrqlst)
+ subselect.add_restriction(snippetrqlst)
if self.u_varname:
# generate an identifier for the substitution
argname = subselect.allocate_varname()
--- a/rset.py Wed Mar 24 08:40:00 2010 +0100
+++ b/rset.py Wed Mar 24 10:23:57 2010 +0100
@@ -50,7 +50,6 @@
# .limit method
self.limited = None
# set by the cursor which returned this resultset
- self.vreg = None
self.req = None
# actions cache
self._rsetactions = None
@@ -83,7 +82,7 @@
try:
return self._rsetactions[key]
except KeyError:
- actions = self.vreg['actions'].poss_visible_objects(
+ actions = self.req.vreg['actions'].poss_visible_objects(
self.req, rset=self, **kwargs)
self._rsetactions[key] = actions
return actions
@@ -115,14 +114,16 @@
# method anymore (syt)
rset = ResultSet(self.rows+rset.rows, self.rql, self.args,
self.description +rset.description)
- return self.req.decorate_rset(rset)
+ rset.req = self.req
+ return rset
def copy(self, rows=None, descr=None):
if rows is None:
rows = self.rows[:]
descr = self.description[:]
rset = ResultSet(rows, self.rql, self.args, descr)
- return self.req.decorate_rset(rset)
+ rset.req = self.req
+ return rset
def transformed_rset(self, transformcb):
""" the result set according to a given column types
@@ -258,8 +259,8 @@
# try to get page boundaries from the navigation component
# XXX we should probably not have a ref to this component here (eg in
# cubicweb)
- nav = self.vreg['components'].select_or_none('navigation', self.req,
- rset=self)
+ nav = self.req.vreg['components'].select_or_none('navigation', self.req,
+ rset=self)
if nav:
start, stop = nav.page_boundaries()
rql = self._limit_offset_rql(stop - start, start)
@@ -391,7 +392,7 @@
"""
etype = self.description[row][col]
try:
- eschema = self.vreg.schema.eschema(etype)
+ eschema = self.req.vreg.schema.eschema(etype)
if eschema.final:
raise NotAnEntity(etype)
except KeyError:
@@ -435,8 +436,8 @@
return entity
# build entity instance
etype = self.description[row][col]
- entity = self.vreg['etypes'].etype_class(etype)(req, rset=self,
- row=row, col=col)
+ entity = self.req.vreg['etypes'].etype_class(etype)(req, rset=self,
+ row=row, col=col)
entity.set_eid(eid)
# cache entity
req.set_entity_cache(entity)
@@ -472,7 +473,7 @@
else:
rql = 'Any Y WHERE Y %s X, X eid %s'
rrset = ResultSet([], rql % (attr, entity.eid))
- req.decorate_rset(rrset)
+ rrset.req = req
else:
rrset = self._build_entity(row, outerselidx).as_rset()
entity.set_related_cache(attr, role, rrset)
@@ -489,10 +490,10 @@
rqlst = self._rqlst.copy()
# to avoid transport overhead when pyro is used, the schema has been
# unset from the syntax tree
- rqlst.schema = self.vreg.schema
- self.vreg.rqlhelper.annotate(rqlst)
+ rqlst.schema = self.req.vreg.schema
+ self.req.vreg.rqlhelper.annotate(rqlst)
else:
- rqlst = self.vreg.parse(self.req, self.rql, self.args)
+ rqlst = self.req.vreg.parse(self.req, self.rql, self.args)
return rqlst
@cached
@@ -532,7 +533,7 @@
etype = self.description[row][col]
# final type, find a better one to locate the correct subquery
# (ambiguous if possible)
- eschema = self.vreg.schema.eschema
+ eschema = self.req.vreg.schema.eschema
if eschema(etype).final:
for select in rqlst.children:
try:
--- a/schema.py Wed Mar 24 08:40:00 2010 +0100
+++ b/schema.py Wed Mar 24 10:23:57 2010 +0100
@@ -34,14 +34,15 @@
PURE_VIRTUAL_RTYPES = set(('identity', 'has_text',))
VIRTUAL_RTYPES = set(('eid', 'identity', 'has_text',))
-# set of meta-relations available for every entity types
+# set of meta-relations available for every entity type
META_RTYPES = set((
'owned_by', 'created_by', 'is', 'is_instance_of', 'identity',
'eid', 'creation_date', 'modification_date', 'has_text', 'cwuri',
))
-SYSTEM_RTYPES = set(('require_permission', 'custom_workflow', 'in_state', 'wf_info_for'))
+SYSTEM_RTYPES = set(('require_permission', 'custom_workflow', 'in_state',
+ 'wf_info_for'))
-# set of entity and relation types used to build the schema
+# set of entity and relation types used to build the schema
SCHEMA_TYPES = set((
'CWEType', 'CWRType', 'CWAttribute', 'CWRelation',
'CWConstraint', 'CWConstraintType', 'RQLExpression',
@@ -704,7 +705,7 @@
rql = 'Any %s WHERE %s' % (self.mainvars, restriction)
if self.distinct_query:
rql = 'DISTINCT ' + rql
- return session.unsafe_execute(rql, args, ck, build_descr=False)
+ return session.execute(rql, args, ck, build_descr=False)
class RQLConstraint(RepoEnforcedRQLConstraintMixIn, RQLVocabularyConstraint):
@@ -830,13 +831,10 @@
return True
return False
if keyarg is None:
- # on the server side, use unsafe_execute, but this is not available
- # on the client side (session is actually a request)
- execute = getattr(session, 'unsafe_execute', session.execute)
kwargs.setdefault('u', session.user.eid)
cachekey = kwargs.keys()
try:
- rset = execute(rql, kwargs, cachekey, build_descr=True)
+ rset = session.execute(rql, kwargs, cachekey, build_descr=True)
except NotImplementedError:
self.critical('cant check rql expression, unsupported rql %s', rql)
if self.eid is not None:
@@ -1084,10 +1082,10 @@
elif form is not None:
cw = form._cw
if cw is not None:
- if hasattr(cw, 'is_super_session'):
+ if hasattr(cw, 'write_security'): # test it's a session and not a request
# cw is a server session
- hasperm = cw.is_super_session or \
- not cw.vreg.config.is_hook_category_activated('integrity') or \
+ hasperm = not cw.write_security or \
+ not cw.is_hook_category_activated('integrity') or \
cw.user.has_permission(PERM_USE_TEMPLATE_FORMAT)
else:
hasperm = cw.user.has_permission(PERM_USE_TEMPLATE_FORMAT)
--- a/selectors.py Wed Mar 24 08:40:00 2010 +0100
+++ b/selectors.py Wed Mar 24 10:23:57 2010 +0100
@@ -23,17 +23,15 @@
You can log the selectors involved for *calendar* by replacing the line
above by::
- # in Python2.5
from cubicweb.selectors import traced_selection
with traced_selection():
self.view('calendar', myrset)
- # in Python2.4
- from cubicweb import selectors
- selectors.TRACED_OIDS = ('calendar',)
- self.view('calendar', myrset)
- selectors.TRACED_OIDS = ()
+With python 2.5, think to add:
+ from __future__ import with_statement
+
+at the top of your module.
:organization: Logilab
:copyright: 2001-2010 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
--- a/server/__init__.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/__init__.py Wed Mar 24 10:23:57 2010 +0100
@@ -8,6 +8,8 @@
:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
"""
+from __future__ import with_statement
+
__docformat__ = "restructuredtext en"
import sys
@@ -148,7 +150,7 @@
schemasql = sqlschema(schema, driver)
#skip_entities=[str(e) for e in schema.entities()
# if not repo.system_source.support_entity(str(e))])
- sqlexec(schemasql, execute, pbtitle=_title)
+ sqlexec(schemasql, execute, pbtitle=_title, delimiter=';;')
sqlcursor.close()
sqlcnx.commit()
sqlcnx.close()
@@ -197,6 +199,7 @@
cnx.commit()
cnx.close()
session.close()
+ repo.shutdown()
# restore initial configuration
config.creating = False
config.read_instance_schema = read_instance_schema
@@ -208,33 +211,30 @@
def initialize_schema(config, schema, mhandler, event='create'):
from cubicweb.server.schemaserial import serialize_schema
- # deactivate every hooks but those responsible to set metadata
- # so, NO INTEGRITY CHECKS are done, to have quicker db creation
- oldmode = config.set_hooks_mode(config.DENY_ALL)
- changes = config.enable_hook_category('metadata')
+ from cubicweb.server.session import hooks_control
+ session = mhandler.session
paths = [p for p in config.cubes_path() + [config.apphome]
if exists(join(p, 'migration'))]
- # execute cubicweb's pre<event> script
- mhandler.exec_event_script('pre%s' % event)
- # execute cubes pre<event> script if any
- for path in reversed(paths):
- mhandler.exec_event_script('pre%s' % event, path)
- # enter instance'schema into the database
- mhandler.session.set_pool()
- serialize_schema(mhandler.session, schema)
- # execute cubicweb's post<event> script
- mhandler.exec_event_script('post%s' % event)
- # execute cubes'post<event> script if any
- for path in reversed(paths):
- mhandler.exec_event_script('post%s' % event, path)
- # restore hooks config
- if changes:
- config.disable_hook_category(changes)
- config.set_hooks_mode(oldmode)
+ # deactivate every hooks but those responsible to set metadata
+ # so, NO INTEGRITY CHECKS are done, to have quicker db creation
+ with hooks_control(session, session.HOOKS_DENY_ALL, 'metadata'):
+ # execute cubicweb's pre<event> script
+ mhandler.exec_event_script('pre%s' % event)
+ # execute cubes pre<event> script if any
+ for path in reversed(paths):
+ mhandler.exec_event_script('pre%s' % event, path)
+ # enter instance'schema into the database
+ session.set_pool()
+ serialize_schema(session, schema)
+ # execute cubicweb's post<event> script
+ mhandler.exec_event_script('post%s' % event)
+ # execute cubes'post<event> script if any
+ for path in reversed(paths):
+ mhandler.exec_event_script('post%s' % event, path)
-# sqlite'stored procedures have to be registered at connexion opening time
-SQL_CONNECT_HOOKS = {}
+# sqlite'stored procedures have to be registered at connection opening time
+from logilab.database import SQL_CONNECT_HOOKS
# add to this set relations which should have their add security checking done
# *BEFORE* adding the actual relation (done after by default)
--- a/server/checkintegrity.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/checkintegrity.py Wed Mar 24 10:23:57 2010 +0100
@@ -6,6 +6,8 @@
:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
"""
+from __future__ import with_statement
+
__docformat__ = "restructuredtext en"
import sys
@@ -15,6 +17,7 @@
from cubicweb.schema import PURE_VIRTUAL_RTYPES
from cubicweb.server.sqlutils import SQL_PREFIX
+from cubicweb.server.session import security_enabled
def has_eid(sqlcursor, eid, eids):
"""return true if the eid is a valid eid"""
@@ -70,15 +73,9 @@
# to be updated due to the reindexation
repo = session.repo
cursor = session.pool['system']
- if not repo.system_source.indexer.has_fti_table(cursor):
- from indexer import get_indexer
+ if not repo.system_source.dbhelper.has_fti_table(cursor):
print 'no text index table'
- indexer = get_indexer(repo.system_source.dbdriver)
- # XXX indexer.init_fti(cursor) once index 0.7 is out
- indexer.init_extensions(cursor)
- cursor.execute(indexer.sql_init_fti())
- repo.config.disabled_hooks_categories.add('metadata')
- repo.config.disabled_hooks_categories.add('integrity')
+ dbhelper.init_fti(cursor)
repo.system_source.do_fti = True # ensure full-text indexation is activated
etypes = set()
for eschema in schema.entities():
@@ -94,9 +91,6 @@
if withpb:
pb = ProgressBar(len(etypes) + 1)
# first monkey patch Entity.check to disable validation
- from cubicweb.entity import Entity
- _check = Entity.check
- Entity.check = lambda self, creation=False: True
# clear fti table first
session.system_sql('DELETE FROM %s' % session.repo.system_source.dbhelper.fti_table)
if withpb:
@@ -106,14 +100,9 @@
source = repo.system_source
for eschema in etypes:
for entity in session.execute('Any X WHERE X is %s' % eschema).entities():
- source.fti_unindex_entity(session, entity.eid)
source.fti_index_entity(session, entity)
if withpb:
pb.update()
- # restore Entity.check
- Entity.check = _check
- repo.config.disabled_hooks_categories.remove('metadata')
- repo.config.disabled_hooks_categories.remove('integrity')
def check_schema(schema, session, eids, fix=1):
@@ -291,9 +280,10 @@
# yo, launch checks
if checks:
eids_cache = {}
- for check in checks:
- check_func = globals()['check_%s' % check]
- check_func(repo.schema, session, eids_cache, fix=fix)
+ with security_enabled(session, read=False): # ensure no read security
+ for check in checks:
+ check_func = globals()['check_%s' % check]
+ check_func(repo.schema, session, eids_cache, fix=fix)
if fix:
cnx.commit()
else:
--- a/server/hook.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/hook.py Wed Mar 24 10:23:57 2010 +0100
@@ -33,6 +33,8 @@
:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
"""
+from __future__ import with_statement
+
__docformat__ = "restructuredtext en"
from warnings import warn
@@ -47,7 +49,7 @@
from cubicweb.selectors import (objectify_selector, lltrace, ExpectedValueSelector,
implements)
from cubicweb.appobject import AppObject
-
+from cubicweb.server.session import security_enabled
ENTITIES_HOOKS = set(('before_add_entity', 'after_add_entity',
'before_update_entity', 'after_update_entity',
@@ -76,13 +78,20 @@
event, obj.__module__, obj.__name__))
super(HooksRegistry, self).register(obj, **kwargs)
- def call_hooks(self, event, req=None, **kwargs):
+ def call_hooks(self, event, session=None, **kwargs):
kwargs['event'] = event
- for hook in sorted(self.possible_objects(req, **kwargs), key=lambda x: x.order):
- if hook.enabled:
+ if session is None:
+ for hook in sorted(self.possible_objects(session, **kwargs),
+ key=lambda x: x.order):
hook()
- else:
- warn('[3.6] %s: enabled is deprecated' % hook.__class__)
+ else:
+ # by default, hooks are executed with security turned off
+ with security_enabled(session, read=False):
+ hooks = sorted(self.possible_objects(session, **kwargs),
+ key=lambda x: x.order)
+ with security_enabled(session, write=False):
+ for hook in hooks:
+ hook()
VRegistry.REGISTRY_FACTORY['hooks'] = HooksRegistry
@@ -104,6 +113,14 @@
@objectify_selector
@lltrace
+def _bw_is_enabled(cls, req, **kwargs):
+ if cls.enabled:
+ return 1
+ warn('[3.6] %s: enabled is deprecated' % cls)
+ return 0
+
+@objectify_selector
+@lltrace
def match_event(cls, req, **kwargs):
if kwargs.get('event') in cls.events:
return 1
@@ -113,19 +130,15 @@
@lltrace
def enabled_category(cls, req, **kwargs):
if req is None:
- # server startup / shutdown event
- config = kwargs['repo'].config
- else:
- config = req.vreg.config
- return config.is_hook_activated(cls)
+ return True # XXX how to deactivate server startup / shutdown event
+ return req.is_hook_activated(cls)
@objectify_selector
@lltrace
-def regular_session(cls, req, **kwargs):
- if req is None or req.is_super_session:
- return 0
- return 1
-
+def from_dbapi_query(cls, req, **kwargs):
+ if req.running_dbapi_query:
+ return 1
+ return 0
class rechain(object):
def __init__(self, *iterators):
@@ -178,7 +191,7 @@
class Hook(AppObject):
__registry__ = 'hooks'
- __select__ = match_event() & enabled_category()
+ __select__ = match_event() & enabled_category() & _bw_is_enabled()
# set this in derivated classes
events = None
category = None
@@ -263,7 +276,7 @@
else:
assert self.rtype in self.object_relations
meid, seid = self.eidto, self.eidfrom
- self._cw.unsafe_execute(
+ self._cw.execute(
'SET E %s P WHERE X %s P, X eid %%(x)s, E eid %%(e)s, NOT E %s P'\
% (self.main_rtype, self.main_rtype, self.main_rtype),
{'x': meid, 'e': seid}, ('x', 'e'))
@@ -281,7 +294,7 @@
def __call__(self):
eschema = self._cw.vreg.schema.eschema(self._cw.describe(self.eidfrom)[0])
- execute = self._cw.unsafe_execute
+ execute = self._cw.execute
for rel in self.subject_relations:
if rel in eschema.subjrels:
execute('SET R %s P WHERE X eid %%(x)s, P eid %%(p)s, '
@@ -306,7 +319,7 @@
def __call__(self):
eschema = self._cw.vreg.schema.eschema(self._cw.describe(self.eidfrom)[0])
- execute = self._cw.unsafe_execute
+ execute = self._cw.execute
for rel in self.subject_relations:
if rel in eschema.subjrels:
execute('DELETE R %s P WHERE X eid %%(x)s, P eid %%(p)s, '
@@ -510,6 +523,6 @@
class RQLPrecommitOperation(Operation):
def precommit_event(self):
- execute = self.session.unsafe_execute
+ execute = self.session.execute
for rql in self.rqls:
execute(*rql)
--- a/server/migractions.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/migractions.py Wed Mar 24 10:23:57 2010 +0100
@@ -15,6 +15,8 @@
:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
"""
+from __future__ import with_statement
+
__docformat__ = "restructuredtext en"
import sys
@@ -25,10 +27,12 @@
import os.path as osp
from datetime import datetime
from glob import glob
+from copy import copy
from warnings import warn
from logilab.common.deprecation import deprecated
from logilab.common.decorators import cached, clear_cache
+from logilab.common.testlib import mock_object
from yams.constraints import SizeConstraint
from yams.schema2sql import eschema2sql, rschema2sql
@@ -38,7 +42,7 @@
CubicWebRelationSchema, order_eschemas)
from cubicweb.dbapi import get_repository, repo_connect
from cubicweb.migration import MigrationHelper, yes
-
+from cubicweb.server.session import hooks_control
try:
from cubicweb.server import SOURCE_TYPES, schemaserial as ss
from cubicweb.server.utils import manager_userpasswd, ask_source_config
@@ -94,7 +98,9 @@
self.backup_database()
elif options.backup_db:
self.backup_database(askconfirm=False)
- super(ServerMigrationHelper, self).migrate(vcconf, toupgrade, options)
+ # disable notification during migration
+ with hooks_control(self.session, self.session.HOOKS_ALLOW_ALL, 'notification'):
+ super(ServerMigrationHelper, self).migrate(vcconf, toupgrade, options)
def cmd_process_script(self, migrscript, funcname=None, *args, **kwargs):
"""execute a migration script
@@ -240,21 +246,30 @@
@property
def session(self):
if self.config is not None:
- return self.repo._get_session(self.cnx.sessionid)
+ session = self.repo._get_session(self.cnx.sessionid)
+ if session.pool is None:
+ session.set_read_security(False)
+ session.set_write_security(False)
+ session.set_pool()
+ return session
# no access to session on remote instance
return None
def commit(self):
if hasattr(self, '_cnx'):
self._cnx.commit()
+ if self.session:
+ self.session.set_pool()
def rollback(self):
if hasattr(self, '_cnx'):
self._cnx.rollback()
+ if self.session:
+ self.session.set_pool()
def rqlexecall(self, rqliter, cachekey=None, ask_confirm=True):
for rql, kwargs in rqliter:
- self.rqlexec(rql, kwargs, cachekey, ask_confirm)
+ self.rqlexec(rql, kwargs, cachekey, ask_confirm=ask_confirm)
@cached
def _create_context(self):
@@ -282,6 +297,11 @@
"""cached group mapping"""
return ss.group_mapping(self._cw)
+ @cached
+ def cstrtype_mapping(self):
+ """cached constraint types mapping"""
+ return ss.cstrtype_mapping(self._cw)
+
def exec_event_script(self, event, cubepath=None, funcname=None,
*args, **kwargs):
if cubepath:
@@ -305,7 +325,6 @@
self.cmd_reactivate_verification_hooks()
def install_custom_sql_scripts(self, directory, driver):
- self.session.set_pool() # ensure pool is set
for fpath in glob(osp.join(directory, '*.sql.%s' % driver)):
newname = osp.basename(fpath).replace('.sql.%s' % driver,
'.%s.sql' % driver)
@@ -399,14 +418,17 @@
return
self._synchronized.add(rtype)
rschema = self.fs_schema.rschema(rtype)
+ reporschema = self.repo.schema.rschema(rtype)
if syncprops:
- self.rqlexecall(ss.updaterschema2rql(rschema),
+ assert reporschema.eid, reporschema
+ self.rqlexecall(ss.updaterschema2rql(rschema, reporschema.eid),
ask_confirm=self.verbosity>=2)
if syncrdefs:
- reporschema = self.repo.schema.rschema(rtype)
for subj, obj in rschema.rdefs:
if (subj, obj) not in reporschema.rdefs:
continue
+ if rschema in VIRTUAL_RTYPES:
+ continue
self._synchronize_rdef_schema(subj, rschema, obj,
syncprops=syncprops,
syncperms=syncperms)
@@ -439,9 +461,11 @@
'Y is CWEType, Y name %(y)s',
{'x': str(repoeschema), 'y': str(espschema)},
ask_confirm=False)
- self.rqlexecall(ss.updateeschema2rql(eschema),
+ self.rqlexecall(ss.updateeschema2rql(eschema, repoeschema.eid),
ask_confirm=self.verbosity >= 2)
for rschema, targettypes, role in eschema.relation_definitions(True):
+ if rschema in VIRTUAL_RTYPES:
+ continue
if role == 'subject':
if not rschema in repoeschema.subject_relations():
continue
@@ -483,7 +507,7 @@
confirm = self.verbosity >= 2
if syncprops:
# properties
- self.rqlexecall(ss.updaterdef2rql(rschema, subjtype, objtype),
+ self.rqlexecall(ss.updaterdef2rql(rdef, repordef.eid),
ask_confirm=confirm)
# constraints
newconstraints = list(rdef.constraints)
@@ -509,10 +533,10 @@
{'x': cstr.eid, 'v': value}, 'x',
ask_confirm=confirm)
# 2. add new constraints
- for newcstr in newconstraints:
- self.rqlexecall(ss.constraint2rql(rschema, subjtype, objtype,
- newcstr),
- ask_confirm=confirm)
+ cstrtype_map = self.cstrtype_mapping()
+ self.rqlexecall(ss.constraints2rql(cstrtype_map, newconstraints,
+ repordef.eid),
+ ask_confirm=confirm)
if syncperms and not rschema in VIRTUAL_RTYPES:
self._synchronize_permissions(rdef, repordef.eid)
@@ -673,18 +697,20 @@
targeted type is known
"""
instschema = self.repo.schema
- if etype in instschema:
- # XXX (syt) plz explain: if we're adding an entity type, it should
- # not be there...
- eschema = instschema[etype]
- if eschema.final:
- instschema.del_entity_type(etype)
- else:
- eschema = self.fs_schema.eschema(etype)
+ assert not etype in instschema
+ # # XXX (syt) plz explain: if we're adding an entity type, it should
+ # # not be there...
+ # eschema = instschema[etype]
+ # if eschema.final:
+ # instschema.del_entity_type(etype)
+ # else:
+ eschema = self.fs_schema.eschema(etype)
confirm = self.verbosity >= 2
groupmap = self.group_mapping()
+ cstrtypemap = self.cstrtype_mapping()
# register the entity into CWEType
- self.rqlexecall(ss.eschema2rql(eschema, groupmap), ask_confirm=confirm)
+ execute = self._cw.execute
+ ss.execschemarql(execute, eschema, ss.eschema2rql(eschema, groupmap))
# add specializes relation if needed
self.rqlexecall(ss.eschemaspecialize2rql(eschema), ask_confirm=confirm)
# register entity's attributes
@@ -697,9 +723,8 @@
# actually in the schema
self.cmd_add_relation_type(rschema.type, False, commit=True)
# register relation definition
- self.rqlexecall(ss.rdef2rql(rschema, etype, attrschema.type,
- groupmap=groupmap),
- ask_confirm=confirm)
+ rdef = self._get_rdef(rschema, eschema, eschema.destination(rschema))
+ ss.execschemarql(execute, rdef, ss.rdef2rql(rdef, cstrtypemap, groupmap),)
# take care to newly introduced base class
# XXX some part of this should probably be under the "if auto" block
for spschema in eschema.specialized_by(recursive=False):
@@ -759,10 +784,12 @@
# remember this two avoid adding twice non symmetric relation
# such as "Emailthread forked_from Emailthread"
added.append((etype, rschema.type, targettype))
- self.rqlexecall(ss.rdef2rql(rschema, etype, targettype,
- groupmap=groupmap),
- ask_confirm=confirm)
+ rdef = self._get_rdef(rschema, eschema, targetschema)
+ ss.execschemarql(execute, rdef,
+ ss.rdef2rql(rdef, cstrtypemap, groupmap))
for rschema in eschema.object_relations():
+ if rschema.type in META_RTYPES:
+ continue
rtypeadded = rschema.type in instschema or rschema.type in added
for targetschema in rschema.subjects(etype):
# ignore relations where the targeted type is not in the
@@ -780,9 +807,9 @@
elif (targettype, rschema.type, etype) in added:
continue
# register relation definition
- self.rqlexecall(ss.rdef2rql(rschema, targettype, etype,
- groupmap=groupmap),
- ask_confirm=confirm)
+ rdef = self._get_rdef(rschema, targetschema, eschema)
+ ss.execschemarql(execute, rdef,
+ ss.rdef2rql(rdef, cstrtypemap, groupmap))
if commit:
self.commit()
@@ -821,15 +848,23 @@
committing depends on the `commit` argument value).
"""
+ reposchema = self.repo.schema
rschema = self.fs_schema.rschema(rtype)
+ execute = self._cw.execute
# register the relation into CWRType and insert necessary relation
# definitions
- self.rqlexecall(ss.rschema2rql(rschema, addrdef=False),
- ask_confirm=self.verbosity>=2)
+ ss.execschemarql(execute, rschema, ss.rschema2rql(rschema, addrdef=False))
if addrdef:
self.commit()
- self.rqlexecall(ss.rdef2rql(rschema, groupmap=self.group_mapping()),
- ask_confirm=self.verbosity>=2)
+ gmap = self.group_mapping()
+ cmap = self.cstrtype_mapping()
+ for rdef in rschema.rdefs.itervalues():
+ if not (reposchema.has_entity(rdef.subject)
+ and reposchema.has_entity(rdef.object)):
+ continue
+ self._set_rdef_eid(rdef)
+ ss.execschemarql(execute, rdef,
+ ss.rdef2rql(rdef, cmap, gmap))
if rtype in META_RTYPES:
# if the relation is in META_RTYPES, ensure we're adding it for
# all entity types *in the persistent schema*, not only those in
@@ -838,15 +873,14 @@
if not etype in self.fs_schema:
# get sample object type and rproperties
objtypes = rschema.objects()
- assert len(objtypes) == 1
+ assert len(objtypes) == 1, objtypes
objtype = objtypes[0]
- props = rschema.rproperties(
- rschema.subjects(objtype)[0], objtype)
- assert props
- self.rqlexecall(ss.rdef2rql(rschema, etype, objtype, props,
- groupmap=self.group_mapping()),
- ask_confirm=self.verbosity>=2)
-
+ rdef = copy(rschema.rdef(rschema.subjects(objtype)[0], objtype))
+ rdef.subject = etype
+ rdef.rtype = self.repo.schema.rschema(rschema)
+ rdef.object = self.repo.schema.rschema(objtype)
+ ss.execschemarql(execute, rdef,
+ ss.rdef2rql(rdef, cmap, gmap))
if commit:
self.commit()
@@ -876,12 +910,25 @@
rschema = self.fs_schema.rschema(rtype)
if not rtype in self.repo.schema:
self.cmd_add_relation_type(rtype, addrdef=False, commit=True)
- self.rqlexecall(ss.rdef2rql(rschema, subjtype, objtype,
- groupmap=self.group_mapping()),
- ask_confirm=self.verbosity>=2)
+ execute = self._cw.execute
+ rdef = self._get_rdef(rschema, subjtype, objtype)
+ ss.execschemarql(execute, rdef,
+ ss.rdef2rql(rdef, self.cstrtype_mapping(),
+ self.group_mapping()))
if commit:
self.commit()
+ def _get_rdef(self, rschema, subjtype, objtype):
+ return self._set_rdef_eid(rschema.rdefs[(subjtype, objtype)])
+
+ def _set_rdef_eid(self, rdef):
+ for attr in ('rtype', 'subject', 'object'):
+ schemaobj = getattr(rdef, attr)
+ if getattr(schemaobj, 'eid', None) is None:
+ schemaobj.eid = self.repo.schema[schemaobj].eid
+ assert schemaobj.eid is not None
+ return rdef
+
def cmd_drop_relation_definition(self, subjtype, rtype, objtype, commit=True):
"""unregister an existing relation definition"""
rschema = self.repo.schema.rschema(rtype)
@@ -1139,7 +1186,6 @@
level actions
"""
if not ask_confirm or self.confirm('Execute sql: %s ?' % sql):
- self.session.set_pool() # ensure pool is set
try:
cu = self.session.system_sql(sql, args)
except:
@@ -1153,15 +1199,13 @@
# no result to fetch
return
- def rqlexec(self, rql, kwargs=None, cachekey=None, ask_confirm=True):
+ def rqlexec(self, rql, kwargs=None, cachekey=None, build_descr=True,
+ ask_confirm=True):
"""rql action"""
if not isinstance(rql, (tuple, list)):
rql = ( (rql, kwargs), )
res = None
- try:
- execute = self._cw.unsafe_execute
- except AttributeError:
- execute = self._cw.execute
+ execute = self._cw.execute
for rql, kwargs in rql:
if kwargs:
msg = '%s (%s)' % (rql, kwargs)
@@ -1169,7 +1213,7 @@
msg = rql
if not ask_confirm or self.confirm('Execute rql: %s ?' % msg):
try:
- res = execute(rql, kwargs, cachekey)
+ res = execute(rql, kwargs, cachekey, build_descr=build_descr)
except Exception, ex:
if self.confirm('Error: %s\nabort?' % ex):
raise
@@ -1178,12 +1222,6 @@
def rqliter(self, rql, kwargs=None, ask_confirm=True):
return ForRqlIterator(self, rql, None, ask_confirm)
- def cmd_deactivate_verification_hooks(self):
- self.config.disabled_hooks_categories.add('integrity')
-
- def cmd_reactivate_verification_hooks(self):
- self.config.disabled_hooks_categories.remove('integrity')
-
# broken db commands ######################################################
def cmd_change_attribute_type(self, etype, attr, newtype, commit=True):
@@ -1234,6 +1272,14 @@
if commit:
self.commit()
+ @deprecated("[3.7] use session.disable_hook_categories('integrity')")
+ def cmd_deactivate_verification_hooks(self):
+ self.session.disable_hook_categories('integrity')
+
+ @deprecated("[3.7] use session.enable_hook_categories('integrity')")
+ def cmd_reactivate_verification_hooks(self):
+ self.session.enable_hook_categories('integrity')
+
class ForRqlIterator:
"""specific rql iterator to make the loop skipable"""
--- a/server/msplanner.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/msplanner.py Wed Mar 24 10:23:57 2010 +0100
@@ -313,8 +313,6 @@
if varobj.stinfo['uidrels']:
vrels = varobj.stinfo['relations'] - varobj.stinfo['uidrels']
for rel in varobj.stinfo['uidrels']:
- if rel.neged(strict=True) or rel.operator() != '=':
- continue
for const in rel.children[1].get_nodes(Constant):
eid = const.eval(self.plan.args)
source = self._session.source_from_eid(eid)
--- a/server/querier.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/querier.py Wed Mar 24 10:23:57 2010 +0100
@@ -6,6 +6,8 @@
:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
"""
+from __future__ import with_statement
+
__docformat__ = "restructuredtext en"
from itertools import repeat
@@ -22,9 +24,8 @@
from cubicweb.server.utils import cleanup_solutions
from cubicweb.server.rqlannotation import SQLGenAnnotator, set_qdata
-from cubicweb.server.ssplanner import add_types_restriction
-
-READ_ONLY_RTYPES = set(('eid', 'has_text', 'is', 'is_instance_of', 'identity'))
+from cubicweb.server.ssplanner import READ_ONLY_RTYPES, add_types_restriction
+from cubicweb.server.session import security_enabled
def empty_rset(rql, args, rqlst=None):
"""build an empty result set object"""
@@ -41,17 +42,6 @@
# permission utilities ########################################################
-def var_kwargs(restriction, args):
- varkwargs = {}
- for rel in restriction.iget_nodes(Relation):
- cmp = rel.children[1]
- if rel.r_type == 'eid' and cmp.operator == '=' and \
- not rel.neged(strict=True) and \
- isinstance(cmp.children[0], Constant) and \
- cmp.children[0].type == 'Substitute':
- varkwargs[rel.children[0].name] = typed_eid(cmp.children[0].eval(args))
- return varkwargs
-
def check_no_password_selected(rqlst):
"""check that Password entities are not selected"""
for solution in rqlst.solutions:
@@ -79,32 +69,31 @@
rdef = rschema.rdef(solution[rel.children[0].name],
solution[rel.children[1].children[0].name])
if not user.matching_groups(rdef.get_groups('read')):
+ # XXX rqlexpr not allowed
raise Unauthorized('read', rel.r_type)
localchecks = {}
# iterate on defined_vars and not on solutions to ignore column aliases
for varname in rqlst.defined_vars:
- etype = solution[varname]
- eschema = schema.eschema(etype)
+ eschema = schema.eschema(solution[varname])
if eschema.final:
continue
if not user.matching_groups(eschema.get_groups('read')):
erqlexprs = eschema.get_rqlexprs('read')
if not erqlexprs:
- ex = Unauthorized('read', etype)
+ ex = Unauthorized('read', solution[varname])
ex.var = varname
raise ex
- #assert len(erqlexprs) == 1
- localchecks[varname] = tuple(erqlexprs)
+ localchecks[varname] = erqlexprs
return localchecks
-def noinvariant_vars(restricted, select, nbtrees):
+def add_noinvariant(noinvariant, restricted, select, nbtrees):
# a variable can actually be invariant if it has not been restricted for
# security reason or if security assertion hasn't modified the possible
# solutions for the query
if nbtrees != 1:
for vname in restricted:
try:
- yield select.defined_vars[vname]
+ noinvariant.add(select.defined_vars[vname])
except KeyError:
# this is an alias
continue
@@ -116,7 +105,7 @@
# this is an alias
continue
if len(var.stinfo['possibletypes']) != 1:
- yield var
+ noinvariant.add(var)
def _expand_selection(terms, selected, aliases, select, newselect):
for term in terms:
@@ -200,12 +189,35 @@
return rqlst to actually execute
"""
- noinvariant = set()
- if security and not self.session.is_super_session:
- self._insert_security(union, noinvariant)
- self.rqlhelper.simplify(union)
- self.sqlannotate(union)
- set_qdata(self.schema.rschema, union, noinvariant)
+ cached = None
+ if security and self.session.read_security:
+ # ensure security is turned of when security is inserted,
+ # else we may loop for ever...
+ if self.session.transaction_data.get('security-rqlst-cache'):
+ key = self.cache_key
+ else:
+ key = None
+ if key is not None and key in self.session.transaction_data:
+ cachedunion, args = self.session.transaction_data[key]
+ union.children[:] = []
+ for select in cachedunion.children:
+ union.append(select)
+ union.has_text_query = cachedunion.has_text_query
+ args.update(self.args)
+ self.args = args
+ cached = True
+ else:
+ noinvariant = set()
+ with security_enabled(self.session, read=False):
+ self._insert_security(union, noinvariant)
+ if key is not None:
+ self.session.transaction_data[key] = (union, self.args)
+ else:
+ noinvariant = ()
+ if cached is None:
+ self.rqlhelper.simplify(union)
+ self.sqlannotate(union)
+ set_qdata(self.schema.rschema, union, noinvariant)
if union.has_text_query:
self.cache_key = None
@@ -273,14 +285,13 @@
myrqlst = select.copy(solutions=lchecksolutions)
myunion.append(myrqlst)
# in-place rewrite + annotation / simplification
- lcheckdef = [((varmap, 'X'), rqlexprs)
- for varmap, rqlexprs in lcheckdef]
+ lcheckdef = [((var, 'X'), rqlexprs) for var, rqlexprs in lcheckdef]
rewrite(myrqlst, lcheckdef, lchecksolutions, self.args)
- noinvariant.update(noinvariant_vars(restricted, myrqlst, nbtrees))
+ add_noinvariant(noinvariant, restricted, myrqlst, nbtrees)
if () in localchecks:
select.set_possible_types(localchecks[()])
add_types_restriction(self.schema, select)
- noinvariant.update(noinvariant_vars(restricted, select, nbtrees))
+ add_noinvariant(noinvariant, restricted, select, nbtrees)
def _check_permissions(self, rqlst):
"""return a dict defining "local checks", e.g. RQLExpression defined in
@@ -300,17 +311,26 @@
note: rqlst should not have been simplified at this point
"""
- assert not self.session.is_super_session
- user = self.session.user
+ session = self.session
+ user = session.user
schema = self.schema
msgs = []
+ neweids = session.transaction_data.get('neweids', ())
+ varkwargs = {}
+ if not session.transaction_data.get('security-rqlst-cache'):
+ for var in rqlst.defined_vars.itervalues():
+ for rel in var.stinfo['uidrels']:
+ const = rel.children[1].children[0]
+ try:
+ varkwargs[var.name] = typed_eid(const.eval(self.args))
+ break
+ except AttributeError:
+ #from rql.nodes import Function
+ #assert isinstance(const, Function)
+ # X eid IN(...)
+ pass
# dictionnary of variables restricted for security reason
localchecks = {}
- if rqlst.where is not None:
- varkwargs = var_kwargs(rqlst.where, self.args)
- neweids = self.session.transaction_data.get('neweids', ())
- else:
- varkwargs = None
restricted_vars = set()
newsolutions = []
for solution in rqlst.solutions:
@@ -323,21 +343,20 @@
LOGGER.info(msg)
else:
newsolutions.append(solution)
- if varkwargs:
- # try to benefit of rqlexpr.check cache for entities which
- # are specified by eid in query'args
- for varname, eid in varkwargs.iteritems():
- try:
- rqlexprs = localcheck.pop(varname)
- except KeyError:
- continue
- if eid in neweids:
- continue
- for rqlexpr in rqlexprs:
- if rqlexpr.check(self.session, eid):
- break
- else:
- raise Unauthorized()
+ # try to benefit of rqlexpr.check cache for entities which
+ # are specified by eid in query'args
+ for varname, eid in varkwargs.iteritems():
+ try:
+ rqlexprs = localcheck.pop(varname)
+ except KeyError:
+ continue
+ if eid in neweids:
+ continue
+ for rqlexpr in rqlexprs:
+ if rqlexpr.check(session, eid):
+ break
+ else:
+ raise Unauthorized()
restricted_vars.update(localcheck)
localchecks.setdefault(tuple(localcheck.iteritems()), []).append(solution)
# raise Unautorized exception if the user can't access to any solution
@@ -377,39 +396,6 @@
self._r_obj_index = {}
self._expanded_r_defs = {}
- def relation_definitions(self, rqlst, to_build):
- """add constant values to entity def, mark variables to be selected
- """
- to_select = {}
- for relation in rqlst.main_relations:
- lhs, rhs = relation.get_variable_parts()
- rtype = relation.r_type
- if rtype in READ_ONLY_RTYPES:
- raise QueryError("can't assign to %s" % rtype)
- try:
- edef = to_build[str(lhs)]
- except KeyError:
- # lhs var is not to build, should be selected and added as an
- # object relation
- edef = to_build[str(rhs)]
- to_select.setdefault(edef, []).append((rtype, lhs, 1))
- else:
- if isinstance(rhs, Constant) and not rhs.uid:
- # add constant values to entity def
- value = rhs.eval(self.args)
- eschema = edef.e_schema
- attrtype = eschema.subjrels[rtype].objects(eschema)[0]
- if attrtype == 'Password' and isinstance(value, unicode):
- value = value.encode('UTF8')
- edef[rtype] = value
- elif to_build.has_key(str(rhs)):
- # create a relation between two newly created variables
- self.add_relation_def((edef, rtype, to_build[rhs.name]))
- else:
- to_select.setdefault(edef, []).append( (rtype, rhs, 0) )
- return to_select
-
-
def add_entity_def(self, edef):
"""add an entity definition to build"""
edef.querier_pending_relations = {}
@@ -629,20 +615,20 @@
try:
self.solutions(session, rqlst, args)
except UnknownEid:
- # we want queries such as "Any X WHERE X eid 9999"
- # return an empty result instead of raising UnknownEid
+ # we want queries such as "Any X WHERE X eid 9999" return an
+ # empty result instead of raising UnknownEid
return empty_rset(rql, args, rqlst)
self._rql_cache[cachekey] = rqlst
orig_rqlst = rqlst
if not rqlst.TYPE == 'select':
- if not session.is_super_session:
+ if session.read_security:
check_no_password_selected(rqlst)
- # write query, ensure session's mode is 'write' so connections
- # won't be released until commit/rollback
+ # write query, ensure session's mode is 'write' so connections won't
+ # be released until commit/rollback
session.mode = 'write'
cachekey = None
else:
- if not session.is_super_session:
+ if session.read_security:
for select in rqlst.children:
check_no_password_selected(select)
# on select query, always copy the cached rqlst so we don't have to
--- a/server/repository.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/repository.py Wed Mar 24 10:23:57 2010 +0100
@@ -15,6 +15,8 @@
:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
"""
+from __future__ import with_statement
+
__docformat__ = "restructuredtext en"
import sys
@@ -22,9 +24,11 @@
from os.path import join
from datetime import datetime
from time import time, localtime, strftime
+#from pickle import dumps
from logilab.common.decorators import cached
from logilab.common.compat import any
+from logilab.common import flatten
from yams import BadSchemaDefinition
from rql import RQLSyntaxError
@@ -36,7 +40,7 @@
typed_eid)
from cubicweb import cwvreg, schema, server
from cubicweb.server import utils, hook, pool, querier, sources
-from cubicweb.server.session import Session, InternalSession
+from cubicweb.server.session import Session, InternalSession, security_enabled
class CleanupEidTypeCacheOp(hook.SingleLastOperation):
@@ -80,12 +84,12 @@
this kind of behaviour has to be done in the repository so we don't have
hooks order hazardness
"""
- # XXX now that rql in migraction default to unsafe_execute we don't want to
- # skip that for super session (though we can still skip it for internal
- # sessions). Also we should imo rely on the orm to first fetch existing
- # entity if any then delete it.
+ # skip that for internal session or if integrity explicitly disabled
+ #
+ # XXX we should imo rely on the orm to first fetch existing entity if any
+ # then delete it.
if session.is_internal_session \
- or not session.vreg.config.is_hook_category_activated('integrity'):
+ or not session.is_hook_category_activated('integrity'):
return
card = session.schema_rproperty(rtype, eidfrom, eidto, 'cardinality')
# one may be tented to check for neweids but this may cause more than one
@@ -100,23 +104,15 @@
rschema = session.repo.schema.rschema(rtype)
if card[0] in '1?':
if not rschema.inlined: # inlined relations will be implicitly deleted
- rset = session.unsafe_execute('Any X,Y WHERE X %s Y, X eid %%(x)s, '
- 'NOT Y eid %%(y)s' % rtype,
- {'x': eidfrom, 'y': eidto}, 'x')
- if rset:
- safe_delete_relation(session, rschema, *rset[0])
+ with security_enabled(session, read=False):
+ session.execute('DELETE X %s Y WHERE X eid %%(x)s, '
+ 'NOT Y eid %%(y)s' % rtype,
+ {'x': eidfrom, 'y': eidto}, 'x')
if card[1] in '1?':
- rset = session.unsafe_execute('Any X,Y WHERE X %s Y, Y eid %%(y)s, '
- 'NOT X eid %%(x)s' % rtype,
- {'x': eidfrom, 'y': eidto}, 'y')
- if rset:
- safe_delete_relation(session, rschema, *rset[0])
-
-
-def safe_delete_relation(session, rschema, subject, object):
- if not rschema.has_perm(session, 'delete', fromeid=subject, toeid=object):
- raise Unauthorized()
- session.repo.glob_delete_relation(session, subject, rschema.type, object)
+ with security_enabled(session, read=False):
+ session.execute('DELETE X %sY WHERE Y eid %%(y)s, '
+ 'NOT X eid %%(x)s' % rtype,
+ {'x': eidfrom, 'y': eidto}, 'y')
class Repository(object):
@@ -223,11 +219,6 @@
self._available_pools.put_nowait(self.pools[-1])
self._shutting_down = False
self.hm = self.vreg['hooks']
- if not (config.creating or config.repairing):
- # call instance level initialisation hooks
- self.hm.call_hooks('server_startup', repo=self)
- # register a task to cleanup expired session
- self.looping_task(config['session-time']/3., self.clean_sessions)
# internals ###############################################################
@@ -276,6 +267,11 @@
self.set_schema(appschema)
def start_looping_tasks(self):
+ if not (self.config.creating or self.config.repairing):
+ # call instance level initialisation hooks
+ self.hm.call_hooks('server_startup', repo=self)
+ # register a task to cleanup expired session
+ self.looping_task(self.config['session-time']/3., self.clean_sessions)
assert isinstance(self._looping_tasks, list), 'already started'
for i, (interval, func, args) in enumerate(self._looping_tasks):
self._looping_tasks[i] = task = utils.LoopTask(interval, func, args)
@@ -327,6 +323,7 @@
"""called on server stop event to properly close opened sessions and
connections
"""
+ assert not self._shutting_down, 'already shutting down'
self._shutting_down = True
if isinstance(self._looping_tasks, tuple): # if tasks have been started
for looptask in self._looping_tasks:
@@ -636,7 +633,7 @@
"""commit transaction for the session with the given id"""
self.debug('begin commit for session %s', sessionid)
try:
- self._get_session(sessionid).commit()
+ return self._get_session(sessionid).commit()
except (ValidationError, Unauthorized):
raise
except:
@@ -685,10 +682,42 @@
custom properties)
"""
session = self._get_session(sessionid, setpool=False)
- # update session properties
for prop, value in props.items():
session.change_property(prop, value)
+ def undoable_transactions(self, sessionid, ueid=None, **actionfilters):
+ """See :class:`cubicweb.dbapi.Connection.undoable_transactions`"""
+ session = self._get_session(sessionid, setpool=True)
+ try:
+ return self.system_source.undoable_transactions(session, ueid,
+ **actionfilters)
+ finally:
+ session.reset_pool()
+
+ def transaction_info(self, sessionid, txuuid):
+ """See :class:`cubicweb.dbapi.Connection.transaction_info`"""
+ session = self._get_session(sessionid, setpool=True)
+ try:
+ return self.system_source.tx_info(session, txuuid)
+ finally:
+ session.reset_pool()
+
+ def transaction_actions(self, sessionid, txuuid, public=True):
+ """See :class:`cubicweb.dbapi.Connection.transaction_actions`"""
+ session = self._get_session(sessionid, setpool=True)
+ try:
+ return self.system_source.tx_actions(session, txuuid, public)
+ finally:
+ session.reset_pool()
+
+ def undo_transaction(self, sessionid, txuuid):
+ """See :class:`cubicweb.dbapi.Connection.undo_transaction`"""
+ session = self._get_session(sessionid, setpool=True)
+ try:
+ return self.system_source.undo_transaction(session, txuuid)
+ finally:
+ session.reset_pool()
+
# public (inter-repository) interface #####################################
def entities_modified_since(self, etypes, mtime):
@@ -892,59 +921,58 @@
self.system_source.add_info(session, entity, source, extid, complete)
CleanupEidTypeCacheOp(session)
- def delete_info(self, session, eid):
- self._prepare_delete_info(session, eid)
- self._delete_info(session, eid)
+ def delete_info(self, session, entity, sourceuri, extid):
+ """called by external source when some entity known by the system source
+ has been deleted in the external source
+ """
+ self._prepare_delete_info(session, entity, sourceuri)
+ self._delete_info(session, entity, sourceuri, extid)
- def _prepare_delete_info(self, session, eid):
+ def _prepare_delete_info(self, session, entity, sourceuri):
"""prepare the repository for deletion of an entity:
* update the fti
* mark eid as being deleted in session info
* setup cache update operation
+ * if undoable, get back all entity's attributes and relation
"""
+ eid = entity.eid
self.system_source.fti_unindex_entity(session, eid)
pending = session.transaction_data.setdefault('pendingeids', set())
pending.add(eid)
CleanupEidTypeCacheOp(session)
- def _delete_info(self, session, eid):
+ def _delete_info(self, session, entity, sourceuri, extid):
+ # attributes=None, relations=None):
"""delete system information on deletion of an entity:
- * delete all relations on this entity
- * transfer record from the entities table to the deleted_entities table
+ * delete all remaining relations from/to this entity
+ * call delete info on the system source which will transfer record from
+ the entities table to the deleted_entities table
"""
- etype, uri, extid = self.type_and_source_from_eid(eid, session)
- self._clear_eid_relations(session, etype, eid)
- self.system_source.delete_info(session, eid, etype, uri, extid)
-
- def _clear_eid_relations(self, session, etype, eid):
- """when a entity is deleted, build and execute rql query to delete all
- its relations
- """
- rql = []
- eschema = self.schema.eschema(etype)
pendingrtypes = session.transaction_data.get('pendingrtypes', ())
- for rschema, targetschemas, x in eschema.relation_definitions():
- rtype = rschema.type
- if rtype in schema.VIRTUAL_RTYPES or rtype in pendingrtypes:
- continue
- var = '%s%s' % (rtype.upper(), x.upper())
- if x == 'subject':
- # don't skip inlined relation so they are regularly
- # deleted and so hooks are correctly called
- selection = 'X %s %s' % (rtype, var)
- else:
- selection = '%s %s X' % (var, rtype)
- rql = 'DELETE %s WHERE X eid %%(x)s' % selection
- # unsafe_execute since we suppose that if user can delete the entity,
- # he can delete all its relations without security checking
- session.unsafe_execute(rql, {'x': eid}, 'x', build_descr=False)
+ # delete remaining relations: if user can delete the entity, he can
+ # delete all its relations without security checking
+ with security_enabled(session, read=False, write=False):
+ eid = entity.eid
+ for rschema, _, role in entity.e_schema.relation_definitions():
+ rtype = rschema.type
+ if rtype in schema.VIRTUAL_RTYPES or rtype in pendingrtypes:
+ continue
+ if role == 'subject':
+ # don't skip inlined relation so they are regularly
+ # deleted and so hooks are correctly called
+ selection = 'X %s Y' % rtype
+ else:
+ selection = 'Y %s X' % rtype
+ rql = 'DELETE %s WHERE X eid %%(x)s' % selection
+ session.execute(rql, {'x': eid}, 'x', build_descr=False)
+ self.system_source.delete_info(session, entity, sourceuri, extid)
def locate_relation_source(self, session, subject, rtype, object):
subjsource = self.source_from_eid(subject, session)
objsource = self.source_from_eid(object, session)
if not subjsource is objsource:
source = self.system_source
- if not (subjsource.may_cross_relation(rtype)
+ if not (subjsource.may_cross_relation(rtype)
and objsource.may_cross_relation(rtype)):
raise MultiSourcesError(
"relation %s can't be crossed among sources"
@@ -992,12 +1020,13 @@
self.hm.call_hooks('before_add_entity', session, entity=entity)
# XXX use entity.keys here since edited_attributes is not updated for
# inline relations
- for attr in entity.keys():
+ for attr in entity.iterkeys():
rschema = eschema.subjrels[attr]
if not rschema.final: # inlined relation
relations.append((attr, entity[attr]))
entity.set_defaults()
- entity.check(creation=True)
+ if session.is_hook_category_activated('integrity'):
+ entity.check(creation=True)
source.add_entity(session, entity)
if source.uri != 'system':
extid = source.get_extid(entity)
@@ -1044,7 +1073,8 @@
print 'UPDATE entity', etype, entity.eid, \
dict(entity), edited_attributes
entity.edited_attributes = edited_attributes
- entity.check()
+ if session.is_hook_category_activated('integrity'):
+ entity.check()
eschema = entity.e_schema
session.set_entity_cache(entity)
only_inline_rels, need_fti_update = True, False
@@ -1101,19 +1131,16 @@
def glob_delete_entity(self, session, eid):
"""delete an entity and all related entities from the repository"""
- # call delete_info before hooks
- self._prepare_delete_info(session, eid)
- etype, uri, extid = self.type_and_source_from_eid(eid, session)
+ entity = session.entity_from_eid(eid)
+ etype, sourceuri, extid = self.type_and_source_from_eid(eid, session)
+ self._prepare_delete_info(session, entity, sourceuri)
if server.DEBUG & server.DBG_REPO:
print 'DELETE entity', etype, eid
- if eid == 937:
- server.DEBUG |= (server.DBG_SQL | server.DBG_RQL | server.DBG_MORE)
- source = self.sources_by_uri[uri]
+ source = self.sources_by_uri[sourceuri]
if source.should_call_hooks:
- entity = session.entity_from_eid(eid)
self.hm.call_hooks('before_delete_entity', session, entity=entity)
- self._delete_info(session, eid)
- source.delete_entity(session, etype, eid)
+ self._delete_info(session, entity, sourceuri, extid)
+ source.delete_entity(session, entity)
if source.should_call_hooks:
self.hm.call_hooks('after_delete_entity', session, entity=entity)
# don't clear cache here this is done in a hook on commit
--- a/server/schemaserial.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/schemaserial.py Wed Mar 24 10:23:57 2010 +0100
@@ -50,6 +50,10 @@
continue
return res
+def cstrtype_mapping(cursor):
+ """cached constraint types mapping"""
+ return dict(cursor.execute('Any T, X WHERE X is CWConstraintType, X name T'))
+
# schema / perms deserialization ##############################################
def deserialize_schema(schema, session):
@@ -214,7 +218,7 @@
if not quiet:
_title = '-> storing the schema in the database '
print _title,
- execute = cursor.unsafe_execute
+ execute = cursor.execute
eschemas = schema.entities()
if not quiet:
pb_size = (len(eschemas + schema.relations())
@@ -229,14 +233,15 @@
eschemas.remove(schema.eschema('CWEType'))
eschemas.insert(0, schema.eschema('CWEType'))
for eschema in eschemas:
- for rql, kwargs in eschema2rql(eschema, groupmap):
- execute(rql, kwargs, build_descr=False)
+ execschemarql(execute, eschema, eschema2rql(eschema, groupmap))
if pb is not None:
pb.update()
# serialize constraint types
+ cstrtypemap = {}
rql = 'INSERT CWConstraintType X: X name %(ct)s'
for cstrtype in CONSTRAINTS:
- execute(rql, {'ct': unicode(cstrtype)}, build_descr=False)
+ cstrtypemap[cstrtype] = execute(rql, {'ct': unicode(cstrtype)},
+ build_descr=False)[0][0]
if pb is not None:
pb.update()
# serialize relations
@@ -246,8 +251,15 @@
if pb is not None:
pb.update()
continue
- for rql, kwargs in rschema2rql(rschema, groupmap=groupmap):
- execute(rql, kwargs, build_descr=False)
+ execschemarql(execute, rschema, rschema2rql(rschema, addrdef=False))
+ if rschema.symmetric:
+ rdefs = [rdef for k, rdef in rschema.rdefs.iteritems()
+ if (rdef.subject, rdef.object) == k]
+ else:
+ rdefs = rschema.rdefs.itervalues()
+ for rdef in rdefs:
+ execschemarql(execute, rdef,
+ rdef2rql(rdef, cstrtypemap, groupmap))
if pb is not None:
pb.update()
for rql, kwargs in specialize2rql(schema):
@@ -258,6 +270,55 @@
print
+# high level serialization functions
+
+def execschemarql(execute, schema, rqls):
+ for rql, kwargs in rqls:
+ kwargs['x'] = schema.eid
+ rset = execute(rql, kwargs, build_descr=False)
+ if schema.eid is None:
+ schema.eid = rset[0][0]
+ else:
+ assert rset
+
+def erschema2rql(erschema, groupmap):
+ if isinstance(erschema, schemamod.EntitySchema):
+ return eschema2rql(erschema, groupmap=groupmap)
+ return rschema2rql(erschema, groupmap=groupmap)
+
+def specialize2rql(schema):
+ for eschema in schema.entities():
+ if eschema.final:
+ continue
+ for rql, kwargs in eschemaspecialize2rql(eschema):
+ yield rql, kwargs
+
+# etype serialization
+
+def eschema2rql(eschema, groupmap=None):
+ """return a list of rql insert statements to enter an entity schema
+ in the database as an CWEType entity
+ """
+ relations, values = eschema_relations_values(eschema)
+ # NOTE: 'specializes' relation can't be inserted here since there's no
+ # way to make sure the parent type is inserted before the child type
+ yield 'INSERT CWEType X: %s' % ','.join(relations) , values
+ # entity permissions
+ if groupmap is not None:
+ for rql, args in _erperms2rql(eschema, groupmap):
+ yield rql, args
+
+def eschema_relations_values(eschema):
+ values = _ervalues(eschema)
+ relations = ['X %s %%(%s)s' % (attr, attr) for attr in sorted(values)]
+ return relations, values
+
+def eschemaspecialize2rql(eschema):
+ specialized_type = eschema.specializes()
+ if specialized_type:
+ values = {'x': eschema.eid, 'et': specialized_type.eid}
+ yield 'SET X specializes ET WHERE X eid %(x)s, ET eid %(et)s', values
+
def _ervalues(erschema):
try:
type_ = unicode(erschema.type)
@@ -273,10 +334,23 @@
'description': desc,
}
-def eschema_relations_values(eschema):
- values = _ervalues(eschema)
- relations = ['X %s %%(%s)s' % (attr, attr) for attr in sorted(values)]
- return relations, values
+# rtype serialization
+
+def rschema2rql(rschema, cstrtypemap=None, addrdef=True, groupmap=None):
+ """return a list of rql insert statements to enter a relation schema
+ in the database as an CWRType entity
+ """
+ if rschema.type == 'has_text':
+ return
+ relations, values = rschema_relations_values(rschema)
+ yield 'INSERT CWRType X: %s' % ','.join(relations), values
+ if addrdef:
+ assert cstrtypemap
+ # sort for testing purpose
+ for rdef in sorted(rschema.rdefs.itervalues(),
+ key=lambda x: (x.subject, x.object)):
+ for rql, values in rdef2rql(rdef, cstrtypemap, groupmap):
+ yield rql, values
def rschema_relations_values(rschema):
values = _ervalues(rschema)
@@ -290,169 +364,58 @@
relations = ['X %s %%(%s)s' % (attr, attr) for attr in sorted(values)]
return relations, values
-def _rdef_values(objtype, props):
- amap = {'order': 'ordernum'}
+# rdef serialization
+
+def rdef2rql(rdef, cstrtypemap, groupmap=None):
+ # don't serialize infered relations
+ if rdef.infered:
+ return
+ relations, values = _rdef_values(rdef)
+ relations.append('X relation_type ER,X from_entity SE,X to_entity OE')
+ values.update({'se': rdef.subject.eid, 'rt': rdef.rtype.eid, 'oe': rdef.object.eid})
+ if rdef.final:
+ etype = 'CWAttribute'
+ else:
+ etype = 'CWRelation'
+ yield 'INSERT %s X: %s WHERE SE eid %%(se)s,ER eid %%(rt)s,OE eid %%(oe)s' % (
+ etype, ','.join(relations), ), values
+ for rql, values in constraints2rql(cstrtypemap, rdef.constraints):
+ yield rql, values
+ # no groupmap means "no security insertion"
+ if groupmap:
+ for rql, args in _erperms2rql(rdef, groupmap):
+ yield rql, args
+
+def _rdef_values(rdef):
+ amap = {'order': 'ordernum', 'default': 'defaultval'}
values = {}
- for prop, default in schemamod.RelationDefinitionSchema.rproperty_defs(objtype).iteritems():
+ for prop, default in rdef.rproperty_defs(rdef.object).iteritems():
if prop in ('eid', 'constraints', 'uid', 'infered', 'permissions'):
continue
- value = props.get(prop, default)
+ value = getattr(rdef, prop)
+ # XXX type cast really necessary?
if prop in ('indexed', 'fulltextindexed', 'internationalizable'):
value = bool(value)
elif prop == 'ordernum':
value = int(value)
elif isinstance(value, str):
value = unicode(value)
+ if value is not None and prop == 'default':
+ if value is False:
+ value = u''
+ if not isinstance(value, unicode):
+ value = unicode(value)
values[amap.get(prop, prop)] = value
- return values
-
-def nfrdef_relations_values(objtype, props):
- values = _rdef_values(objtype, props)
- relations = ['X %s %%(%s)s' % (attr, attr) for attr in sorted(values)]
- return relations, values
-
-def frdef_relations_values(objtype, props):
- values = _rdef_values(objtype, props)
- default = values['default']
- del values['default']
- if default is not None:
- if default is False:
- default = u''
- elif not isinstance(default, unicode):
- default = unicode(default)
- values['defaultval'] = default
relations = ['X %s %%(%s)s' % (attr, attr) for attr in sorted(values)]
return relations, values
-
-def __rdef2rql(genmap, rschema, subjtype=None, objtype=None, props=None,
- groupmap=None):
- if subjtype is None:
- assert objtype is None
- assert props is None
- targets = sorted(rschema.rdefs)
- else:
- assert not objtype is None
- targets = [(subjtype, objtype)]
- # relation schema
- if rschema.final:
- etype = 'CWAttribute'
- else:
- etype = 'CWRelation'
- for subjtype, objtype in targets:
- if props is None:
- _props = rschema.rdef(subjtype, objtype)
- else:
- _props = props
- # don't serialize infered relations
- if _props.get('infered'):
- continue
- gen = genmap[rschema.final]
- for rql, values in gen(rschema, subjtype, objtype, _props):
- yield rql, values
- # no groupmap means "no security insertion"
- if groupmap:
- for rql, args in _erperms2rql(_props, groupmap):
- args['st'] = str(subjtype)
- args['rt'] = str(rschema)
- args['ot'] = str(objtype)
- yield rql + 'X is %s, X from_entity ST, X to_entity OT, '\
- 'X relation_type RT, RT name %%(rt)s, ST name %%(st)s, '\
- 'OT name %%(ot)s' % etype, args
-
-
-def schema2rql(schema, skip=None, allow=None):
- """return a list of rql insert statements to enter the schema in the
- database as CWRType and CWEType entities
- """
- assert not (skip is not None and allow is not None), \
- 'can\'t use both skip and allow'
- all = schema.entities() + schema.relations()
- if skip is not None:
- return chain(*[erschema2rql(schema[t]) for t in all if not t in skip])
- elif allow is not None:
- return chain(*[erschema2rql(schema[t]) for t in all if t in allow])
- return chain(*[erschema2rql(schema[t]) for t in all])
-
-def erschema2rql(erschema, groupmap):
- if isinstance(erschema, schemamod.EntitySchema):
- return eschema2rql(erschema, groupmap=groupmap)
- return rschema2rql(erschema, groupmap=groupmap)
-
-def eschema2rql(eschema, groupmap=None):
- """return a list of rql insert statements to enter an entity schema
- in the database as an CWEType entity
- """
- relations, values = eschema_relations_values(eschema)
- # NOTE: 'specializes' relation can't be inserted here since there's no
- # way to make sure the parent type is inserted before the child type
- yield 'INSERT CWEType X: %s' % ','.join(relations) , values
- # entity permissions
- if groupmap is not None:
- for rql, args in _erperms2rql(eschema, groupmap):
- args['name'] = str(eschema)
- yield rql + 'X is CWEType, X name %(name)s', args
-
-def specialize2rql(schema):
- for eschema in schema.entities():
- for rql, kwargs in eschemaspecialize2rql(eschema):
- yield rql, kwargs
-
-def eschemaspecialize2rql(eschema):
- specialized_type = eschema.specializes()
- if specialized_type:
- values = {'x': eschema.type, 'et': specialized_type.type}
- yield 'SET X specializes ET WHERE X name %(x)s, ET name %(et)s', values
-
-def rschema2rql(rschema, addrdef=True, groupmap=None):
- """return a list of rql insert statements to enter a relation schema
- in the database as an CWRType entity
- """
- if rschema.type == 'has_text':
- return
- relations, values = rschema_relations_values(rschema)
- yield 'INSERT CWRType X: %s' % ','.join(relations), values
- if addrdef:
- for rql, values in rdef2rql(rschema, groupmap=groupmap):
- yield rql, values
-
-def rdef2rql(rschema, subjtype=None, objtype=None, props=None, groupmap=None):
- genmap = {True: frdef2rql, False: nfrdef2rql}
- return __rdef2rql(genmap, rschema, subjtype, objtype, props, groupmap)
-
-
-_LOCATE_RDEF_RQL0 = 'X relation_type ER,X from_entity SE,X to_entity OE'
-_LOCATE_RDEF_RQL1 = 'SE name %(se)s,ER name %(rt)s,OE name %(oe)s'
-
-def frdef2rql(rschema, subjtype, objtype, props):
- relations, values = frdef_relations_values(objtype, props)
- relations.append(_LOCATE_RDEF_RQL0)
- values.update({'se': str(subjtype), 'rt': str(rschema), 'oe': str(objtype)})
- yield 'INSERT CWAttribute X: %s WHERE %s' % (','.join(relations), _LOCATE_RDEF_RQL1), values
- for rql, values in rdefrelations2rql(rschema, subjtype, objtype, props):
- yield rql + ', EDEF is CWAttribute', values
-
-def nfrdef2rql(rschema, subjtype, objtype, props):
- relations, values = nfrdef_relations_values(objtype, props)
- relations.append(_LOCATE_RDEF_RQL0)
- values.update({'se': str(subjtype), 'rt': str(rschema), 'oe': str(objtype)})
- yield 'INSERT CWRelation X: %s WHERE %s' % (','.join(relations), _LOCATE_RDEF_RQL1), values
- for rql, values in rdefrelations2rql(rschema, subjtype, objtype, props):
- yield rql + ', EDEF is CWRelation', values
-
-def rdefrelations2rql(rschema, subjtype, objtype, props):
- iterators = []
- for constraint in props.constraints:
- iterators.append(constraint2rql(rschema, subjtype, objtype, constraint))
- return chain(*iterators)
-
-def constraint2rql(rschema, subjtype, objtype, constraint):
- values = {'ctname': unicode(constraint.type()),
- 'value': unicode(constraint.serialize()),
- 'rt': str(rschema), 'se': str(subjtype), 'oe': str(objtype)}
- yield 'INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE \
-CT name %(ctname)s, EDEF relation_type ER, EDEF from_entity SE, EDEF to_entity OE, \
-ER name %(rt)s, SE name %(se)s, OE name %(oe)s', values
+def constraints2rql(cstrtypemap, constraints, rdefeid=None):
+ for constraint in constraints:
+ values = {'ct': cstrtypemap[constraint.type()],
+ 'value': unicode(constraint.serialize()),
+ 'x': rdefeid} # when not specified, will have to be set by the caller
+ yield 'INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE \
+CT eid %(ct)s, EDEF eid %(x)s', values
def _erperms2rql(erschema, groupmap):
@@ -471,7 +434,7 @@
if isinstance(group_or_rqlexpr, basestring):
# group
try:
- yield ('SET X %s_permission Y WHERE Y eid %%(g)s, ' % action,
+ yield ('SET X %s_permission Y WHERE Y eid %%(g)s, X eid %%(x)s' % action,
{'g': groupmap[group_or_rqlexpr]})
except KeyError:
continue
@@ -479,36 +442,24 @@
# rqlexpr
rqlexpr = group_or_rqlexpr
yield ('INSERT RQLExpression E: E expression %%(e)s, E exprtype %%(t)s, '
- 'E mainvars %%(v)s, X %s_permission E WHERE ' % action,
+ 'E mainvars %%(v)s, X %s_permission E WHERE X eid %%(x)s' % action,
{'e': unicode(rqlexpr.expression),
'v': unicode(rqlexpr.mainvars),
't': unicode(rqlexpr.__class__.__name__)})
+# update functions
-def updateeschema2rql(eschema):
+def updateeschema2rql(eschema, eid):
relations, values = eschema_relations_values(eschema)
- values['et'] = eschema.type
- yield 'SET %s WHERE X is CWEType, X name %%(et)s' % ','.join(relations), values
-
-def updaterschema2rql(rschema):
- relations, values = rschema_relations_values(rschema)
- values['rt'] = rschema.type
- yield 'SET %s WHERE X is CWRType, X name %%(rt)s' % ','.join(relations), values
+ values['x'] = eid
+ yield 'SET %s WHERE X eid %%(x)s' % ','.join(relations), values
-def updaterdef2rql(rschema, subjtype=None, objtype=None, props=None):
- genmap = {True: updatefrdef2rql, False: updatenfrdef2rql}
- return __rdef2rql(genmap, rschema, subjtype, objtype, props)
+def updaterschema2rql(rschema, eid):
+ relations, values = rschema_relations_values(rschema)
+ values['x'] = eid
+ yield 'SET %s WHERE X eid %%(x)s' % ','.join(relations), values
-def updatefrdef2rql(rschema, subjtype, objtype, props):
- relations, values = frdef_relations_values(objtype, props)
- values.update({'se': subjtype, 'rt': str(rschema), 'oe': objtype})
- yield 'SET %s WHERE %s, %s, X is CWAttribute' % (','.join(relations),
- _LOCATE_RDEF_RQL0,
- _LOCATE_RDEF_RQL1), values
-
-def updatenfrdef2rql(rschema, subjtype, objtype, props):
- relations, values = nfrdef_relations_values(objtype, props)
- values.update({'se': subjtype, 'rt': str(rschema), 'oe': objtype})
- yield 'SET %s WHERE %s, %s, X is CWRelation' % (','.join(relations),
- _LOCATE_RDEF_RQL0,
- _LOCATE_RDEF_RQL1), values
+def updaterdef2rql(rdef, eid):
+ relations, values = _rdef_values(rdef)
+ values['x'] = eid
+ yield 'SET %s WHERE X eid %%(x)s' % ','.join(relations), values
--- a/server/server.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/server.py Wed Mar 24 10:23:57 2010 +0100
@@ -90,6 +90,7 @@
def run(self, req_timeout=5.0):
"""enter the service loop"""
+ self.repo.start_looping_tasks()
while self.quiting is None:
try:
self.daemon.handleRequests(req_timeout)
--- a/server/serverconfig.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/serverconfig.py Wed Mar 24 10:23:57 2010 +0100
@@ -127,6 +127,20 @@
'help': 'size of the parsed rql cache size.',
'group': 'main', 'inputlevel': 1,
}),
+ ('undo-support',
+ {'type' : 'string', 'default': '',
+ 'help': 'string defining actions that will have undo support: \
+[C]reate [U]pdate [D]elete entities / [A]dd [R]emove relation. Leave it empty \
+for no undo support, set it to CUDAR for full undo support, or to DR for \
+support undoing of deletion only.',
+ 'group': 'main', 'inputlevel': 1,
+ }),
+ ('keep-transaction-lifetime',
+ {'type' : 'int', 'default': 7,
+ 'help': 'number of days during which transaction records should be \
+kept (hence undoable).',
+ 'group': 'main', 'inputlevel': 1,
+ }),
('delay-full-text-indexation',
{'type' : 'yn', 'default': False,
'help': 'When full text indexation of entity has a too important cost'
@@ -185,63 +199,6 @@
# check user's state at login time
consider_user_state = True
- # XXX hooks control stuff should probably be on the session, not on the config
-
- # hooks activation configuration
- # all hooks should be activated during normal execution
- disabled_hooks_categories = set()
- enabled_hooks_categories = set()
- ALLOW_ALL = object()
- DENY_ALL = object()
- hooks_mode = ALLOW_ALL
-
- @classmethod
- def set_hooks_mode(cls, mode):
- assert mode is cls.ALLOW_ALL or mode is cls.DENY_ALL
- oldmode = cls.hooks_mode
- cls.hooks_mode = mode
- return oldmode
-
- @classmethod
- def disable_hook_category(cls, *categories):
- changes = set()
- if cls.hooks_mode is cls.DENY_ALL:
- for category in categories:
- if category in cls.enabled_hooks_categories:
- cls.enabled_hooks_categories.remove(category)
- changes.add(category)
- else:
- for category in categories:
- if category not in cls.disabled_hooks_categories:
- cls.disabled_hooks_categories.add(category)
- changes.add(category)
- return changes
-
- @classmethod
- def enable_hook_category(cls, *categories):
- changes = set()
- if cls.hooks_mode is cls.DENY_ALL:
- for category in categories:
- if category not in cls.enabled_hooks_categories:
- cls.enabled_hooks_categories.add(category)
- changes.add(category)
- else:
- for category in categories:
- if category in cls.disabled_hooks_categories:
- cls.disabled_hooks_categories.remove(category)
- changes.add(category)
- return changes
-
- @classmethod
- def is_hook_activated(cls, hook):
- return cls.is_hook_category_activated(hook.category)
-
- @classmethod
- def is_hook_category_activated(cls, category):
- if cls.hooks_mode is cls.DENY_ALL:
- return category in cls.enabled_hooks_categories
- return category not in cls.disabled_hooks_categories
-
# should some hooks be deactivated during [pre|post]create script execution
free_wheel = False
--- a/server/serverctl.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/serverctl.py Wed Mar 24 10:23:57 2010 +0100
@@ -66,14 +66,13 @@
cnx = get_connection(driver, dbhost, dbname, user, password=password,
port=source.get('db-port'),
**extra)
- if not hasattr(cnx, 'logged_user'): # XXX logilab.db compat
- try:
- cnx.logged_user = user
- except AttributeError:
- # C object, __slots__
- from logilab.database import _SimpleConnectionWrapper
- cnx = _SimpleConnectionWrapper(cnx)
- cnx.logged_user = user
+ try:
+ cnx.logged_user = user
+ except AttributeError:
+ # C object, __slots__
+ from logilab.database import _SimpleConnectionWrapper
+ cnx = _SimpleConnectionWrapper(cnx)
+ cnx.logged_user = user
return cnx
def system_source_cnx(source, dbms_system_base=False,
@@ -84,8 +83,8 @@
create/drop the instance database)
"""
if dbms_system_base:
- from logilab.common.adbh import get_adv_func_helper
- system_db = get_adv_func_helper(source['db-driver']).system_database()
+ from logilab.database import get_db_helper
+ system_db = get_db_helper(source['db-driver']).system_database()
return source_cnx(source, system_db, special_privs=special_privs, verbose=verbose)
return source_cnx(source, special_privs=special_privs, verbose=verbose)
@@ -94,11 +93,11 @@
or a database
"""
import logilab.common as lgp
- from logilab.common.adbh import get_adv_func_helper
+ from logilab.database import get_db_helper
lgp.USE_MX_DATETIME = False
special_privs = ''
driver = source['db-driver']
- helper = get_adv_func_helper(driver)
+ helper = get_db_helper(driver)
if user is not None and helper.users_support:
special_privs += '%s USER' % what
if db is not None:
@@ -211,10 +210,10 @@
def cleanup(self):
"""remove instance's configuration and database"""
- from logilab.common.adbh import get_adv_func_helper
+ from logilab.database import get_db_helper
source = self.config.sources()['system']
dbname = source['db-name']
- helper = get_adv_func_helper(source['db-driver'])
+ helper = get_db_helper(source['db-driver'])
if ASK.confirm('Delete database %s ?' % dbname):
user = source['db-user'] or None
cnx = _db_sys_cnx(source, 'DROP DATABASE', user=user)
@@ -294,8 +293,7 @@
)
def run(self, args):
"""run the command with its specific arguments"""
- from logilab.common.adbh import get_adv_func_helper
- from indexer import get_indexer
+ from logilab.database import get_db_helper
verbose = self.get('verbose')
automatic = self.get('automatic')
appid = pop_arg(args, msg='No instance specified !')
@@ -304,7 +302,7 @@
dbname = source['db-name']
driver = source['db-driver']
create_db = self.config.create_db
- helper = get_adv_func_helper(driver)
+ helper = get_db_helper(driver)
if driver == 'sqlite':
if os.path.exists(dbname) and automatic or \
ASK.confirm('Database %s already exists -- do you want to drop it ?' % dbname):
@@ -330,13 +328,8 @@
helper.create_database(cursor, dbname, source['db-user'],
source['db-encoding'])
else:
- try:
- helper.create_database(cursor, dbname,
- encoding=source['db-encoding'])
- except TypeError:
- # logilab.database
- helper.create_database(cursor, dbname,
- dbencoding=source['db-encoding'])
+ helper.create_database(cursor, dbname,
+ dbencoding=source['db-encoding'])
dbcnx.commit()
print '-> database %s created.' % dbname
except:
@@ -344,8 +337,7 @@
raise
cnx = system_source_cnx(source, special_privs='LANGUAGE C', verbose=verbose)
cursor = cnx.cursor()
- indexer = get_indexer(driver)
- indexer.init_extensions(cursor)
+ helper.init_fti_extensions(cursor)
# postgres specific stuff
if driver == 'postgres':
# install plpythonu/plpgsql language if not installed by the cube
--- a/server/session.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/session.py Wed Mar 24 10:23:57 2010 +0100
@@ -5,17 +5,20 @@
:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
"""
+from __future__ import with_statement
+
__docformat__ = "restructuredtext en"
import sys
import threading
from time import time
+from uuid import uuid4
from logilab.common.deprecation import deprecated
from rql.nodes import VariableRef, Function, ETYPE_PYOBJ_MAP, etype_from_pyobj
from yams import BASE_TYPES
-from cubicweb import Binary, UnknownEid
+from cubicweb import Binary, UnknownEid, schema
from cubicweb.req import RequestSessionBase
from cubicweb.dbapi import ConnectionProperties
from cubicweb.utils import make_uid
@@ -23,6 +26,10 @@
ETYPE_PYOBJ_MAP[Binary] = 'Bytes'
+NO_UNDO_TYPES = schema.SCHEMA_TYPES.copy()
+NO_UNDO_TYPES.add('CWCache')
+# XXX rememberme,forgotpwd,apycot,vcsfile
+
def is_final(rqlst, variable, args):
# try to find if this is a final var or not
for select in rqlst.children:
@@ -42,10 +49,73 @@
return description
+class hooks_control(object):
+ """context manager to control activated hooks categories.
+
+ If mode is session.`HOOKS_DENY_ALL`, given hooks categories will
+ be enabled.
+
+ If mode is session.`HOOKS_ALLOW_ALL`, given hooks categories will
+ be disabled.
+ """
+ def __init__(self, session, mode, *categories):
+ self.session = session
+ self.mode = mode
+ self.categories = categories
+
+ def __enter__(self):
+ self.oldmode = self.session.set_hooks_mode(self.mode)
+ if self.mode is self.session.HOOKS_DENY_ALL:
+ self.changes = self.session.enable_hook_categories(*self.categories)
+ else:
+ self.changes = self.session.disable_hook_categories(*self.categories)
+
+ def __exit__(self, exctype, exc, traceback):
+ if self.changes:
+ if self.mode is self.session.HOOKS_DENY_ALL:
+ self.session.disable_hook_categories(*self.changes)
+ else:
+ self.session.enable_hook_categories(*self.changes)
+ self.session.set_hooks_mode(self.oldmode)
+
+INDENT = ''
+class security_enabled(object):
+ """context manager to control security w/ session.execute, since by
+ default security is disabled on queries executed on the repository
+ side.
+ """
+ def __init__(self, session, read=None, write=None):
+ self.session = session
+ self.read = read
+ self.write = write
+
+ def __enter__(self):
+# global INDENT
+ if self.read is not None:
+ self.oldread = self.session.set_read_security(self.read)
+# print INDENT + 'read', self.read, self.oldread
+ if self.write is not None:
+ self.oldwrite = self.session.set_write_security(self.write)
+# print INDENT + 'write', self.write, self.oldwrite
+# INDENT += ' '
+
+ def __exit__(self, exctype, exc, traceback):
+# global INDENT
+# INDENT = INDENT[:-2]
+ if self.read is not None:
+ self.session.set_read_security(self.oldread)
+# print INDENT + 'reset read to', self.oldread
+ if self.write is not None:
+ self.session.set_write_security(self.oldwrite)
+# print INDENT + 'reset write to', self.oldwrite
+
+
+
class Session(RequestSessionBase):
"""tie session id, user, connections pool and other session data all
together
"""
+ is_internal_session = False
def __init__(self, user, repo, cnxprops=None, _id=None):
super(Session, self).__init__(repo.vreg)
@@ -56,9 +126,14 @@
self.cnxtype = cnxprops.cnxtype
self.creation = time()
self.timestamp = self.creation
- self.is_internal_session = False
- self.is_super_session = False
self.default_mode = 'read'
+ # support undo for Create Update Delete entity / Add Remove relation
+ if repo.config.creating or repo.config.repairing or self.is_internal_session:
+ self.undo_actions = ()
+ else:
+ self.undo_actions = set(repo.config['undo-support'].upper())
+ if self.undo_actions - set('CUDAR'):
+ raise Exception('bad undo-support string in configuration')
# short cut to querier .execute method
self._execute = repo.querier.execute
# shared data, used to communicate extra information between the client
@@ -78,19 +153,14 @@
def hijack_user(self, user):
"""return a fake request/session using specified user"""
session = Session(user, self.repo)
- session._threaddata = self.actual_session()._threaddata
+ threaddata = session._threaddata
+ threaddata.pool = self.pool
+ # everything in transaction_data should be copied back, except the entity
+ # type cache which we don't copy, to avoid security problems
+ threaddata.transaction_data = self.transaction_data.copy()
+ threaddata.transaction_data.pop('ecache', None)
return session
- def _super_call(self, __cb, *args, **kwargs):
- if self.is_super_session:
- __cb(self, *args, **kwargs)
- return
- self.is_super_session = True
- try:
- __cb(self, *args, **kwargs)
- finally:
- self.is_super_session = False
-
def add_relation(self, fromeid, rtype, toeid):
"""provide direct access to the repository method to add a relation.
@@ -102,14 +172,13 @@
You may use this in hooks when you know both eids of the relation you
want to add.
"""
- if self.vreg.schema[rtype].inlined:
- entity = self.entity_from_eid(fromeid)
- entity[rtype] = toeid
- self._super_call(self.repo.glob_update_entity,
- entity, set((rtype,)))
- else:
- self._super_call(self.repo.glob_add_relation,
- fromeid, rtype, toeid)
+ with security_enabled(self, False, False):
+ if self.vreg.schema[rtype].inlined:
+ entity = self.entity_from_eid(fromeid)
+ entity[rtype] = toeid
+ self.repo.glob_update_entity(self, entity, set((rtype,)))
+ else:
+ self.repo.glob_add_relation(self, fromeid, rtype, toeid)
def delete_relation(self, fromeid, rtype, toeid):
"""provide direct access to the repository method to delete a relation.
@@ -122,14 +191,13 @@
You may use this in hooks when you know both eids of the relation you
want to delete.
"""
- if self.vreg.schema[rtype].inlined:
- entity = self.entity_from_eid(fromeid)
- entity[rtype] = None
- self._super_call(self.repo.glob_update_entity,
- entity, set((rtype,)))
- else:
- self._super_call(self.repo.glob_delete_relation,
- fromeid, rtype, toeid)
+ with security_enabled(self, False, False):
+ if self.vreg.schema[rtype].inlined:
+ entity = self.entity_from_eid(fromeid)
+ entity[rtype] = None
+ self.repo.glob_update_entity(self, entity, set((rtype,)))
+ else:
+ self.repo.glob_delete_relation(self, fromeid, rtype, toeid)
# relations cache handling #################################################
@@ -198,10 +266,6 @@
# resource accessors ######################################################
- def actual_session(self):
- """return the original parent session if any, else self"""
- return self
-
def system_sql(self, sql, args=None, rollback_on_failure=True):
"""return a sql cursor on the system database"""
if not sql.split(None, 1)[0].upper() == 'SELECT':
@@ -251,6 +315,165 @@
rdef = rschema.rdef(subjtype, objtype)
return rdef.get(rprop)
+ # security control #########################################################
+
+ DEFAULT_SECURITY = object() # evaluated to true by design
+
+ @property
+ def read_security(self):
+ """return a boolean telling if read security is activated or not"""
+ try:
+ return self._threaddata.read_security
+ except AttributeError:
+ self._threaddata.read_security = self.DEFAULT_SECURITY
+ return self._threaddata.read_security
+
+ def set_read_security(self, activated):
+ """[de]activate read security, returning the previous value set for
+ later restoration.
+
+ you should usually use the `security_enabled` context manager instead
+ of this to change security settings.
+ """
+ oldmode = self.read_security
+ self._threaddata.read_security = activated
+ # dbapi_query used to detect hooks triggered by a 'dbapi' query (eg not
+ # issued on the session). This is tricky since we the execution model of
+ # a (write) user query is:
+ #
+ # repository.execute (security enabled)
+ # \-> querier.execute
+ # \-> repo.glob_xxx (add/update/delete entity/relation)
+ # \-> deactivate security before calling hooks
+ # \-> WE WANT TO CHECK QUERY NATURE HERE
+ # \-> potentially, other calls to querier.execute
+ #
+ # so we can't rely on simply checking session.read_security, but
+ # recalling the first transition from DEFAULT_SECURITY to something
+ # else (False actually) is not perfect but should be enough
+ #
+ # also reset dbapi_query to true when we go back to DEFAULT_SECURITY
+ self._threaddata.dbapi_query = (oldmode is self.DEFAULT_SECURITY
+ or activated is self.DEFAULT_SECURITY)
+ return oldmode
+
+ @property
+ def write_security(self):
+ """return a boolean telling if write security is activated or not"""
+ try:
+ return self._threaddata.write_security
+ except:
+ self._threaddata.write_security = self.DEFAULT_SECURITY
+ return self._threaddata.write_security
+
+ def set_write_security(self, activated):
+ """[de]activate write security, returning the previous value set for
+ later restoration.
+
+ you should usually use the `security_enabled` context manager instead
+ of this to change security settings.
+ """
+ oldmode = self.write_security
+ self._threaddata.write_security = activated
+ return oldmode
+
+ @property
+ def running_dbapi_query(self):
+ """return a boolean telling if it's triggered by a db-api query or by
+ a session query.
+
+ To be used in hooks, else may have a wrong value.
+ """
+ return getattr(self._threaddata, 'dbapi_query', True)
+
+ # hooks activation control #################################################
+ # all hooks should be activated during normal execution
+
+ HOOKS_ALLOW_ALL = object()
+ HOOKS_DENY_ALL = object()
+
+ @property
+ def hooks_mode(self):
+ return getattr(self._threaddata, 'hooks_mode', self.HOOKS_ALLOW_ALL)
+
+ def set_hooks_mode(self, mode):
+ assert mode is self.HOOKS_ALLOW_ALL or mode is self.HOOKS_DENY_ALL
+ oldmode = getattr(self._threaddata, 'hooks_mode', self.HOOKS_ALLOW_ALL)
+ self._threaddata.hooks_mode = mode
+ return oldmode
+
+ @property
+ def disabled_hook_categories(self):
+ try:
+ return getattr(self._threaddata, 'disabled_hook_cats')
+ except AttributeError:
+ cats = self._threaddata.disabled_hook_cats = set()
+ return cats
+
+ @property
+ def enabled_hook_categories(self):
+ try:
+ return getattr(self._threaddata, 'enabled_hook_cats')
+ except AttributeError:
+ cats = self._threaddata.enabled_hook_cats = set()
+ return cats
+
+ def disable_hook_categories(self, *categories):
+ """disable the given hook categories:
+
+ - on HOOKS_DENY_ALL mode, ensure those categories are not enabled
+ - on HOOKS_ALLOW_ALL mode, ensure those categories are disabled
+ """
+ changes = set()
+ if self.hooks_mode is self.HOOKS_DENY_ALL:
+ enablecats = self.enabled_hook_categories
+ for category in categories:
+ if category in enablecats:
+ enablecats.remove(category)
+ changes.add(category)
+ else:
+ disablecats = self.disabled_hook_categories
+ for category in categories:
+ if category not in disablecats:
+ disablecats.add(category)
+ changes.add(category)
+ return tuple(changes)
+
+ def enable_hook_categories(self, *categories):
+ """enable the given hook categories:
+
+ - on HOOKS_DENY_ALL mode, ensure those categories are enabled
+ - on HOOKS_ALLOW_ALL mode, ensure those categories are not disabled
+ """
+ changes = set()
+ if self.hooks_mode is self.HOOKS_DENY_ALL:
+ enablecats = self.enabled_hook_categories
+ for category in categories:
+ if category not in enablecats:
+ enablecats.add(category)
+ changes.add(category)
+ else:
+ disablecats = self.disabled_hook_categories
+ for category in categories:
+ if category in self.disabled_hook_categories:
+ disablecats.remove(category)
+ changes.add(category)
+ return tuple(changes)
+
+ def is_hook_category_activated(self, category):
+ """return a boolean telling if the given category is currently activated
+ or not
+ """
+ if self.hooks_mode is self.HOOKS_DENY_ALL:
+ return category in self.enabled_hook_categories
+ return category not in self.disabled_hook_categories
+
+ def is_hook_activated(self, hook):
+ """return a boolean telling if the given hook class is currently
+ activated or not
+ """
+ return self.is_hook_category_activated(hook.category)
+
# connection management ###################################################
def keep_pool_mode(self, mode):
@@ -408,47 +631,12 @@
"""return the source where the entity with id <eid> is located"""
return self.repo.source_from_eid(eid, self)
- def decorate_rset(self, rset, propagate=False):
- rset.vreg = self.vreg
- rset.req = propagate and self or self.actual_session()
+ def execute(self, rql, kwargs=None, eid_key=None, build_descr=True):
+ """db-api like method directly linked to the querier execute method"""
+ rset = self._execute(self, rql, kwargs, eid_key, build_descr)
+ rset.req = self
return rset
- @property
- def super_session(self):
- try:
- csession = self.childsession
- except AttributeError:
- if isinstance(self, (ChildSession, InternalSession)):
- csession = self
- else:
- csession = ChildSession(self)
- self.childsession = csession
- # need shared pool set
- self.set_pool(checkclosed=False)
- return csession
-
- def unsafe_execute(self, rql, kwargs=None, eid_key=None, build_descr=True,
- propagate=False):
- """like .execute but with security checking disabled (this method is
- internal to the server, it's not part of the db-api)
-
- if `propagate` is true, the super_session will be attached to the result
- set instead of the parent session, hence further query done through
- entities fetched from this result set will bypass security as well
- """
- return self.super_session.execute(rql, kwargs, eid_key, build_descr,
- propagate)
-
- def execute(self, rql, kwargs=None, eid_key=None, build_descr=True,
- propagate=False):
- """db-api like method directly linked to the querier execute method
-
- Becare that unlike actual cursor.execute, `build_descr` default to
- false
- """
- rset = self._execute(self, rql, kwargs, eid_key, build_descr)
- return self.decorate_rset(rset, propagate)
-
def _clear_thread_data(self):
"""remove everything from the thread local storage, except pool
which is explicitly removed by reset_pool, and mode which is set anyway
@@ -472,58 +660,61 @@
return
if self.commit_state:
return
- # on rollback, an operation should have the following state
- # information:
- # - processed by the precommit/commit event or not
- # - if processed, is it the failed operation
- try:
- for trstate in ('precommit', 'commit'):
- processed = []
- self.commit_state = trstate
- try:
- while self.pending_operations:
- operation = self.pending_operations.pop(0)
- operation.processed = trstate
- processed.append(operation)
+ # by default, operations are executed with security turned off
+ with security_enabled(self, False, False):
+ # on rollback, an operation should have the following state
+ # information:
+ # - processed by the precommit/commit event or not
+ # - if processed, is it the failed operation
+ try:
+ for trstate in ('precommit', 'commit'):
+ processed = []
+ self.commit_state = trstate
+ try:
+ while self.pending_operations:
+ operation = self.pending_operations.pop(0)
+ operation.processed = trstate
+ processed.append(operation)
+ operation.handle_event('%s_event' % trstate)
+ self.pending_operations[:] = processed
+ self.debug('%s session %s done', trstate, self.id)
+ except:
+ self.exception('error while %sing', trstate)
+ # if error on [pre]commit:
+ #
+ # * set .failed = True on the operation causing the failure
+ # * call revert<event>_event on processed operations
+ # * call rollback_event on *all* operations
+ #
+ # that seems more natural than not calling rollback_event
+ # for processed operations, and allow generic rollback
+ # instead of having to implements rollback, revertprecommit
+                    # and revertcommit, that will be enough in most cases.
+ operation.failed = True
+ for operation in processed:
+ operation.handle_event('revert%s_event' % trstate)
+ # XXX use slice notation since self.pending_operations is a
+ # read-only property.
+ self.pending_operations[:] = processed + self.pending_operations
+ self.rollback(reset_pool)
+ raise
+ self.pool.commit()
+ self.commit_state = trstate = 'postcommit'
+ while self.pending_operations:
+ operation = self.pending_operations.pop(0)
+ operation.processed = trstate
+ try:
operation.handle_event('%s_event' % trstate)
- self.pending_operations[:] = processed
- self.debug('%s session %s done', trstate, self.id)
- except:
- self.exception('error while %sing', trstate)
- # if error on [pre]commit:
- #
- # * set .failed = True on the operation causing the failure
- # * call revert<event>_event on processed operations
- # * call rollback_event on *all* operations
- #
- # that seems more natural than not calling rollback_event
- # for processed operations, and allow generic rollback
- # instead of having to implements rollback, revertprecommit
- # and revertcommit, that will be enough in mont case.
- operation.failed = True
- for operation in processed:
- operation.handle_event('revert%s_event' % trstate)
- # XXX use slice notation since self.pending_operations is a
- # read-only property.
- self.pending_operations[:] = processed + self.pending_operations
- self.rollback(reset_pool)
- raise
- self.pool.commit()
- self.commit_state = trstate = 'postcommit'
- while self.pending_operations:
- operation = self.pending_operations.pop(0)
- operation.processed = trstate
- try:
- operation.handle_event('%s_event' % trstate)
- except:
- self.critical('error while %sing', trstate,
- exc_info=sys.exc_info())
- self.info('%s session %s done', trstate, self.id)
- finally:
- self._clear_thread_data()
- self._touch()
- if reset_pool:
- self.reset_pool(ignoremode=True)
+ except:
+ self.critical('error while %sing', trstate,
+ exc_info=sys.exc_info())
+ self.info('%s session %s done', trstate, self.id)
+ return self.transaction_uuid(set=False)
+ finally:
+ self._clear_thread_data()
+ self._touch()
+ if reset_pool:
+ self.reset_pool(ignoremode=True)
def rollback(self, reset_pool=True):
"""rollback the current session's transaction"""
@@ -533,21 +724,23 @@
self._touch()
self.debug('rollback session %s done (no db activity)', self.id)
return
- try:
- while self.pending_operations:
- try:
- operation = self.pending_operations.pop(0)
- operation.handle_event('rollback_event')
- except:
- self.critical('rollback error', exc_info=sys.exc_info())
- continue
- self.pool.rollback()
- self.debug('rollback for session %s done', self.id)
- finally:
- self._clear_thread_data()
- self._touch()
- if reset_pool:
- self.reset_pool(ignoremode=True)
+ # by default, operations are executed with security turned off
+ with security_enabled(self, False, False):
+ try:
+ while self.pending_operations:
+ try:
+ operation = self.pending_operations.pop(0)
+ operation.handle_event('rollback_event')
+ except:
+ self.critical('rollback error', exc_info=sys.exc_info())
+ continue
+ self.pool.rollback()
+ self.debug('rollback for session %s done', self.id)
+ finally:
+ self._clear_thread_data()
+ self._touch()
+ if reset_pool:
+ self.reset_pool(ignoremode=True)
def close(self):
"""do not close pool on session close, since they are shared now"""
@@ -592,10 +785,31 @@
def add_operation(self, operation, index=None):
"""add an observer"""
assert self.commit_state != 'commit'
- if index is not None:
+ if index is None:
+ self.pending_operations.append(operation)
+ else:
self.pending_operations.insert(index, operation)
- else:
- self.pending_operations.append(operation)
+
+ # undo support ############################################################
+
+ def undoable_action(self, action, ertype):
+ return action in self.undo_actions and not ertype in NO_UNDO_TYPES
+ # XXX elif transaction on mark it partial
+
+ def transaction_uuid(self, set=True):
+ try:
+ return self.transaction_data['tx_uuid']
+ except KeyError:
+ if not set:
+ return
+ self.transaction_data['tx_uuid'] = uuid = uuid4().hex
+ self.repo.system_source.start_undoable_transaction(self, uuid)
+ return uuid
+
+ def transaction_inc_action_counter(self):
+ num = self.transaction_data.setdefault('tx_action_count', 0) + 1
+ self.transaction_data['tx_action_count'] = num
+ return num
# querier helpers #########################################################
@@ -671,6 +885,25 @@
# deprecated ###############################################################
+ @deprecated("[3.7] control security with session.[read|write]_security")
+ def unsafe_execute(self, rql, kwargs=None, eid_key=None, build_descr=True,
+ propagate=False):
+ """like .execute but with security checking disabled (this method is
+ internal to the server, it's not part of the db-api)
+ """
+ return self.execute(rql, kwargs, eid_key, build_descr)
+
+ @property
+ @deprecated("[3.7] is_super_session is deprecated, test "
+ "session.read_security and or session.write_security")
+ def is_super_session(self):
+ return not self.read_security or not self.write_security
+
+ @deprecated("[3.7] session is actual session")
+ def actual_session(self):
+ """return the original parent session if any, else self"""
+ return self
+
@property
@deprecated("[3.6] use session.vreg.schema")
def schema(self):
@@ -697,98 +930,16 @@
return self.entity_from_eid(eid)
-class ChildSession(Session):
- """child (or internal) session are used to hijack the security system
- """
- cnxtype = 'inmemory'
-
- def __init__(self, parent_session):
- self.id = None
- self.is_internal_session = False
- self.is_super_session = True
- # session which has created this one
- self.parent_session = parent_session
- self.user = InternalManager()
- self.user.req = self # XXX remove when "vreg = user.req.vreg" hack in entity.py is gone
- self.repo = parent_session.repo
- self.vreg = parent_session.vreg
- self.data = parent_session.data
- self.encoding = parent_session.encoding
- self.lang = parent_session.lang
- self._ = self.__ = parent_session._
- # short cut to querier .execute method
- self._execute = self.repo.querier.execute
-
- @property
- def super_session(self):
- return self
-
- def get_mode(self):
- return self.parent_session.mode
- def set_mode(self, value):
- self.parent_session.set_mode(value)
- mode = property(get_mode, set_mode)
-
- def get_commit_state(self):
- return self.parent_session.commit_state
- def set_commit_state(self, value):
- self.parent_session.set_commit_state(value)
- commit_state = property(get_commit_state, set_commit_state)
-
- @property
- def pool(self):
- return self.parent_session.pool
- @property
- def pending_operations(self):
- return self.parent_session.pending_operations
- @property
- def transaction_data(self):
- return self.parent_session.transaction_data
-
- def set_pool(self):
- """the session need a pool to execute some queries"""
- self.parent_session.set_pool()
-
- def reset_pool(self):
- """the session has no longer using its pool, at least for some time
- """
- self.parent_session.reset_pool()
-
- def actual_session(self):
- """return the original parent session if any, else self"""
- return self.parent_session
-
- def commit(self, reset_pool=True):
- """commit the current session's transaction"""
- self.parent_session.commit(reset_pool)
-
- def rollback(self, reset_pool=True):
- """rollback the current session's transaction"""
- self.parent_session.rollback(reset_pool)
-
- def close(self):
- """do not close pool on session close, since they are shared now"""
- self.parent_session.close()
-
- def user_data(self):
- """returns a dictionnary with this user's information"""
- return self.parent_session.user_data()
-
-
class InternalSession(Session):
"""special session created internaly by the repository"""
+ is_internal_session = True
def __init__(self, repo, cnxprops=None):
super(InternalSession, self).__init__(InternalManager(), repo, cnxprops,
_id='internal')
self.user.req = self # XXX remove when "vreg = user.req.vreg" hack in entity.py is gone
self.cnxtype = 'inmemory'
- self.is_internal_session = True
- self.is_super_session = True
-
- @property
- def super_session(self):
- return self
+ self.disable_hook_categories('integrity')
class InternalManager(object):
--- a/server/sources/__init__.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/sources/__init__.py Wed Mar 24 10:23:57 2010 +0100
@@ -351,7 +351,7 @@
"""update an entity in the source"""
raise NotImplementedError()
- def delete_entity(self, session, etype, eid):
+ def delete_entity(self, session, entity):
"""delete an entity from the source"""
raise NotImplementedError()
@@ -372,11 +372,15 @@
def create_eid(self, session):
raise NotImplementedError()
- def add_info(self, session, entity, source, extid=None):
+ def add_info(self, session, entity, source, extid):
"""add type and source info for an eid into the system table"""
raise NotImplementedError()
- def delete_info(self, session, eid, etype, uri, extid):
+ def update_info(self, session, entity, need_fti_update):
+ """mark entity as being modified, fulltext reindex if needed"""
+ raise NotImplementedError()
+
+ def delete_info(self, session, entity, uri, extid, attributes, relations):
"""delete system information on deletion of an entity by transfering
record from the entities table to the deleted_entities table
"""
--- a/server/sources/extlite.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/sources/extlite.py Wed Mar 24 10:23:57 2010 +0100
@@ -20,12 +20,6 @@
self.source = source
self._cnx = None
- @property
- def logged_user(self):
- if self._cnx is None:
- self._cnx = self.source._sqlcnx
- return self._cnx.logged_user
-
def cursor(self):
if self._cnx is None:
self._cnx = self.source._sqlcnx
@@ -231,15 +225,15 @@
"""update an entity in the source"""
raise NotImplementedError()
- def delete_entity(self, session, etype, eid):
+ def delete_entity(self, session, entity):
"""delete an entity from the source
this is not deleting a file in the svn but deleting entities from the
source. Main usage is to delete repository content when a Repository
entity is deleted.
"""
- attrs = {SQL_PREFIX + 'eid': eid}
- sql = self.sqladapter.sqlgen.delete(SQL_PREFIX + etype, attrs)
+ attrs = {'cw_eid': entity.eid}
+ sql = self.sqladapter.sqlgen.delete(SQL_PREFIX + entity.__regid__, attrs)
self.doexec(session, sql, attrs)
def local_add_relation(self, session, subject, rtype, object):
--- a/server/sources/ldapuser.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/sources/ldapuser.py Wed Mar 24 10:23:57 2010 +0100
@@ -476,7 +476,8 @@
if eid:
self.warning('deleting ldap user with eid %s and dn %s',
eid, base)
- self.repo.delete_info(session, eid)
+ entity = session.entity_from_eid(eid, 'CWUser')
+ self.repo.delete_info(session, entity, self.uri, base)
self._cache.pop(base, None)
return []
## except ldap.REFERRAL, e:
@@ -554,7 +555,7 @@
"""replace an entity in the source"""
raise RepositoryError('this source is read only')
- def delete_entity(self, session, etype, eid):
+ def delete_entity(self, session, entity):
"""delete an entity from the source"""
raise RepositoryError('this source is read only')
--- a/server/sources/native.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/sources/native.py Wed Mar 24 10:23:57 2010 +0100
@@ -11,27 +11,32 @@
:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
"""
+from __future__ import with_statement
+
__docformat__ = "restructuredtext en"
+from pickle import loads, dumps
from threading import Lock
from datetime import datetime
from base64 import b64decode, b64encode
+from contextlib import contextmanager
from logilab.common.compat import any
from logilab.common.cache import Cache
from logilab.common.decorators import cached, clear_cache
from logilab.common.configuration import Method
-from logilab.common.adbh import get_adv_func_helper
from logilab.common.shellutils import getlogin
+from logilab.database import get_db_helper
-from indexer import get_indexer
-
-from cubicweb import UnknownEid, AuthenticationError, Binary, server
+from cubicweb import UnknownEid, AuthenticationError, Binary, server, neg_role
+from cubicweb import transaction as tx
+from cubicweb.schema import VIRTUAL_RTYPES
from cubicweb.cwconfig import CubicWebNoAppConfiguration
from cubicweb.server import hook
from cubicweb.server.utils import crypt_password
from cubicweb.server.sqlutils import SQL_PREFIX, SQLAdapterMixIn
from cubicweb.server.rqlannotation import set_qdata
+from cubicweb.server.session import hooks_control, security_enabled
from cubicweb.server.sources import AbstractSource, dbg_st_search, dbg_results
from cubicweb.server.sources.rql2sql import SQLGenerator
@@ -95,6 +100,35 @@
table, restr, attr)
+def sql_or_clauses(sql, clauses):
+ select, restr = sql.split(' WHERE ', 1)
+ restrclauses = restr.split(' AND ')
+ for clause in clauses:
+ restrclauses.remove(clause)
+ if restrclauses:
+ restr = '%s AND (%s)' % (' AND '.join(restrclauses),
+ ' OR '.join(clauses))
+ else:
+ restr = '(%s)' % ' OR '.join(clauses)
+ return '%s WHERE %s' % (select, restr)
+
+
+class UndoException(Exception):
+ """something went wrong during undoing"""
+
+
+def _undo_check_relation_target(tentity, rdef, role):
+ """check linked entity has not been redirected for this relation"""
+ card = rdef.role_cardinality(role)
+ if card in '?1' and tentity.related(rdef.rtype, role):
+ raise UndoException(tentity._cw._(
+ "Can't restore %(role)s relation %(rtype)s to entity %(eid)s which "
+ "is already linked using this relation.")
+ % {'role': neg_role(role),
+ 'rtype': rdef.rtype,
+ 'eid': tentity.eid})
+
+
class NativeSQLSource(SQLAdapterMixIn, AbstractSource):
"""adapter for source using the native cubicweb schema (see below)
"""
@@ -149,24 +183,17 @@
self.authentifiers = [LoginPasswordAuthentifier(self)]
AbstractSource.__init__(self, repo, appschema, source_config,
*args, **kwargs)
+ # sql generator
+ self._rql_sqlgen = self.sqlgen_class(appschema, self.dbhelper,
+ ATTR_MAP.copy())
# full text index helper
self.do_fti = not repo.config['delay-full-text-indexation']
- if self.do_fti:
- self.indexer = get_indexer(self.dbdriver, self.encoding)
- # XXX should go away with logilab.db
- self.dbhelper.fti_uid_attr = self.indexer.uid_attr
- self.dbhelper.fti_table = self.indexer.table
- self.dbhelper.fti_restriction_sql = self.indexer.restriction_sql
- self.dbhelper.fti_need_distinct_query = self.indexer.need_distinct
- else:
- self.dbhelper.fti_need_distinct_query = False
- # sql generator
- self._rql_sqlgen = self.sqlgen_class(appschema, self.dbhelper,
- self.encoding, ATTR_MAP.copy())
# sql queries cache
self._cache = Cache(repo.config['rql-cache-size'])
self._temp_table_data = {}
self._eid_creation_lock = Lock()
+ # (etype, attr) / storage mapping
+ self._storages = {}
# XXX no_sqlite_wrap trick since we've a sqlite locking pb when
# running unittest_multisources with the wrapping below
if self.dbdriver == 'sqlite' and \
@@ -209,7 +236,7 @@
pool.pool_set()
# check full text index availibility
if self.do_fti:
- if not self.indexer.has_fti_table(pool['system']):
+ if not self.dbhelper.has_fti_table(pool['system']):
if not self.repo.config.creating:
self.critical('no text index table')
self.do_fti = False
@@ -243,6 +270,18 @@
def unmap_attribute(self, etype, attr):
self._rql_sqlgen.attr_map.pop('%s.%s' % (etype, attr), None)
+ def set_storage(self, etype, attr, storage):
+ storage_dict = self._storages.setdefault(etype, {})
+ storage_dict[attr] = storage
+ self.map_attribute(etype, attr, storage.sqlgen_callback)
+
+ def unset_storage(self, etype, attr):
+ self._storages[etype].pop(attr)
+ # if etype has no storage left, remove the entry
+ if not self._storages[etype]:
+ del self._storages[etype]
+ self.unmap_attribute(etype, attr)
+
# ISource interface #######################################################
def compile_rql(self, rql, sols):
@@ -323,8 +362,7 @@
assert isinstance(sql, basestring), repr(sql)
try:
cursor = self.doexec(session, sql, args)
- except (self.dbapi_module.OperationalError,
- self.dbapi_module.InterfaceError):
+ except (self.OperationalError, self.InterfaceError):
# FIXME: better detection of deconnection pb
self.info("request failed '%s' ... retry with a new cursor", sql)
session.pool.reconnect(self)
@@ -344,7 +382,7 @@
prefix='ON THE FLY temp data insertion into %s from' % table)
# generate sql queries if we are able to do so
sql, query_args = self._rql_sqlgen.generate(union, args, varmap)
- query = 'INSERT INTO %s %s' % (table, sql.encode(self.encoding))
+ query = 'INSERT INTO %s %s' % (table, sql.encode(self._dbencoding))
self.doexec(session, query, self.merge_args(args, query_args))
def manual_insert(self, results, table, session):
@@ -361,7 +399,7 @@
row = tuple(row)
for index, cell in enumerate(row):
if isinstance(cell, Binary):
- cell = self.binary(cell.getvalue())
+ cell = self._binary(cell.getvalue())
kwargs[str(index)] = cell
kwargs_list.append(kwargs)
self.doexecmany(session, query, kwargs_list)
@@ -379,37 +417,83 @@
except KeyError:
continue
+ @contextmanager
+ def _storage_handler(self, entity, event):
+ # 1/ memorize values as they are before the storage is called.
+ # For instance, the BFSStorage will replace the `data`
+ # binary value with a Binary containing the destination path
+ # on the filesystem. To make the entity.data usage absolutely
+ # transparent, we'll have to reset entity.data to its binary
+ # value once the SQL query will be executed
+ orig_values = {}
+ etype = entity.__regid__
+ for attr, storage in self._storages.get(etype, {}).items():
+ if attr in entity.edited_attributes:
+ orig_values[attr] = entity[attr]
+ handler = getattr(storage, 'entity_%s' % event)
+ handler(entity, attr)
+ yield # 2/ execute the source's instructions
+ # 3/ restore original values
+ for attr, value in orig_values.items():
+ entity[attr] = value
+
def add_entity(self, session, entity):
"""add a new entity to the source"""
- attrs = self.preprocess_entity(entity)
- sql = self.sqlgen.insert(SQL_PREFIX + str(entity.e_schema), attrs)
- self.doexec(session, sql, attrs)
+ with self._storage_handler(entity, 'added'):
+ attrs = self.preprocess_entity(entity)
+ sql = self.sqlgen.insert(SQL_PREFIX + entity.__regid__, attrs)
+ self.doexec(session, sql, attrs)
+ if session.undoable_action('C', entity.__regid__):
+ self._record_tx_action(session, 'tx_entity_actions', 'C',
+ etype=entity.__regid__, eid=entity.eid)
def update_entity(self, session, entity):
"""replace an entity in the source"""
- attrs = self.preprocess_entity(entity)
- sql = self.sqlgen.update(SQL_PREFIX + str(entity.e_schema), attrs,
- [SQL_PREFIX + 'eid'])
- self.doexec(session, sql, attrs)
+ with self._storage_handler(entity, 'updated'):
+ attrs = self.preprocess_entity(entity)
+ if session.undoable_action('U', entity.__regid__):
+ changes = self._save_attrs(session, entity, attrs)
+ self._record_tx_action(session, 'tx_entity_actions', 'U',
+ etype=entity.__regid__, eid=entity.eid,
+ changes=self._binary(dumps(changes)))
+ sql = self.sqlgen.update(SQL_PREFIX + entity.__regid__, attrs,
+ ['cw_eid'])
+ self.doexec(session, sql, attrs)
- def delete_entity(self, session, etype, eid):
+ def delete_entity(self, session, entity):
"""delete an entity from the source"""
- attrs = {SQL_PREFIX + 'eid': eid}
- sql = self.sqlgen.delete(SQL_PREFIX + etype, attrs)
- self.doexec(session, sql, attrs)
+ with self._storage_handler(entity, 'deleted'):
+ if session.undoable_action('D', entity.__regid__):
+ attrs = [SQL_PREFIX + r.type
+ for r in entity.e_schema.subject_relations()
+ if (r.final or r.inlined) and not r in VIRTUAL_RTYPES]
+ changes = self._save_attrs(session, entity, attrs)
+ self._record_tx_action(session, 'tx_entity_actions', 'D',
+ etype=entity.__regid__, eid=entity.eid,
+ changes=self._binary(dumps(changes)))
+ attrs = {'cw_eid': entity.eid}
+ sql = self.sqlgen.delete(SQL_PREFIX + entity.__regid__, attrs)
+ self.doexec(session, sql, attrs)
- def add_relation(self, session, subject, rtype, object, inlined=False):
+ def _add_relation(self, session, subject, rtype, object, inlined=False):
"""add a relation to the source"""
if inlined is False:
attrs = {'eid_from': subject, 'eid_to': object}
sql = self.sqlgen.insert('%s_relation' % rtype, attrs)
else: # used by data import
etype = session.describe(subject)[0]
- attrs = {SQL_PREFIX + 'eid': subject, SQL_PREFIX + rtype: object}
+ attrs = {'cw_eid': subject, SQL_PREFIX + rtype: object}
sql = self.sqlgen.update(SQL_PREFIX + etype, attrs,
- [SQL_PREFIX + 'eid'])
+ ['cw_eid'])
self.doexec(session, sql, attrs)
+ def add_relation(self, session, subject, rtype, object, inlined=False):
+ """add a relation to the source"""
+ self._add_relation(session, subject, rtype, object, inlined)
+ if session.undoable_action('A', rtype):
+ self._record_tx_action(session, 'tx_relation_actions', 'A',
+ eid_from=subject, rtype=rtype, eid_to=object)
+
def delete_relation(self, session, subject, rtype, object):
"""delete a relation from the source"""
rschema = self.schema.rschema(rtype)
@@ -423,6 +507,9 @@
attrs = {'eid_from': subject, 'eid_to': object}
sql = self.sqlgen.delete('%s_relation' % rtype, attrs)
self.doexec(session, sql, attrs)
+ if session.undoable_action('R', rtype):
+ self._record_tx_action(session, 'tx_relation_actions', 'R',
+ eid_from=subject, rtype=rtype, eid_to=object)
def doexec(self, session, query, args=None, rollback=True):
"""Execute a query.
@@ -479,6 +566,9 @@
# short cut to method requiring advanced db helper usage ##################
+ def binary_to_str(self, value):
+ return self.dbhelper.dbapi_module.binary_to_str(value)
+
def create_index(self, session, table, column, unique=False):
cursor = LogCursor(session.pool[self.uri])
self.dbhelper.create_index(cursor, table, column, unique)
@@ -493,7 +583,7 @@
"""return a tuple (type, source, extid) for the entity with id <eid>"""
sql = 'SELECT type, source, extid FROM entities WHERE eid=%s' % eid
try:
- res = session.system_sql(sql).fetchone()
+ res = self.doexec(session, sql).fetchone()
except:
assert session.pool, 'session has no pool set'
raise UnknownEid(eid)
@@ -508,9 +598,10 @@
def extid2eid(self, session, source, extid):
"""get eid from an external id. Return None if no record found."""
assert isinstance(extid, str)
- cursor = session.system_sql('SELECT eid FROM entities WHERE '
- 'extid=%(x)s AND source=%(s)s',
- {'x': b64encode(extid), 's': source.uri})
+ cursor = self.doexec(session,
+ 'SELECT eid FROM entities '
+ 'WHERE extid=%(x)s AND source=%(s)s',
+ {'x': b64encode(extid), 's': source.uri})
# XXX testing rowcount cause strange bug with sqlite, results are there
# but rowcount is 0
#if cursor.rowcount > 0:
@@ -541,7 +632,7 @@
finally:
self._eid_creation_lock.release()
- def add_info(self, session, entity, source, extid=None, complete=True):
+ def add_info(self, session, entity, source, extid, complete):
"""add type and source info for an eid into the system table"""
# begin by inserting eid/type/source/extid into the entities table
if extid is not None:
@@ -549,7 +640,7 @@
extid = b64encode(extid)
attrs = {'type': entity.__regid__, 'eid': entity.eid, 'extid': extid,
'source': source.uri, 'mtime': datetime.now()}
- session.system_sql(self.sqlgen.insert('entities', attrs), attrs)
+ self.doexec(session, self.sqlgen.insert('entities', attrs), attrs)
# now we can update the full text index
if self.do_fti and self.need_fti_indexation(entity.__regid__):
if complete:
@@ -557,26 +648,28 @@
FTIndexEntityOp(session, entity=entity)
def update_info(self, session, entity, need_fti_update):
+ """mark entity as being modified, fulltext reindex if needed"""
if self.do_fti and need_fti_update:
# reindex the entity only if this query is updating at least
# one indexable attribute
FTIndexEntityOp(session, entity=entity)
# update entities.mtime
attrs = {'eid': entity.eid, 'mtime': datetime.now()}
- session.system_sql(self.sqlgen.update('entities', attrs, ['eid']), attrs)
+ self.doexec(session, self.sqlgen.update('entities', attrs, ['eid']), attrs)
- def delete_info(self, session, eid, etype, uri, extid):
+ def delete_info(self, session, entity, uri, extid):
"""delete system information on deletion of an entity by transfering
record from the entities table to the deleted_entities table
"""
- attrs = {'eid': eid}
- session.system_sql(self.sqlgen.delete('entities', attrs), attrs)
+ attrs = {'eid': entity.eid}
+ self.doexec(session, self.sqlgen.delete('entities', attrs), attrs)
if extid is not None:
assert isinstance(extid, str), type(extid)
extid = b64encode(extid)
- attrs = {'type': etype, 'eid': eid, 'extid': extid,
- 'source': uri, 'dtime': datetime.now()}
- session.system_sql(self.sqlgen.insert('deleted_entities', attrs), attrs)
+ attrs = {'type': entity.__regid__, 'eid': entity.eid, 'extid': extid,
+ 'source': uri, 'dtime': datetime.now(),
+ }
+ self.doexec(session, self.sqlgen.insert('deleted_entities', attrs), attrs)
def modified_entities(self, session, etypes, mtime):
"""return a 2-uple:
@@ -587,13 +680,316 @@
deleted since the given timestamp
"""
modsql = _modified_sql('entities', etypes)
- cursor = session.system_sql(modsql, {'time': mtime})
+ cursor = self.doexec(session, modsql, {'time': mtime})
modentities = cursor.fetchall()
delsql = _modified_sql('deleted_entities', etypes)
- cursor = session.system_sql(delsql, {'time': mtime})
+ cursor = self.doexec(session, delsql, {'time': mtime})
delentities = cursor.fetchall()
return modentities, delentities
+ # undo support #############################################################
+
+ def undoable_transactions(self, session, ueid=None, **actionfilters):
+ """See :class:`cubicweb.dbapi.Connection.undoable_transactions`"""
+ # force filtering to session's user if not a manager
+ if not session.user.is_in_group('managers'):
+ ueid = session.user.eid
+ restr = {}
+ if ueid is not None:
+ restr['tx_user'] = ueid
+ sql = self.sqlgen.select('transactions', restr, ('tx_uuid', 'tx_time', 'tx_user'))
+ if actionfilters:
+ # we will need subqueries to filter transactions according to
+ # actions done
+ tearestr = {} # filters on the tx_entity_actions table
+ trarestr = {} # filters on the tx_relation_actions table
+ genrestr = {} # generic filters, applicable to both tables
+ # unless public explicitly set to false, we only consider public
+ # actions
+ if actionfilters.pop('public', True):
+ genrestr['txa_public'] = True
+ # put additional filters in trarestr and/or tearestr
+ for key, val in actionfilters.iteritems():
+ if key == 'etype':
+ # filtering on etype implies filtering on entity actions
+ # only, and with no eid specified
+ assert actionfilters.get('action', 'C') in 'CUD'
+ assert not 'eid' in actionfilters
+ tearestr['etype'] = val
+ elif key == 'eid':
+ # eid filter may apply to 'eid' of tx_entity_actions or to
+ # 'eid_from' OR 'eid_to' of tx_relation_actions
+ if actionfilters.get('action', 'C') in 'CUD':
+ tearestr['eid'] = val
+ if actionfilters.get('action', 'A') in 'AR':
+ trarestr['eid_from'] = val
+ trarestr['eid_to'] = val
+ elif key == 'action':
+ if val in 'CUD':
+ tearestr['txa_action'] = val
+ else:
+ assert val in 'AR'
+ trarestr['txa_action'] = val
+ else:
+ raise AssertionError('unknow filter %s' % key)
+ assert trarestr or tearestr, "can't only filter on 'public'"
+ subqsqls = []
+ # append subqueries to the original query, using EXISTS()
+ if trarestr or (genrestr and not tearestr):
+ trarestr.update(genrestr)
+ trasql = self.sqlgen.select('tx_relation_actions', trarestr, ('1',))
+ if 'eid_from' in trarestr:
+ # replace AND by OR between eid_from/eid_to restriction
+ trasql = sql_or_clauses(trasql, ['eid_from = %(eid_from)s',
+ 'eid_to = %(eid_to)s'])
+ trasql += ' AND transactions.tx_uuid=tx_relation_actions.tx_uuid'
+ subqsqls.append('EXISTS(%s)' % trasql)
+ if tearestr or (genrestr and not trarestr):
+ tearestr.update(genrestr)
+ teasql = self.sqlgen.select('tx_entity_actions', tearestr, ('1',))
+ teasql += ' AND transactions.tx_uuid=tx_entity_actions.tx_uuid'
+ subqsqls.append('EXISTS(%s)' % teasql)
+ if restr:
+ sql += ' AND %s' % ' OR '.join(subqsqls)
+ else:
+ sql += ' WHERE %s' % ' OR '.join(subqsqls)
+ restr.update(trarestr)
+ restr.update(tearestr)
+ # we want results ordered by transaction's time descendant
+ sql += ' ORDER BY tx_time DESC'
+ cu = self.doexec(session, sql, restr)
+ # turn results into transaction objects
+ return [tx.Transaction(*args) for args in cu.fetchall()]
+
+ def tx_info(self, session, txuuid):
+ """See :class:`cubicweb.dbapi.Connection.transaction_info`"""
+ return tx.Transaction(txuuid, *self._tx_info(session, txuuid))
+
+ def tx_actions(self, session, txuuid, public):
+ """See :class:`cubicweb.dbapi.Connection.transaction_actions`"""
+ self._tx_info(session, txuuid)
+ restr = {'tx_uuid': txuuid}
+ if public:
+ restr['txa_public'] = True
+ sql = self.sqlgen.select('tx_entity_actions', restr,
+ ('txa_action', 'txa_public', 'txa_order',
+ 'etype', 'eid', 'changes'))
+ cu = self.doexec(session, sql, restr)
+ actions = [tx.EntityAction(a,p,o,et,e,c and loads(self.binary_to_str(c)))
+ for a,p,o,et,e,c in cu.fetchall()]
+ sql = self.sqlgen.select('tx_relation_actions', restr,
+ ('txa_action', 'txa_public', 'txa_order',
+ 'rtype', 'eid_from', 'eid_to'))
+ cu = self.doexec(session, sql, restr)
+ actions += [tx.RelationAction(*args) for args in cu.fetchall()]
+ return sorted(actions, key=lambda x: x.order)
+
+ def undo_transaction(self, session, txuuid):
+ """See :class:`cubicweb.dbapi.Connection.undo_transaction`"""
+ # set mode so pool isn't released subsequently until commit/rollback
+ session.mode = 'write'
+ errors = []
+ with hooks_control(session, session.HOOKS_DENY_ALL, 'integrity'):
+ with security_enabled(session, read=False):
+ for action in reversed(self.tx_actions(session, txuuid, False)):
+ undomethod = getattr(self, '_undo_%s' % action.action.lower())
+ errors += undomethod(session, action)
+ # remove the transactions record
+ self.doexec(session,
+ "DELETE FROM transactions WHERE tx_uuid='%s'" % txuuid)
+ return errors
+
+ def start_undoable_transaction(self, session, uuid):
+ """session callback to insert a transaction record in the transactions
+ table when some undoable transaction is started
+ """
+ ueid = session.user.eid
+ attrs = {'tx_uuid': uuid, 'tx_user': ueid, 'tx_time': datetime.now()}
+ self.doexec(session, self.sqlgen.insert('transactions', attrs), attrs)
+
+ def _save_attrs(self, session, entity, attrs):
+ """return a pickleable dictionary containing current values for given
+ attributes of the entity
+ """
+ restr = {'cw_eid': entity.eid}
+ sql = self.sqlgen.select(SQL_PREFIX + entity.__regid__, restr, attrs)
+ cu = self.doexec(session, sql, restr)
+ values = dict(zip(attrs, cu.fetchone()))
+ # ensure backend specific binary are converted back to string
+ eschema = entity.e_schema
+ for column in attrs:
+ # [3:] remove 'cw_' prefix
+ attr = column[3:]
+ if not eschema.subjrels[attr].final:
+ continue
+ if eschema.destination(attr) in ('Password', 'Bytes'):
+ value = values[column]
+ if value is not None:
+ values[column] = self.binary_to_str(value)
+ return values
+
+ def _record_tx_action(self, session, table, action, **kwargs):
+ """record a transaction action in the given table (either
+ 'tx_entity_actions' or 'tx_relation_actions')
+ """
+ kwargs['tx_uuid'] = session.transaction_uuid()
+ kwargs['txa_action'] = action
+ kwargs['txa_order'] = session.transaction_inc_action_counter()
+ kwargs['txa_public'] = session.running_dbapi_query
+ self.doexec(session, self.sqlgen.insert(table, kwargs), kwargs)
+
+ def _tx_info(self, session, txuuid):
+ """return transaction's time and user of the transaction with the given uuid.
+
+ raise `NoSuchTransaction` if there is no such transaction or if the
+ session's user isn't allowed to see it.
+ """
+ restr = {'tx_uuid': txuuid}
+ sql = self.sqlgen.select('transactions', restr, ('tx_time', 'tx_user'))
+ cu = self.doexec(session, sql, restr)
+ try:
+ time, ueid = cu.fetchone()
+ except TypeError:
+ raise tx.NoSuchTransaction()
+ if not (session.user.is_in_group('managers')
+ or session.user.eid == ueid):
+ raise tx.NoSuchTransaction()
+ return time, ueid
+
+ def _undo_d(self, session, action):
+ """undo an entity deletion"""
+ errors = []
+ err = errors.append
+ eid = action.eid
+ etype = action.etype
+ _ = session._
+ # get an entity instance
+ try:
+ entity = self.repo.vreg['etypes'].etype_class(etype)(session)
+ except Exception:
+ err("can't restore entity %s of type %s, type no more supported"
+ % (eid, etype))
+ return errors
+ # check for schema changes, entities linked through inlined relation
+ # still exists, rewrap binary values
+ eschema = entity.e_schema
+ getrschema = eschema.subjrels
+ for column, value in action.changes.items():
+ rtype = column[3:] # remove cw_ prefix
+ try:
+ rschema = getrschema[rtype]
+ except KeyError:
+ err(_("Can't restore relation %(rtype)s of entity %(eid)s, "
+ "this relation does not exists anymore in the schema.")
+ % {'rtype': rtype, 'eid': eid})
+ if not rschema.final:
+ assert value is None
+ # try:
+ # tentity = session.entity_from_eid(eid)
+ # except UnknownEid:
+ # err(_("Can't restore %(role)s relation %(rtype)s to "
+ # "entity %(eid)s which doesn't exist anymore.")
+ # % {'role': _('subject'),
+ # 'rtype': _(rtype),
+ # 'eid': eid})
+ # continue
+ # rdef = rdefs[(eschema, tentity.__regid__)]
+ # try:
+ # _undo_check_relation_target(tentity, rdef, 'object')
+ # except UndoException, ex:
+ # err(unicode(ex))
+ # continue
+ # if rschema.inlined:
+ # entity[rtype] = value
+ # else:
+ # # restore relation where inlined changed since the deletion
+ # del action.changes[column]
+ # self._add_relation(session, subject, rtype, object)
+ # # set related cache
+ # session.update_rel_cache_add(eid, rtype, value,
+ # rschema.symmetric)
+ elif eschema.destination(rtype) in ('Bytes', 'Password'):
+ action.changes[column] = self._binary(value)
+ entity[rtype] = Binary(value)
+ elif isinstance(value, str):
+ entity[rtype] = unicode(value, session.encoding, 'replace')
+ else:
+ entity[rtype] = value
+ entity.set_eid(eid)
+ entity.edited_attributes = set(entity)
+ entity.check()
+ self.repo.hm.call_hooks('before_add_entity', session, entity=entity)
+ # restore the entity
+ action.changes['cw_eid'] = eid
+ sql = self.sqlgen.insert(SQL_PREFIX + etype, action.changes)
+ self.doexec(session, sql, action.changes)
+ # restore record in entities (will update fti if needed)
+ self.add_info(session, entity, self, None, True)
+ # remove record from deleted_entities
+ self.doexec(session, 'DELETE FROM deleted_entities WHERE eid=%s' % eid)
+ self.repo.hm.call_hooks('after_add_entity', session, entity=entity)
+ return errors
+
+ def _undo_r(self, session, action):
+ """undo a relation removal"""
+ errors = []
+ err = errors.append
+ _ = session._
+ subj, rtype, obj = action.eid_from, action.rtype, action.eid_to
+ entities = []
+ for role, eid in (('subject', subj), ('object', obj)):
+ try:
+ entities.append(session.entity_from_eid(eid))
+ except UnknownEid:
+ err(_("Can't restore relation %(rtype)s, %(role)s entity %(eid)s"
+ " doesn't exist anymore.")
+ % {'role': _(role),
+ 'rtype': _(rtype),
+ 'eid': eid})
+ if not len(entities) == 2:
+ return errors
+ sentity, oentity = entities
+ try:
+ rschema = self.schema.rschema(rtype)
+ rdef = rschema.rdefs[(sentity.__regid__, oentity.__regid__)]
+ except KeyError:
+ err(_("Can't restore relation %(rtype)s between %(subj)s and "
+ "%(obj)s, that relation does not exists anymore in the "
+ "schema.")
+ % {'rtype': rtype,
+ 'subj': subj,
+ 'obj': obj})
+ else:
+ for role, entity in (('subject', sentity),
+ ('object', oentity)):
+ try:
+ _undo_check_relation_target(entity, rdef, role)
+ except UndoException, ex:
+ err(unicode(ex))
+ continue
+ if not errors:
+ self.repo.hm.call_hooks('before_add_relation', session,
+ eidfrom=subj, rtype=rtype, eidto=obj)
+ # add relation in the database
+ self._add_relation(session, subj, rtype, obj, rschema.inlined)
+ # set related cache
+ session.update_rel_cache_add(subj, rtype, obj, rschema.symmetric)
+ self.repo.hm.call_hooks('after_add_relation', session,
+ eidfrom=subj, rtype=rtype, eidto=obj)
+ return errors
+
+ def _undo_c(self, session, action):
+ """undo an entity creation"""
+ return ['undoing of entity creation not yet supported.']
+
+ def _undo_u(self, session, action):
+ """undo an entity update"""
+ return ['undoing of entity updating not yet supported.']
+
+ def _undo_a(self, session, action):
+ """undo a relation addition"""
+ return ['undoing of relation addition not yet supported.']
+
# full text index handling #################################################
@cached
@@ -616,7 +1012,7 @@
index
"""
try:
- self.indexer.cursor_unindex_object(eid, session.pool['system'])
+ self.dbhelper.cursor_unindex_object(eid, session.pool['system'])
except Exception: # let KeyboardInterrupt / SystemExit propagate
self.exception('error while unindexing %s', eid)
@@ -627,8 +1023,8 @@
try:
# use cursor_index_object, not cursor_reindex_object since
# unindexing done in the FTIndexEntityOp
- self.indexer.cursor_index_object(entity.eid, entity,
- session.pool['system'])
+ self.dbhelper.cursor_index_object(entity.eid, entity,
+ session.pool['system'])
except Exception: # let KeyboardInterrupt / SystemExit propagate
self.exception('error while reindexing %s', entity)
@@ -661,8 +1057,8 @@
def sql_schema(driver):
- helper = get_adv_func_helper(driver)
- tstamp_col_type = helper.TYPE_MAPPING['Datetime']
+ helper = get_db_helper(driver)
+ typemap = helper.TYPE_MAPPING
schema = """
/* Create the repository's system database */
@@ -674,10 +1070,10 @@
source VARCHAR(64) NOT NULL,
mtime %s NOT NULL,
extid VARCHAR(256)
-);
-CREATE INDEX entities_type_idx ON entities(type);
-CREATE INDEX entities_mtime_idx ON entities(mtime);
-CREATE INDEX entities_extid_idx ON entities(extid);
+);;
+CREATE INDEX entities_type_idx ON entities(type);;
+CREATE INDEX entities_mtime_idx ON entities(mtime);;
+CREATE INDEX entities_extid_idx ON entities(extid);;
CREATE TABLE deleted_entities (
eid INTEGER PRIMARY KEY NOT NULL,
@@ -685,32 +1081,80 @@
source VARCHAR(64) NOT NULL,
dtime %s NOT NULL,
extid VARCHAR(256)
-);
-CREATE INDEX deleted_entities_type_idx ON deleted_entities(type);
-CREATE INDEX deleted_entities_dtime_idx ON deleted_entities(dtime);
-CREATE INDEX deleted_entities_extid_idx ON deleted_entities(extid);
-""" % (helper.sql_create_sequence('entities_id_seq'), tstamp_col_type, tstamp_col_type)
+);;
+CREATE INDEX deleted_entities_type_idx ON deleted_entities(type);;
+CREATE INDEX deleted_entities_dtime_idx ON deleted_entities(dtime);;
+CREATE INDEX deleted_entities_extid_idx ON deleted_entities(extid);;
+
+CREATE TABLE transactions (
+ tx_uuid CHAR(32) PRIMARY KEY NOT NULL,
+ tx_user INTEGER NOT NULL,
+ tx_time %s NOT NULL
+);;
+CREATE INDEX transactions_tx_user_idx ON transactions(tx_user);;
+
+CREATE TABLE tx_entity_actions (
+ tx_uuid CHAR(32) REFERENCES transactions(tx_uuid) ON DELETE CASCADE,
+ txa_action CHAR(1) NOT NULL,
+ txa_public %s NOT NULL,
+ txa_order INTEGER,
+ eid INTEGER NOT NULL,
+ etype VARCHAR(64) NOT NULL,
+ changes %s
+);;
+CREATE INDEX tx_entity_actions_txa_action_idx ON tx_entity_actions(txa_action);;
+CREATE INDEX tx_entity_actions_txa_public_idx ON tx_entity_actions(txa_public);;
+CREATE INDEX tx_entity_actions_eid_idx ON tx_entity_actions(eid);;
+CREATE INDEX tx_entity_actions_etype_idx ON tx_entity_actions(etype);;
+
+CREATE TABLE tx_relation_actions (
+ tx_uuid CHAR(32) REFERENCES transactions(tx_uuid) ON DELETE CASCADE,
+ txa_action CHAR(1) NOT NULL,
+ txa_public %s NOT NULL,
+ txa_order INTEGER,
+ eid_from INTEGER NOT NULL,
+ eid_to INTEGER NOT NULL,
+ rtype VARCHAR(256) NOT NULL
+);;
+CREATE INDEX tx_relation_actions_txa_action_idx ON tx_relation_actions(txa_action);;
+CREATE INDEX tx_relation_actions_txa_public_idx ON tx_relation_actions(txa_public);;
+CREATE INDEX tx_relation_actions_eid_from_idx ON tx_relation_actions(eid_from);;
+CREATE INDEX tx_relation_actions_eid_to_idx ON tx_relation_actions(eid_to);;
+""" % (helper.sql_create_sequence('entities_id_seq').replace(';', ';;'),
+ typemap['Datetime'], typemap['Datetime'], typemap['Datetime'],
+ typemap['Boolean'], typemap['Bytes'], typemap['Boolean'])
+ if helper.backend_name == 'sqlite':
+ # sqlite supports the ON DELETE CASCADE syntax but does nothing
+ schema += '''
+CREATE TRIGGER fkd_transactions
+BEFORE DELETE ON transactions
+FOR EACH ROW BEGIN
+ DELETE FROM tx_entity_actions WHERE tx_uuid=OLD.tx_uuid;
+ DELETE FROM tx_relation_actions WHERE tx_uuid=OLD.tx_uuid;
+END;;
+'''
return schema
def sql_drop_schema(driver):
- helper = get_adv_func_helper(driver)
+ helper = get_db_helper(driver)
return """
%s
DROP TABLE entities;
DROP TABLE deleted_entities;
+DROP TABLE transactions;
+DROP TABLE tx_entity_actions;
+DROP TABLE tx_relation_actions;
""" % helper.sql_drop_sequence('entities_id_seq')
def grant_schema(user, set_owner=True):
result = ''
- if set_owner:
- result = 'ALTER TABLE entities OWNER TO %s;\n' % user
- result += 'ALTER TABLE deleted_entities OWNER TO %s;\n' % user
- result += 'ALTER TABLE entities_id_seq OWNER TO %s;\n' % user
- result += 'GRANT ALL ON entities TO %s;\n' % user
- result += 'GRANT ALL ON deleted_entities TO %s;\n' % user
- result += 'GRANT ALL ON entities_id_seq TO %s;\n' % user
+ for table in ('entities', 'deleted_entities', 'entities_id_seq',
+ 'transactions', 'tx_entity_actions', 'tx_relation_actions'):
+ if set_owner:
+ result = 'ALTER TABLE %s OWNER TO %s;\n' % (table, user)
+ result += 'GRANT ALL ON %s TO %s;\n' % (table, user)
return result
--- a/server/sources/pyrorql.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/sources/pyrorql.py Wed Mar 24 10:23:57 2010 +0100
@@ -203,7 +203,8 @@
insert=False)
# entity has been deleted from external repository but is not known here
if eid is not None:
- repo.delete_info(session, eid)
+ entity = session.entity_from_eid(eid, etype)
+ repo.delete_info(session, entity, self.uri, extid)
except:
self.exception('while updating %s with external id %s of source %s',
etype, extid, self.uri)
@@ -350,11 +351,11 @@
self._query_cache.clear()
entity.clear_all_caches()
- def delete_entity(self, session, etype, eid):
+ def delete_entity(self, session, entity):
"""delete an entity from the source"""
cu = session.pool[self.uri]
- cu.execute('DELETE %s X WHERE X eid %%(x)s' % etype,
- {'x': self.eid2extid(eid, session)}, 'x')
+ cu.execute('DELETE %s X WHERE X eid %%(x)s' % entity.__regid__,
+ {'x': self.eid2extid(entity.eid, session)}, 'x')
self._query_cache.clear()
def add_relation(self, session, subject, rtype, object):
--- a/server/sources/rql2sql.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/sources/rql2sql.py Wed Mar 24 10:23:57 2010 +0100
@@ -332,16 +332,16 @@
protected by a lock
"""
- def __init__(self, schema, dbms_helper, dbencoding='UTF-8', attrmap=None):
+ def __init__(self, schema, dbms_helper, attrmap=None):
self.schema = schema
self.dbms_helper = dbms_helper
- self.dbencoding = dbencoding
+ self.dbencoding = dbms_helper.dbencoding
self.keyword_map = {'NOW' : self.dbms_helper.sql_current_timestamp,
'TODAY': self.dbms_helper.sql_current_date,
}
if not self.dbms_helper.union_parentheses_support:
self.union_sql = self.noparen_union_sql
- if self.dbms_helper.fti_need_distinct_query:
+ if self.dbms_helper.fti_need_distinct:
self.__union_sql = self.union_sql
self.union_sql = self.has_text_need_distinct_union_sql
self._lock = threading.Lock()
@@ -986,10 +986,9 @@
def visit_function(self, func):
"""generate SQL name for a function"""
- # function_description will check function is supported by the backend
- sqlname = self.dbms_helper.func_sqlname(func.name)
- return '%s(%s)' % (sqlname, ', '.join(c.accept(self)
- for c in func.children))
+ # func_sql_call will check function is supported by the backend
+ return self.dbms_helper.func_as_sql(func.name,
+ [c.accept(self) for c in func.children])
def visit_constant(self, constant):
"""generate SQL name for a constant"""
--- a/server/sources/storages.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/sources/storages.py Wed Mar 24 10:23:57 2010 +0100
@@ -4,16 +4,11 @@
from cubicweb import Binary
from cubicweb.server.hook import Operation
-
-ETYPE_ATTR_STORAGE = {}
def set_attribute_storage(repo, etype, attr, storage):
- ETYPE_ATTR_STORAGE.setdefault(etype, {})[attr] = storage
- repo.system_source.map_attribute(etype, attr, storage.sqlgen_callback)
+ repo.system_source.set_storage(etype, attr, storage)
def unset_attribute_storage(repo, etype, attr):
- ETYPE_ATTR_STORAGE.setdefault(etype, {}).pop(attr, None)
- repo.system_source.unmap_attribute(etype, attr)
-
+ repo.system_source.unset_storage(etype, attr)
class Storage(object):
"""abstract storage"""
@@ -92,9 +87,8 @@
cu = sysource.doexec(entity._cw,
'SELECT cw_%s FROM cw_%s WHERE cw_eid=%s' % (
attr, entity.__regid__, entity.eid))
- dbmod = sysource.dbapi_module
- return dbmod.process_value(cu.fetchone()[0], [None, dbmod.BINARY],
- binarywrap=str)
+ return sysource._process_value(cu.fetchone()[0], [None, dbmod.BINARY],
+ binarywrap=str)
class AddFileOp(Operation):
--- a/server/sqlutils.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/sqlutils.py Wed Mar 24 10:23:57 2010 +0100
@@ -11,21 +11,17 @@
import subprocess
from datetime import datetime, date
-import logilab.common as lgc
-from logilab.common import db
+from logilab import database as db, common as lgc
from logilab.common.shellutils import ProgressBar
-from logilab.common.adbh import get_adv_func_helper
-from logilab.common.sqlgen import SQLGenerator
from logilab.common.date import todate, todatetime
-
-from indexer import get_indexer
+from logilab.database.sqlgen import SQLGenerator
from cubicweb import Binary, ConfigurationError
from cubicweb.uilib import remove_html_tags
from cubicweb.schema import PURE_VIRTUAL_RTYPES
from cubicweb.server import SQL_CONNECT_HOOKS
from cubicweb.server.utils import crypt_password
-
+from rql.utils import RQL_FUNCTIONS_REGISTRY
lgc.USE_MX_DATETIME = False
SQL_PREFIX = 'cw_'
@@ -77,8 +73,8 @@
w(native.grant_schema(user, set_owner))
w('')
if text_index:
- indexer = get_indexer(driver)
- w(indexer.sql_grant_user(user))
+ dbhelper = db.get_db_helper(driver)
+ w(dbhelper.sql_grant_user_on_fti(user))
w('')
w(grant_schema(schema, user, set_owner, skip_entities=skip_entities, prefix=SQL_PREFIX))
return '\n'.join(output)
@@ -96,17 +92,17 @@
w = output.append
w(native.sql_schema(driver))
w('')
+ dbhelper = db.get_db_helper(driver)
if text_index:
- indexer = get_indexer(driver)
- w(indexer.sql_init_fti())
+ w(dbhelper.sql_init_fti().replace(';', ';;'))
w('')
- dbhelper = get_adv_func_helper(driver)
w(schema2sql(dbhelper, schema, prefix=SQL_PREFIX,
- skip_entities=skip_entities, skip_relations=skip_relations))
+ skip_entities=skip_entities,
+ skip_relations=skip_relations).replace(';', ';;'))
if dbhelper.users_support and user:
w('')
w(sqlgrants(schema, driver, user, text_index, set_owner,
- skip_relations, skip_entities))
+ skip_relations, skip_entities).replace(';', ';;'))
return '\n'.join(output)
@@ -120,8 +116,8 @@
w(native.sql_drop_schema(driver))
w('')
if text_index:
- indexer = get_indexer(driver)
- w(indexer.sql_drop_fti())
+ dbhelper = db.get_db_helper(driver)
+ w(dbhelper.sql_drop_fti())
w('')
w(dropschema2sql(schema, prefix=SQL_PREFIX,
skip_entities=skip_entities,
@@ -137,65 +133,44 @@
def __init__(self, source_config):
try:
self.dbdriver = source_config['db-driver'].lower()
- self.dbname = source_config['db-name']
+ dbname = source_config['db-name']
except KeyError:
raise ConfigurationError('missing some expected entries in sources file')
- self.dbhost = source_config.get('db-host')
+ dbhost = source_config.get('db-host')
port = source_config.get('db-port')
- self.dbport = port and int(port) or None
- self.dbuser = source_config.get('db-user')
- self.dbpasswd = source_config.get('db-password')
- self.encoding = source_config.get('db-encoding', 'UTF-8')
- self.dbapi_module = db.get_dbapi_compliant_module(self.dbdriver)
- self.dbdriver_extra_args = source_config.get('db-extra-arguments')
- self.binary = self.dbapi_module.Binary
- self.dbhelper = self.dbapi_module.adv_func_helper
+ dbport = port and int(port) or None
+ dbuser = source_config.get('db-user')
+ dbpassword = source_config.get('db-password')
+ dbencoding = source_config.get('db-encoding', 'UTF-8')
+ dbextraargs = source_config.get('db-extra-arguments')
+ self.dbhelper = db.get_db_helper(self.dbdriver)
+ self.dbhelper.record_connection_info(dbname, dbhost, dbport, dbuser,
+ dbpassword, dbextraargs,
+ dbencoding)
self.sqlgen = SQLGenerator()
+ # copy back some commonly accessed attributes
+ dbapi_module = self.dbhelper.dbapi_module
+ self.OperationalError = dbapi_module.OperationalError
+ self.InterfaceError = dbapi_module.InterfaceError
+ self._binary = dbapi_module.Binary
+ self._process_value = dbapi_module.process_value
+ self._dbencoding = dbencoding
- def get_connection(self, user=None, password=None):
+ def get_connection(self):
"""open and return a connection to the database"""
- if user or self.dbuser:
- self.info('connecting to %s@%s for user %s', self.dbname,
- self.dbhost or 'localhost', user or self.dbuser)
- else:
- self.info('connecting to %s@%s', self.dbname,
- self.dbhost or 'localhost')
- extra = {}
- if self.dbdriver_extra_args:
- extra = {'extra_args': self.dbdriver_extra_args}
- cnx = self.dbapi_module.connect(self.dbhost, self.dbname,
- user or self.dbuser,
- password or self.dbpasswd,
- port=self.dbport,
- **extra)
- init_cnx(self.dbdriver, cnx)
- #self.dbapi_module.type_code_test(cnx.cursor())
- return cnx
+ return self.dbhelper.get_connection()
def backup_to_file(self, backupfile, confirm):
- for cmd in self.dbhelper.backup_commands(backupfile=backupfile,
- keepownership=False,
- dbname=self.dbname,
- dbhost=self.dbhost,
- dbuser=self.dbuser,
- dbport=self.dbport):
+ for cmd in self.dbhelper.backup_commands(backupfile,
+ keepownership=False):
if _run_command(cmd):
if not confirm(' [Failed] Continue anyway?', default='n'):
raise Exception('Failed command: %s' % cmd)
def restore_from_file(self, backupfile, confirm, drop=True):
- if 'dbencoding' in self.dbhelper.restore_commands.im_func.func_code.co_varnames:
- kwargs = {'dbencoding': self.encoding}
- else:
- kwargs = {'encoding': self.encoding}
- for cmd in self.dbhelper.restore_commands(backupfile=backupfile,
+ for cmd in self.dbhelper.restore_commands(backupfile,
keepownership=False,
- drop=drop,
- dbname=self.dbname,
- dbhost=self.dbhost,
- dbuser=self.dbuser,
- dbport=self.dbport,
- **kwargs):
+ drop=drop):
if _run_command(cmd):
if not confirm(' [Failed] Continue anyway?', default='n'):
raise Exception('Failed command: %s' % cmd)
@@ -206,7 +181,7 @@
for key, val in args.iteritems():
# convert cubicweb binary into db binary
if isinstance(val, Binary):
- val = self.binary(val.getvalue())
+ val = self._binary(val.getvalue())
newargs[key] = val
# should not collide
newargs.update(query_args)
@@ -216,10 +191,12 @@
def process_result(self, cursor):
"""return a list of CubicWeb compliant values from data in the given cursor
"""
+ # begin bind to locals for optimization
descr = cursor.description
- encoding = self.encoding
- process_value = self.dbapi_module.process_value
+ encoding = self._dbencoding
+ process_value = self._process_value
binary = Binary
+ # /end
results = cursor.fetchall()
for i, line in enumerate(results):
result = []
@@ -237,7 +214,8 @@
"""
attrs = {}
eschema = entity.e_schema
- for attr, value in entity.items():
+ for attr in entity.edited_attributes:
+ value = entity[attr]
rschema = eschema.subjrels[attr]
if rschema.final:
atype = str(entity.e_schema.destination(attr))
@@ -250,15 +228,16 @@
value = value.getvalue()
else:
value = crypt_password(value)
- value = self.binary(value)
+ value = self._binary(value)
# XXX needed for sqlite but I don't think it is for other backends
elif atype == 'Datetime' and isinstance(value, date):
value = todatetime(value)
elif atype == 'Date' and isinstance(value, datetime):
value = todate(value)
elif isinstance(value, Binary):
- value = self.binary(value.getvalue())
+ value = self._binary(value.getvalue())
attrs[SQL_PREFIX+str(attr)] = value
+ attrs[SQL_PREFIX+'eid'] = entity.eid
return attrs
@@ -267,12 +246,8 @@
set_log_methods(SQLAdapterMixIn, getLogger('cubicweb.sqladapter'))
def init_sqlite_connexion(cnx):
- # XXX should not be publicly exposed
- #def comma_join(strings):
- # return ', '.join(strings)
- #cnx.create_function("COMMA_JOIN", 1, comma_join)
- class concat_strings(object):
+ class group_concat(object):
def __init__(self):
self.values = []
def step(self, value):
@@ -280,10 +255,7 @@
self.values.append(value)
def finalize(self):
return ', '.join(self.values)
- # renamed to GROUP_CONCAT in cubicweb 2.45, keep old name for bw compat for
- # some time
- cnx.create_aggregate("CONCAT_STRINGS", 1, concat_strings)
- cnx.create_aggregate("GROUP_CONCAT", 1, concat_strings)
+ cnx.create_aggregate("GROUP_CONCAT", 1, group_concat)
def _limit_size(text, maxsize, format='text/plain'):
if len(text) < maxsize:
@@ -301,9 +273,9 @@
def limit_size2(text, maxsize):
return _limit_size(text, maxsize)
cnx.create_function("TEXT_LIMIT_SIZE", 2, limit_size2)
+
import yams.constraints
- if hasattr(yams.constraints, 'patch_sqlite_decimal'):
- yams.constraints.patch_sqlite_decimal()
+ yams.constraints.patch_sqlite_decimal()
def fspath(eid, etype, attr):
try:
@@ -328,10 +300,5 @@
raise
cnx.create_function('_fsopen', 1, _fsopen)
-
sqlite_hooks = SQL_CONNECT_HOOKS.setdefault('sqlite', [])
sqlite_hooks.append(init_sqlite_connexion)
-
-def init_cnx(driver, cnx):
- for hook in SQL_CONNECT_HOOKS.get(driver, ()):
- hook(cnx)
--- a/server/ssplanner.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/ssplanner.py Wed Mar 24 10:23:57 2010 +0100
@@ -5,16 +5,114 @@
:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
"""
+from __future__ import with_statement
+
__docformat__ = "restructuredtext en"
from copy import copy
from rql.stmts import Union, Select
-from rql.nodes import Constant
+from rql.nodes import Constant, Relation
from cubicweb import QueryError, typed_eid
from cubicweb.schema import VIRTUAL_RTYPES
from cubicweb.rqlrewrite import add_types_restriction
+from cubicweb.server.session import security_enabled
+
+READ_ONLY_RTYPES = set(('eid', 'has_text', 'is', 'is_instance_of', 'identity'))
+
+_CONSTANT = object()
+_FROM_SUBSTEP = object()
+
+def _extract_const_attributes(plan, rqlst, to_build):
+ """add constant values to entity def, mark variables to be selected
+ """
+ to_select = {}
+ for relation in rqlst.main_relations:
+ lhs, rhs = relation.get_variable_parts()
+ rtype = relation.r_type
+ if rtype in READ_ONLY_RTYPES:
+ raise QueryError("can't assign to %s" % rtype)
+ try:
+ edef = to_build[str(lhs)]
+ except KeyError:
+ # lhs var is not to build, should be selected and added as an
+ # object relation
+ edef = to_build[str(rhs)]
+ to_select.setdefault(edef, []).append((rtype, lhs, 1))
+ else:
+ if isinstance(rhs, Constant) and not rhs.uid:
+ # add constant values to entity def
+ value = rhs.eval(plan.args)
+ eschema = edef.e_schema
+ attrtype = eschema.subjrels[rtype].objects(eschema)[0]
+ if attrtype == 'Password' and isinstance(value, unicode):
+ value = value.encode('UTF8')
+ edef[rtype] = value
+ elif to_build.has_key(str(rhs)):
+ # create a relation between two newly created variables
+ plan.add_relation_def((edef, rtype, to_build[rhs.name]))
+ else:
+ to_select.setdefault(edef, []).append( (rtype, rhs, 0) )
+ return to_select
+
+def _extract_eid_consts(plan, rqlst):
+ """return a dict mapping rqlst variable object to their eid if specified in
+ the syntax tree
+ """
+ session = plan.session
+ if rqlst.where is None:
+ return {}
+ eidconsts = {}
+ neweids = session.transaction_data.get('neweids', ())
+ checkread = session.read_security
+ eschema = session.vreg.schema.eschema
+ for rel in rqlst.where.get_nodes(Relation):
+ if rel.r_type == 'eid' and not rel.neged(strict=True):
+ lhs, rhs = rel.get_variable_parts()
+ if isinstance(rhs, Constant):
+ eid = typed_eid(rhs.eval(plan.args))
+ # check read permission here since it may not be done by
+ # the generated select substep if not emitted (eg nothing
+ # to be selected)
+ if checkread and eid not in neweids:
+ with security_enabled(session, read=False):
+ eschema(session.describe(eid)[0]).check_perm(
+ session, 'read', eid=eid)
+ eidconsts[lhs.variable] = eid
+ return eidconsts
+
+def _build_substep_query(select, origrqlst):
+ """Finalize substep select query that should be executed to get proper
+ selection of stuff to insert/update.
+
+ Return None when no query actually needed, else the given select node that
+ will be used as substep query.
+
+ When select has nothing selected, search in origrqlst for restriction that
+ should be considered.
+ """
+ if select.selection:
+ if origrqlst.where is not None:
+ select.set_where(origrqlst.where.copy(select))
+ return select
+ if origrqlst.where is None:
+ return
+ for rel in origrqlst.where.iget_nodes(Relation):
+ # search for a relation which is neither a type restriction (is) nor an
+ # eid specification (not neged eid with constant node
+ if rel.neged(strict=True) or not (
+ rel.is_types_restriction() or
+ (rel.r_type == 'eid'
+ and isinstance(rel.get_variable_parts()[1], Constant))):
+ break
+ else:
+ return
+ select.set_where(origrqlst.where.copy(select))
+ if not select.selection:
+ # no selection, append one randomly
+ select.append_selected(rel.children[0].copy(select))
+ return select
class SSPlanner(object):
@@ -56,34 +154,37 @@
to_build[var.name] = etype_class(etype)(session)
plan.add_entity_def(to_build[var.name])
# add constant values to entity def, mark variables to be selected
- to_select = plan.relation_definitions(rqlst, to_build)
+ to_select = _extract_const_attributes(plan, rqlst, to_build)
# add necessary steps to add relations and update attributes
step = InsertStep(plan) # insert each entity and its relations
- step.children += self._compute_relation_steps(plan, rqlst.solutions,
- rqlst.where, to_select)
+ step.children += self._compute_relation_steps(plan, rqlst, to_select)
return (step,)
- def _compute_relation_steps(self, plan, solutions, restriction, to_select):
+ def _compute_relation_steps(self, plan, rqlst, to_select):
"""handle the selection of relations for an insert query"""
+ eidconsts = _extract_eid_consts(plan, rqlst)
for edef, rdefs in to_select.items():
# create a select rql st to fetch needed data
select = Select()
eschema = edef.e_schema
- for i in range(len(rdefs)):
- rtype, term, reverse = rdefs[i]
- select.append_selected(term.copy(select))
+ for i, (rtype, term, reverse) in enumerate(rdefs):
+ if getattr(term, 'variable', None) in eidconsts:
+ value = eidconsts[term.variable]
+ else:
+ select.append_selected(term.copy(select))
+ value = _FROM_SUBSTEP
if reverse:
- rdefs[i] = rtype, RelationsStep.REVERSE_RELATION
+ rdefs[i] = (rtype, InsertRelationsStep.REVERSE_RELATION, value)
else:
rschema = eschema.subjrels[rtype]
if rschema.final or rschema.inlined:
- rdefs[i] = rtype, RelationsStep.FINAL
+ rdefs[i] = (rtype, InsertRelationsStep.FINAL, value)
else:
- rdefs[i] = rtype, RelationsStep.RELATION
- if restriction is not None:
- select.set_where(restriction.copy(select))
- step = RelationsStep(plan, edef, rdefs)
- step.children += self._select_plan(plan, select, solutions)
+ rdefs[i] = (rtype, InsertRelationsStep.RELATION, value)
+ step = InsertRelationsStep(plan, edef, rdefs)
+ select = _build_substep_query(select, rqlst)
+ if select is not None:
+ step.children += self._select_plan(plan, select, rqlst.solutions)
yield step
def build_delete_plan(self, plan, rqlst):
@@ -127,37 +228,61 @@
def build_set_plan(self, plan, rqlst):
"""get an execution plan from an SET RQL query"""
- select = Select()
- # extract variables to add to the selection
- selected_index = {}
- index = 0
- relations, attrrelations = [], []
getrschema = self.schema.rschema
- for relation in rqlst.main_relations:
+ select = Select() # potential substep query
+ selectedidx = {} # local state
+ attributes = set() # edited attributes
+ updatedefs = [] # definition of update attributes/relations
+ selidx = residx = 0 # substep selection / resulting rset indexes
+ # search for eid const in the WHERE clause
+ eidconsts = _extract_eid_consts(plan, rqlst)
+ # build `updatedefs` describing things to update and add necessary
+ # variables to the substep selection
+ for i, relation in enumerate(rqlst.main_relations):
if relation.r_type in VIRTUAL_RTYPES:
raise QueryError('can not assign to %r relation'
% relation.r_type)
lhs, rhs = relation.get_variable_parts()
- if not lhs.as_string('utf-8') in selected_index:
- select.append_selected(lhs.copy(select))
- selected_index[lhs.as_string('utf-8')] = index
- index += 1
- if not rhs.as_string('utf-8') in selected_index:
- select.append_selected(rhs.copy(select))
- selected_index[rhs.as_string('utf-8')] = index
- index += 1
+ lhskey = lhs.as_string('utf-8')
+ if not lhskey in selectedidx:
+ if lhs.variable in eidconsts:
+ eid = eidconsts[lhs.variable]
+ lhsinfo = (_CONSTANT, eid, residx)
+ else:
+ select.append_selected(lhs.copy(select))
+ lhsinfo = (_FROM_SUBSTEP, selidx, residx)
+ selidx += 1
+ residx += 1
+ selectedidx[lhskey] = lhsinfo
+ else:
+ lhsinfo = selectedidx[lhskey][:-1] + (None,)
+ rhskey = rhs.as_string('utf-8')
+ if not rhskey in selectedidx:
+ if isinstance(rhs, Constant):
+ rhsinfo = (_CONSTANT, rhs.eval(plan.args), residx)
+ elif getattr(rhs, 'variable', None) in eidconsts:
+ eid = eidconsts[rhs.variable]
+ rhsinfo = (_CONSTANT, eid, residx)
+ else:
+ select.append_selected(rhs.copy(select))
+ rhsinfo = (_FROM_SUBSTEP, selidx, residx)
+ selidx += 1
+ residx += 1
+ selectedidx[rhskey] = rhsinfo
+ else:
+ rhsinfo = selectedidx[rhskey][:-1] + (None,)
rschema = getrschema(relation.r_type)
+ updatedefs.append( (lhsinfo, rhsinfo, rschema) )
if rschema.final or rschema.inlined:
- attrrelations.append(relation)
- else:
- relations.append(relation)
- # add step necessary to fetch all selected variables values
- if rqlst.where is not None:
- select.set_where(rqlst.where.copy(select))
- # set distinct to avoid potential duplicate key error
- select.distinct = True
- step = UpdateStep(plan, attrrelations, relations, selected_index)
- step.children += self._select_plan(plan, select, rqlst.solutions)
+ attributes.add(relation.r_type)
+ # the update step
+ step = UpdateStep(plan, updatedefs, attributes)
+ # when necessary add substep to fetch yet unknown values
+ select = _build_substep_query(select, rqlst)
+ if select is not None:
+ # set distinct to avoid potential duplicate key error
+ select.distinct = True
+ step.children += self._select_plan(plan, select, rqlst.solutions)
return (step,)
# internal methods ########################################################
@@ -308,7 +433,7 @@
# UPDATE/INSERT/DELETE steps ##################################################
-class RelationsStep(Step):
+class InsertRelationsStep(Step):
"""step consisting in adding attributes/relations to entity defs from a
previous FetchStep
@@ -334,33 +459,38 @@
"""execute this step"""
base_edef = self.edef
edefs = []
- result = self.execute_child()
+ if self.children:
+ result = self.execute_child()
+ else:
+ result = [[]]
for row in result:
# get a new entity definition for this row
edef = copy(base_edef)
# complete this entity def using row values
- for i in range(len(self.rdefs)):
- rtype, rorder = self.rdefs[i]
- if rorder == RelationsStep.FINAL:
- edef[rtype] = row[i]
- elif rorder == RelationsStep.RELATION:
- self.plan.add_relation_def( (edef, rtype, row[i]) )
- edef.querier_pending_relations[(rtype, 'subject')] = row[i]
+ index = 0
+ for rtype, rorder, value in self.rdefs:
+ if value is _FROM_SUBSTEP:
+ value = row[index]
+ index += 1
+ if rorder == InsertRelationsStep.FINAL:
+ edef.rql_set_value(rtype, value)
+ elif rorder == InsertRelationsStep.RELATION:
+ self.plan.add_relation_def( (edef, rtype, value) )
+ edef.querier_pending_relations[(rtype, 'subject')] = value
else:
- self.plan.add_relation_def( (row[i], rtype, edef) )
- edef.querier_pending_relations[(rtype, 'object')] = row[i]
+ self.plan.add_relation_def( (value, rtype, edef) )
+ edef.querier_pending_relations[(rtype, 'object')] = value
edefs.append(edef)
self.plan.substitute_entity_def(base_edef, edefs)
return result
-
class InsertStep(Step):
"""step consisting in inserting new entities / relations"""
def execute(self):
"""execute this step"""
for step in self.children:
- assert isinstance(step, RelationsStep)
+ assert isinstance(step, InsertRelationsStep)
step.plan = self.plan
step.execute()
# insert entities first
@@ -408,40 +538,46 @@
definitions and from results fetched in previous step
"""
- def __init__(self, plan, attribute_relations, relations, selected_index):
+ def __init__(self, plan, updatedefs, attributes):
Step.__init__(self, plan)
- self.attribute_relations = attribute_relations
- self.relations = relations
- self.selected_index = selected_index
+ self.updatedefs = updatedefs
+ self.attributes = attributes
def execute(self):
"""execute this step"""
- plan = self.plan
session = self.plan.session
repo = session.repo
edefs = {}
# insert relations
- attributes = set([relation.r_type for relation in self.attribute_relations])
- result = self.execute_child()
- for row in result:
- for relation in self.attribute_relations:
- lhs, rhs = relation.get_variable_parts()
- eid = typed_eid(row[self.selected_index[str(lhs)]])
- try:
- edef = edefs[eid]
- except KeyError:
- edefs[eid] = edef = session.entity_from_eid(eid)
- if isinstance(rhs, Constant):
- # add constant values to entity def
- value = rhs.eval(plan.args)
- edef[relation.r_type] = value
+ if self.children:
+ result = self.execute_child()
+ else:
+ result = [[]]
+ for i, row in enumerate(result):
+ newrow = []
+ for (lhsinfo, rhsinfo, rschema) in self.updatedefs:
+ lhsval = _handle_relterm(lhsinfo, row, newrow)
+ rhsval = _handle_relterm(rhsinfo, row, newrow)
+ if rschema.final or rschema.inlined:
+ eid = typed_eid(lhsval)
+ try:
+ edef = edefs[eid]
+ except KeyError:
+ edefs[eid] = edef = session.entity_from_eid(eid)
+ edef.rql_set_value(str(rschema), rhsval)
else:
- edef[relation.r_type] = row[self.selected_index[str(rhs)]]
- for relation in self.relations:
- subj = row[self.selected_index[str(relation.children[0])]]
- obj = row[self.selected_index[str(relation.children[1])]]
- repo.glob_add_relation(session, subj, relation.r_type, obj)
+ repo.glob_add_relation(session, lhsval, str(rschema), rhsval)
+ result[i] = newrow
# update entities
for eid, edef in edefs.iteritems():
- repo.glob_update_entity(session, edef, attributes)
+ repo.glob_update_entity(session, edef, self.attributes)
return result
+
+def _handle_relterm(info, row, newrow):
+ if info[0] is _CONSTANT:
+ val = info[1]
+ else: # _FROM_SUBSTEP
+ val = row[info[1]]
+ if info[-1] is not None:
+ newrow.append(val)
+ return val
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/server/test/data/site_cubicweb.py Wed Mar 24 10:23:57 2010 +0100
@@ -0,0 +1,23 @@
+"""
+
+:organization: Logilab
+:copyright: 2001-2010 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
+:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
+:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
+"""
+
+from logilab.database import FunctionDescr
+from logilab.database.sqlite import register_sqlite_pyfunc
+from rql.utils import register_function
+
+try:
+ class DUMB_SORT(FunctionDescr):
+ supported_backends = ('sqlite',)
+
+ register_function(DUMB_SORT)
+ def dumb_sort(something):
+ return something
+ register_sqlite_pyfunc(dumb_sort)
+except:
+ # already registered
+ pass
--- a/server/test/data/site_erudi.py Wed Mar 24 08:40:00 2010 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,27 +0,0 @@
-"""
-
-:organization: Logilab
-:copyright: 2001-2010 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
-:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
-:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
-"""
-from logilab.common.adbh import FunctionDescr
-from rql.utils import register_function
-
-try:
- class DUMB_SORT(FunctionDescr):
- supported_backends = ('sqlite',)
-
- register_function(DUMB_SORT)
-
-
- def init_sqlite_connexion(cnx):
- def dumb_sort(something):
- return something
- cnx.create_function("DUMB_SORT", 1, dumb_sort)
-
- from cubicweb.server import sqlutils
- sqlutils.SQL_CONNECT_HOOKS['sqlite'].append(init_sqlite_connexion)
-except:
- # already registered
- pass
--- a/server/test/unittest_checkintegrity.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/test/unittest_checkintegrity.py Wed Mar 24 10:23:57 2010 +0100
@@ -13,10 +13,9 @@
from cubicweb.server.checkintegrity import check
-repo, cnx = init_test_database()
-
class CheckIntegrityTC(TestCase):
def test(self):
+ repo, cnx = init_test_database()
sys.stderr = sys.stdout = StringIO()
try:
check(repo, cnx, ('entities', 'relations', 'text_index', 'metadata'),
@@ -24,6 +23,7 @@
finally:
sys.stderr = sys.__stderr__
sys.stdout = sys.__stdout__
+ repo.shutdown()
if __name__ == '__main__':
unittest_main()
--- a/server/test/unittest_hook.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/test/unittest_hook.py Wed Mar 24 10:23:57 2010 +0100
@@ -69,6 +69,10 @@
config.bootstrap_cubes()
schema = config.load_schema()
+def teardown_module(*args):
+ global config, schema
+ del config, schema
+
class AddAnyHook(hook.Hook):
__regid__ = 'addany'
category = 'cat1'
@@ -104,13 +108,19 @@
def test_call_hook(self):
self.o.register(AddAnyHook)
- cw = mock_object(vreg=self.vreg)
- self.assertRaises(HookCalled, self.o.call_hooks, 'before_add_entity', cw)
+ dis = set()
+ cw = mock_object(vreg=self.vreg,
+ set_read_security=lambda *a,**k: None,
+ set_write_security=lambda *a,**k: None,
+ is_hook_activated=lambda x, cls: cls.category not in dis)
+ self.assertRaises(HookCalled,
+ self.o.call_hooks, 'before_add_entity', cw)
self.o.call_hooks('before_delete_entity', cw) # nothing to call
- config.disabled_hooks_categories.add('cat1')
+ dis.add('cat1')
self.o.call_hooks('before_add_entity', cw) # disabled hooks category, not called
- config.disabled_hooks_categories.remove('cat1')
- self.assertRaises(HookCalled, self.o.call_hooks, 'before_add_entity', cw)
+ dis.remove('cat1')
+ self.assertRaises(HookCalled,
+ self.o.call_hooks, 'before_add_entity', cw)
self.o.unregister(AddAnyHook)
self.o.call_hooks('before_add_entity', cw) # nothing to call
--- a/server/test/unittest_ldapuser.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/test/unittest_ldapuser.py Wed Mar 24 10:23:57 2010 +0100
@@ -370,6 +370,11 @@
LDAPUserSourceTC._init_repo()
repo = LDAPUserSourceTC.repo
+def teardown_module(*args):
+ global repo
+ del repo
+ del RQL2LDAPFilterTC.schema
+
class RQL2LDAPFilterTC(RQLGeneratorTC):
schema = repo.schema
--- a/server/test/unittest_migractions.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/test/unittest_migractions.py Wed Mar 24 10:23:57 2010 +0100
@@ -14,6 +14,11 @@
from cubicweb.server.sqlutils import SQL_PREFIX
from cubicweb.server.migractions import *
+migrschema = None
+def teardown_module(*args):
+ global migrschema
+ del migrschema
+ del MigrationCommandsTC.origschema
class MigrationCommandsTC(CubicWebTC):
@@ -35,6 +40,13 @@
def _refresh_repo(cls):
super(MigrationCommandsTC, cls)._refresh_repo()
cls.repo.set_schema(deepcopy(cls.origschema), resetvreg=False)
+ # reset migration schema eids
+ for eschema in migrschema.entities():
+ eschema.eid = None
+ for rschema in migrschema.relations():
+ rschema.eid = None
+ for rdef in rschema.rdefs.values():
+ rdef.eid = None
def setUp(self):
CubicWebTC.setUp(self)
@@ -44,7 +56,6 @@
assert self.cnx is self.mh._cnx
assert self.session is self.mh.session, (self.session.id, self.mh.session.id)
-
def test_add_attribute_int(self):
self.failIf('whatever' in self.schema)
self.request().create_entity('Note')
--- a/server/test/unittest_msplanner.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/test/unittest_msplanner.py Wed Mar 24 10:23:57 2010 +0100
@@ -60,6 +60,11 @@
# keep cnx so it's not garbage collected and the associated session is closed
repo, cnx = init_test_database()
+def teardown_module(*args):
+ global repo, cnx
+ del repo, cnx
+
+
class BaseMSPlannerTC(BasePlannerTC):
"""test planner related feature on a 3-sources repository:
@@ -87,10 +92,10 @@
self.add_source(FakeCardSource, 'cards')
def tearDown(self):
- super(BaseMSPlannerTC, self).tearDown()
# restore hijacked security
self.restore_orig_affaire_security()
self.restore_orig_cwuser_security()
+ super(BaseMSPlannerTC, self).tearDown()
def restore_orig_affaire_security(self):
affreadperms = list(self.schema['Affaire'].permissions['read'])
@@ -1005,7 +1010,7 @@
self.session = self.user_groups_session('guests')
self._test('Any X,XT,U WHERE X is Card, X owned_by U?, X title XT, U login L',
[('FetchStep',
- [('Any U,L WHERE U identity 5, U login L, U is CWUser',
+ [('Any U,L WHERE U login L, EXISTS(U identity 5), U is CWUser',
[{'L': 'String', u'U': 'CWUser'}])],
[self.system], {}, {'L': 'table0.C1', 'U': 'table0.C0', 'U.login': 'table0.C1'}, []),
('FetchStep',
@@ -1517,15 +1522,11 @@
repo._type_source_cache[999999] = ('Note', 'cards', 999999)
repo._type_source_cache[999998] = ('State', 'system', None)
self._test('INSERT Note X: X in_state S, X type T WHERE S eid %(s)s, N eid %(n)s, N type T',
- [('FetchStep', [('Any T WHERE N eid 999999, N type T, N is Note',
- [{'N': 'Note', 'T': 'String'}])],
- [self.cards], None, {'N.type': 'table0.C0', 'T': 'table0.C0'}, []),
- ('InsertStep',
- [('RelationsStep',
- [('OneFetchStep', [('Any 999998,T WHERE N type T, N is Note',
+ [('InsertStep',
+ [('InsertRelationsStep',
+ [('OneFetchStep', [('Any T WHERE N eid 999999, N type T, N is Note',
[{'N': 'Note', 'T': 'String'}])],
- None, None, [self.system],
- {'N.type': 'table0.C0', 'T': 'table0.C0'}, [])])
+ None, None, [self.cards], {}, [])])
])
],
{'n': 999999, 's': 999998})
@@ -1534,15 +1535,11 @@
repo._type_source_cache[999999] = ('Note', 'cards', 999999)
repo._type_source_cache[999998] = ('State', 'system', None)
self._test('INSERT Note X: X in_state S, X type T, X migrated_from N WHERE S eid %(s)s, N eid %(n)s, N type T',
- [('FetchStep', [('Any T,N WHERE N eid 999999, N type T, N is Note',
- [{'N': 'Note', 'T': 'String'}])],
- [self.cards], None, {'N': 'table0.C1', 'N.type': 'table0.C0', 'T': 'table0.C0'}, []),
- ('InsertStep',
- [('RelationsStep',
- [('OneFetchStep', [('Any 999998,T,N WHERE N type T, N is Note',
+ [('InsertStep',
+ [('InsertRelationsStep',
+ [('OneFetchStep', [('Any T WHERE N eid 999999, N type T, N is Note',
[{'N': 'Note', 'T': 'String'}])],
- None, None, [self.system],
- {'N': 'table0.C1', 'N.type': 'table0.C0', 'T': 'table0.C0'}, [])
+ None, None, [self.cards], {}, [])
])
])
],
@@ -1553,8 +1550,8 @@
repo._type_source_cache[999998] = ('State', 'cards', 999998)
self._test('INSERT Note X: X in_state S, X type T WHERE S eid %(s)s, N eid %(n)s, N type T',
[('InsertStep',
- [('RelationsStep',
- [('OneFetchStep', [('Any 999998,T WHERE N eid 999999, N type T, N is Note',
+ [('InsertRelationsStep',
+ [('OneFetchStep', [('Any T WHERE N eid 999999, N type T, N is Note',
[{'N': 'Note', 'T': 'String'}])],
None, None, [self.cards], {}, [])]
)]
@@ -1566,10 +1563,7 @@
repo._type_source_cache[999998] = ('State', 'system', None)
self._test('INSERT Note X: X in_state S, X type "bla", X migrated_from N WHERE S eid %(s)s, N eid %(n)s',
[('InsertStep',
- [('RelationsStep',
- [('OneFetchStep', [('Any 999998,999999', [{}])],
- None, None, [self.system], {}, [])]
- )]
+ [('InsertRelationsStep', [])]
)],
{'n': 999999, 's': 999998})
@@ -1578,12 +1572,14 @@
repo._type_source_cache[999998] = ('State', 'system', None)
self._test('INSERT Note X: X in_state S, X type "bla", X migrated_from N WHERE S eid %(s)s, N eid %(n)s, A concerne N',
[('InsertStep',
- [('RelationsStep',
- [('OneFetchStep', [('Any 999998,999999 WHERE A concerne 999999, A is Affaire',
- [{'A': 'Affaire'}])],
- None, None, [self.system], {}, [])]
- )]
- )],
+ [('InsertRelationsStep',
+ [('OneFetchStep',
+ [('Any A WHERE A concerne 999999, A is Affaire',
+ [{'A': 'Affaire'}])],
+ None, None, [self.system], {}, []),
+ ]),
+ ])
+ ],
{'n': 999999, 's': 999998})
def test_delete_relation1(self):
@@ -1664,7 +1660,7 @@
# source, states should only be searched in the system source as well
self._test('SET X in_state S WHERE X eid %(x)s, S name "deactivated"',
[('UpdateStep', [
- ('OneFetchStep', [('DISTINCT Any 5,S WHERE S name "deactivated", S is State',
+ ('OneFetchStep', [('DISTINCT Any S WHERE S name "deactivated", S is State',
[{'S': 'State'}])],
None, None, [self.system], {}, []),
]),
@@ -1814,7 +1810,7 @@
[('FetchStep', [('Any Y WHERE Y multisource_rel 999998, Y is Note', [{'Y': 'Note'}])],
[self.cards], None, {'Y': u'table0.C0'}, []),
('UpdateStep',
- [('OneFetchStep', [('DISTINCT Any 999999,Y WHERE Y migrated_from 999998, Y is Note',
+ [('OneFetchStep', [('DISTINCT Any Y WHERE Y migrated_from 999998, Y is Note',
[{'Y': 'Note'}])],
None, None, [self.system],
{'Y': u'table0.C0'}, [])])],
@@ -1841,14 +1837,9 @@
def test_nonregr11(self):
repo._type_source_cache[999999] = ('Bookmark', 'system', 999999)
self._test('SET X bookmarked_by Y WHERE X eid %(x)s, Y login "hop"',
- [('FetchStep',
- [('Any Y WHERE Y login "hop", Y is CWUser', [{'Y': 'CWUser'}])],
- [self.ldap, self.system],
- None, {'Y': 'table0.C0'}, []),
- ('UpdateStep',
- [('OneFetchStep', [('DISTINCT Any 999999,Y WHERE Y is CWUser', [{'Y': 'CWUser'}])],
- None, None, [self.system], {'Y': 'table0.C0'},
- [])]
+ [('UpdateStep',
+ [('OneFetchStep', [('DISTINCT Any Y WHERE Y login "hop", Y is CWUser', [{'Y': 'CWUser'}])],
+ None, None, [self.ldap, self.system], {}, [])]
)],
{'x': 999999})
--- a/server/test/unittest_multisources.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/test/unittest_multisources.py Wed Mar 24 10:23:57 2010 +0100
@@ -48,7 +48,12 @@
def teardown_module(*args):
PyroRQLSource.get_connection = PyroRQLSource_get_connection
Connection.close = Connection_close
-
+ global repo2, cnx2, repo3, cnx3
+ repo2.shutdown()
+ repo3.shutdown()
+ del repo2, cnx2, repo3, cnx3
+ #del TwoSourcesTC.config.vreg
+ #del TwoSourcesTC.config
class TwoSourcesTC(CubicWebTC):
config = TwoSourcesConfiguration('data')
@@ -130,7 +135,7 @@
cu = cnx.cursor()
rset = cu.execute('Any X WHERE X has_text "card"')
self.assertEquals(len(rset), 5, zip(rset.rows, rset.description))
- cnx.close()
+ Connection_close(cnx)
def test_synchronization(self):
cu = cnx2.cursor()
--- a/server/test/unittest_querier.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/test/unittest_querier.py Wed Mar 24 10:23:57 2010 +0100
@@ -35,7 +35,7 @@
SQL_CONNECT_HOOKS['sqlite'].append(init_sqlite_connexion)
-from logilab.common.adbh import _GenericAdvFuncHelper
+from logilab.database import _GenericAdvFuncHelper
TYPEMAP = _GenericAdvFuncHelper.TYPE_MAPPING
class MakeSchemaTC(TestCase):
@@ -48,6 +48,11 @@
repo, cnx = init_test_database()
+def teardown_module(*args):
+ global repo, cnx
+ cnx.close()
+ repo.shutdown()
+ del repo, cnx
class UtilsTC(BaseQuerierTC):
@@ -392,6 +397,18 @@
rset = self.execute('Note X WHERE NOT Y evaluee X')
self.assertEquals(len(rset.rows), 1, rset.rows)
+ def test_select_date_extraction(self):
+ self.execute("INSERT Personne X: X nom 'foo', X datenaiss %(d)s",
+ {'d': datetime(2001, 2,3, 12,13)})
+ test_data = [('YEAR', 2001), ('MONTH', 2), ('DAY', 3),
+ ('HOUR', 12), ('MINUTE', 13)]
+ for funcname, result in test_data:
+ rset = self.execute('Any %s(D) WHERE X is Personne, X datenaiss D'
+ % funcname)
+ self.assertEquals(len(rset.rows), 1)
+ self.assertEquals(rset.rows[0][0], result)
+ self.assertEquals(rset.description, [('Int',)])
+
def test_select_aggregat_count(self):
rset = self.execute('Any COUNT(X)')
self.assertEquals(len(rset.rows), 1)
@@ -425,7 +442,7 @@
self.assertEquals(rset.description, [('Int',)])
def test_select_custom_aggregat_concat_string(self):
- rset = self.execute('Any CONCAT_STRINGS(N) WHERE X is CWGroup, X name N')
+ rset = self.execute('Any GROUP_CONCAT(N) WHERE X is CWGroup, X name N')
self.failUnless(rset)
self.failUnlessEqual(sorted(rset[0][0].split(', ')), ['guests', 'managers',
'owners', 'users'])
@@ -1023,6 +1040,10 @@
{'x': str(eid1), 'y': str(eid2)})
rset = self.execute('Any X, Y WHERE X travaille Y')
self.assertEqual(len(rset.rows), 1)
+ # test add of an existing relation but with NOT X rel Y protection
+ self.failIf(self.execute("SET X travaille Y WHERE X eid %(x)s, Y eid %(y)s,"
+ "NOT X travaille Y",
+ {'x': str(eid1), 'y': str(eid2)}))
def test_update_2ter(self):
rset = self.execute("INSERT Personne X, Societe Y: X nom 'bidule', Y nom 'toto'")
--- a/server/test/unittest_repository.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/test/unittest_repository.py Wed Mar 24 10:23:57 2010 +0100
@@ -21,7 +21,7 @@
from cubicweb import (BadConnectionId, RepositoryError, ValidationError,
UnknownEid, AuthenticationError)
from cubicweb.schema import CubicWebSchema, RQLConstraint
-from cubicweb.dbapi import connect, repo_connect, multiple_connections_unfix
+from cubicweb.dbapi import connect, multiple_connections_unfix
from cubicweb.devtools.testlib import CubicWebTC
from cubicweb.devtools.repotest import tuplify
from cubicweb.server import repository, hook
@@ -38,25 +38,29 @@
"""
def test_fill_schema(self):
- self.repo.schema = CubicWebSchema(self.repo.config.appid)
- self.repo.config._cubes = None # avoid assertion error
- self.repo.config.repairing = True # avoid versions checking
- self.repo.fill_schema()
- table = SQL_PREFIX + 'CWEType'
- namecol = SQL_PREFIX + 'name'
- finalcol = SQL_PREFIX + 'final'
- self.session.set_pool()
- cu = self.session.system_sql('SELECT %s FROM %s WHERE %s is NULL' % (
- namecol, table, finalcol))
- self.assertEquals(cu.fetchall(), [])
- cu = self.session.system_sql('SELECT %s FROM %s WHERE %s=%%(final)s ORDER BY %s'
- % (namecol, table, finalcol, namecol), {'final': 'TRUE'})
- self.assertEquals(cu.fetchall(), [(u'Boolean',), (u'Bytes',),
- (u'Date',), (u'Datetime',),
- (u'Decimal',),(u'Float',),
- (u'Int',),
- (u'Interval',), (u'Password',),
- (u'String',), (u'Time',)])
+ origshema = self.repo.schema
+ try:
+ self.repo.schema = CubicWebSchema(self.repo.config.appid)
+ self.repo.config._cubes = None # avoid assertion error
+ self.repo.config.repairing = True # avoid versions checking
+ self.repo.fill_schema()
+ table = SQL_PREFIX + 'CWEType'
+ namecol = SQL_PREFIX + 'name'
+ finalcol = SQL_PREFIX + 'final'
+ self.session.set_pool()
+ cu = self.session.system_sql('SELECT %s FROM %s WHERE %s is NULL' % (
+ namecol, table, finalcol))
+ self.assertEquals(cu.fetchall(), [])
+ cu = self.session.system_sql('SELECT %s FROM %s WHERE %s=%%(final)s ORDER BY %s'
+ % (namecol, table, finalcol, namecol), {'final': 'TRUE'})
+ self.assertEquals(cu.fetchall(), [(u'Boolean',), (u'Bytes',),
+ (u'Date',), (u'Datetime',),
+ (u'Decimal',),(u'Float',),
+ (u'Int',),
+ (u'Interval',), (u'Password',),
+ (u'String',), (u'Time',)])
+ finally:
+ self.repo.set_schema(origshema)
def test_schema_has_owner(self):
repo = self.repo
@@ -180,7 +184,9 @@
repo = self.repo
cnxid = repo.connect(self.admlogin, password=self.admpassword)
# rollback state change which trigger TrInfo insertion
- user = repo._get_session(cnxid).user
+ session = repo._get_session(cnxid)
+ session.set_pool()
+ user = session.user
user.fire_transition('deactivate')
rset = repo.execute(cnxid, 'TrInfo T WHERE T wf_info_for X, X eid %(x)s', {'x': user.eid})
self.assertEquals(len(rset), 1)
@@ -263,6 +269,8 @@
self.fail('something went wrong, thread still alive')
finally:
repository.pyro_unregister(self.repo.config)
+ from logilab.common import pyro_ext
+ pyro_ext._DAEMONS.clear()
def _pyro_client(self, done):
cnx = connect(self.repo.config.appid, u'admin', password='gingkow')
@@ -377,14 +385,14 @@
entity.eid = -1
entity.complete = lambda x: None
self.session.set_pool()
- self.repo.add_info(self.session, entity, self.repo.sources_by_uri['system'])
+ self.repo.add_info(self.session, entity, self.repo.system_source)
cu = self.session.system_sql('SELECT * FROM entities WHERE eid = -1')
data = cu.fetchall()
self.assertIsInstance(data[0][3], datetime)
data[0] = list(data[0])
data[0][3] = None
self.assertEquals(tuplify(data), [(-1, 'Personne', 'system', None, None)])
- self.repo.delete_info(self.session, -1)
+ self.repo.delete_info(self.session, entity, 'system', None)
#self.repo.commit()
cu = self.session.system_sql('SELECT * FROM entities WHERE eid = -1')
data = cu.fetchall()
@@ -470,13 +478,6 @@
u'system.version.tag'])
CALLED = []
-class EcritParHook(hook.Hook):
- __regid__ = 'inlinedrelhook'
- __select__ = hook.Hook.__select__ & hook.match_rtype('ecrit_par')
- events = ('before_add_relation', 'after_add_relation',
- 'before_delete_relation', 'after_delete_relation')
- def __call__(self):
- CALLED.append((self.event, self.eidfrom, self.rtype, self.eidto))
class InlineRelHooksTC(CubicWebTC):
"""test relation hooks are called for inlined relations
@@ -491,6 +492,14 @@
def test_inline_relation(self):
"""make sure <event>_relation hooks are called for inlined relation"""
+ class EcritParHook(hook.Hook):
+ __regid__ = 'inlinedrelhook'
+ __select__ = hook.Hook.__select__ & hook.match_rtype('ecrit_par')
+ events = ('before_add_relation', 'after_add_relation',
+ 'before_delete_relation', 'after_delete_relation')
+ def __call__(self):
+ CALLED.append((self.event, self.eidfrom, self.rtype, self.eidto))
+
self.hm.register(EcritParHook)
eidp = self.execute('INSERT Personne X: X nom "toto"')[0][0]
eidn = self.execute('INSERT Note X: X type "T"')[0][0]
--- a/server/test/unittest_rql2sql.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/test/unittest_rql2sql.py Wed Mar 24 10:23:57 2010 +0100
@@ -13,7 +13,6 @@
from logilab.common.testlib import TestCase, unittest_main, mock_object
from rql import BadRQLQuery
-from indexer import get_indexer
#from cubicweb.server.sources.native import remove_unused_solutions
from cubicweb.server.sources.rql2sql import SQLGenerator, remove_unused_solutions
@@ -37,6 +36,10 @@
schema['state_of'].inlined = False
schema['comments'].inlined = False
+def teardown_module(*args):
+ global config, schema
+ del config, schema
+
PARSER = [
(r"Personne P WHERE P nom 'Zig\'oto';",
'''SELECT _P.cw_eid
@@ -1068,7 +1071,7 @@
WHERE rel_is0.eid_to=2'''),
]
-from logilab.common.adbh import get_adv_func_helper
+from logilab.database import get_db_helper
class CWRQLTC(RQLGeneratorTC):
schema = schema
@@ -1102,12 +1105,7 @@
#capture = True
def setUp(self):
RQLGeneratorTC.setUp(self)
- indexer = get_indexer('postgres', 'utf8')
- dbms_helper = get_adv_func_helper('postgres')
- dbms_helper.fti_uid_attr = indexer.uid_attr
- dbms_helper.fti_table = indexer.table
- dbms_helper.fti_restriction_sql = indexer.restriction_sql
- dbms_helper.fti_need_distinct_query = indexer.need_distinct
+ dbms_helper = get_db_helper('postgres')
self.o = SQLGenerator(schema, dbms_helper)
def _norm_sql(self, sql):
@@ -1208,6 +1206,13 @@
FROM cw_CWUser AS _X
WHERE _X.cw_login IS NULL''')
+
+ def test_date_extraction(self):
+ self._check("Any MONTH(D) WHERE P is Personne, P creation_date D",
+ '''SELECT CAST(EXTRACT(MONTH from _P.cw_creation_date) AS INTEGER)
+FROM cw_Personne AS _P''')
+
+
def test_parser_parse(self):
for t in self._parse(PARSER):
yield t
@@ -1405,17 +1410,17 @@
def setUp(self):
RQLGeneratorTC.setUp(self)
- indexer = get_indexer('sqlite', 'utf8')
- dbms_helper = get_adv_func_helper('sqlite')
- dbms_helper.fti_uid_attr = indexer.uid_attr
- dbms_helper.fti_table = indexer.table
- dbms_helper.fti_restriction_sql = indexer.restriction_sql
- dbms_helper.fti_need_distinct_query = indexer.need_distinct
+ dbms_helper = get_db_helper('sqlite')
self.o = SQLGenerator(schema, dbms_helper)
def _norm_sql(self, sql):
return sql.strip().replace(' ILIKE ', ' LIKE ').replace('\nINTERSECT ALL\n', '\nINTERSECT\n')
+ def test_date_extraction(self):
+ self._check("Any MONTH(D) WHERE P is Personne, P creation_date D",
+ '''SELECT MONTH(_P.cw_creation_date)
+FROM cw_Personne AS _P''')
+
def test_union(self):
for t in self._parse((
('(Any N ORDERBY 1 WHERE X name N, X is State)'
@@ -1513,12 +1518,7 @@
def setUp(self):
RQLGeneratorTC.setUp(self)
- indexer = get_indexer('mysql', 'utf8')
- dbms_helper = get_adv_func_helper('mysql')
- dbms_helper.fti_uid_attr = indexer.uid_attr
- dbms_helper.fti_table = indexer.table
- dbms_helper.fti_restriction_sql = indexer.restriction_sql
- dbms_helper.fti_need_distinct_query = indexer.need_distinct
+ dbms_helper = get_db_helper('mysql')
self.o = SQLGenerator(schema, dbms_helper)
def _norm_sql(self, sql):
@@ -1533,6 +1533,11 @@
latest = firstword
return '\n'.join(newsql)
+ def test_date_extraction(self):
+ self._check("Any MONTH(D) WHERE P is Personne, P creation_date D",
+ '''SELECT EXTRACT(MONTH from _P.cw_creation_date)
+FROM cw_Personne AS _P''')
+
def test_from_clause_needed(self):
queries = [("Any 1 WHERE EXISTS(T is CWGroup, T name 'managers')",
'''SELECT 1
--- a/server/test/unittest_rqlannotation.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/test/unittest_rqlannotation.py Wed Mar 24 10:23:57 2010 +0100
@@ -8,6 +8,11 @@
repo, cnx = init_test_database()
+def teardown_module(*args):
+ global repo, cnx
+ del repo, cnx
+
+
class SQLGenAnnotatorTC(BaseQuerierTC):
repo = repo
--- a/server/test/unittest_schemaserial.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/test/unittest_schemaserial.py Wed Mar 24 10:23:57 2010 +0100
@@ -15,9 +15,19 @@
config.bootstrap_cubes()
schema = loader.load(config)
+def teardown_module(*args):
+ global schema, config, loader
+ del schema, config, loader
+
from cubicweb.server.schemaserial import *
from cubicweb.server.schemaserial import _erperms2rql as erperms2rql
+cstrtypemap = {'RQLConstraint': 'RQLConstraint_eid',
+ 'SizeConstraint': 'SizeConstraint_eid',
+ 'StaticVocabularyConstraint': 'StaticVocabularyConstraint_eid',
+ 'FormatConstraint': 'FormatConstraint_eid',
+ }
+
class Schema2RQLTC(TestCase):
def test_eschema2rql1(self):
@@ -34,104 +44,124 @@
{'description': u'', 'final': True, 'name': u'String'})])
def test_eschema2rql_specialization(self):
+ # x: None since the eschema eids are None
self.assertListEquals(sorted(specialize2rql(schema)),
- [('SET X specializes ET WHERE X name %(x)s, ET name %(et)s',
- {'et': 'BaseTransition', 'x': 'Transition'}),
- ('SET X specializes ET WHERE X name %(x)s, ET name %(et)s',
- {'et': 'BaseTransition', 'x': 'WorkflowTransition'}),
- ('SET X specializes ET WHERE X name %(x)s, ET name %(et)s',
- {'et': 'Division', 'x': 'SubDivision'}),
- # ('SET X specializes ET WHERE X name %(x)s, ET name %(et)s',
+ [('SET X specializes ET WHERE X eid %(x)s, ET eid %(et)s',
+ {'et': None, 'x': None}),
+ ('SET X specializes ET WHERE X eid %(x)s, ET eid %(et)s',
+ {'et': None, 'x': None}),
+ ('SET X specializes ET WHERE X eid %(x)s, ET eid %(et)s',
+ {'et': None, 'x': None}),
+ # ('SET X specializes ET WHERE X eid %(x)s, ET eid %(et)s',
# {'et': 'File', 'x': 'Image'}),
- ('SET X specializes ET WHERE X name %(x)s, ET name %(et)s',
- {'et': 'Societe', 'x': 'Division'})])
+ ('SET X specializes ET WHERE X eid %(x)s, ET eid %(et)s',
+ {'et': None, 'x': None})])
def test_rschema2rql1(self):
- self.assertListEquals(list(rschema2rql(schema.rschema('relation_type'))),
+ self.assertListEquals(list(rschema2rql(schema.rschema('relation_type'), cstrtypemap)),
[
('INSERT CWRType X: X description %(description)s,X final %(final)s,X fulltext_container %(fulltext_container)s,X inlined %(inlined)s,X name %(name)s,X symmetric %(symmetric)s',
{'description': u'link a relation definition to its relation type', 'symmetric': False, 'name': u'relation_type', 'final' : False, 'fulltext_container': None, 'inlined': True}),
- ('INSERT CWRelation X: X cardinality %(cardinality)s,X composite %(composite)s,X description %(description)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE name %(se)s,ER name %(rt)s,OE name %(oe)s',
- {'rt': 'relation_type', 'description': u'', 'composite': u'object', 'oe': 'CWRType',
- 'ordernum': 1, 'cardinality': u'1*', 'se': 'CWAttribute'}),
- ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE CT name %(ctname)s, EDEF relation_type ER, EDEF from_entity SE, EDEF to_entity OE, ER name %(rt)s, SE name %(se)s, OE name %(oe)s, EDEF is CWRelation',
- {'rt': 'relation_type', 'oe': 'CWRType', 'ctname': u'RQLConstraint', 'se': 'CWAttribute', 'value': u';O;O final TRUE\n'}),
+ ('INSERT CWRelation X: X cardinality %(cardinality)s,X composite %(composite)s,X description %(description)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE eid %(se)s,ER eid %(rt)s,OE eid %(oe)s',
+ {'se': None, 'rt': None, 'oe': None,
+ 'description': u'', 'composite': u'object', 'cardinality': u'1*',
+ 'ordernum': 1}),
+ ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE CT eid %(ct)s, EDEF eid %(x)s',
+ {'x': None, 'ct': u'RQLConstraint_eid', 'value': u';O;O final TRUE\n'}),
- ('INSERT CWRelation X: X cardinality %(cardinality)s,X composite %(composite)s,X description %(description)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE name %(se)s,ER name %(rt)s,OE name %(oe)s',
- {'rt': 'relation_type', 'description': u'', 'composite': u'object', 'oe': 'CWRType',
- 'ordernum': 1, 'cardinality': u'1*', 'se': 'CWRelation'}),
- ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE CT name %(ctname)s, EDEF relation_type ER, EDEF from_entity SE, EDEF to_entity OE, ER name %(rt)s, SE name %(se)s, OE name %(oe)s, EDEF is CWRelation',
- {'rt': 'relation_type', 'oe': 'CWRType', 'ctname': u'RQLConstraint', 'se': 'CWRelation', 'value': u';O;O final FALSE\n'}),
+ ('INSERT CWRelation X: X cardinality %(cardinality)s,X composite %(composite)s,X description %(description)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE eid %(se)s,ER eid %(rt)s,OE eid %(oe)s',
+ {'se': None, 'rt': None, 'oe': None,
+ 'description': u'', 'composite': u'object',
+ 'ordernum': 1, 'cardinality': u'1*'}),
+ ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE CT eid %(ct)s, EDEF eid %(x)s',
+ {'x': None, 'ct': u'RQLConstraint_eid', 'value': u';O;O final FALSE\n'}),
])
def test_rschema2rql2(self):
- self.assertListEquals(list(rschema2rql(schema.rschema('add_permission'))),
+ self.assertListEquals(list(rschema2rql(schema.rschema('add_permission'), cstrtypemap)),
[
('INSERT CWRType X: X description %(description)s,X final %(final)s,X fulltext_container %(fulltext_container)s,X inlined %(inlined)s,X name %(name)s,X symmetric %(symmetric)s', {'description': u'', 'symmetric': False, 'name': u'add_permission', 'final': False, 'fulltext_container': None, 'inlined': False}),
- ('INSERT CWRelation X: X cardinality %(cardinality)s,X composite %(composite)s,X description %(description)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE name %(se)s,ER name %(rt)s,OE name %(oe)s',
- {'rt': 'add_permission', 'description': u'groups allowed to add entities/relations of this type', 'composite': None, 'oe': 'CWGroup', 'ordernum': 9999, 'cardinality': u'**', 'se': 'CWEType'}),
- ('INSERT CWRelation X: X cardinality %(cardinality)s,X composite %(composite)s,X description %(description)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE name %(se)s,ER name %(rt)s,OE name %(oe)s',
- {'rt': 'add_permission', 'description': u'rql expression allowing to add entities/relations of this type', 'composite': 'subject', 'oe': 'RQLExpression', 'ordernum': 9999, 'cardinality': u'*?', 'se': 'CWEType'}),
+ ('INSERT CWRelation X: X cardinality %(cardinality)s,X composite %(composite)s,X description %(description)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE eid %(se)s,ER eid %(rt)s,OE eid %(oe)s',
+ {'se': None, 'rt': None, 'oe': None,
+ 'description': u'groups allowed to add entities/relations of this type', 'composite': None, 'ordernum': 9999, 'cardinality': u'**'}),
+ ('INSERT CWRelation X: X cardinality %(cardinality)s,X composite %(composite)s,X description %(description)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE eid %(se)s,ER eid %(rt)s,OE eid %(oe)s',
+ {'se': None, 'rt': None, 'oe': None,
+ 'description': u'rql expression allowing to add entities/relations of this type', 'composite': 'subject', 'ordernum': 9999, 'cardinality': u'*?'}),
- ('INSERT CWRelation X: X cardinality %(cardinality)s,X composite %(composite)s,X description %(description)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE name %(se)s,ER name %(rt)s,OE name %(oe)s',
- {'rt': 'add_permission', 'description': u'groups allowed to add entities/relations of this type', 'composite': None, 'oe': 'CWGroup', 'ordernum': 9999, 'cardinality': u'**', 'se': 'CWRelation'}),
- ('INSERT CWRelation X: X cardinality %(cardinality)s,X composite %(composite)s,X description %(description)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE name %(se)s,ER name %(rt)s,OE name %(oe)s',
- {'rt': 'add_permission', 'description': u'rql expression allowing to add entities/relations of this type', 'composite': 'subject', 'oe': 'RQLExpression', 'ordernum': 9999, 'cardinality': u'*?', 'se': 'CWRelation'}),
+ ('INSERT CWRelation X: X cardinality %(cardinality)s,X composite %(composite)s,X description %(description)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE eid %(se)s,ER eid %(rt)s,OE eid %(oe)s',
+ {'se': None, 'rt': None, 'oe': None,
+ 'description': u'groups allowed to add entities/relations of this type', 'composite': None, 'ordernum': 9999, 'cardinality': u'**'}),
+ ('INSERT CWRelation X: X cardinality %(cardinality)s,X composite %(composite)s,X description %(description)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE eid %(se)s,ER eid %(rt)s,OE eid %(oe)s',
+ {'se': None, 'rt': None, 'oe': None,
+ 'description': u'rql expression allowing to add entities/relations of this type', 'composite': 'subject', 'ordernum': 9999, 'cardinality': u'*?'}),
])
def test_rschema2rql3(self):
- self.assertListEquals(list(rschema2rql(schema.rschema('cardinality'))),
+ self.assertListEquals(list(rschema2rql(schema.rschema('cardinality'), cstrtypemap)),
[
('INSERT CWRType X: X description %(description)s,X final %(final)s,X fulltext_container %(fulltext_container)s,X inlined %(inlined)s,X name %(name)s,X symmetric %(symmetric)s',
{'description': u'', 'symmetric': False, 'name': u'cardinality', 'final': True, 'fulltext_container': None, 'inlined': False}),
- ('INSERT CWAttribute X: X cardinality %(cardinality)s,X defaultval %(defaultval)s,X description %(description)s,X fulltextindexed %(fulltextindexed)s,X indexed %(indexed)s,X internationalizable %(internationalizable)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE name %(se)s,ER name %(rt)s,OE name %(oe)s',
- {'rt': 'cardinality', 'description': u'subject/object cardinality', 'internationalizable': True, 'fulltextindexed': False, 'ordernum': 5, 'defaultval': None, 'indexed': False, 'cardinality': u'?1', 'oe': 'String', 'se': 'CWAttribute'}),
- ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE CT name %(ctname)s, EDEF relation_type ER, EDEF from_entity SE, EDEF to_entity OE, ER name %(rt)s, SE name %(se)s, OE name %(oe)s, EDEF is CWAttribute',
- {'rt': 'cardinality', 'oe': 'String', 'ctname': u'SizeConstraint', 'se': 'CWAttribute', 'value': u'max=2'}),
- ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE CT name %(ctname)s, EDEF relation_type ER, EDEF from_entity SE, EDEF to_entity OE, ER name %(rt)s, SE name %(se)s, OE name %(oe)s, EDEF is CWAttribute',
- {'rt': 'cardinality', 'oe': 'String', 'ctname': u'StaticVocabularyConstraint', 'se': 'CWAttribute', 'value': u"u'?1', u'11'"}),
+ ('INSERT CWAttribute X: X cardinality %(cardinality)s,X defaultval %(defaultval)s,X description %(description)s,X fulltextindexed %(fulltextindexed)s,X indexed %(indexed)s,X internationalizable %(internationalizable)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE eid %(se)s,ER eid %(rt)s,OE eid %(oe)s',
+ {'se': None, 'rt': None, 'oe': None,
+ 'description': u'subject/object cardinality', 'internationalizable': True, 'fulltextindexed': False, 'ordernum': 5, 'defaultval': None, 'indexed': False, 'cardinality': u'?1'}),
+ ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE CT eid %(ct)s, EDEF eid %(x)s',
+ {'x': None, 'ct': u'SizeConstraint_eid', 'value': u'max=2'}),
+ ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE CT eid %(ct)s, EDEF eid %(x)s',
+ {'x': None, 'ct': u'StaticVocabularyConstraint_eid', 'value': u"u'?1', u'11'"}),
- ('INSERT CWAttribute X: X cardinality %(cardinality)s,X defaultval %(defaultval)s,X description %(description)s,X fulltextindexed %(fulltextindexed)s,X indexed %(indexed)s,X internationalizable %(internationalizable)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE name %(se)s,ER name %(rt)s,OE name %(oe)s',
- {'rt': 'cardinality', 'description': u'subject/object cardinality', 'internationalizable': True, 'fulltextindexed': False, 'ordernum': 5, 'defaultval': None, 'indexed': False, 'cardinality': u'?1', 'oe': 'String', 'se': 'CWRelation'}),
- ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE CT name %(ctname)s, EDEF relation_type ER, EDEF from_entity SE, EDEF to_entity OE, ER name %(rt)s, SE name %(se)s, OE name %(oe)s, EDEF is CWAttribute',
- {'rt': 'cardinality', 'oe': 'String', 'ctname': u'SizeConstraint', 'se': 'CWRelation', 'value': u'max=2'}),
- ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE CT name %(ctname)s, EDEF relation_type ER, EDEF from_entity SE, EDEF to_entity OE, ER name %(rt)s, SE name %(se)s, OE name %(oe)s, EDEF is CWAttribute',
- {'rt': 'cardinality', 'oe': 'String', 'ctname': u'StaticVocabularyConstraint', 'se': 'CWRelation', 'value': u"u'?*', u'1*', u'+*', u'**', u'?+', u'1+', u'++', u'*+', u'?1', u'11', u'+1', u'*1', u'??', u'1?', u'+?', u'*?'"}),
+ ('INSERT CWAttribute X: X cardinality %(cardinality)s,X defaultval %(defaultval)s,X description %(description)s,X fulltextindexed %(fulltextindexed)s,X indexed %(indexed)s,X internationalizable %(internationalizable)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE eid %(se)s,ER eid %(rt)s,OE eid %(oe)s',
+ {'se': None, 'rt': None, 'oe': None,
+ 'description': u'subject/object cardinality', 'internationalizable': True, 'fulltextindexed': False, 'ordernum': 5, 'defaultval': None, 'indexed': False, 'cardinality': u'?1'}),
+ ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE CT eid %(ct)s, EDEF eid %(x)s',
+ {'x': None, 'ct': u'SizeConstraint_eid', 'value': u'max=2'}),
+ ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE CT eid %(ct)s, EDEF eid %(x)s',
+ {'x': None, 'ct': u'StaticVocabularyConstraint_eid', 'value': u"u'?*', u'1*', u'+*', u'**', u'?+', u'1+', u'++', u'*+', u'?1', u'11', u'+1', u'*1', u'??', u'1?', u'+?', u'*?'"}),
])
+ def test_rdef2rql(self):
+ self.assertListEquals(list(rdef2rql(schema['description_format'].rdefs[('CWRType', 'String')], cstrtypemap)),
+ [
+ ('INSERT CWAttribute X: X cardinality %(cardinality)s,X defaultval %(defaultval)s,X description %(description)s,X fulltextindexed %(fulltextindexed)s,X indexed %(indexed)s,X internationalizable %(internationalizable)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE eid %(se)s,ER eid %(rt)s,OE eid %(oe)s',
+ {'se': None, 'rt': None, 'oe': None,
+ 'description': u'', 'internationalizable': True, 'fulltextindexed': False, 'ordernum': 7, 'defaultval': u'text/plain', 'indexed': False, 'cardinality': u'?1'}),
+ ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE CT eid %(ct)s, EDEF eid %(x)s',
+ {'x': None, 'value': u'None', 'ct': 'FormatConstraint_eid'}),
+ ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE CT eid %(ct)s, EDEF eid %(x)s',
+ {'x': None, 'value': u'max=50', 'ct': 'SizeConstraint_eid'})])
+
def test_updateeschema2rql1(self):
- self.assertListEquals(list(updateeschema2rql(schema.eschema('CWAttribute'))),
- [('SET X description %(description)s,X final %(final)s,X name %(name)s WHERE X is CWEType, X name %(et)s',
- {'description': u'define a final relation: link a final relation type from a non final entity to a final entity type. used to build the instance schema', 'et': 'CWAttribute', 'final': False, 'name': u'CWAttribute'}),
+ self.assertListEquals(list(updateeschema2rql(schema.eschema('CWAttribute'), 1)),
+ [('SET X description %(description)s,X final %(final)s,X name %(name)s WHERE X eid %(x)s',
+ {'description': u'define a final relation: link a final relation type from a non final entity to a final entity type. used to build the instance schema', 'x': 1, 'final': False, 'name': u'CWAttribute'}),
])
def test_updateeschema2rql2(self):
- self.assertListEquals(list(updateeschema2rql(schema.eschema('String'))),
- [('SET X description %(description)s,X final %(final)s,X name %(name)s WHERE X is CWEType, X name %(et)s',
- {'description': u'', 'et': 'String', 'final': True, 'name': u'String'})
+ self.assertListEquals(list(updateeschema2rql(schema.eschema('String'), 1)),
+ [('SET X description %(description)s,X final %(final)s,X name %(name)s WHERE X eid %(x)s',
+ {'description': u'', 'x': 1, 'final': True, 'name': u'String'})
])
def test_updaterschema2rql1(self):
- self.assertListEquals(list(updaterschema2rql(schema.rschema('relation_type'))),
+ self.assertListEquals(list(updaterschema2rql(schema.rschema('relation_type'), 1)),
[
- ('SET X description %(description)s,X final %(final)s,X fulltext_container %(fulltext_container)s,X inlined %(inlined)s,X name %(name)s,X symmetric %(symmetric)s WHERE X is CWRType, X name %(rt)s',
- {'rt': 'relation_type', 'symmetric': False,
+ ('SET X description %(description)s,X final %(final)s,X fulltext_container %(fulltext_container)s,X inlined %(inlined)s,X name %(name)s,X symmetric %(symmetric)s WHERE X eid %(x)s',
+ {'x': 1, 'symmetric': False,
'description': u'link a relation definition to its relation type',
'final': False, 'fulltext_container': None, 'inlined': True, 'name': u'relation_type'})
])
def test_updaterschema2rql2(self):
expected = [
- ('SET X description %(description)s,X final %(final)s,X fulltext_container %(fulltext_container)s,X inlined %(inlined)s,X name %(name)s,X symmetric %(symmetric)s WHERE X is CWRType, X name %(rt)s',
- {'rt': 'add_permission', 'symmetric': False,
+ ('SET X description %(description)s,X final %(final)s,X fulltext_container %(fulltext_container)s,X inlined %(inlined)s,X name %(name)s,X symmetric %(symmetric)s WHERE X eid %(x)s',
+ {'x': 1, 'symmetric': False,
'description': u'', 'final': False, 'fulltext_container': None,
'inlined': False, 'name': u'add_permission'})
]
- for i, (rql, args) in enumerate(updaterschema2rql(schema.rschema('add_permission'))):
+ for i, (rql, args) in enumerate(updaterschema2rql(schema.rschema('add_permission'), 1)):
yield self.assertEquals, (rql, args), expected[i]
class Perms2RQLTC(TestCase):
@@ -144,29 +174,29 @@
def test_eperms2rql1(self):
self.assertListEquals([(rql, kwargs) for rql, kwargs in erperms2rql(schema.eschema('CWEType'), self.GROUP_MAPPING)],
- [('SET X read_permission Y WHERE Y eid %(g)s, ', {'g': 0}),
- ('SET X read_permission Y WHERE Y eid %(g)s, ', {'g': 1}),
- ('SET X read_permission Y WHERE Y eid %(g)s, ', {'g': 2}),
- ('SET X add_permission Y WHERE Y eid %(g)s, ', {'g': 0}),
- ('SET X update_permission Y WHERE Y eid %(g)s, ', {'g': 0}),
- ('SET X delete_permission Y WHERE Y eid %(g)s, ', {'g': 0}),
+ [('SET X read_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 0}),
+ ('SET X read_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 1}),
+ ('SET X read_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 2}),
+ ('SET X add_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 0}),
+ ('SET X update_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 0}),
+ ('SET X delete_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 0}),
])
def test_rperms2rql2(self):
self.assertListEquals([(rql, kwargs) for rql, kwargs in erperms2rql(schema.rschema('read_permission').rdef('CWEType', 'CWGroup'), self.GROUP_MAPPING)],
- [('SET X read_permission Y WHERE Y eid %(g)s, ', {'g': 0}),
- ('SET X read_permission Y WHERE Y eid %(g)s, ', {'g': 1}),
- ('SET X read_permission Y WHERE Y eid %(g)s, ', {'g': 2}),
- ('SET X add_permission Y WHERE Y eid %(g)s, ', {'g': 0}),
- ('SET X delete_permission Y WHERE Y eid %(g)s, ', {'g': 0}),
+ [('SET X read_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 0}),
+ ('SET X read_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 1}),
+ ('SET X read_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 2}),
+ ('SET X add_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 0}),
+ ('SET X delete_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 0}),
])
def test_rperms2rql3(self):
self.assertListEquals([(rql, kwargs) for rql, kwargs in erperms2rql(schema.rschema('name').rdef('CWEType', 'String'), self.GROUP_MAPPING)],
- [('SET X read_permission Y WHERE Y eid %(g)s, ', {'g': 0}),
- ('SET X read_permission Y WHERE Y eid %(g)s, ', {'g': 1}),
- ('SET X read_permission Y WHERE Y eid %(g)s, ', {'g': 2}),
- ('SET X update_permission Y WHERE Y eid %(g)s, ', {'g': 0}),
+ [('SET X read_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 0}),
+ ('SET X read_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 1}),
+ ('SET X read_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 2}),
+ ('SET X update_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 0}),
])
#def test_perms2rql(self):
--- a/server/test/unittest_sqlutils.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/test/unittest_sqlutils.py Wed Mar 24 10:23:57 2010 +0100
@@ -20,13 +20,13 @@
def test_init(self):
o = SQLAdapterMixIn(BASE_CONFIG)
- self.assertEquals(o.encoding, 'UTF-8')
+ self.assertEquals(o.dbhelper.dbencoding, 'UTF-8')
def test_init_encoding(self):
config = BASE_CONFIG.copy()
config['db-encoding'] = 'ISO-8859-1'
o = SQLAdapterMixIn(config)
- self.assertEquals(o.encoding, 'ISO-8859-1')
+ self.assertEquals(o.dbhelper.dbencoding, 'ISO-8859-1')
if __name__ == '__main__':
unittest_main()
--- a/server/test/unittest_ssplanner.py Wed Mar 24 08:40:00 2010 +0100
+++ b/server/test/unittest_ssplanner.py Wed Mar 24 10:23:57 2010 +0100
@@ -12,6 +12,10 @@
# keep cnx so it's not garbage collected and the associated session closed
repo, cnx = init_test_database()
+def teardown_module(*args):
+ global repo, cnx
+ del repo, cnx
+
class SSPlannerTC(BasePlannerTC):
repo = repo
_test = test_plan
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/server/test/unittest_storage.py Wed Mar 24 10:23:57 2010 +0100
@@ -0,0 +1,91 @@
+"""unit tests for module cubicweb.server.sources.storages
+
+:organization: Logilab
+:copyright: 2010 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
+:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
+:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
+"""
+
+from logilab.common.testlib import unittest_main
+from cubicweb.devtools.testlib import CubicWebTC
+
+import os.path as osp
+import shutil
+import tempfile
+
+from cubicweb import Binary
+from cubicweb.selectors import implements
+from cubicweb.server.sources import storages
+from cubicweb.server.hook import Hook, Operation
+
+class DummyBeforeHook(Hook):
+ __regid__ = 'dummy-before-hook'
+ __select__ = Hook.__select__ & implements('File')
+ events = ('before_add_entity',)
+
+ def __call__(self):
+ self._cw.transaction_data['orig_file_value'] = self.entity.data.getvalue()
+
+
+class DummyAfterHook(Hook):
+ __regid__ = 'dummy-after-hook'
+ __select__ = Hook.__select__ & implements('File')
+ events = ('after_add_entity',)
+
+ def __call__(self):
+ # new value of entity.data should be the same as before
+ oldvalue = self._cw.transaction_data['orig_file_value']
+ assert oldvalue == self.entity.data.getvalue()
+
+
+class StorageTC(CubicWebTC):
+
+ def setup_database(self):
+ self.tempdir = tempfile.mkdtemp()
+ bfs_storage = storages.BytesFileSystemStorage(self.tempdir)
+ storages.set_attribute_storage(self.repo, 'File', 'data', bfs_storage)
+
+ def tearDown(self):
+ super(CubicWebTC, self).tearDown()
+ storages.unset_attribute_storage(self.repo, 'File', 'data')
+ shutil.rmtree(self.tempdir)
+
+
+ def create_file(self, content):
+ req = self.request()
+ return req.create_entity('File', data=Binary(content),
+ data_format=u'text/plain', data_name=u'foo')
+
+ def test_bfs_storage(self):
+ f1 = self.create_file(content='the-data')
+ expected_filepath = osp.join(self.tempdir, '%s_data' % f1.eid)
+ self.failUnless(osp.isfile(expected_filepath))
+ self.assertEquals(file(expected_filepath).read(), 'the-data')
+
+ def test_sqlite_fspath(self):
+ f1 = self.create_file(content='the-data')
+ expected_filepath = osp.join(self.tempdir, '%s_data' % f1.eid)
+ fspath = self.execute('Any fspath(F, "File", "data") WHERE F eid %(f)s',
+ {'f': f1.eid})[0][0]
+ self.assertEquals(fspath.getvalue(), expected_filepath)
+
+ def test_fs_importing_doesnt_touch_path(self):
+ self.session.transaction_data['fs_importing'] = True
+ f1 = self.session.create_entity('File', data=Binary('/the/path'),
+ data_format=u'text/plain', data_name=u'foo')
+ fspath = self.execute('Any fspath(F, "File", "data") WHERE F eid %(f)s',
+ {'f': f1.eid})[0][0]
+ self.assertEquals(fspath.getvalue(), '/the/path')
+
+ def test_storage_transparency(self):
+ self.vreg._loadedmods[__name__] = {}
+ self.vreg.register(DummyBeforeHook)
+ self.vreg.register(DummyAfterHook)
+ try:
+ self.create_file(content='the-data')
+ finally:
+ self.vreg.unregister(DummyBeforeHook)
+ self.vreg.unregister(DummyAfterHook)
+
+if __name__ == '__main__':
+ unittest_main()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/server/test/unittest_undo.py Wed Mar 24 10:23:57 2010 +0100
@@ -0,0 +1,206 @@
+"""
+
+:organization: Logilab
+:copyright: 2001-2010 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
+:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
+:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
+"""
+from __future__ import with_statement
+
+from cubicweb import ValidationError
+from cubicweb.devtools.testlib import CubicWebTC
+from cubicweb.transaction import *
+
+class UndoableTransactionTC(CubicWebTC):
+
+ def setup_database(self):
+ self.session.undo_actions = set('CUDAR')
+ self.toto = self.create_user('toto', password='toto', groups=('users',),
+ commit=False)
+ self.txuuid = self.commit()
+
+ def tearDown(self):
+ self.restore_connection()
+ self.session.undo_support = set()
+ super(UndoableTransactionTC, self).tearDown()
+
+ def test_undo_api(self):
+ self.failUnless(self.txuuid)
+ # test transaction api
+ self.assertRaises(NoSuchTransaction,
+ self.cnx.transaction_info, 'hop')
+ self.assertRaises(NoSuchTransaction,
+ self.cnx.transaction_actions, 'hop')
+ self.assertRaises(NoSuchTransaction,
+ self.cnx.undo_transaction, 'hop')
+ txinfo = self.cnx.transaction_info(self.txuuid)
+ self.failUnless(txinfo.datetime)
+ self.assertEquals(txinfo.user_eid, self.session.user.eid)
+ self.assertEquals(txinfo.user().login, 'admin')
+ actions = txinfo.actions_list()
+ self.assertEquals(len(actions), 2)
+ actions = txinfo.actions_list(public=False)
+ self.assertEquals(len(actions), 6)
+ a1 = actions[0]
+ self.assertEquals(a1.action, 'C')
+ self.assertEquals(a1.eid, self.toto.eid)
+ self.assertEquals(a1.etype,'CWUser')
+ self.assertEquals(a1.changes, None)
+ self.assertEquals(a1.public, True)
+ self.assertEquals(a1.order, 1)
+ a4 = actions[3]
+ self.assertEquals(a4.action, 'A')
+ self.assertEquals(a4.rtype, 'in_group')
+ self.assertEquals(a4.eid_from, self.toto.eid)
+ self.assertEquals(a4.eid_to, self.toto.in_group[0].eid)
+ self.assertEquals(a4.order, 4)
+ for i, rtype in ((1, 'owned_by'), (2, 'owned_by'),
+ (4, 'created_by'), (5, 'in_state')):
+ a = actions[i]
+ self.assertEquals(a.action, 'A')
+ self.assertEquals(a.eid_from, self.toto.eid)
+ self.assertEquals(a.rtype, rtype)
+ self.assertEquals(a.order, i+1)
+ # test undoable_transactions
+ txs = self.cnx.undoable_transactions()
+ self.assertEquals(len(txs), 1)
+ self.assertEquals(txs[0].uuid, self.txuuid)
+ # test transaction_info / undoable_transactions security
+ cnx = self.login('anon')
+ self.assertRaises(NoSuchTransaction,
+ cnx.transaction_info, self.txuuid)
+ self.assertRaises(NoSuchTransaction,
+ cnx.transaction_actions, self.txuuid)
+ self.assertRaises(NoSuchTransaction,
+ cnx.undo_transaction, self.txuuid)
+ txs = cnx.undoable_transactions()
+ self.assertEquals(len(txs), 0)
+
+ def test_undoable_transactions(self):
+ toto = self.toto
+ e = self.session.create_entity('EmailAddress',
+ address=u'toto@logilab.org',
+ reverse_use_email=toto)
+ txuuid1 = self.commit()
+ toto.delete()
+ txuuid2 = self.commit()
+ undoable_transactions = self.cnx.undoable_transactions
+ txs = undoable_transactions(action='D')
+ self.assertEquals(len(txs), 1, txs)
+ self.assertEquals(txs[0].uuid, txuuid2)
+ txs = undoable_transactions(action='C')
+ self.assertEquals(len(txs), 2, txs)
+ self.assertEquals(txs[0].uuid, txuuid1)
+ self.assertEquals(txs[1].uuid, self.txuuid)
+ txs = undoable_transactions(eid=toto.eid)
+ self.assertEquals(len(txs), 3)
+ self.assertEquals(txs[0].uuid, txuuid2)
+ self.assertEquals(txs[1].uuid, txuuid1)
+ self.assertEquals(txs[2].uuid, self.txuuid)
+ txs = undoable_transactions(etype='CWUser')
+ self.assertEquals(len(txs), 2)
+ txs = undoable_transactions(etype='CWUser', action='C')
+ self.assertEquals(len(txs), 1)
+ self.assertEquals(txs[0].uuid, self.txuuid)
+ txs = undoable_transactions(etype='EmailAddress', action='D')
+ self.assertEquals(len(txs), 0)
+ txs = undoable_transactions(etype='EmailAddress', action='D',
+ public=False)
+ self.assertEquals(len(txs), 1)
+ self.assertEquals(txs[0].uuid, txuuid2)
+ txs = undoable_transactions(eid=toto.eid, action='R', public=False)
+ self.assertEquals(len(txs), 1)
+ self.assertEquals(txs[0].uuid, txuuid2)
+
+ def test_undo_deletion_base(self):
+ toto = self.toto
+ e = self.session.create_entity('EmailAddress',
+ address=u'toto@logilab.org',
+ reverse_use_email=toto)
+ # entity with inlined relation
+ p = self.session.create_entity('CWProperty',
+ pkey=u'ui.default-text-format',
+ value=u'text/rest',
+ for_user=toto)
+ self.commit()
+ txs = self.cnx.undoable_transactions()
+ self.assertEquals(len(txs), 2)
+ toto.delete()
+ txuuid = self.commit()
+ actions = self.cnx.transaction_info(txuuid).actions_list()
+ self.assertEquals(len(actions), 1)
+ toto.clear_all_caches()
+ e.clear_all_caches()
+ errors = self.cnx.undo_transaction(txuuid)
+ undotxuuid = self.commit()
+ self.assertEquals(undotxuuid, None) # undo not undoable
+ self.assertEquals(errors, [])
+ self.failUnless(self.execute('Any X WHERE X eid %(x)s', {'x': toto.eid}, 'x'))
+ self.failUnless(self.execute('Any X WHERE X eid %(x)s', {'x': e.eid}, 'x'))
+ self.failUnless(self.execute('Any X WHERE X has_text "toto@logilab"'))
+ self.assertEquals(toto.state, 'activated')
+ self.assertEquals(toto.get_email(), 'toto@logilab.org')
+ self.assertEquals([(p.pkey, p.value) for p in toto.reverse_for_user],
+ [('ui.default-text-format', 'text/rest')])
+ self.assertEquals([g.name for g in toto.in_group],
+ ['users'])
+ self.assertEquals([et.name for et in toto.related('is', entities=True)],
+ ['CWUser'])
+ self.assertEquals([et.name for et in toto.is_instance_of],
+ ['CWUser'])
+ # undoing shouldn't be visible in undoable transactions, and the undone
+ # transaction should be removed
+ txs = self.cnx.undoable_transactions()
+ self.assertEquals(len(txs), 2)
+ self.assertRaises(NoSuchTransaction,
+ self.cnx.transaction_info, txuuid)
+ # also check transaction actions have been properly deleted
+ cu = self.session.system_sql(
+ "SELECT * from tx_entity_actions WHERE tx_uuid='%s'" % txuuid)
+ self.failIf(cu.fetchall())
+ cu = self.session.system_sql(
+ "SELECT * from tx_relation_actions WHERE tx_uuid='%s'" % txuuid)
+ self.failIf(cu.fetchall())
+ # the final test: check we can login with the previously deleted user
+ self.login('toto')
+
+ def test_undo_deletion_integrity_1(self):
+ session = self.session
+ # 'Personne fiche Card' with '??' cardinality
+ c = session.create_entity('Card', title=u'hop', content=u'hop')
+ p = session.create_entity('Personne', nom=u'louis', fiche=c)
+ self.commit()
+ c.delete()
+ txuuid = self.commit()
+ c2 = session.create_entity('Card', title=u'hip', content=u'hip')
+ p.set_relations(fiche=c2)
+ self.commit()
+ errors = self.cnx.undo_transaction(txuuid)
+ self.commit()
+ p.clear_all_caches()
+ self.assertEquals(p.fiche[0].eid, c2.eid)
+ self.assertEquals(len(errors), 1)
+ self.assertEquals(errors[0],
+ "Can't restore object relation fiche to entity "
+ "%s which is already linked using this relation." % p.eid)
+
+ def test_undo_deletion_integrity_2(self):
+ # test validation error raised if we can't restore a required relation
+ session = self.session
+ g = session.create_entity('CWGroup', name=u'staff')
+ session.execute('DELETE U in_group G WHERE U eid %(x)s', {'x': self.toto.eid})
+ self.toto.set_relations(in_group=g)
+ self.commit()
+ self.toto.delete()
+ txuuid = self.commit()
+ g.delete()
+ self.commit()
+ errors = self.cnx.undo_transaction(txuuid)
+ self.assertRaises(ValidationError, self.commit)
+
+ def test_undo_creation(self):
+ # XXX what about relation / composite entities which have been created
+ # afterwards and linked to the undone addition?
+ self.skip('not implemented')
+
+ # test implicit 'replacement' of an inlined relation
--- a/sobjects/notification.py Wed Mar 24 08:40:00 2010 +0100
+++ b/sobjects/notification.py Wed Mar 24 10:23:57 2010 +0100
@@ -33,11 +33,9 @@
def recipients(self):
mode = self._cw.vreg.config['default-recipients-mode']
if mode == 'users':
- # use unsafe execute else we may don't have the right to see users
- # to notify...
- execute = self._cw.unsafe_execute
+ execute = self._cw.execute
dests = [(u.get_email(), u.property_value('ui.language'))
- for u in execute(self.user_rql, build_descr=True, propagate=True).entities()]
+ for u in execute(self.user_rql, build_descr=True).entities()]
elif mode == 'default-dest-addrs':
lang = self._cw.vreg.property_value('ui.language')
dests = zip(self._cw.vreg.config['default-dest-addrs'], repeat(lang))
@@ -158,7 +156,8 @@
if not rdef.has_perm(self._cw, 'read', eid=self.cw_rset[0][0]):
continue
# XXX suppose it's a subject relation...
- elif not rschema.has_perm(self._cw, 'read', fromeid=self.cw_rset[0][0]): # XXX toeid
+ elif not rschema.has_perm(self._cw, 'read',
+ fromeid=self.cw_rset[0][0]):
continue
if attr in self.no_detailed_change_attrs:
msg = _('%s updated') % _(attr)
--- a/sobjects/supervising.py Wed Mar 24 08:40:00 2010 +0100
+++ b/sobjects/supervising.py Wed Mar 24 10:23:57 2010 +0100
@@ -92,7 +92,7 @@
return self._cw._('[%s supervision] changes summary') % self._cw.vreg.config.appid
def call(self, changes):
- user = self._cw.actual_session().user
+ user = self._cw.user
self.w(self._cw._('user %s has made the following change(s):\n\n')
% user.login)
for event, changedescr in filter_changes(changes):
@@ -129,17 +129,16 @@
self.w(u' %s' % entity.absolute_url())
def _relation_context(self, changedescr):
- _ = self._cw._
- session = self._cw.actual_session()
+ session = self._cw
def describe(eid):
try:
- return _(session.describe(eid)[0]).lower()
+ return session._(session.describe(eid)[0]).lower()
except UnknownEid:
# may occurs when an entity has been deleted from an external
# source and we're cleaning its relation
- return _('unknown external entity')
+ return session._('unknown external entity')
eidfrom, rtype, eidto = changedescr.eidfrom, changedescr.rtype, changedescr.eidto
- return {'rtype': _(rtype),
+ return {'rtype': session._(rtype),
'eidfrom': eidfrom,
'frometype': describe(eidfrom),
'eidto': eidto,
--- a/test/data/rewrite/schema.py Wed Mar 24 08:40:00 2010 +0100
+++ b/test/data/rewrite/schema.py Wed Mar 24 10:23:57 2010 +0100
@@ -39,3 +39,10 @@
class require_state(RelationDefinition):
subject = 'CWPermission'
object = 'State'
+
+
+class inlined_card(RelationDefinition):
+ subject = 'Affaire'
+ object = 'Card'
+ inlined = True
+ cardinality = '?*'
--- a/test/unittest_dbapi.py Wed Mar 24 08:40:00 2010 +0100
+++ b/test/unittest_dbapi.py Wed Mar 24 10:23:57 2010 +0100
@@ -5,11 +5,13 @@
:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
"""
+from __future__ import with_statement
+from copy import copy
+
from cubicweb import ConnectionError
from cubicweb.dbapi import ProgrammingError
from cubicweb.devtools.testlib import CubicWebTC
-
class DBAPITC(CubicWebTC):
def test_public_repo_api(self):
@@ -35,8 +37,8 @@
self.assertEquals(cnx.user(None).login, 'anon')
self.assertEquals(cnx.describe(1), (u'CWGroup', u'system', None))
self.restore_connection() # proper way to close cnx
- self.assertRaises(ConnectionError, cnx.user, None)
- self.assertRaises(ConnectionError, cnx.describe, 1)
+ self.assertRaises(ProgrammingError, cnx.user, None)
+ self.assertRaises(ProgrammingError, cnx.describe, 1)
def test_session_data_api(self):
cnx = self.login('anon')
@@ -64,9 +66,10 @@
cnx.set_shared_data('data', 4)
self.assertEquals(cnx.get_shared_data('data'), 4)
self.restore_connection() # proper way to close cnx
- self.assertRaises(ConnectionError, cnx.check)
- self.assertRaises(ConnectionError, cnx.set_shared_data, 'data', 0)
- self.assertRaises(ConnectionError, cnx.get_shared_data, 'data')
+ self.assertRaises(ProgrammingError, cnx.check)
+ self.assertRaises(ProgrammingError, cnx.set_shared_data, 'data', 0)
+ self.assertRaises(ProgrammingError, cnx.get_shared_data, 'data')
+
if __name__ == '__main__':
from logilab.common.testlib import unittest_main
--- a/test/unittest_entity.py Wed Mar 24 08:40:00 2010 +0100
+++ b/test/unittest_entity.py Wed Mar 24 10:23:57 2010 +0100
@@ -436,7 +436,7 @@
def test_complete_relation(self):
session = self.session
- eid = session.unsafe_execute(
+ eid = session.execute(
'INSERT TrInfo X: X comment "zou", X wf_info_for U, X from_state S1, X to_state S2 '
'WHERE U login "admin", S1 name "activated", S2 name "deactivated"')[0][0]
trinfo = self.entity('Any X WHERE X eid %(x)s', {'x': eid}, 'x')
--- a/test/unittest_rqlrewrite.py Wed Mar 24 08:40:00 2010 +0100
+++ b/test/unittest_rqlrewrite.py Wed Mar 24 10:23:57 2010 +0100
@@ -7,7 +7,7 @@
"""
from logilab.common.testlib import unittest_main, TestCase
from logilab.common.testlib import mock_object
-
+from yams import BadSchemaDefinition
from rql import parse, nodes, RQLHelper
from cubicweb import Unauthorized
@@ -123,7 +123,7 @@
"EXISTS(2 in_state A, B in_group D, E require_state A, "
"E name 'read', E require_group D, A is State, D is CWGroup, E is CWPermission)")
- def test_optional_var(self):
+ def test_optional_var_base(self):
card_constraint = ('X in_state S, U in_group G, P require_state S,'
'P name "read", P require_group G')
rqlst = parse('Any A,C WHERE A documented_by C?')
@@ -131,15 +131,51 @@
self.failUnlessEqual(rqlst.as_string(),
"Any A,C WHERE A documented_by C?, A is Affaire "
"WITH C BEING "
- "(Any C WHERE C in_state B, D in_group F, G require_state B, G name 'read', "
- "G require_group F, D eid %(A)s, C is Card)")
+ "(Any C WHERE EXISTS(C in_state B, D in_group F, G require_state B, G name 'read', "
+ "G require_group F), D eid %(A)s, C is Card)")
rqlst = parse('Any A,C,T WHERE A documented_by C?, C title T')
rewrite(rqlst, {('C', 'X'): (card_constraint,)}, {})
self.failUnlessEqual(rqlst.as_string(),
"Any A,C,T WHERE A documented_by C?, A is Affaire "
"WITH C,T BEING "
- "(Any C,T WHERE C in_state B, D in_group F, G require_state B, G name 'read', "
- "G require_group F, C title T, D eid %(A)s, C is Card)")
+ "(Any C,T WHERE C title T, EXISTS(C in_state B, D in_group F, "
+ "G require_state B, G name 'read', G require_group F), "
+ "D eid %(A)s, C is Card)")
+
+ def test_optional_var_inlined(self):
+ c1 = ('X require_permission P')
+ c2 = ('X inlined_card O, O require_permission P')
+ rqlst = parse('Any C,A,R WHERE A? inlined_card C, A ref R')
+ rewrite(rqlst, {('C', 'X'): (c1,),
+ ('A', 'X'): (c2,),
+ }, {})
+ # XXX suboptimal
+ self.failUnlessEqual(rqlst.as_string(),
+ "Any C,A,R WITH A,R,C BEING "
+ "(Any A,R,C WHERE A ref R, A? inlined_card C, "
+ "(A is NULL) OR (EXISTS(A inlined_card B, B require_permission D, "
+ "B is Card, D is CWPermission)), "
+ "A is Affaire, C is Card, EXISTS(C require_permission E, E is CWPermission))")
+
+ # def test_optional_var_inlined_has_perm(self):
+ # c1 = ('X require_permission P')
+ # c2 = ('X inlined_card O, U has_read_permission O')
+ # rqlst = parse('Any C,A,R WHERE A? inlined_card C, A ref R')
+ # rewrite(rqlst, {('C', 'X'): (c1,),
+ # ('A', 'X'): (c2,),
+ # }, {})
+ # self.failUnlessEqual(rqlst.as_string(),
+ # "")
+
+ def test_optional_var_inlined_imbricated_error(self):
+ c1 = ('X require_permission P')
+ c2 = ('X inlined_card O, O require_permission P')
+ rqlst = parse('Any C,A,R,A2,R2 WHERE A? inlined_card C, A ref R,A2? inlined_card C, A2 ref R2')
+ self.assertRaises(BadSchemaDefinition,
+ rewrite, rqlst, {('C', 'X'): (c1,),
+ ('A', 'X'): (c2,),
+ ('A2', 'X'): (c2,),
+ }, {})
def test_relation_optimization_1_lhs(self):
# since Card in_state State as monovalued cardinality, the in_state
@@ -243,7 +279,7 @@
rewrite(rqlst, {('X', 'X'): (constraint,)}, {})
# ambiguity are kept in the sub-query, no need to be resolved using OR
self.failUnlessEqual(rqlst.as_string(),
- u"Any X,C WHERE X? documented_by C, C is Card WITH X BEING (Any X WHERE X concerne A, X is Affaire)")
+ u"Any X,C WHERE X? documented_by C, C is Card WITH X BEING (Any X WHERE EXISTS(X concerne A), X is Affaire)")
def test_rrqlexpr_nonexistant_subject_1(self):
--- a/test/unittest_rset.py Wed Mar 24 08:40:00 2010 +0100
+++ b/test/unittest_rset.py Wed Mar 24 10:23:57 2010 +0100
@@ -11,7 +11,7 @@
from rql import parse
-from logilab.common.testlib import TestCase, unittest_main
+from logilab.common.testlib import TestCase, unittest_main, mock_object
from cubicweb.devtools.testlib import CubicWebTC
from cubicweb.rset import NotAnEntity, ResultSet, attr_desc_iterator
@@ -60,7 +60,7 @@
self.rset = ResultSet([[12, 'adim'], [13, 'syt']],
'Any U,L where U is CWUser, U login L',
description=[['CWUser', 'String'], ['Bar', 'String']])
- self.rset.vreg = self.vreg
+ self.rset.req = mock_object(vreg=self.vreg)
def compare_urls(self, url1, url2):
info1 = urlsplit(url1)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/transaction.py Wed Mar 24 10:23:57 2010 +0100
@@ -0,0 +1,96 @@
+"""undoable transaction objects.
+
+
+This module is in the cubicweb package and not in cubicweb.server because those
+objects should be accessible to client through pyro, where the cubicweb.server
+package may not be installed.
+
+:organization: Logilab
+:copyright: 2010 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
+:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
+:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
+"""
+__docformat__ = "restructuredtext en"
+_ = unicode
+
+from cubicweb import RepositoryError
+
+
+ACTION_LABELS = {
+ 'C': _('entity creation'),
+ 'U': _('entity update'),
+ 'D': _('entity deletion'),
+ 'A': _('relation add'),
+ 'R': _('relation removal'),
+ }
+
+
+class NoSuchTransaction(RepositoryError):
+ pass
+
+
+class Transaction(object):
+ """an undoable transaction"""
+
+ def __init__(self, uuid, time, ueid):
+ self.uuid = uuid
+ self.datetime = time
+ self.user_eid = ueid
+ # should be set by the dbapi connection
+ self.req = None
+
+ def __repr__(self):
+ return '<Transaction %s by %s on %s>' % (
+ self.uuid, self.user_eid, self.datetime)
+
+ def user(self):
+ """return the user entity which has done the transaction,
+        None if not found.
+ """
+ return self.req.execute('Any X WHERE X eid %(x)s',
+ {'x': self.user_eid}, 'x').get_entity(0, 0)
+
+ def actions_list(self, public=True):
+ """return an ordered list of action effectued during that transaction
+
+        if public is true, return only 'public' actions, e.g. not ones triggered
+ under the cover by hooks.
+ """
+ return self.req.cnx.transaction_actions(self.uuid, public)
+
+
+class AbstractAction(object):
+ def __init__(self, action, public, order):
+ self.action = action
+ self.public = public
+ self.order = order
+
+ @property
+ def label(self):
+ return ACTION_LABELS[self.action]
+
+
+class EntityAction(AbstractAction):
+ def __init__(self, action, public, order, etype, eid, changes):
+ AbstractAction.__init__(self, action, public, order)
+ self.etype = etype
+ self.eid = eid
+ self.changes = changes
+
+ def __repr__(self):
+ return '<%s: %s %s (%s)>' % (
+ self.label, self.eid, self.changes,
+ self.public and 'dbapi' or 'hook')
+
+
+class RelationAction(AbstractAction):
+ def __init__(self, action, public, order, rtype, eidfrom, eidto):
+ AbstractAction.__init__(self, action, public, order)
+ self.rtype = rtype
+ self.eid_from = eidfrom
+ self.eid_to = eidto
+
+ def __repr__(self):
+ return '<%s: %s %s %s (%s)>' % (
+ self.label, self.eid_from, self.rtype, self.eid_to,
+ self.public and 'dbapi' or 'hook')
--- a/utils.py Wed Mar 24 08:40:00 2010 +0100
+++ b/utils.py Wed Mar 24 10:23:57 2010 +0100
@@ -12,35 +12,29 @@
import decimal
import datetime
import random
+from uuid import uuid4
+from warnings import warn
from logilab.mtconverter import xml_escape
from logilab.common.deprecation import deprecated
+_MARKER = object()
+
# initialize random seed from current time
random.seed()
-if sys.version_info[:2] < (2, 5):
+def make_uid(key=None):
+ """Return a unique identifier string.
- from time import time
- from md5 import md5
- from random import randint
+ if specified, `key` is used to prefix the generated uid so it can be used
+ for instance as a DOM id or as sql table names.
- def make_uid(key):
- """forge a unique identifier
- XXX not that unique on win32
- """
- key = str(key)
- msg = key + "%.10f" % time() + str(randint(0, 1000000))
- return key + md5(msg).hexdigest()
-
-else:
-
- from uuid import uuid4
-
- def make_uid(key):
- # remove dash, generated uid are used as identifier sometimes (sql table
- # names at least)
- return str(key) + str(uuid4()).replace('-', '')
+ See uuid.uuid4 documentation for the shape of the generated identifier, but
+    this is basically a 32 characters hexadecimal string.
+ """
+ if key is None:
+ return uuid4().hex
+ return str(key) + uuid4().hex
def dump_class(cls, clsname):
@@ -53,14 +47,9 @@
# type doesn't accept unicode name
# return type.__new__(type, str(clsname), (cls,), {})
# __autogenerated__ attribute is just a marker
- return type(str(clsname), (cls,), {'__autogenerated__': True})
-
-
-def merge_dicts(dict1, dict2):
- """update a copy of `dict1` with `dict2`"""
- dict1 = dict(dict1)
- dict1.update(dict2)
- return dict1
+ return type(str(clsname), (cls,), {'__autogenerated__': True,
+ '__doc__': cls.__doc__,
+ '__module__': cls.__module__})
# use networkX instead ?
@@ -167,15 +156,12 @@
def add_post_inline_script(self, content):
self.post_inlined_scripts.append(content)
- def add_onload(self, jscode, jsoncall=False):
- if jsoncall:
- self.add_post_inline_script(u"""jQuery(CubicWeb).one('ajax-loaded', function(event) {
-%s
-});""" % jscode)
- else:
- self.add_post_inline_script(u"""jQuery(document).ready(function () {
- %s
- });""" % jscode)
+ def add_onload(self, jscode, jsoncall=_MARKER):
+ if jsoncall is not _MARKER:
+ warn('[3.7] specifying jsoncall is not needed anymore',
+ DeprecationWarning, stacklevel=2)
+ self.add_post_inline_script(u"""jQuery(CubicWeb).one('server-response', function(event) {
+%s});""" % jscode)
def add_js(self, jsfile):
@@ -342,6 +328,14 @@
# just return None in those cases.
return None
+
+@deprecated('[3.7] merge_dicts is deprecated')
+def merge_dicts(dict1, dict2):
+ """update a copy of `dict1` with `dict2`"""
+ dict1 = dict(dict1)
+ dict1.update(dict2)
+ return dict1
+
from logilab.common import date
_THIS_MOD_NS = globals()
for funcname in ('date_range', 'todate', 'todatetime', 'datetime2ticks',
--- a/web/application.py Wed Mar 24 08:40:00 2010 +0100
+++ b/web/application.py Wed Mar 24 10:23:57 2010 +0100
@@ -342,7 +342,11 @@
# redirect is raised by edit controller when everything went fine,
# so try to commit
try:
- req.cnx.commit()
+ txuuid = req.cnx.commit()
+ if txuuid is not None:
+ msg = u'<span class="undo">[<a href="%s">%s</a>]</span>' %(
+ req.build_url('undo', txuuid=txuuid), req._('undo'))
+ req.append_to_redirect_message(msg)
except ValidationError, ex:
self.validation_error_handler(req, ex)
except Unauthorized, ex:
@@ -393,7 +397,7 @@
self.exception(repr(ex))
req.set_header('Cache-Control', 'no-cache')
req.remove_header('Etag')
- req.message = None
+ req.reset_message()
req.reset_headers()
if req.json_request:
raise RemoteCallFailed(unicode(ex))
--- a/web/component.py Wed Mar 24 08:40:00 2010 +0100
+++ b/web/component.py Wed Mar 24 10:23:57 2010 +0100
@@ -14,7 +14,6 @@
from logilab.mtconverter import xml_escape
from cubicweb import role
-from cubicweb.utils import merge_dicts
from cubicweb.view import Component
from cubicweb.selectors import (
paginated_rset, one_line_rset, primary_view, match_context_prop,
@@ -116,8 +115,9 @@
del params[self.stop_param]
def page_url(self, path, params, start, stop):
- params = merge_dicts(params, {self.start_param : start,
- self.stop_param : stop,})
+ params = dict(params)
+ params.update({self.start_param : start,
+ self.stop_param : stop,})
if path == 'json':
rql = params.pop('rql', self.cw_rset.printable_rql())
# latest 'true' used for 'swap' mode
--- a/web/controller.py Wed Mar 24 08:40:00 2010 +0100
+++ b/web/controller.py Wed Mar 24 10:23:57 2010 +0100
@@ -8,6 +8,8 @@
"""
__docformat__ = "restructuredtext en"
+from logilab.mtconverter import xml_escape
+
from cubicweb.selectors import yes
from cubicweb.appobject import AppObject
from cubicweb.web import LOGGER, Redirect, RequestError
@@ -79,19 +81,6 @@
self.cw_rset = pp.process_query(rql)
return self.cw_rset
- def check_expected_params(self, params):
- """check that the given list of parameters are specified in the form
- dictionary
- """
- missing = []
- for param in params:
- if not self._cw.form.get(param):
- missing.append(param)
- if missing:
- raise RequestError('missing required parameter(s): %s'
- % ','.join(missing))
-
-
def notify_edited(self, entity):
"""called by edit_entity() to notify which entity is edited"""
# NOTE: we can't use entity.rest_path() at this point because
@@ -100,31 +89,10 @@
if not self._edited_entity:
self._edited_entity = entity
- # XXX move to EditController (only customer)
- def delete_entities(self, eidtypes):
- """delete entities from the repository"""
- redirect_info = set()
- eidtypes = tuple(eidtypes)
- for eid, etype in eidtypes:
- entity = self._cw.entity_from_eid(eid, etype)
- path, params = entity.after_deletion_path()
- redirect_info.add( (path, tuple(params.iteritems())) )
- entity.delete()
- if len(redirect_info) > 1:
- # In the face of ambiguity, refuse the temptation to guess.
- self._after_deletion_path = 'view', ()
- else:
- self._after_deletion_path = iter(redirect_info).next()
- if len(eidtypes) > 1:
- self._cw.set_message(self._cw._('entities deleted'))
- else:
- self._cw.set_message(self._cw._('entity deleted'))
-
def validate_cache(self, view):
view.set_http_cache_headers()
self._cw.validate_cache()
- # XXX is that used AT ALL ?
def reset(self):
"""reset form parameters and redirect to a view determinated by given
parameters
@@ -132,7 +100,7 @@
newparams = {}
# sets message if needed
if self._cw.message:
- newparams['__message'] = self._cw.message
+ newparams['_cwmsgid'] = self._cw.set_redirect_message(self._cw.message)
if self._cw.form.has_key('__action_apply'):
self._return_to_edition_view(newparams)
if self._cw.form.has_key('__action_cancel'):
@@ -140,8 +108,6 @@
else:
self._return_to_original_view(newparams)
-
- # XXX is that used AT ALL ?
def _return_to_original_view(self, newparams):
"""validate-button case"""
# transforms __redirect[*] parameters into regular form parameters
@@ -156,10 +122,13 @@
elif '__redirectpath' in self._cw.form:
# if redirect path was explicitly specified in the form, use it
path = self._cw.form['__redirectpath']
- if self._edited_entity and path != self._edited_entity.rest_path():
- # XXX may be here on modification? if yes the message should be
- # modified where __createdpath is detected (cw.web.request)
- newparams['__createdpath'] = self._edited_entity.rest_path()
+ if (self._edited_entity and path != self._edited_entity.rest_path()
+ and '_cwmsgid' in newparams):
+ # XXX may be here on modification?
+ msg = u'(<a href="%s">%s</a>)' % (
+ xml_escape(self._edited_entity.absolute_url()),
+ self._cw._('click here to see created entity'))
+ self._cw.append_to_redirect_message(msg)
elif self._after_deletion_path:
# else it should have been set during form processing
path, params = self._after_deletion_path
@@ -174,7 +143,6 @@
url = append_url_params(url, self._cw.form.get('__redirectparams'))
raise Redirect(url)
- # XXX is that used AT ALL ?
def _return_to_edition_view(self, newparams):
"""apply-button case"""
form = self._cw.form
@@ -186,7 +154,7 @@
path = 'view'
newparams['rql'] = form['rql']
else:
- self.warning("the edited data seems inconsistent")
+ self.warning('the edited data seems inconsistent')
path = 'view'
# pick up the correction edition view
if form.get('__form_id'):
@@ -198,7 +166,6 @@
raise Redirect(self._cw.build_url(path, **newparams))
- # XXX is that used AT ALL ?
def _return_to_lastpage(self, newparams):
"""cancel-button case: in this case we are always expecting to go back
where we came from, and this is not easy. Currently we suppose that
--- a/web/data/cubicweb.ajax.js Wed Mar 24 08:40:00 2010 +0100
+++ b/web/data/cubicweb.ajax.js Wed Mar 24 10:23:57 2010 +0100
@@ -92,12 +92,9 @@
setFormsTarget(node);
}
loadDynamicFragments(node);
- // XXX simulates document.ready, but the former
- // only runs once, this one potentially many times
- // we probably need to unbind the fired events
- // When this is done, jquery.treeview.js (for instance)
- // can be unpatched.
- jQuery(CubicWeb).trigger('ajax-loaded');
+ // XXX [3.7] jQuery.one is now used instead jQuery.bind,
+ // jquery.treeview.js can be unpatched accordingly.
+ jQuery(CubicWeb).trigger('server-response', [true, node]);
}
/* cubicweb loadxhtml plugin to make jquery handle xhtml response
--- a/web/data/cubicweb.python.js Wed Mar 24 08:40:00 2010 +0100
+++ b/web/data/cubicweb.python.js Wed Mar 24 10:23:57 2010 +0100
@@ -394,4 +394,13 @@
}
};
+jQuery(document).ready(function() {
+ jQuery(CubicWeb).trigger('server-response', [false, document]);
+});
+
+jQuery(CubicWeb).bind('ajax-loaded', function() {
+ log('[3.7] "ajax-loaded" event is deprecated, use "server-response" instead');
+ jQuery(CubicWeb).trigger('server-response', [false, document]);
+});
+
CubicWeb.provide('python.js');
--- a/web/request.py Wed Mar 24 08:40:00 2010 +0100
+++ b/web/request.py Wed Mar 24 10:23:57 2010 +0100
@@ -68,7 +68,6 @@
def __init__(self, vreg, https, form=None):
super(CubicWebRequestBase, self).__init__(vreg)
- self.message = None
self.authmode = vreg.config['auth-mode']
self.https = https
# raw html headers that can be added from any view
@@ -126,35 +125,24 @@
"""
super(CubicWebRequestBase, self).set_connection(cnx, user)
# set request language
- try:
- vreg = self.vreg
- if self.user:
- try:
- # 1. user specified language
- lang = vreg.typed_value('ui.language',
- self.user.properties['ui.language'])
+ vreg = self.vreg
+ if self.user:
+ try:
+ # 1. user specified language
+ lang = vreg.typed_value('ui.language',
+ self.user.properties['ui.language'])
+ self.set_language(lang)
+ return
+ except KeyError:
+ pass
+ if vreg.config['language-negociation']:
+ # 2. http negociated language
+ for lang in self.header_accept_language():
+ if lang in self.translations:
self.set_language(lang)
return
- except KeyError:
- pass
- if vreg.config['language-negociation']:
- # 2. http negociated language
- for lang in self.header_accept_language():
- if lang in self.translations:
- self.set_language(lang)
- return
- # 3. default language
- self.set_default_language(vreg)
- finally:
- # XXX code smell
- # have to be done here because language is not yet set in setup_params
- #
- # special key for created entity, added in controller's reset method
- # if no message set, we don't want this neither
- if '__createdpath' in self.form and self.message:
- self.message += ' (<a href="%s">%s</a>)' % (
- self.build_url(self.form.pop('__createdpath')),
- self._('click here to see created entity'))
+ # 3. default language
+ self.set_default_language(vreg)
def set_language(self, lang):
gettext, self.pgettext = self.translations[lang]
@@ -179,26 +167,27 @@
subclasses should overrides to
"""
+ self.form = {}
if params is None:
- params = {}
- self.form = params
+ return
encoding = self.encoding
- for k, v in params.items():
- if isinstance(v, (tuple, list)):
- v = [unicode(x, encoding) for x in v]
- if len(v) == 1:
- v = v[0]
- if k in self.no_script_form_params:
- v = self.no_script_form_param(k, value=v)
- if isinstance(v, str):
- v = unicode(v, encoding)
- if k == '__message':
- self.set_message(v)
- del self.form[k]
+ for param, val in params.iteritems():
+ if isinstance(val, (tuple, list)):
+ val = [unicode(x, encoding) for x in val]
+ if len(val) == 1:
+ val = val[0]
+ elif isinstance(val, str):
+ val = unicode(val, encoding)
+ if param in self.no_script_form_params and val:
+ val = self.no_script_form_param(param, val)
+ if param == '_cwmsgid':
+ self.set_message_id(val)
+ elif param == '__message':
+ self.set_message(val)
else:
- self.form[k] = v
+ self.form[param] = val
- def no_script_form_param(self, param, default=None, value=None):
+ def no_script_form_param(self, param, value):
"""ensure there is no script in a user form param
by default return a cleaned string instead of raising a security
@@ -208,16 +197,12 @@
that are at some point inserted in a generated html page to protect
against script kiddies
"""
- if value is None:
- value = self.form.get(param, default)
- if not value is default and value:
- # safety belt for strange urls like http://...?vtitle=yo&vtitle=yo
- if isinstance(value, (list, tuple)):
- self.error('no_script_form_param got a list (%s). Who generated the URL ?',
- repr(value))
- value = value[0]
- return remove_html_tags(value)
- return value
+ # safety belt for strange urls like http://...?vtitle=yo&vtitle=yo
+ if isinstance(value, (list, tuple)):
+ self.error('no_script_form_param got a list (%s). Who generated the URL ?',
+ repr(value))
+ value = value[0]
+ return remove_html_tags(value)
def list_form_param(self, param, form=None, pop=False):
"""get param from form parameters and return its value as a list,
@@ -245,9 +230,48 @@
# web state helpers #######################################################
+ @property
+ def message(self):
+ try:
+ return self.get_session_data(self._msgid, default=u'', pop=True)
+ except AttributeError:
+ try:
+ return self._msg
+ except AttributeError:
+ return None
+
def set_message(self, msg):
assert isinstance(msg, unicode)
- self.message = msg
+ self._msg = msg
+
+ def set_message_id(self, msgid):
+ self._msgid = msgid
+
+ @cached
+ def redirect_message_id(self):
+ return make_uid()
+
+ def set_redirect_message(self, msg):
+ assert isinstance(msg, unicode)
+ msgid = self.redirect_message_id()
+ self.set_session_data(msgid, msg)
+ return msgid
+
+ def append_to_redirect_message(self, msg):
+ msgid = self.redirect_message_id()
+ currentmsg = self.get_session_data(msgid)
+ if currentmsg is not None:
+ currentmsg = '%s %s' % (currentmsg, msg)
+ else:
+ currentmsg = msg
+ self.set_session_data(msgid, currentmsg)
+ return msgid
+
+ def reset_message(self):
+ if hasattr(self, '_msg'):
+ del self._msg
+ if hasattr(self, '_msgid'):
+ del self._msgid
def update_search_state(self):
"""update the current search state"""
@@ -481,7 +505,7 @@
# high level methods for HTML headers management ##########################
def add_onload(self, jscode):
- self.html_headers.add_onload(jscode, self.json_request)
+ self.html_headers.add_onload(jscode)
def add_js(self, jsfiles, localfile=True):
"""specify a list of JS files to include in the HTML headers
--- a/web/test/unittest_views_basecontrollers.py Wed Mar 24 08:40:00 2010 +0100
+++ b/web/test/unittest_views_basecontrollers.py Wed Mar 24 10:23:57 2010 +0100
@@ -343,7 +343,7 @@
'__action_delete': ''}
path, params = self.expect_redirect_publish(req, 'edit')
self.assertEquals(path, 'blogentry')
- self.assertEquals(params, {u'__message': u'entity deleted'})
+ self.assertIn('_cwmsgid', params)
eid = req.create_entity('EmailAddress', address=u'hop@logilab.fr').eid
self.execute('SET X use_email E WHERE E eid %(e)s, X eid %(x)s',
{'x': self.session.user.eid, 'e': eid}, 'x')
@@ -353,7 +353,7 @@
'__action_delete': ''}
path, params = self.expect_redirect_publish(req, 'edit')
self.assertEquals(path, 'cwuser/admin')
- self.assertEquals(params, {u'__message': u'entity deleted'})
+ self.assertIn('_cwmsgid', params)
eid1 = req.create_entity('BlogEntry', title=u'hop', content=u'hop').eid
eid2 = req.create_entity('EmailAddress', address=u'hop@logilab.fr').eid
req = self.request()
@@ -363,7 +363,7 @@
'__action_delete': ''}
path, params = self.expect_redirect_publish(req, 'edit')
self.assertEquals(path, 'view')
- self.assertEquals(params, {u'__message': u'entities deleted'})
+ self.assertIn('_cwmsgid', params)
def test_nonregr_eetype_etype_editing(self):
"""non-regression test checking that a manager user can edit a CWEType entity
@@ -498,12 +498,20 @@
def test_usable_by_guets(self):
self.login('anon')
- self.vreg['controllers'].select('reportbug', self.request())
+ self.assertRaises(NoSelectableObject,
+ self.vreg['controllers'].select, 'reportbug', self.request())
+ self.vreg['controllers'].select('reportbug', self.request(description='hop'))
class SendMailControllerTC(CubicWebTC):
def test_not_usable_by_guets(self):
+ self.assertRaises(NoSelectableObject,
+ self.vreg['controllers'].select, 'sendmail', self.request())
+ self.vreg['controllers'].select('sendmail',
+ self.request(subject='toto',
+ recipient='toto@logilab.fr',
+ mailbody='hop'))
self.login('anon')
self.assertRaises(NoSelectableObject,
self.vreg['controllers'].select, 'sendmail', self.request())
--- a/web/views/basecontrollers.py Wed Mar 24 08:40:00 2010 +0100
+++ b/web/views/basecontrollers.py Wed Mar 24 10:23:57 2010 +0100
@@ -20,7 +20,7 @@
from cubicweb import (NoSelectableObject, ValidationError, ObjectNotFound,
typed_eid)
from cubicweb.utils import CubicWebJsonEncoder
-from cubicweb.selectors import yes, match_user_groups
+from cubicweb.selectors import authenticated_user, match_form_params
from cubicweb.mail import format_mail
from cubicweb.web import ExplicitLogin, Redirect, RemoteCallFailed, json_dumps
from cubicweb.web.controller import Controller
@@ -567,7 +567,7 @@
class SendMailController(Controller):
__regid__ = 'sendmail'
- __select__ = match_user_groups('managers', 'users')
+ __select__ = authenticated_user() & match_form_params('recipient', 'mailbody', 'subject')
def recipients(self):
"""returns an iterator on email's recipients as entities"""
@@ -615,7 +615,7 @@
class MailBugReportController(SendMailController):
__regid__ = 'reportbug'
- __select__ = yes()
+ __select__ = match_form_params('description')
def publish(self, rset=None):
body = self._cw.form['description']
@@ -623,3 +623,27 @@
url = self._cw.build_url(__message=self._cw._('bug report sent'))
raise Redirect(url)
+
+class UndoController(SendMailController):
+ __regid__ = 'undo'
+ __select__ = authenticated_user() & match_form_params('txuuid')
+
+ def publish(self, rset=None):
+ txuuid = self._cw.form['txuuid']
+ errors = self._cw.cnx.undo_transaction(txuuid)
+ if errors:
+ self.w(self._cw._('some errors occured:'))
+ self.wview('pyvalist', pyvalue=errors)
+ else:
+ self.redirect()
+
+ def redirect(self):
+ req = self._cw
+ breadcrumbs = req.get_session_data('breadcrumbs', None)
+ if breadcrumbs is not None and len(breadcrumbs) > 1:
+ url = req.rebuild_url(breadcrumbs[-2],
+ __message=req._('transaction undoed'))
+ else:
+ url = req.build_url(__message=req._('transaction undoed'))
+ raise Redirect(url)
+
--- a/web/views/editcontroller.py Wed Mar 24 08:40:00 2010 +0100
+++ b/web/views/editcontroller.py Wed Mar 24 10:23:57 2010 +0100
@@ -252,6 +252,25 @@
for reid in seteids:
self.relations_rql.append((rql, {'x': eid, 'y': reid}, ('x', 'y')))
+ def delete_entities(self, eidtypes):
+ """delete entities from the repository"""
+ redirect_info = set()
+ eidtypes = tuple(eidtypes)
+ for eid, etype in eidtypes:
+ entity = self._cw.entity_from_eid(eid, etype)
+ path, params = entity.after_deletion_path()
+ redirect_info.add( (path, tuple(params.iteritems())) )
+ entity.delete()
+ if len(redirect_info) > 1:
+ # In the face of ambiguity, refuse the temptation to guess.
+ self._after_deletion_path = 'view', ()
+ else:
+ self._after_deletion_path = iter(redirect_info).next()
+ if len(eidtypes) > 1:
+ self._cw.set_message(self._cw._('entities deleted'))
+ else:
+ self._cw.set_message(self._cw._('entity deleted'))
+
def _action_apply(self):
self._default_publish()
self.reset()
@@ -260,7 +279,7 @@
errorurl = self._cw.form.get('__errorurl')
if errorurl:
self._cw.cancel_edition(errorurl)
- self._cw.message = self._cw._('edit canceled')
+ self._cw.set_message(self._cw._('edit canceled'))
return self.reset()
def _action_delete(self):
--- a/web/views/treeview.py Wed Mar 24 08:40:00 2010 +0100
+++ b/web/views/treeview.py Wed Mar 24 10:23:57 2010 +0100
@@ -46,8 +46,7 @@
self._cw.add_css('jquery.treeview.css')
self._cw.add_js(('cubicweb.ajax.js', 'cubicweb.widgets.js', 'jquery.treeview.js'))
self._cw.html_headers.add_onload(u"""
-jQuery("#tree-%s").treeview({toggle: toggleTree, prerendered: true});""" % treeid,
- jsoncall=toplevel_thru_ajax)
+jQuery("#tree-%s").treeview({toggle: toggleTree, prerendered: true});""" % treeid)
def call(self, subvid=None, treeid=None,
initial_load=True, initial_thru_ajax=False, **morekwargs):