--- a/__pkginfo__.py Fri Mar 12 16:21:13 2010 +0100
+++ b/__pkginfo__.py Fri Mar 12 16:23:21 2010 +0100
@@ -30,7 +30,7 @@
web = 'http://www.cubicweb.org'
ftp = 'ftp://ftp.logilab.org/pub/cubicweb'
-pyversions = ['2.4', '2.5']
+pyversions = ['2.5', '2.6']
classifiers = [
'Environment :: Web Environment',
--- a/cwconfig.py Fri Mar 12 16:21:13 2010 +0100
+++ b/cwconfig.py Fri Mar 12 16:23:21 2010 +0100
@@ -1002,7 +1002,7 @@
_EXT_REGISTERED = False
def register_stored_procedures():
- from logilab.common.adbh import FunctionDescr
+ from logilab.database import FunctionDescr
from rql.utils import register_function, iter_funcnode_variables
global _EXT_REGISTERED
@@ -1014,8 +1014,7 @@
supported_backends = ('postgres', 'sqlite',)
rtype = 'String'
- @classmethod
- def st_description(cls, funcnode, mainindex, tr):
+ def st_description(self, funcnode, mainindex, tr):
return ', '.join(sorted(term.get_description(mainindex, tr)
for term in iter_funcnode_variables(funcnode)))
@@ -1027,6 +1026,7 @@
register_function(CONCAT_STRINGS) # XXX bw compat
+
class GROUP_CONCAT(CONCAT_STRINGS):
supported_backends = ('mysql', 'postgres', 'sqlite',)
@@ -1037,8 +1037,7 @@
supported_backends = ('postgres', 'sqlite',)
rtype = 'String'
- @classmethod
- def st_description(cls, funcnode, mainindex, tr):
+ def st_description(self, funcnode, mainindex, tr):
return funcnode.children[0].get_description(mainindex, tr)
register_function(LIMIT_SIZE)
@@ -1050,7 +1049,6 @@
register_function(TEXT_LIMIT_SIZE)
-
class FSPATH(FunctionDescr):
supported_backends = ('postgres', 'sqlite',)
rtype = 'Bytes'
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/dataimport.py Fri Mar 12 16:23:21 2010 +0100
@@ -0,0 +1,752 @@
+# -*- coding: utf-8 -*-
+"""This module provides tools to import tabular data.
+
+:organization: Logilab
+:copyright: 2001-2010 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
+:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
+:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
+
+
+Example of use (run this with `cubicweb-ctl shell instance import-script.py`):
+
+.. sourcecode:: python
+
+ from cubicweb.devtools.dataimport import *
+ # define data generators
+ GENERATORS = []
+
+ USERS = [('Prenom', 'firstname', ()),
+ ('Nom', 'surname', ()),
+ ('Identifiant', 'login', ()),
+ ]
+
+ def gen_users(ctl):
+ for row in ctl.get_data('utilisateurs'):
+ entity = mk_entity(row, USERS)
+ entity['upassword'] = u'motdepasse'
+ ctl.check('login', entity['login'], None)
+ ctl.store.add('CWUser', entity)
+ email = {'address': row['email']}
+ ctl.store.add('EmailAddress', email)
+ ctl.store.relate(entity['eid'], 'use_email', email['eid'])
+ ctl.store.rql('SET U in_group G WHERE G name "users", U eid %(x)s', {'x':entity['eid']})
+
+ CHK = [('login', check_doubles, 'Utilisateurs Login',
+ 'Deux utilisateurs ne devraient pas avoir le même login.'),
+ ]
+
+ GENERATORS.append( (gen_users, CHK) )
+
+ # create controller
+ ctl = CWImportController(RQLObjectStore())
+ ctl.askerror = 1
+ ctl.generators = GENERATORS
+ ctl.store._checkpoint = checkpoint
+ ctl.store._rql = rql
+ ctl.data['utilisateurs'] = lazytable(utf8csvreader(open('users.csv')))
+ # run
+ ctl.run()
+ sys.exit(0)
+
+
+.. BUG fichier à une colonne pose un problème de parsing
+.. TODO rollback()
+"""
+__docformat__ = "restructuredtext en"
+
+import sys
+import csv
+import traceback
+import os.path as osp
+from StringIO import StringIO
+from copy import copy
+
+from logilab.common import shellutils
+from logilab.common.date import strptime
+from logilab.common.decorators import cached
+from logilab.common.deprecation import deprecated
+
+
+def ucsvreader_pb(filepath, encoding='utf-8', separator=',', quote='"',
+ skipfirst=False, withpb=True):
+ """same as ucsvreader but a progress bar is displayed as we iter on rows"""
+ if not osp.exists(filepath):
+ raise Exception("file doesn't exists: %s" % filepath)
+ rowcount = int(shellutils.Execute('wc -l "%s"' % filepath).out.strip().split()[0])
+ if skipfirst:
+ rowcount -= 1
+ if withpb:
+ pb = shellutils.ProgressBar(rowcount, 50)
+ for urow in ucsvreader(file(filepath), encoding, separator, quote, skipfirst):
+ yield urow
+ if withpb:
+ pb.update()
+ print ' %s rows imported' % rowcount
+
+def ucsvreader(stream, encoding='utf-8', separator=',', quote='"',
+ skipfirst=False):
+ """A csv reader that accepts files with any encoding and outputs unicode
+ strings
+ """
+ it = iter(csv.reader(stream, delimiter=separator, quotechar=quote))
+ if skipfirst:
+ it.next()
+ for row in it:
+ yield [item.decode(encoding) for item in row]
+
+def commit_every(nbit, store, it):
+ for i, x in enumerate(it):
+ yield x
+ if nbit is not None and not (i + 1) % nbit:
+ store.checkpoint()
+ if nbit is not None:
+ store.checkpoint()
+
+def lazytable(reader):
+ """The first row is taken to be the header of the table and
+ used to output a dict for each row of data.
+
+ >>> data = lazytable(utf8csvreader(open(filename)))
+ """
+ header = reader.next()
+ for row in reader:
+ yield dict(zip(header, row))
+
+def mk_entity(row, map):
+ """Return a dict made from sanitized mapped values.
+
+ ValidationError can be raised on unexpected values found in checkers
+
+ >>> row = {'myname': u'dupont'}
+ >>> map = [('myname', u'name', (capitalize_if_unicase,))]
+ >>> mk_entity(row, map)
+ {'name': u'Dupont'}
+ >>> row = {'myname': u'dupont', 'optname': u''}
+ >>> map = [('myname', u'name', (capitalize_if_unicase,)),
+ ... ('optname', u'MARKER', (optional,))]
+ >>> mk_entity(row, map)
+ {'name': u'Dupont'}
+ """
+ res = {}
+ assert isinstance(row, dict)
+ assert isinstance(map, list)
+ for src, dest, funcs in map:
+ assert not (required in funcs and optional in funcs), \
+ "optional and required checks are exclusive"
+ res[dest] = row[src]
+ try:
+ for func in funcs:
+ res[dest] = func(res[dest])
+ if res[dest] is None:
+ break
+ except ValueError, err:
+ raise ValueError('error with %r field: %s' % (src, err))
+ return res
+
+
+# user interactions ############################################################
+
+def tell(msg):
+ print msg
+
+def confirm(question):
+ """A confirm function that asks for yes/no/abort and exits on abort."""
+ answer = shellutils.ASK.ask(question, ('Y', 'n', 'abort'), 'Y')
+ if answer == 'abort':
+ sys.exit(1)
+ return answer == 'Y'
+
+
+class catch_error(object):
+ """Helper for @contextmanager decorator."""
+
+ def __init__(self, ctl, key='unexpected error', msg=None):
+ self.ctl = ctl
+ self.key = key
+ self.msg = msg
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ if type is not None:
+ if issubclass(type, (KeyboardInterrupt, SystemExit)):
+ return # re-raise
+ if self.ctl.catcherrors:
+ self.ctl.record_error(self.key, None, type, value, traceback)
+ return True # silent
+
+
+# base sanitizing/coercing functions ###########################################
+
+def optional(value):
+ """validation error will not been raised if you add this checker in chain"""
+ if value:
+ return value
+ return None
+
+def required(value):
+ """raise ValueError is value is empty
+
+ This check should be often found in last position in the chain.
+ """
+ if value:
+ return value
+ raise ValueError("required")
+
+def todatetime(format='%d/%m/%Y'):
+ """return a transformation function to turn string input value into a
+ `datetime.datetime` instance, using given format.
+
+ Follow it by `todate` or `totime` functions from `logilab.common.date` if
+ you want a `date`/`time` instance instead of `datetime`.
+ """
+ def coerce(value):
+ return strptime(value, format)
+ return coerce
+
+def call_transform_method(methodname, *args, **kwargs):
+ """return value returned by calling the given method on input"""
+ def coerce(value):
+ return getattr(value, methodname)(*args, **kwargs)
+ return coerce
+
+def call_check_method(methodname, *args, **kwargs):
+ """check value returned by calling the given method on input is true,
+ else raise ValueError
+ """
+ def check(value):
+ if getattr(value, methodname)(*args, **kwargs):
+ return value
+ raise ValueError('%s not verified on %r' % (methodname, value))
+ return check
+
+# base integrity checking functions ############################################
+
+def check_doubles(buckets):
+ """Extract the keys that have more than one item in their bucket."""
+ return [(k, len(v)) for k, v in buckets.items() if len(v) > 1]
+
+def check_doubles_not_none(buckets):
+ """Extract the keys that have more than one item in their bucket."""
+ return [(k, len(v)) for k, v in buckets.items()
+ if k is not None and len(v) > 1]
+
+
+# object stores #################################################################
+
+class ObjectStore(object):
+ """Store objects in memory for *faster* validation (development mode)
+
+ But it will not enforce the constraints of the schema and hence will miss some problems
+
+ >>> store = ObjectStore()
+ >>> user = {'login': 'johndoe'}
+ >>> store.add('CWUser', user)
+ >>> group = {'name': 'unknown'}
+ >>> store.add('CWUser', group)
+ >>> store.relate(user['eid'], 'in_group', group['eid'])
+ """
+ def __init__(self):
+ self.items = []
+ self.eids = {}
+ self.types = {}
+ self.relations = set()
+ self.indexes = {}
+ self._rql = None
+ self._checkpoint = None
+
+ def _put(self, type, item):
+ self.items.append(item)
+ return len(self.items) - 1
+
+ def add(self, type, item):
+ assert isinstance(item, dict), 'item is not a dict but a %s' % item.__class__
+ eid = item['eid'] = self._put(type, item)
+ self.eids[eid] = item
+ self.types.setdefault(type, []).append(eid)
+
+ def relate(self, eid_from, rtype, eid_to, inlined=False):
+ """Add new relation (reverse type support is available)
+
+ >>> 1,2 = eid_from, eid_to
+ >>> self.relate(eid_from, 'in_group', eid_to)
+ 1, 'in_group', 2
+ >>> self.relate(eid_from, 'reverse_in_group', eid_to)
+ 2, 'in_group', 1
+ """
+ if rtype.startswith('reverse_'):
+ eid_from, eid_to = eid_to, eid_from
+ rtype = rtype[8:]
+ relation = eid_from, rtype, eid_to
+ self.relations.add(relation)
+ return relation
+
+ def build_index(self, name, type, func=None):
+ index = {}
+ if func is None or not callable(func):
+ func = lambda x: x['eid']
+ for eid in self.types[type]:
+ index.setdefault(func(self.eids[eid]), []).append(eid)
+ assert index, "new index '%s' cannot be empty" % name
+ self.indexes[name] = index
+
+ def build_rqlindex(self, name, type, key, rql, rql_params=False, func=None):
+ """build an index by rql query
+
+ rql should return eid in first column
+ ctl.store.build_index('index_name', 'users', 'login', 'Any U WHERE U is CWUser')
+ """
+ rset = self.rql(rql, rql_params or {})
+ for entity in rset.entities():
+ getattr(entity, key) # autopopulate entity with key attribute
+ self.eids[entity.eid] = dict(entity)
+ if entity.eid not in self.types.setdefault(type, []):
+ self.types[type].append(entity.eid)
+ assert self.types[type], "new index type '%s' cannot be empty (0 record found)" % type
+
+ # Build index with specified key
+ func = lambda x: x[key]
+ self.build_index(name, type, func)
+
+ def fetch(self, name, key, unique=False, decorator=None):
+ """
+ decorator is a callable method or an iterator of callable methods (usually a lambda function)
+ decorator=lambda x: x[:1] (first value is returned)
+
+ We can use validation check function available in _entity
+ """
+ eids = self.indexes[name].get(key, [])
+ if decorator is not None:
+ if not hasattr(decorator, '__iter__'):
+ decorator = (decorator,)
+ for f in decorator:
+ eids = f(eids)
+ if unique:
+ assert len(eids) == 1, u'expected a single one value for key "%s" in index "%s". Got %i' % (key, name, len(eids))
+ eids = eids[0] # FIXME maybe it's better to keep an iterator here ?
+ return eids
+
+ def find(self, type, key, value):
+ for idx in self.types[type]:
+ item = self.items[idx]
+ if item[key] == value:
+ yield item
+
+ def rql(self, *args):
+ if self._rql is not None:
+ return self._rql(*args)
+
+ def checkpoint(self):
+ pass
+
+ @property
+ def nb_inserted_entities(self):
+ return len(self.eids)
+ @property
+ def nb_inserted_types(self):
+ return len(self.types)
+ @property
+ def nb_inserted_relations(self):
+ return len(self.relations)
+
+ @deprecated('[3.6] get_many() deprecated. Use fetch() instead')
+ def get_many(self, name, key):
+ return self.fetch(name, key, unique=False)
+
+ @deprecated('[3.6] get_one() deprecated. Use fetch(..., unique=True) instead')
+ def get_one(self, name, key):
+ return self.fetch(name, key, unique=True)
+
+
+class RQLObjectStore(ObjectStore):
+ """ObjectStore that works with an actual RQL repository (production mode)"""
+ _rql = None # bw compat
+
+ def __init__(self, session=None, checkpoint=None):
+ ObjectStore.__init__(self)
+ if session is not None:
+ if not hasattr(session, 'set_pool'):
+ # connection
+ cnx = session
+ session = session.request()
+ session.set_pool = lambda : None
+ checkpoint = checkpoint or cnx.commit
+ else:
+ session.set_pool()
+ self.session = session
+ self._checkpoint = checkpoint or session.commit
+ elif checkpoint is not None:
+ self._checkpoint = checkpoint
+ # XXX .session
+
+ def checkpoint(self):
+ self._checkpoint()
+ self.session.set_pool()
+
+ def rql(self, *args):
+ if self._rql is not None:
+ return self._rql(*args)
+ return self.session.execute(*args)
+
+ def create_entity(self, *args, **kwargs):
+ entity = self.session.create_entity(*args, **kwargs)
+ self.eids[entity.eid] = entity
+ self.types.setdefault(args[0], []).append(entity.eid)
+ return entity
+
+ def _put(self, type, item):
+ query = ('INSERT %s X: ' % type) + ', '.join('X %s %%(%s)s' % (k, k)
+ for k in item)
+ return self.rql(query, item)[0][0]
+
+ def relate(self, eid_from, rtype, eid_to, inlined=False):
+ # if reverse relation is found, eids are exchanged
+ eid_from, rtype, eid_to = super(RQLObjectStore, self).relate(
+ eid_from, rtype, eid_to)
+ self.rql('SET X %s Y WHERE X eid %%(x)s, Y eid %%(y)s' % rtype,
+ {'x': int(eid_from), 'y': int(eid_to)}, ('x', 'y'))
+
+
+# the import controller ########################################################
+
+class CWImportController(object):
+ """Controller of the data import process.
+
+ >>> ctl = CWImportController(store)
+ >>> ctl.generators = list_of_data_generators
+ >>> ctl.data = dict_of_data_tables
+ >>> ctl.run()
+ """
+
+ def __init__(self, store, askerror=0, catcherrors=None, tell=tell,
+ commitevery=50):
+ self.store = store
+ self.generators = None
+ self.data = {}
+ self.errors = None
+ self.askerror = askerror
+ if catcherrors is None:
+ catcherrors = askerror
+ self.catcherrors = catcherrors
+ self.commitevery = commitevery # set to None to do a single commit
+ self._tell = tell
+
+ def check(self, type, key, value):
+ self._checks.setdefault(type, {}).setdefault(key, []).append(value)
+
+ def check_map(self, entity, key, map, default):
+ try:
+ entity[key] = map[entity[key]]
+ except KeyError:
+ self.check(key, entity[key], None)
+ entity[key] = default
+
+ def record_error(self, key, msg=None, type=None, value=None, tb=None):
+ tmp = StringIO()
+ if type is None:
+ traceback.print_exc(file=tmp)
+ else:
+ traceback.print_exception(type, value, tb, file=tmp)
+ print tmp.getvalue()
+ # use a list to avoid counting a <nb lines> errors instead of one
+ errorlog = self.errors.setdefault(key, [])
+ if msg is None:
+ errorlog.append(tmp.getvalue().splitlines())
+ else:
+ errorlog.append( (msg, tmp.getvalue().splitlines()) )
+
+ def run(self):
+ self.errors = {}
+ for func, checks in self.generators:
+ self._checks = {}
+ func_name = func.__name__[4:] # XXX
+ self.tell("Import '%s'..." % func_name)
+ try:
+ func(self)
+ except:
+ if self.catcherrors:
+ self.record_error(func_name, 'While calling %s' % func.__name__)
+ else:
+ raise
+ for key, func, title, help in checks:
+ buckets = self._checks.get(key)
+ if buckets:
+ err = func(buckets)
+ if err:
+ self.errors[title] = (help, err)
+ self.store.checkpoint()
+ nberrors = sum(len(err[1]) for err in self.errors.values())
+ self.tell('\nImport completed: %i entities, %i types, %i relations and %i errors'
+ % (self.store.nb_inserted_entities,
+ self.store.nb_inserted_types,
+ self.store.nb_inserted_relations,
+ nberrors))
+ if self.errors:
+ if self.askerror == 2 or (self.askerror and confirm('Display errors ?')):
+ from pprint import pformat
+ for errkey, error in self.errors.items():
+ self.tell("\n%s (%s): %d\n" % (error[0], errkey, len(error[1])))
+ self.tell(pformat(sorted(error[1])))
+
+ def get_data(self, key):
+ return self.data.get(key)
+
+ def index(self, name, key, value, unique=False):
+ """create a new index
+
+ If unique is set to True, only the first occurrence will be kept, not the following ones
+ """
+ if unique:
+ try:
+ if value in self.store.indexes[name][key]:
+ return
+ except KeyError:
+ # we're sure that one is the first occurence; so continue...
+ pass
+ self.store.indexes.setdefault(name, {}).setdefault(key, []).append(value)
+
+ def tell(self, msg):
+ self._tell(msg)
+
+ def iter_and_commit(self, datakey):
+ """iter rows, triggering commit every self.commitevery iterations"""
+ return commit_every(self.commitevery, self.store, self.get_data(datakey))
+
+
+
+from datetime import datetime
+from cubicweb.schema import META_RTYPES, VIRTUAL_RTYPES
+
+
+class NoHookRQLObjectStore(RQLObjectStore):
+ """ObjectStore that works with an actual RQL repository (production mode)"""
+ _rql = None # bw compat
+
+ def __init__(self, session, metagen=None, baseurl=None):
+ super(NoHookRQLObjectStore, self).__init__(session)
+ self.source = session.repo.system_source
+ self.rschema = session.repo.schema.rschema
+ self.add_relation = self.source.add_relation
+ if metagen is None:
+ metagen = MetaGenerator(session, baseurl)
+ self.metagen = metagen
+ self._nb_inserted_entities = 0
+ self._nb_inserted_types = 0
+ self._nb_inserted_relations = 0
+ self.rql = session.unsafe_execute
+
+ def create_entity(self, etype, **kwargs):
+ for k, v in kwargs.iteritems():
+ kwargs[k] = getattr(v, 'eid', v)
+ entity, rels = self.metagen.base_etype_dicts(etype)
+ entity = copy(entity)
+ entity._related_cache = {}
+ self.metagen.init_entity(entity)
+ entity.update(kwargs)
+ session = self.session
+ self.source.add_entity(session, entity)
+ self.source.add_info(session, entity, self.source, complete=False)
+ for rtype, targeteids in rels.iteritems():
+ # targeteids may be a single eid or a list of eids
+ inlined = self.rschema(rtype).inlined
+ try:
+ for targeteid in targeteids:
+ self.add_relation(session, entity.eid, rtype, targeteid,
+ inlined)
+ except TypeError:
+ self.add_relation(session, entity.eid, rtype, targeteids,
+ inlined)
+ self._nb_inserted_entities += 1
+ return entity
+
+ def relate(self, eid_from, rtype, eid_to):
+ assert not rtype.startswith('reverse_')
+ self.add_relation(self.session, eid_from, rtype, eid_to,
+ self.rschema(rtype).inlined)
+ self._nb_inserted_relations += 1
+
+ @property
+ def nb_inserted_entities(self):
+ return self._nb_inserted_entities
+ @property
+ def nb_inserted_types(self):
+ return self._nb_inserted_types
+ @property
+ def nb_inserted_relations(self):
+ return self._nb_inserted_relations
+
+ def _put(self, type, item):
+ raise RuntimeError('use create entity')
+
+
+class MetaGenerator(object):
+ def __init__(self, session, baseurl=None):
+ self.session = session
+ self.source = session.repo.system_source
+ self.time = datetime.now()
+ if baseurl is None:
+ config = session.vreg.config
+ baseurl = config['base-url'] or config.default_base_url()
+ if not baseurl[-1] == '/':
+ baseurl += '/'
+ self.baseurl = baseurl
+ # attributes/relations shared by all entities of the same type
+ self.etype_attrs = []
+ self.etype_rels = []
+ # attributes/relations specific to each entity
+ self.entity_attrs = ['eid', 'cwuri']
+ #self.entity_rels = [] XXX not handled (YAGNI?)
+ schema = session.vreg.schema
+ rschema = schema.rschema
+ for rtype in META_RTYPES:
+ if rtype in ('eid', 'cwuri') or rtype in VIRTUAL_RTYPES:
+ continue
+ if rschema(rtype).final:
+ self.etype_attrs.append(rtype)
+ else:
+ self.etype_rels.append(rtype)
+ if not schema._eid_index:
+ # test schema loaded from the fs
+ self.gen_is = self.test_gen_is
+ self.gen_is_instance_of = self.test_gen_is_instanceof
+
+ @cached
+ def base_etype_dicts(self, etype):
+ entity = self.session.vreg['etypes'].etype_class(etype)(self.session)
+ # entity are "surface" copied, avoid shared dict between copies
+ del entity.cw_extra_kwargs
+ for attr in self.etype_attrs:
+ entity[attr] = self.generate(entity, attr)
+ rels = {}
+ for rel in self.etype_rels:
+ rels[rel] = self.generate(entity, rel)
+ return entity, rels
+
+ def init_entity(self, entity):
+ for attr in self.entity_attrs:
+ entity[attr] = self.generate(entity, attr)
+ entity.eid = entity['eid']
+
+ def generate(self, entity, rtype):
+ return getattr(self, 'gen_%s' % rtype)(entity)
+
+ def gen_eid(self, entity):
+ return self.source.create_eid(self.session)
+
+ def gen_cwuri(self, entity):
+ return u'%seid/%s' % (self.baseurl, entity['eid'])
+
+ def gen_creation_date(self, entity):
+ return self.time
+ def gen_modification_date(self, entity):
+ return self.time
+
+ def gen_is(self, entity):
+ return entity.e_schema.eid
+ def gen_is_instance_of(self, entity):
+ eids = []
+ for etype in entity.e_schema.ancestors() + [entity.e_schema]:
+ eids.append(etype.eid)
+ return eids
+
+ def gen_created_by(self, entity):
+ return self.session.user.eid
+ def gen_owned_by(self, entity):
+ return self.session.user.eid
+
+ # implementations of gen_is / gen_is_instance_of to use during test where
+ # schema has been loaded from the fs (hence entity type schema eids are not
+ # known)
+ def test_gen_is(self, entity):
+ from cubicweb.hooks.metadata import eschema_eid
+ return eschema_eid(self.session, entity.e_schema)
+ def test_gen_is_instanceof(self, entity):
+ from cubicweb.hooks.metadata import eschema_eid
+ eids = []
+ for eschema in entity.e_schema.ancestors() + [entity.e_schema]:
+ eids.append(eschema_eid(self.session, eschema))
+ return eids
+
+
+################################################################################
+
+utf8csvreader = deprecated('[3.6] use ucsvreader instead')(ucsvreader)
+
+@deprecated('[3.6] use required')
+def nonempty(value):
+ return required(value)
+
+@deprecated("[3.6] use call_check_method('isdigit')")
+def alldigits(txt):
+ if txt.isdigit():
+ return txt
+ else:
+ return u''
+
+@deprecated("[3.7] too specific, will move away, copy me")
+def capitalize_if_unicase(txt):
+ if txt.isupper() or txt.islower():
+ return txt.capitalize()
+ return txt
+
+@deprecated("[3.7] too specific, will move away, copy me")
+def yesno(value):
+ """simple heuristic that returns boolean value
+
+ >>> yesno("Yes")
+ True
+ >>> yesno("oui")
+ True
+ >>> yesno("1")
+ True
+ >>> yesno("11")
+ True
+ >>> yesno("")
+ False
+ >>> yesno("Non")
+ False
+ >>> yesno("blablabla")
+ False
+ """
+ if value:
+ return value.lower()[0] in 'yo1'
+ return False
+
+@deprecated("[3.7] use call_check_method('isalpha')")
+def isalpha(value):
+ if value.isalpha():
+ return value
+ raise ValueError("not all characters in the string alphabetic")
+
+@deprecated("[3.7] use call_transform_method('upper')")
+def uppercase(txt):
+ return txt.upper()
+
+@deprecated("[3.7] use call_transform_method('lower')")
+def lowercase(txt):
+ return txt.lower()
+
+@deprecated("[3.7] use call_transform_method('replace', ' ', '')")
+def no_space(txt):
+ return txt.replace(' ','')
+
+@deprecated("[3.7] use call_transform_method('replace', u'\xa0', '')")
+def no_uspace(txt):
+ return txt.replace(u'\xa0','')
+
+@deprecated("[3.7] use call_transform_method('replace', '-', '')")
+def no_dash(txt):
+ return txt.replace('-','')
+
+@deprecated("[3.7] use call_transform_method('strip')")
+def strip(txt):
+ return txt.strip()
+
+@deprecated("[3.7] use call_transform_method('replace', ',', '.'), float")
+def decimal(value):
+ return float(value.replace(',', '.'))
+
+@deprecated('[3.7] use int builtin')
+def integer(value):
+ return int(value)
--- a/dbapi.py Fri Mar 12 16:21:13 2010 +0100
+++ b/dbapi.py Fri Mar 12 16:23:21 2010 +0100
@@ -203,11 +203,6 @@
self.pgettext = lambda x, y: y
self.debug('request default language: %s', self.lang)
- def decorate_rset(self, rset):
- rset.vreg = self.vreg
- rset.req = self
- return rset
-
def describe(self, eid):
"""return a tuple (type, sourceuri, extid) for the entity with id <eid>"""
return self.cnx.describe(eid)
@@ -242,7 +237,7 @@
def get_session_data(self, key, default=None, pop=False):
"""return value associated to `key` in session data"""
if self.cnx is None:
- return None # before the connection has been established
+ return default # before the connection has been established
return self.cnx.get_session_data(key, default, pop)
def set_session_data(self, key, value):
@@ -398,14 +393,20 @@
def check(self):
"""raise `BadSessionId` if the connection is no more valid"""
+ if self._closed is not None:
+ raise ProgrammingError('Closed connection')
self._repo.check_session(self.sessionid)
def set_session_props(self, **props):
"""raise `BadSessionId` if the connection is no more valid"""
+ if self._closed is not None:
+ raise ProgrammingError('Closed connection')
self._repo.set_session_props(self.sessionid, props)
def get_shared_data(self, key, default=None, pop=False):
"""return value associated to `key` in shared data"""
+ if self._closed is not None:
+ raise ProgrammingError('Closed connection')
return self._repo.get_shared_data(self.sessionid, key, default, pop)
def set_shared_data(self, key, value, querydata=False):
@@ -416,6 +417,8 @@
transaction, and won't be available through the connexion, only on the
repository side.
"""
+ if self._closed is not None:
+ raise ProgrammingError('Closed connection')
return self._repo.set_shared_data(self.sessionid, key, value, querydata)
def get_schema(self):
@@ -501,6 +504,8 @@
def user(self, req=None, props=None):
"""return the User object associated to this connection"""
# cnx validity is checked by the call to .user_info
+ if self._closed is not None:
+ raise ProgrammingError('Closed connection')
eid, login, groups, properties = self._repo.user_info(self.sessionid,
props)
if req is None:
@@ -521,6 +526,8 @@
pass
def describe(self, eid):
+ if self._closed is not None:
+ raise ProgrammingError('Closed connection')
return self._repo.describe(self.sessionid, eid)
def close(self):
@@ -535,6 +542,7 @@
if self._closed:
raise ProgrammingError('Connection is already closed')
self._repo.close(self.sessionid)
+ del self._repo # necessary for proper garbage collection
self._closed = 1
def commit(self):
@@ -646,11 +654,11 @@
Return values are not defined by the DB-API, but this here it returns a
ResultSet object.
"""
- self._res = res = self._repo.execute(self._sessid, operation,
- parameters, eid_key, build_descr)
- self.req.decorate_rset(res)
+ self._res = rset = self._repo.execute(self._sessid, operation,
+ parameters, eid_key, build_descr)
+ rset.req = self.req
self._index = 0
- return res
+ return rset
def executemany(self, operation, seq_of_parameters):
--- a/debian/control Fri Mar 12 16:21:13 2010 +0100
+++ b/debian/control Fri Mar 12 16:23:21 2010 +0100
@@ -7,10 +7,10 @@
Adrien Di Mascio <Adrien.DiMascio@logilab.fr>,
Aurélien Campéas <aurelien.campeas@logilab.fr>,
Nicolas Chauvat <nicolas.chauvat@logilab.fr>
-Build-Depends: debhelper (>= 5), python-dev (>=2.4), python-central (>= 0.5)
+Build-Depends: debhelper (>= 5), python-dev (>=2.5), python-central (>= 0.5)
Standards-Version: 3.8.0
Homepage: http://www.cubicweb.org
-XS-Python-Version: >= 2.4, << 2.6
+XS-Python-Version: >= 2.5, << 2.6
Package: cubicweb
Architecture: all
@@ -34,7 +34,7 @@
Replaces: cubicweb-multisources
Provides: cubicweb-multisources
# postgresql/mysql -client packages for backup/restore of non local database
-Depends: ${python:Depends}, cubicweb-common (= ${source:Version}), cubicweb-ctl (= ${source:Version}), python-indexer (>= 0.6.1), (python-psycopg2, postgresql-client) | (python-mysqldb, mysql-client) | python-pysqlite2
+Depends: ${python:Depends}, cubicweb-common (= ${source:Version}), cubicweb-ctl (= ${source:Version}), python-logilab-database, (python-psycopg2, postgresql-client) | (python-mysqldb, mysql-client) | python-pysqlite2
Recommends: pyro, cubicweb-documentation (= ${source:Version})
Description: server part of the CubicWeb framework
CubicWeb is a semantic web application framework.
--- a/devtools/dataimport.py Fri Mar 12 16:21:13 2010 +0100
+++ b/devtools/dataimport.py Fri Mar 12 16:23:21 2010 +0100
@@ -1,752 +1,4 @@
-# -*- coding: utf-8 -*-
-"""This module provides tools to import tabular data.
-
-:organization: Logilab
-:copyright: 2001-2010 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
-:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
-:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
-
-
-Example of use (run this with `cubicweb-ctl shell instance import-script.py`):
-
-.. sourcecode:: python
-
- from cubicweb.devtools.dataimport import *
- # define data generators
- GENERATORS = []
-
- USERS = [('Prenom', 'firstname', ()),
- ('Nom', 'surname', ()),
- ('Identifiant', 'login', ()),
- ]
-
- def gen_users(ctl):
- for row in ctl.get_data('utilisateurs'):
- entity = mk_entity(row, USERS)
- entity['upassword'] = u'motdepasse'
- ctl.check('login', entity['login'], None)
- ctl.store.add('CWUser', entity)
- email = {'address': row['email']}
- ctl.store.add('EmailAddress', email)
- ctl.store.relate(entity['eid'], 'use_email', email['eid'])
- ctl.store.rql('SET U in_group G WHERE G name "users", U eid %(x)s', {'x':entity['eid']})
-
- CHK = [('login', check_doubles, 'Utilisateurs Login',
- 'Deux utilisateurs ne devraient pas avoir le même login.'),
- ]
-
- GENERATORS.append( (gen_users, CHK) )
-
- # create controller
- ctl = CWImportController(RQLObjectStore())
- ctl.askerror = 1
- ctl.generators = GENERATORS
- ctl.store._checkpoint = checkpoint
- ctl.store._rql = rql
- ctl.data['utilisateurs'] = lazytable(utf8csvreader(open('users.csv')))
- # run
- ctl.run()
- sys.exit(0)
-
-
-.. BUG fichier à une colonne pose un problème de parsing
-.. TODO rollback()
-"""
-__docformat__ = "restructuredtext en"
-
-import sys
-import csv
-import traceback
-import os.path as osp
-from StringIO import StringIO
-from copy import copy
-
-from logilab.common import shellutils
-from logilab.common.date import strptime
-from logilab.common.decorators import cached
-from logilab.common.deprecation import deprecated
-
-
-def ucsvreader_pb(filepath, encoding='utf-8', separator=',', quote='"',
- skipfirst=False, withpb=True):
- """same as ucsvreader but a progress bar is displayed as we iter on rows"""
- if not osp.exists(filepath):
- raise Exception("file doesn't exists: %s" % filepath)
- rowcount = int(shellutils.Execute('wc -l "%s"' % filepath).out.strip().split()[0])
- if skipfirst:
- rowcount -= 1
- if withpb:
- pb = shellutils.ProgressBar(rowcount, 50)
- for urow in ucsvreader(file(filepath), encoding, separator, quote, skipfirst):
- yield urow
- if withpb:
- pb.update()
- print ' %s rows imported' % rowcount
-
-def ucsvreader(stream, encoding='utf-8', separator=',', quote='"',
- skipfirst=False):
- """A csv reader that accepts files with any encoding and outputs unicode
- strings
- """
- it = iter(csv.reader(stream, delimiter=separator, quotechar=quote))
- if skipfirst:
- it.next()
- for row in it:
- yield [item.decode(encoding) for item in row]
-
-def commit_every(nbit, store, it):
- for i, x in enumerate(it):
- yield x
- if nbit is not None and i % nbit:
- store.checkpoint()
- if nbit is not None:
- store.checkpoint()
-
-def lazytable(reader):
- """The first row is taken to be the header of the table and
- used to output a dict for each row of data.
-
- >>> data = lazytable(utf8csvreader(open(filename)))
- """
- header = reader.next()
- for row in reader:
- yield dict(zip(header, row))
-
-def mk_entity(row, map):
- """Return a dict made from sanitized mapped values.
-
- ValidationError can be raised on unexpected values found in checkers
-
- >>> row = {'myname': u'dupont'}
- >>> map = [('myname', u'name', (capitalize_if_unicase,))]
- >>> mk_entity(row, map)
- {'name': u'Dupont'}
- >>> row = {'myname': u'dupont', 'optname': u''}
- >>> map = [('myname', u'name', (capitalize_if_unicase,)),
- ... ('optname', u'MARKER', (optional,))]
- >>> mk_entity(row, map)
- {'name': u'Dupont'}
- """
- res = {}
- assert isinstance(row, dict)
- assert isinstance(map, list)
- for src, dest, funcs in map:
- assert not (required in funcs and optional in funcs), \
- "optional and required checks are exclusive"
- res[dest] = row[src]
- try:
- for func in funcs:
- res[dest] = func(res[dest])
- if res[dest] is None:
- break
- except ValueError, err:
- raise ValueError('error with %r field: %s' % (src, err))
- return res
-
-
-# user interactions ############################################################
-
-def tell(msg):
- print msg
-
-def confirm(question):
- """A confirm function that asks for yes/no/abort and exits on abort."""
- answer = shellutils.ASK.ask(question, ('Y', 'n', 'abort'), 'Y')
- if answer == 'abort':
- sys.exit(1)
- return answer == 'Y'
-
-
-class catch_error(object):
- """Helper for @contextmanager decorator."""
-
- def __init__(self, ctl, key='unexpected error', msg=None):
- self.ctl = ctl
- self.key = key
- self.msg = msg
-
- def __enter__(self):
- return self
-
- def __exit__(self, type, value, traceback):
- if type is not None:
- if issubclass(type, (KeyboardInterrupt, SystemExit)):
- return # re-raise
- if self.ctl.catcherrors:
- self.ctl.record_error(self.key, None, type, value, traceback)
- return True # silent
-
-
-# base sanitizing/coercing functions ###########################################
-
-def optional(value):
- """validation error will not been raised if you add this checker in chain"""
- if value:
- return value
- return None
-
-def required(value):
- """raise ValueError is value is empty
-
- This check should be often found in last position in the chain.
- """
- if value:
- return value
- raise ValueError("required")
-
-def todatetime(format='%d/%m/%Y'):
- """return a transformation function to turn string input value into a
- `datetime.datetime` instance, using given format.
-
- Follow it by `todate` or `totime` functions from `logilab.common.date` if
- you want a `date`/`time` instance instead of `datetime`.
- """
- def coerce(value):
- return strptime(value, format)
- return coerce
-
-def call_transform_method(methodname, *args, **kwargs):
- """return value returned by calling the given method on input"""
- def coerce(value):
- return getattr(value, methodname)(*args, **kwargs)
- return coerce
-
-def call_check_method(methodname, *args, **kwargs):
- """check value returned by calling the given method on input is true,
- else raise ValueError
- """
- def check(value):
- if getattr(value, methodname)(*args, **kwargs):
- return value
- raise ValueError('%s not verified on %r' % (methodname, value))
- return check
-
-# base integrity checking functions ############################################
-
-def check_doubles(buckets):
- """Extract the keys that have more than one item in their bucket."""
- return [(k, len(v)) for k, v in buckets.items() if len(v) > 1]
-
-def check_doubles_not_none(buckets):
- """Extract the keys that have more than one item in their bucket."""
- return [(k, len(v)) for k, v in buckets.items()
- if k is not None and len(v) > 1]
-
-
-# object stores #################################################################
-
-class ObjectStore(object):
- """Store objects in memory for *faster* validation (development mode)
-
- But it will not enforce the constraints of the schema and hence will miss some problems
-
- >>> store = ObjectStore()
- >>> user = {'login': 'johndoe'}
- >>> store.add('CWUser', user)
- >>> group = {'name': 'unknown'}
- >>> store.add('CWUser', group)
- >>> store.relate(user['eid'], 'in_group', group['eid'])
- """
- def __init__(self):
- self.items = []
- self.eids = {}
- self.types = {}
- self.relations = set()
- self.indexes = {}
- self._rql = None
- self._checkpoint = None
-
- def _put(self, type, item):
- self.items.append(item)
- return len(self.items) - 1
-
- def add(self, type, item):
- assert isinstance(item, dict), 'item is not a dict but a %s' % type(item)
- eid = item['eid'] = self._put(type, item)
- self.eids[eid] = item
- self.types.setdefault(type, []).append(eid)
-
- def relate(self, eid_from, rtype, eid_to, inlined=False):
- """Add new relation (reverse type support is available)
-
- >>> 1,2 = eid_from, eid_to
- >>> self.relate(eid_from, 'in_group', eid_to)
- 1, 'in_group', 2
- >>> self.relate(eid_from, 'reverse_in_group', eid_to)
- 2, 'in_group', 1
- """
- if rtype.startswith('reverse_'):
- eid_from, eid_to = eid_to, eid_from
- rtype = rtype[8:]
- relation = eid_from, rtype, eid_to
- self.relations.add(relation)
- return relation
-
- def build_index(self, name, type, func=None):
- index = {}
- if func is None or not callable(func):
- func = lambda x: x['eid']
- for eid in self.types[type]:
- index.setdefault(func(self.eids[eid]), []).append(eid)
- assert index, "new index '%s' cannot be empty" % name
- self.indexes[name] = index
-
- def build_rqlindex(self, name, type, key, rql, rql_params=False, func=None):
- """build an index by rql query
-
- rql should return eid in first column
- ctl.store.build_index('index_name', 'users', 'login', 'Any U WHERE U is CWUser')
- """
- rset = self.rql(rql, rql_params or {})
- for entity in rset.entities():
- getattr(entity, key) # autopopulate entity with key attribute
- self.eids[entity.eid] = dict(entity)
- if entity.eid not in self.types.setdefault(type, []):
- self.types[type].append(entity.eid)
- assert self.types[type], "new index type '%s' cannot be empty (0 record found)" % type
-
- # Build index with specified key
- func = lambda x: x[key]
- self.build_index(name, type, func)
-
- def fetch(self, name, key, unique=False, decorator=None):
- """
- decorator is a callable method or an iterator of callable methods (usually a lambda function)
- decorator=lambda x: x[:1] (first value is returned)
-
- We can use validation check function available in _entity
- """
- eids = self.indexes[name].get(key, [])
- if decorator is not None:
- if not hasattr(decorator, '__iter__'):
- decorator = (decorator,)
- for f in decorator:
- eids = f(eids)
- if unique:
- assert len(eids) == 1, u'expected a single one value for key "%s" in index "%s". Got %i' % (key, name, len(eids))
- eids = eids[0] # FIXME maybe it's better to keep an iterator here ?
- return eids
-
- def find(self, type, key, value):
- for idx in self.types[type]:
- item = self.items[idx]
- if item[key] == value:
- yield item
-
- def rql(self, *args):
- if self._rql is not None:
- return self._rql(*args)
-
- def checkpoint(self):
- pass
-
- @property
- def nb_inserted_entities(self):
- return len(self.eids)
- @property
- def nb_inserted_types(self):
- return len(self.types)
- @property
- def nb_inserted_relations(self):
- return len(self.relations)
-
- @deprecated('[3.6] get_many() deprecated. Use fetch() instead')
- def get_many(self, name, key):
- return self.fetch(name, key, unique=False)
-
- @deprecated('[3.6] get_one() deprecated. Use fetch(..., unique=True) instead')
- def get_one(self, name, key):
- return self.fetch(name, key, unique=True)
-
-
-class RQLObjectStore(ObjectStore):
- """ObjectStore that works with an actual RQL repository (production mode)"""
- _rql = None # bw compat
-
- def __init__(self, session=None, checkpoint=None):
- ObjectStore.__init__(self)
- if session is not None:
- if not hasattr(session, 'set_pool'):
- # connection
- cnx = session
- session = session.request()
- session.set_pool = lambda : None
- checkpoint = checkpoint or cnx.commit
- else:
- session.set_pool()
- self.session = session
- self._checkpoint = checkpoint or session.commit
- elif checkpoint is not None:
- self._checkpoint = checkpoint
- # XXX .session
-
- def checkpoint(self):
- self._checkpoint()
- self.session.set_pool()
-
- def rql(self, *args):
- if self._rql is not None:
- return self._rql(*args)
- return self.session.execute(*args)
-
- def create_entity(self, *args, **kwargs):
- entity = self.session.create_entity(*args, **kwargs)
- self.eids[entity.eid] = entity
- self.types.setdefault(args[0], []).append(entity.eid)
- return entity
-
- def _put(self, type, item):
- query = ('INSERT %s X: ' % type) + ', '.join('X %s %%(%s)s' % (k, k)
- for k in item)
- return self.rql(query, item)[0][0]
-
- def relate(self, eid_from, rtype, eid_to, inlined=False):
- # if reverse relation is found, eids are exchanged
- eid_from, rtype, eid_to = super(RQLObjectStore, self).relate(
- eid_from, rtype, eid_to)
- self.rql('SET X %s Y WHERE X eid %%(x)s, Y eid %%(y)s' % rtype,
- {'x': int(eid_from), 'y': int(eid_to)}, ('x', 'y'))
-
-
-# the import controller ########################################################
-
-class CWImportController(object):
- """Controller of the data import process.
-
- >>> ctl = CWImportController(store)
- >>> ctl.generators = list_of_data_generators
- >>> ctl.data = dict_of_data_tables
- >>> ctl.run()
- """
-
- def __init__(self, store, askerror=0, catcherrors=None, tell=tell,
- commitevery=50):
- self.store = store
- self.generators = None
- self.data = {}
- self.errors = None
- self.askerror = askerror
- if catcherrors is None:
- catcherrors = askerror
- self.catcherrors = catcherrors
- self.commitevery = commitevery # set to None to do a single commit
- self._tell = tell
-
- def check(self, type, key, value):
- self._checks.setdefault(type, {}).setdefault(key, []).append(value)
-
- def check_map(self, entity, key, map, default):
- try:
- entity[key] = map[entity[key]]
- except KeyError:
- self.check(key, entity[key], None)
- entity[key] = default
-
- def record_error(self, key, msg=None, type=None, value=None, tb=None):
- tmp = StringIO()
- if type is None:
- traceback.print_exc(file=tmp)
- else:
- traceback.print_exception(type, value, tb, file=tmp)
- print tmp.getvalue()
- # use a list to avoid counting a <nb lines> errors instead of one
- errorlog = self.errors.setdefault(key, [])
- if msg is None:
- errorlog.append(tmp.getvalue().splitlines())
- else:
- errorlog.append( (msg, tmp.getvalue().splitlines()) )
-
- def run(self):
- self.errors = {}
- for func, checks in self.generators:
- self._checks = {}
- func_name = func.__name__[4:] # XXX
- self.tell("Import '%s'..." % func_name)
- try:
- func(self)
- except:
- if self.catcherrors:
- self.record_error(func_name, 'While calling %s' % func.__name__)
- else:
- raise
- for key, func, title, help in checks:
- buckets = self._checks.get(key)
- if buckets:
- err = func(buckets)
- if err:
- self.errors[title] = (help, err)
- self.store.checkpoint()
- nberrors = sum(len(err[1]) for err in self.errors.values())
- self.tell('\nImport completed: %i entities, %i types, %i relations and %i errors'
- % (self.store.nb_inserted_entities,
- self.store.nb_inserted_types,
- self.store.nb_inserted_relations,
- nberrors))
- if self.errors:
- if self.askerror == 2 or (self.askerror and confirm('Display errors ?')):
- from pprint import pformat
- for errkey, error in self.errors.items():
- self.tell("\n%s (%s): %d\n" % (error[0], errkey, len(error[1])))
- self.tell(pformat(sorted(error[1])))
-
- def get_data(self, key):
- return self.data.get(key)
-
- def index(self, name, key, value, unique=False):
- """create a new index
-
- If unique is set to True, only first occurence will be kept not the following ones
- """
- if unique:
- try:
- if value in self.store.indexes[name][key]:
- return
- except KeyError:
- # we're sure that one is the first occurence; so continue...
- pass
- self.store.indexes.setdefault(name, {}).setdefault(key, []).append(value)
-
- def tell(self, msg):
- self._tell(msg)
-
- def iter_and_commit(self, datakey):
- """iter rows, triggering commit every self.commitevery iterations"""
- return commit_every(self.commitevery, self.store, self.get_data(datakey))
-
-
-
-from datetime import datetime
-from cubicweb.schema import META_RTYPES, VIRTUAL_RTYPES
-
-
-class NoHookRQLObjectStore(RQLObjectStore):
- """ObjectStore that works with an actual RQL repository (production mode)"""
- _rql = None # bw compat
-
- def __init__(self, session, metagen=None, baseurl=None):
- super(NoHookRQLObjectStore, self).__init__(session)
- self.source = session.repo.system_source
- self.rschema = session.repo.schema.rschema
- self.add_relation = self.source.add_relation
- if metagen is None:
- metagen = MetaGenerator(session, baseurl)
- self.metagen = metagen
- self._nb_inserted_entities = 0
- self._nb_inserted_types = 0
- self._nb_inserted_relations = 0
- self.rql = session.unsafe_execute
-
- def create_entity(self, etype, **kwargs):
- for k, v in kwargs.iteritems():
- kwargs[k] = getattr(v, 'eid', v)
- entity, rels = self.metagen.base_etype_dicts(etype)
- entity = copy(entity)
- entity._related_cache = {}
- self.metagen.init_entity(entity)
- entity.update(kwargs)
- session = self.session
- self.source.add_entity(session, entity)
- self.source.add_info(session, entity, self.source, complete=False)
- for rtype, targeteids in rels.iteritems():
- # targeteids may be a single eid or a list of eids
- inlined = self.rschema(rtype).inlined
- try:
- for targeteid in targeteids:
- self.add_relation(session, entity.eid, rtype, targeteid,
- inlined)
- except TypeError:
- self.add_relation(session, entity.eid, rtype, targeteids,
- inlined)
- self._nb_inserted_entities += 1
- return entity
-
- def relate(self, eid_from, rtype, eid_to):
- assert not rtype.startswith('reverse_')
- self.add_relation(self.session, eid_from, rtype, eid_to,
- self.rschema(rtype).inlined)
- self._nb_inserted_relations += 1
-
- @property
- def nb_inserted_entities(self):
- return self._nb_inserted_entities
- @property
- def nb_inserted_types(self):
- return self._nb_inserted_types
- @property
- def nb_inserted_relations(self):
- return self._nb_inserted_relations
-
- def _put(self, type, item):
- raise RuntimeError('use create entity')
-
-
-class MetaGenerator(object):
- def __init__(self, session, baseurl=None):
- self.session = session
- self.source = session.repo.system_source
- self.time = datetime.now()
- if baseurl is None:
- config = session.vreg.config
- baseurl = config['base-url'] or config.default_base_url()
- if not baseurl[-1] == '/':
- baseurl += '/'
- self.baseurl = baseurl
- # attributes/relations shared by all entities of the same type
- self.etype_attrs = []
- self.etype_rels = []
- # attributes/relations specific to each entity
- self.entity_attrs = ['eid', 'cwuri']
- #self.entity_rels = [] XXX not handled (YAGNI?)
- schema = session.vreg.schema
- rschema = schema.rschema
- for rtype in META_RTYPES:
- if rtype in ('eid', 'cwuri') or rtype in VIRTUAL_RTYPES:
- continue
- if rschema(rtype).final:
- self.etype_attrs.append(rtype)
- else:
- self.etype_rels.append(rtype)
- if not schema._eid_index:
- # test schema loaded from the fs
- self.gen_is = self.test_gen_is
- self.gen_is_instance_of = self.test_gen_is_instanceof
-
- @cached
- def base_etype_dicts(self, etype):
- entity = self.session.vreg['etypes'].etype_class(etype)(self.session)
- # entity are "surface" copied, avoid shared dict between copies
- del entity.cw_extra_kwargs
- for attr in self.etype_attrs:
- entity[attr] = self.generate(entity, attr)
- rels = {}
- for rel in self.etype_rels:
- rels[rel] = self.generate(entity, rel)
- return entity, rels
-
- def init_entity(self, entity):
- for attr in self.entity_attrs:
- entity[attr] = self.generate(entity, attr)
- entity.eid = entity['eid']
-
- def generate(self, entity, rtype):
- return getattr(self, 'gen_%s' % rtype)(entity)
-
- def gen_eid(self, entity):
- return self.source.create_eid(self.session)
-
- def gen_cwuri(self, entity):
- return u'%seid/%s' % (self.baseurl, entity['eid'])
-
- def gen_creation_date(self, entity):
- return self.time
- def gen_modification_date(self, entity):
- return self.time
-
- def gen_is(self, entity):
- return entity.e_schema.eid
- def gen_is_instance_of(self, entity):
- eids = []
- for etype in entity.e_schema.ancestors() + [entity.e_schema]:
- eids.append(entity.e_schema.eid)
- return eids
-
- def gen_created_by(self, entity):
- return self.session.user.eid
- def gen_owned_by(self, entity):
- return self.session.user.eid
-
- # implementations of gen_is / gen_is_instance_of to use during test where
- # schema has been loaded from the fs (hence entity type schema eids are not
- # known)
- def test_gen_is(self, entity):
- from cubicweb.hooks.metadata import eschema_eid
- return eschema_eid(self.session, entity.e_schema)
- def test_gen_is_instanceof(self, entity):
- from cubicweb.hooks.metadata import eschema_eid
- eids = []
- for eschema in entity.e_schema.ancestors() + [entity.e_schema]:
- eids.append(eschema_eid(self.session, eschema))
- return eids
-
-
-################################################################################
-
-utf8csvreader = deprecated('[3.6] use ucsvreader instead')(ucsvreader)
-
-@deprecated('[3.6] use required')
-def nonempty(value):
- return required(value)
-
-@deprecated("[3.6] use call_check_method('isdigit')")
-def alldigits(txt):
- if txt.isdigit():
- return txt
- else:
- return u''
-
-@deprecated("[3.7] too specific, will move away, copy me")
-def capitalize_if_unicase(txt):
- if txt.isupper() or txt.islower():
- return txt.capitalize()
- return txt
-
-@deprecated("[3.7] too specific, will move away, copy me")
-def yesno(value):
- """simple heuristic that returns boolean value
-
- >>> yesno("Yes")
- True
- >>> yesno("oui")
- True
- >>> yesno("1")
- True
- >>> yesno("11")
- True
- >>> yesno("")
- False
- >>> yesno("Non")
- False
- >>> yesno("blablabla")
- False
- """
- if value:
- return value.lower()[0] in 'yo1'
- return False
-
-@deprecated("[3.7] use call_check_method('isalpha')")
-def isalpha(value):
- if value.isalpha():
- return value
- raise ValueError("not all characters in the string alphabetic")
-
-@deprecated("[3.7] use call_transform_method('upper')")
-def uppercase(txt):
- return txt.upper()
-
-@deprecated("[3.7] use call_transform_method('lower')")
-def lowercase(txt):
- return txt.lower()
-
-@deprecated("[3.7] use call_transform_method('replace', ' ', '')")
-def no_space(txt):
- return txt.replace(' ','')
-
-@deprecated("[3.7] use call_transform_method('replace', u'\xa0', '')")
-def no_uspace(txt):
- return txt.replace(u'\xa0','')
-
-@deprecated("[3.7] use call_transform_method('replace', '-', '')")
-def no_dash(txt):
- return txt.replace('-','')
-
-@deprecated("[3.7] use call_transform_method('strip')")
-def strip(txt):
- return txt.strip()
-
-@deprecated("[3.7] use call_transform_method('replace', ',', '.'), float")
-def decimal(value):
- return comma_float(value)
-
-@deprecated('[3.7] use int builtin')
-def integer(value):
- return int(value)
+# pylint: disable-msg=W0614,W0401
+from warnings import warn
+warn('moved to cubicweb.dataimport', DeprecationWarning, stacklevel=2)
+from cubicweb.dataimport import *
--- a/devtools/fake.py Fri Mar 12 16:21:13 2010 +0100
+++ b/devtools/fake.py Fri Mar 12 16:23:21 2010 +0100
@@ -7,9 +7,7 @@
"""
__docformat__ = "restructuredtext en"
-from logilab.common.adbh import get_adv_func_helper
-
-from indexer import get_indexer
+from logilab.database import get_db_helper
from cubicweb.req import RequestSessionBase
from cubicweb.cwvreg import CubicWebVRegistry
@@ -118,17 +116,6 @@
def validate_cache(self):
pass
- # session compatibility (in some test are using this class to test server
- # side views...)
- def actual_session(self):
- """return the original parent session if any, else self"""
- return self
-
- def unsafe_execute(self, *args, **kwargs):
- """return the original parent session if any, else self"""
- kwargs.pop('propagate', None)
- return self.execute(*args, **kwargs)
-
class FakeUser(object):
login = 'toto'
@@ -138,18 +125,19 @@
class FakeSession(RequestSessionBase):
+ read_security = write_security = True
+ set_read_security = set_write_security = lambda *args, **kwargs: None
+
def __init__(self, repo=None, user=None):
self.repo = repo
self.vreg = getattr(self.repo, 'vreg', CubicWebVRegistry(FakeConfig(), initlog=False))
self.pool = FakePool()
self.user = user or FakeUser()
self.is_internal_session = False
- self.is_super_session = self.user.eid == -1
self.transaction_data = {}
- def execute(self, *args):
+ def execute(self, *args, **kwargs):
pass
- unsafe_execute = execute
def commit(self, *args):
self.transaction_data.clear()
@@ -158,11 +146,6 @@
def system_sql(self, sql, args=None):
pass
- def decorate_rset(self, rset, propagate=False):
- rset.vreg = self.vreg
- rset.req = self
- return rset
-
def set_entity_cache(self, entity):
pass
@@ -200,12 +183,7 @@
class FakeSource(object):
- dbhelper = get_adv_func_helper('sqlite')
- indexer = get_indexer('sqlite', 'UTF8')
- dbhelper.fti_uid_attr = indexer.uid_attr
- dbhelper.fti_table = indexer.table
- dbhelper.fti_restriction_sql = indexer.restriction_sql
- dbhelper.fti_need_distinct_query = indexer.need_distinct
+ dbhelper = get_db_helper('sqlite')
def __init__(self, uri):
self.uri = uri
--- a/devtools/repotest.py Fri Mar 12 16:21:13 2010 +0100
+++ b/devtools/repotest.py Fri Mar 12 16:23:21 2010 +0100
@@ -95,6 +95,31 @@
def __iter__(self):
return iter(sorted(self.origdict, key=self.sortkey))
+def schema_eids_idx(schema):
+ """return a dictionary mapping schema types to their eids so we can reread
+ it from the fs instead of the db (too costly) between tests
+ """
+ schema_eids = {}
+ for x in schema.entities():
+ schema_eids[x] = x.eid
+ for x in schema.relations():
+ schema_eids[x] = x.eid
+ for rdef in x.rdefs.itervalues():
+ schema_eids[(rdef.subject, rdef.rtype, rdef.object)] = rdef.eid
+ return schema_eids
+
+def restore_schema_eids_idx(schema, schema_eids):
+ """rebuild schema eid index"""
+ for x in schema.entities():
+ x.eid = schema_eids[x]
+ schema._eid_index[x.eid] = x
+ for x in schema.relations():
+ x.eid = schema_eids[x]
+ schema._eid_index[x.eid] = x
+ for rdef in x.rdefs.itervalues():
+ rdef.eid = schema_eids[(rdef.subject, rdef.rtype, rdef.object)]
+ schema._eid_index[rdef.eid] = rdef
+
from logilab.common.testlib import TestCase
from rql import RQLHelper
@@ -150,17 +175,23 @@
self.pool = self.session.set_pool()
self.maxeid = self.get_max_eid()
do_monkey_patch()
+ self._dumb_sessions = []
def get_max_eid(self):
- return self.session.unsafe_execute('Any MAX(X)')[0][0]
+ return self.session.execute('Any MAX(X)')[0][0]
def cleanup(self):
- self.session.unsafe_execute('DELETE Any X WHERE X eid > %s' % self.maxeid)
+ self.session.set_pool()
+ self.session.execute('DELETE Any X WHERE X eid > %s' % self.maxeid)
def tearDown(self):
undo_monkey_patch()
self.session.rollback()
self.cleanup()
self.commit()
+ # properly close dumb sessions
+ for session in self._dumb_sessions:
+ session.rollback()
+ session.close()
self.repo._free_pool(self.pool)
assert self.session.user.eid != -1
@@ -198,6 +229,8 @@
u._groups = set(groups)
s = Session(u, self.repo)
s._threaddata.pool = self.pool
+ # register session to ensure it gets closed
+ self._dumb_sessions.append(s)
return s
def execute(self, rql, args=None, eid_key=None, build_descr=True):
@@ -223,6 +256,7 @@
self.sources = self.o._repo.sources
self.system = self.sources[-1]
do_monkey_patch()
+ self._dumb_sessions = [] # by hi-jacked parent setup
def add_source(self, sourcecls, uri):
self.sources.append(sourcecls(self.repo, self.o.schema,
@@ -237,6 +271,9 @@
del self.repo.sources_by_uri[source.uri]
self.newsources -= 1
undo_monkey_patch()
+ for session in self._dumb_sessions:
+ session._threaddata.pool = None
+ session.close()
def _prepare_plan(self, rql, kwargs=None):
rqlst = self.o.parse(rql, annotate=True)
--- a/devtools/test/unittest_testlib.py Fri Mar 12 16:21:13 2010 +0100
+++ b/devtools/test/unittest_testlib.py Fri Mar 12 16:23:21 2010 +0100
@@ -9,12 +9,12 @@
from cStringIO import StringIO
from unittest import TestSuite
-
-from logilab.common.testlib import (TestCase, unittest_main,
+from logilab.common.testlib import (TestCase, unittest_main,
SkipAwareTextTestRunner)
from cubicweb.devtools import htmlparser
from cubicweb.devtools.testlib import CubicWebTC
+from cubicweb.pytestconf import clean_repo_test_cls
class WebTestTC(TestCase):
@@ -37,7 +37,7 @@
self.assertEquals(result.testsRun, 2)
self.assertEquals(len(result.errors), 0)
self.assertEquals(len(result.failures), 1)
-
+ clean_repo_test_cls(MyWebTest)
HTML_PAGE = u"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
--- a/devtools/testlib.py Fri Mar 12 16:21:13 2010 +0100
+++ b/devtools/testlib.py Fri Mar 12 16:23:21 2010 +0100
@@ -228,7 +228,9 @@
@property
def session(self):
"""return current server side session (using default manager account)"""
- return self.repo._sessions[self.cnx.sessionid]
+ session = self.repo._sessions[self.cnx.sessionid]
+ session.set_pool()
+ return session
@property
def adminsession(self):
--- a/doc/book/en/development/devweb/js.rst Fri Mar 12 16:21:13 2010 +0100
+++ b/doc/book/en/development/devweb/js.rst Fri Mar 12 16:23:21 2010 +0100
@@ -40,6 +40,21 @@
snippet inline in the html headers. This is quite useful for setting
up early jQuery(document).ready(...) initialisations.
+CubicWeb javascript events
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+* ``server-response``: this event is triggered on HTTP responses (both
+ standard and ajax). The two following extra parameters are passed
+ to callbacks :
+
+ - ``ajax``: a boolean that says if the reponse was issued by an
+ ajax request
+
+ - ``node``: the DOM node returned by the server in case of an
+ ajax request, otherwise the document itself for standard HTTP
+ requests.
+
+
Overview of what's available
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
--- a/doc/tools/generate_modules.py Fri Mar 12 16:21:13 2010 +0100
+++ b/doc/tools/generate_modules.py Fri Mar 12 16:23:21 2010 +0100
@@ -16,7 +16,7 @@
cw_gen = ModuleGenerator('cubicweb', '../..')
cw_gen.generate("../book/en/annexes/api_cubicweb.rst",
EXCLUDE_DIRS + ('cwdesklets', 'misc', 'skel', 'skeleton'))
- for modname in ('indexer', 'logilab', 'rql', 'yams'):
+ for modname in ('logilab', 'rql', 'yams'):
cw_gen = ModuleGenerator(modname, '../../../' + modname)
cw_gen.generate("../book/en/annexes/api_%s.rst" % modname,
EXCLUDE_DIRS + ('tools',))
--- a/entities/authobjs.py Fri Mar 12 16:21:13 2010 +0100
+++ b/entities/authobjs.py Fri Mar 12 16:23:21 2010 +0100
@@ -93,15 +93,10 @@
return self.groups == frozenset(('guests', ))
def owns(self, eid):
- if hasattr(self._cw, 'unsafe_execute'):
- # use unsafe_execute on the repository side, in case
- # session's user doesn't have access to CWUser
- execute = self._cw.unsafe_execute
- else:
- execute = self._cw.execute
try:
- return execute('Any X WHERE X eid %(x)s, X owned_by U, U eid %(u)s',
- {'x': eid, 'u': self.eid}, 'x')
+ return self._cw.execute(
+ 'Any X WHERE X eid %(x)s, X owned_by U, U eid %(u)s',
+ {'x': eid, 'u': self.eid}, 'x')
except Unauthorized:
return False
owns = cached(owns, keyarg=1)
--- a/entities/test/unittest_wfobjs.py Fri Mar 12 16:21:13 2010 +0100
+++ b/entities/test/unittest_wfobjs.py Fri Mar 12 16:23:21 2010 +0100
@@ -1,5 +1,7 @@
+from __future__ import with_statement
from cubicweb.devtools.testlib import CubicWebTC
from cubicweb import ValidationError
+from cubicweb.server.session import security_enabled
def add_wf(self, etype, name=None, default=False):
if name is None:
@@ -126,10 +128,11 @@
wf = add_wf(self, 'CWUser')
s = wf.add_state(u'foo', initial=True)
self.commit()
- ex = self.assertRaises(ValidationError, self.session.unsafe_execute,
+ with security_enabled(self.session, write=False):
+ ex = self.assertRaises(ValidationError, self.session.execute,
'SET X in_state S WHERE X eid %(x)s, S eid %(s)s',
{'x': self.user().eid, 's': s.eid}, 'x')
- self.assertEquals(ex.errors, {'in_state': "state doesn't belong to entity's workflow. "
+ self.assertEquals(ex.errors, {'in_state': "state doesn't belong to entity's workflow. "
"You may want to set a custom workflow for this entity first."})
def test_fire_transition(self):
@@ -505,7 +508,7 @@
{'wf': self.wf.eid})
self.commit()
- # XXX currently, we've to rely on hooks to set initial state, or to use unsafe_execute
+ # XXX currently, we've to rely on hooks to set initial state, or to use execute
# def test_initial_state(self):
# cnx = self.login('stduser')
# cu = cnx.cursor()
--- a/entities/wfobjs.py Fri Mar 12 16:21:13 2010 +0100
+++ b/entities/wfobjs.py Fri Mar 12 16:23:21 2010 +0100
@@ -158,7 +158,7 @@
todelstate = self.state_by_name(todelstate)
if not hasattr(replacement, 'eid'):
replacement = self.state_by_name(replacement)
- execute = self._cw.unsafe_execute
+ execute = self._cw.execute
execute('SET X in_state S WHERE S eid %(s)s', {'s': todelstate.eid}, 's')
execute('SET X from_state NS WHERE X to_state OS, OS eid %(os)s, NS eid %(ns)s',
{'os': todelstate.eid, 'ns': replacement.eid}, 's')
--- a/entity.py Fri Mar 12 16:21:13 2010 +0100
+++ b/entity.py Fri Mar 12 16:23:21 2010 +0100
@@ -20,6 +20,7 @@
from cubicweb.rset import ResultSet
from cubicweb.selectors import yes
from cubicweb.appobject import AppObject
+from cubicweb.req import _check_cw_unsafe
from cubicweb.schema import RQLVocabularyConstraint, RQLConstraint
from cubicweb.rqlrewrite import RQLRewriter
@@ -440,7 +441,8 @@
"""returns a resultset containing `self` information"""
rset = ResultSet([(self.eid,)], 'Any X WHERE X eid %(x)s',
{'x': self.eid}, [(self.__regid__,)])
- return self._cw.decorate_rset(rset)
+ rset.req = self._cw
+ return rset
def to_complete_relations(self):
"""by default complete final relations to when calling .complete()"""
@@ -531,8 +533,8 @@
# if some outer join are included to fetch inlined relations
rql = 'Any %s,%s %s' % (V, ','.join(var for attr, var in selected),
','.join(rql))
- execute = getattr(self._cw, 'unsafe_execute', self._cw.execute)
- rset = execute(rql, {'x': self.eid}, 'x', build_descr=False)[0]
+ rset = self._cw.execute(rql, {'x': self.eid}, 'x',
+ build_descr=False)[0]
# handle attributes
for i in xrange(1, lastattr):
self[str(selected[i-1][0])] = rset[i]
@@ -542,7 +544,7 @@
value = rset[i]
if value is None:
rrset = ResultSet([], rql, {'x': self.eid})
- self._cw.decorate_rset(rrset)
+ rrset.req = self._cw
else:
rrset = self._cw.eid_rset(value)
self.set_related_cache(rtype, role, rrset)
@@ -560,11 +562,8 @@
if not self.is_saved():
return None
rql = "Any A WHERE X eid %%(x)s, X %s A" % name
- # XXX should we really use unsafe_execute here? I think so (syt),
- # see #344874
- execute = getattr(self._cw, 'unsafe_execute', self._cw.execute)
try:
- rset = execute(rql, {'x': self.eid}, 'x')
+ rset = self._cw.execute(rql, {'x': self.eid}, 'x')
except Unauthorized:
self[name] = value = None
else:
@@ -595,10 +594,7 @@
pass
assert self.has_eid()
rql = self.related_rql(rtype, role)
- # XXX should we really use unsafe_execute here? I think so (syt),
- # see #344874
- execute = getattr(self._cw, 'unsafe_execute', self._cw.execute)
- rset = execute(rql, {'x': self.eid}, 'x')
+ rset = self._cw.execute(rql, {'x': self.eid}, 'x')
self.set_related_cache(rtype, role, rset)
return self.related(rtype, role, limit, entities)
@@ -800,8 +796,9 @@
# raw edition utilities ###################################################
- def set_attributes(self, _cw_unsafe=False, **kwargs):
+ def set_attributes(self, **kwargs):
assert kwargs
+ _check_cw_unsafe(kwargs)
relations = []
for key in kwargs:
relations.append('X %s %%(%s)s' % (key, key))
@@ -809,25 +806,18 @@
self.update(kwargs)
# and now update the database
kwargs['x'] = self.eid
- if _cw_unsafe:
- self._cw.unsafe_execute(
- 'SET %s WHERE X eid %%(x)s' % ','.join(relations), kwargs, 'x')
- else:
- self._cw.execute('SET %s WHERE X eid %%(x)s' % ','.join(relations),
- kwargs, 'x')
+ self._cw.execute('SET %s WHERE X eid %%(x)s' % ','.join(relations),
+ kwargs, 'x')
- def set_relations(self, _cw_unsafe=False, **kwargs):
+ def set_relations(self, **kwargs):
"""add relations to the given object. To set a relation where this entity
is the object of the relation, use 'reverse_'<relation> as argument name.
Values may be an entity, a list of entity, or None (meaning that all
relations of the given type from or to this object should be deleted).
"""
- if _cw_unsafe:
- execute = self._cw.unsafe_execute
- else:
- execute = self._cw.execute
# XXX update cache
+ _check_cw_unsafe(kwargs)
for attr, values in kwargs.iteritems():
if attr.startswith('reverse_'):
restr = 'Y %s X' % attr[len('reverse_'):]
@@ -839,14 +829,14 @@
continue
if not isinstance(values, (tuple, list, set, frozenset)):
values = (values,)
- execute('SET %s WHERE X eid %%(x)s, Y eid IN (%s)' % (
+ self._cw.execute('SET %s WHERE X eid %%(x)s, Y eid IN (%s)' % (
restr, ','.join(str(r.eid) for r in values)),
- {'x': self.eid}, 'x')
+ {'x': self.eid}, 'x')
- def delete(self):
+ def delete(self, **kwargs):
assert self.has_eid(), self.eid
self._cw.execute('DELETE %s X WHERE X eid %%(x)s' % self.e_schema,
- {'x': self.eid})
+ {'x': self.eid}, **kwargs)
# server side utilities ###################################################
@@ -894,12 +884,12 @@
"""used by the full text indexer to get words to index
this method should only be used on the repository side since it depends
- on the indexer package
+ on the logilab.database package
:rtype: list
:return: the list of indexable word of this entity
"""
- from indexer.query_objects import tokenize
+ from logilab.database.fti import tokenize
# take care to cases where we're modyfying the schema
pending = self._cw.transaction_data.setdefault('pendingrdefs', set())
words = []
--- a/etwist/server.py Fri Mar 12 16:21:13 2010 +0100
+++ b/etwist/server.py Fri Mar 12 16:23:21 2010 +0100
@@ -350,32 +350,131 @@
set_log_methods(CubicWebRootResource, getLogger('cubicweb.twisted'))
+listiterator = type(iter([]))
-def _gc_debug():
+def _gc_debug(all=True):
import gc
from pprint import pprint
from cubicweb.appobject import AppObject
gc.collect()
count = 0
acount = 0
+ fcount = 0
+ rcount = 0
+ ccount = 0
+ scount = 0
ocount = {}
+ from rql.stmts import Union
+ from cubicweb.schema import CubicWebSchema
+ from cubicweb.rset import ResultSet
+ from cubicweb.dbapi import Connection, Cursor
+ from cubicweb.req import RequestSessionBase
+ from cubicweb.server.repository import Repository
+ from cubicweb.server.sources.native import NativeSQLSource
+ from cubicweb.server.session import Session
+ from cubicweb.devtools.testlib import CubicWebTC
+ from logilab.common.testlib import TestSuite
+ from optparse import Values
+ import types, weakref
for obj in gc.get_objects():
- if isinstance(obj, CubicWebTwistedRequestAdapter):
+ if isinstance(obj, RequestSessionBase):
count += 1
+ if isinstance(obj, Session):
+ print ' session', obj, referrers(obj, True)
elif isinstance(obj, AppObject):
acount += 1
- else:
+ elif isinstance(obj, ResultSet):
+ rcount += 1
+ #print ' rset', obj, referrers(obj)
+ elif isinstance(obj, Repository):
+ print ' REPO', obj, referrers(obj, True)
+ #elif isinstance(obj, NativeSQLSource):
+ # print ' SOURCe', obj, referrers(obj)
+ elif isinstance(obj, CubicWebTC):
+ print ' TC', obj, referrers(obj)
+ elif isinstance(obj, TestSuite):
+ print ' SUITE', obj, referrers(obj)
+ #elif isinstance(obj, Values):
+ # print ' values', '%#x' % id(obj), referrers(obj, True)
+ elif isinstance(obj, Connection):
+ ccount += 1
+ #print ' cnx', obj, referrers(obj)
+ #elif isinstance(obj, Cursor):
+ # ccount += 1
+ # print ' cursor', obj, referrers(obj)
+ elif isinstance(obj, file):
+ fcount += 1
+            # print ' open file', obj.name, obj.fileno
+ elif isinstance(obj, CubicWebSchema):
+ scount += 1
+ print ' schema', obj, referrers(obj)
+ elif not isinstance(obj, (type, tuple, dict, list, set, frozenset,
+ weakref.ref, weakref.WeakKeyDictionary,
+ listiterator,
+ property, classmethod,
+ types.ModuleType, types.MemberDescriptorType,
+ types.FunctionType, types.MethodType)):
try:
ocount[obj.__class__] += 1
except KeyError:
ocount[obj.__class__] = 1
except AttributeError:
pass
- print 'IN MEM REQUESTS', count
- print 'IN MEM APPOBJECTS', acount
- ocount = sorted(ocount.items(), key=lambda x: x[1], reverse=True)[:20]
- pprint(ocount)
- print 'UNREACHABLE', gc.garbage
+ if count:
+ print ' NB REQUESTS/SESSIONS', count
+ if acount:
+ print ' NB APPOBJECTS', acount
+ if ccount:
+ print ' NB CONNECTIONS', ccount
+ if rcount:
+ print ' NB RSETS', rcount
+ if scount:
+ print ' NB SCHEMAS', scount
+ if fcount:
+ print ' NB FILES', fcount
+ if all:
+ ocount = sorted(ocount.items(), key=lambda x: x[1], reverse=True)[:20]
+ pprint(ocount)
+ if gc.garbage:
+ print 'UNREACHABLE', gc.garbage
+
+def referrers(obj, showobj=False):
+ try:
+ return sorted(set((type(x), showobj and x or getattr(x, '__name__', '%#x' % id(x)))
+ for x in _referrers(obj)))
+ except TypeError:
+ s = set()
+ unhashable = []
+ for x in _referrers(obj):
+ try:
+ s.add(x)
+ except TypeError:
+ unhashable.append(x)
+ return sorted(s) + unhashable
+
+def _referrers(obj, seen=None, level=0):
+ import gc, types
+ from cubicweb.schema import CubicWebRelationSchema, CubicWebEntitySchema
+ interesting = []
+ if seen is None:
+ seen = set()
+ for x in gc.get_referrers(obj):
+ if id(x) in seen:
+ continue
+ seen.add(id(x))
+ if isinstance(x, types.FrameType):
+ continue
+ if isinstance(x, (CubicWebRelationSchema, CubicWebEntitySchema)):
+ continue
+ if isinstance(x, (list, tuple, set, dict, listiterator)):
+ if level >= 5:
+ pass
+ #interesting.append(x)
+ else:
+ interesting += _referrers(x, seen, level+1)
+ else:
+ interesting.append(x)
+ return interesting
def run(config, debug):
# create the site
--- a/ext/html4zope.py Fri Mar 12 16:21:13 2010 +0100
+++ b/ext/html4zope.py Fri Mar 12 16:23:21 2010 +0100
@@ -24,12 +24,13 @@
__docformat__ = 'reStructuredText'
+import os
+
from logilab.mtconverter import xml_escape
from docutils import nodes
from docutils.writers.html4css1 import Writer as CSS1Writer
from docutils.writers.html4css1 import HTMLTranslator as CSS1HTMLTranslator
-import os
default_level = int(os.environ.get('STX_DEFAULT_LEVEL', 3))
--- a/ext/rest.py Fri Mar 12 16:21:13 2010 +0100
+++ b/ext/rest.py Fri Mar 12 16:23:21 2010 +0100
@@ -25,7 +25,7 @@
from os.path import join
from docutils import statemachine, nodes, utils, io
-from docutils.core import publish_string
+from docutils.core import Publisher
from docutils.parsers.rst import Parser, states, directives
from docutils.parsers.rst.roles import register_canonical_role, set_classes
@@ -92,14 +92,15 @@
in `docutils.parsers.rst.directives.misc`
"""
context = state.document.settings.context
+ cw = context._cw
source = state_machine.input_lines.source(
lineno - state_machine.input_offset - 1)
#source_dir = os.path.dirname(os.path.abspath(source))
fid = arguments[0]
- for lang in chain((context._cw.lang, context.vreg.property_value('ui.language')),
- context.config.available_languages()):
+ for lang in chain((cw.lang, cw.vreg.property_value('ui.language')),
+ cw.vreg.config.available_languages()):
rid = '%s_%s.rst' % (fid, lang)
- resourcedir = context.config.locate_doc_file(rid)
+ resourcedir = cw.vreg.config.locate_doc_file(rid)
if resourcedir:
break
else:
@@ -196,6 +197,15 @@
self.finish_parse()
+# XXX docutils keeps a ref on context; can't find a correct way to remove it
+class CWReSTPublisher(Publisher):
+ def __init__(self, context, settings, **kwargs):
+ Publisher.__init__(self, **kwargs)
+ self.set_components('standalone', 'restructuredtext', 'pseudoxml')
+ self.process_programmatic_settings(None, settings, None)
+ self.settings.context = context
+
+
def rest_publish(context, data):
"""publish a string formatted as ReStructured Text to HTML
@@ -218,7 +228,7 @@
# remove unprintable characters unauthorized in xml
data = data.translate(ESC_CAR_TABLE)
settings = {'input_encoding': encoding, 'output_encoding': 'unicode',
- 'warning_stream': StringIO(), 'context': context,
+ 'warning_stream': StringIO(),
# dunno what's the max, severe is 4, and we never want a crash
# (though try/except may be a better option...)
'halt_level': 10,
@@ -233,9 +243,17 @@
else:
base_url = None
try:
- return publish_string(writer=Writer(base_url=base_url),
- parser=CubicWebReSTParser(), source=data,
- settings_overrides=settings)
+ pub = CWReSTPublisher(context, settings,
+ parser=CubicWebReSTParser(),
+ writer=Writer(base_url=base_url),
+ source_class=io.StringInput,
+ destination_class=io.StringOutput)
+ pub.set_source(data)
+ pub.set_destination()
+ res = pub.publish(enable_exit_status=None)
+ # necessary for proper garbage collection, else a ref is kept somewhere in docutils...
+ del pub.settings.context
+ return res
except Exception:
LOGGER.exception('error while publishing ReST text')
if not isinstance(data, unicode):
--- a/goa/appobjects/dbmgmt.py Fri Mar 12 16:21:13 2010 +0100
+++ b/goa/appobjects/dbmgmt.py Fri Mar 12 16:23:21 2010 +0100
@@ -172,7 +172,7 @@
skip_etypes = ('CWGroup', 'CWUser')
def call(self):
- # XXX should use unsafe_execute with all hooks deactivated
+ # XXX should use unsafe execute with all hooks deactivated
# XXX step by catching datastore errors?
for eschema in self.schema.entities():
if eschema.final or eschema in self.skip_etypes:
--- a/goa/db.py Fri Mar 12 16:21:13 2010 +0100
+++ b/goa/db.py Fri Mar 12 16:23:21 2010 +0100
@@ -86,7 +86,7 @@
entity = vreg.etype_class(eschema.type)(req, rset, i, j)
rset._get_entity_cache_ = {(i, j): entity}
rset.rowcount = len(rows)
- req.decorate_rset(rset)
+ rset.req = req
return rset
--- a/goa/dbinit.py Fri Mar 12 16:21:13 2010 +0100
+++ b/goa/dbinit.py Fri Mar 12 16:23:21 2010 +0100
@@ -84,7 +84,7 @@
Put(gaeentity)
def init_persistent_schema(ssession, schema):
- execute = ssession.unsafe_execute
+ execute = ssession.execute
rql = ('INSERT CWEType X: X name %(name)s, X description %(descr)s,'
'X final FALSE')
eschema = schema.eschema('CWEType')
@@ -96,7 +96,7 @@
'descr': unicode(eschema.description)})
def insert_versions(ssession, config):
- execute = ssession.unsafe_execute
+ execute = ssession.execute
# insert versions
execute('INSERT CWProperty X: X pkey %(pk)s, X value%(v)s',
{'pk': u'system.version.cubicweb',
--- a/hooks/email.py Fri Mar 12 16:21:13 2010 +0100
+++ b/hooks/email.py Fri Mar 12 16:23:21 2010 +0100
@@ -26,7 +26,7 @@
def precommit_event(self):
if self.condition():
- self.session.unsafe_execute(
+ self.session.execute(
'SET X %s Y WHERE X eid %%(x)s, Y eid %%(y)s' % self.rtype,
{'x': self.entity.eid, 'y': self.email.eid}, 'x')
--- a/hooks/integrity.py Fri Mar 12 16:21:13 2010 +0100
+++ b/hooks/integrity.py Fri Mar 12 16:23:21 2010 +0100
@@ -35,13 +35,12 @@
RQLUniqueConstraint in two different transactions, as explained in
http://intranet.logilab.fr/jpl/ticket/36564
"""
- asession = session.actual_session()
- if 'uniquecstrholder' in asession.transaction_data:
+ if 'uniquecstrholder' in session.transaction_data:
return
_UNIQUE_CONSTRAINTS_LOCK.acquire()
- asession.transaction_data['uniquecstrholder'] = True
+ session.transaction_data['uniquecstrholder'] = True
# register operation responsible to release the lock on commit/rollback
- _ReleaseUniqueConstraintsOperation(asession)
+ _ReleaseUniqueConstraintsOperation(session)
def _release_unique_cstr_lock(session):
if 'uniquecstrholder' in session.transaction_data:
@@ -69,7 +68,7 @@
return
if self.rtype in self.session.transaction_data.get('pendingrtypes', ()):
return
- if self.session.unsafe_execute(*self._rql()).rowcount < 1:
+ if self.session.execute(*self._rql()).rowcount < 1:
etype = self.session.describe(self.eid)[0]
_ = self.session._
msg = _('at least one relation %(rtype)s is required on %(etype)s (%(eid)s)')
@@ -99,12 +98,8 @@
__abstract__ = True
category = 'integrity'
-class UserIntegrityHook(IntegrityHook):
- __abstract__ = True
- __select__ = IntegrityHook.__select__ & hook.regular_session()
-
-class CheckCardinalityHook(UserIntegrityHook):
+class CheckCardinalityHook(IntegrityHook):
"""check cardinalities are satisfied"""
__regid__ = 'checkcard'
events = ('after_add_entity', 'before_delete_relation')
@@ -176,7 +171,7 @@
pass
-class CheckConstraintHook(UserIntegrityHook):
+class CheckConstraintHook(IntegrityHook):
"""check the relation satisfy its constraints
this is delayed to a precommit time operation since other relation which
@@ -194,7 +189,7 @@
rdef=(self.eidfrom, self.rtype, self.eidto))
-class CheckAttributeConstraintHook(UserIntegrityHook):
+class CheckAttributeConstraintHook(IntegrityHook):
"""check the attribute relation satisfy its constraints
this is delayed to a precommit time operation since other relation which
@@ -214,7 +209,7 @@
rdef=(self.entity.eid, attr, None))
-class CheckUniqueHook(UserIntegrityHook):
+class CheckUniqueHook(IntegrityHook):
__regid__ = 'checkunique'
events = ('before_add_entity', 'before_update_entity')
@@ -227,7 +222,7 @@
if val is None:
continue
rql = '%s X WHERE X %s %%(val)s' % (entity.e_schema, attr)
- rset = self._cw.unsafe_execute(rql, {'val': val})
+ rset = self._cw.execute(rql, {'val': val})
if rset and rset[0][0] != entity.eid:
msg = self._cw._('the value "%s" is already used, use another one')
raise ValidationError(entity.eid, {attr: msg % val})
@@ -244,9 +239,9 @@
if not (session.deleted_in_transaction(self.eid) or
session.added_in_transaction(self.eid)):
etype = session.describe(self.eid)[0]
- session.unsafe_execute('DELETE %s X WHERE X eid %%(x)s, NOT %s'
- % (etype, self.relation),
- {'x': self.eid}, 'x')
+ session.execute('DELETE %s X WHERE X eid %%(x)s, NOT %s'
+ % (etype, self.relation),
+ {'x': self.eid}, 'x')
class DeleteCompositeOrphanHook(IntegrityHook):
@@ -290,7 +285,7 @@
self.entity['name'] = newname
-class TidyHtmlFields(UserIntegrityHook):
+class TidyHtmlFields(IntegrityHook):
"""tidy HTML in rich text strings"""
__regid__ = 'htmltidy'
events = ('before_add_entity', 'before_update_entity')
--- a/hooks/metadata.py Fri Mar 12 16:21:13 2010 +0100
+++ b/hooks/metadata.py Fri Mar 12 16:23:21 2010 +0100
@@ -19,7 +19,7 @@
# eschema.eid is None if schema has been readen from the filesystem, not
# from the database (eg during tests)
if eschema.eid is None:
- eschema.eid = session.unsafe_execute(
+ eschema.eid = session.execute(
'Any X WHERE X is CWEType, X name %(name)s',
{'name': str(eschema)})[0][0]
return eschema.eid
@@ -103,18 +103,17 @@
events = ('after_add_entity',)
def __call__(self):
- asession = self._cw.actual_session()
- if not asession.is_internal_session:
- self._cw.add_relation(self.entity.eid, 'owned_by', asession.user.eid)
- _SetCreatorOp(asession, entity=self.entity)
+ if not self._cw.is_internal_session:
+ self._cw.add_relation(self.entity.eid, 'owned_by', self._cw.user.eid)
+ _SetCreatorOp(self._cw, entity=self.entity)
class _SyncOwnersOp(hook.Operation):
def precommit_event(self):
- self.session.unsafe_execute('SET X owned_by U WHERE C owned_by U, C eid %(c)s,'
- 'NOT EXISTS(X owned_by U, X eid %(x)s)',
- {'c': self.compositeeid, 'x': self.composedeid},
- ('c', 'x'))
+ self.session.execute('SET X owned_by U WHERE C owned_by U, C eid %(c)s,'
+ 'NOT EXISTS(X owned_by U, X eid %(x)s)',
+ {'c': self.compositeeid, 'x': self.composedeid},
+ ('c', 'x'))
class SyncCompositeOwner(MetaDataHook):
--- a/hooks/notification.py Fri Mar 12 16:21:13 2010 +0100
+++ b/hooks/notification.py Fri Mar 12 16:23:21 2010 +0100
@@ -103,20 +103,19 @@
class EntityUpdateHook(NotificationHook):
__regid__ = 'notifentityupdated'
__abstract__ = True # do not register by default
-
+ __select__ = NotificationHook.__select__ & hook.from_dbapi_query()
events = ('before_update_entity',)
skip_attrs = set()
def __call__(self):
session = self._cw
- if self.entity.eid in session.transaction_data.get('neweids', ()):
+ if session.added_in_transaction(self.entity.eid):
return # entity is being created
- if session.is_super_session:
- return # ignore changes triggered by hooks
# then compute changes
changes = session.transaction_data.setdefault('changes', {})
thisentitychanges = changes.setdefault(self.entity.eid, set())
- attrs = [k for k in self.entity.edited_attributes if not k in self.skip_attrs]
+ attrs = [k for k in self.entity.edited_attributes
+ if not k in self.skip_attrs]
if not attrs:
return
rqlsel, rqlrestr = [], ['X eid %(x)s']
@@ -125,7 +124,7 @@
rqlsel.append(var)
rqlrestr.append('X %s %s' % (attr, var))
rql = 'Any %s WHERE %s' % (','.join(rqlsel), ','.join(rqlrestr))
- rset = session.unsafe_execute(rql, {'x': self.entity.eid}, 'x')
+ rset = session.execute(rql, {'x': self.entity.eid}, 'x')
for i, attr in enumerate(attrs):
oldvalue = rset[0][i]
newvalue = self.entity[attr]
@@ -139,13 +138,11 @@
class SomethingChangedHook(NotificationHook):
__regid__ = 'supervising'
+ __select__ = NotificationHook.__select__ & hook.from_dbapi_query()
events = ('before_add_relation', 'before_delete_relation',
'after_add_entity', 'before_update_entity')
def __call__(self):
- # XXX use proper selectors
- if self._cw.is_super_session or self._cw.repo.config.repairing:
- return # ignore changes triggered by hooks or maintainance shell
dest = self._cw.vreg.config['supervising-addrs']
if not dest: # no supervisors, don't do this for nothing...
return
--- a/hooks/security.py Fri Mar 12 16:21:13 2010 +0100
+++ b/hooks/security.py Fri Mar 12 16:23:21 2010 +0100
@@ -9,6 +9,7 @@
__docformat__ = "restructuredtext en"
from cubicweb import Unauthorized
+from cubicweb.selectors import objectify_selector, lltrace
from cubicweb.server import BEFORE_ADD_RELATIONS, ON_COMMIT_ADD_RELATIONS, hook
@@ -53,10 +54,17 @@
pass
+@objectify_selector
+@lltrace
+def write_security_enabled(cls, req, **kwargs):
+ if req is None or not req.write_security:
+ return 0
+ return 1
+
class SecurityHook(hook.Hook):
__abstract__ = True
category = 'security'
- __select__ = hook.Hook.__select__ & hook.regular_session()
+ __select__ = hook.Hook.__select__ & write_security_enabled()
class AfterAddEntitySecurityHook(SecurityHook):
--- a/hooks/syncschema.py Fri Mar 12 16:21:13 2010 +0100
+++ b/hooks/syncschema.py Fri Mar 12 16:23:21 2010 +0100
@@ -12,11 +12,12 @@
"""
__docformat__ = "restructuredtext en"
+from copy import copy
from yams.schema import BASE_TYPES, RelationSchema, RelationDefinitionSchema
-from yams.buildobjs import EntityType, RelationType, RelationDefinition
-from yams.schema2sql import eschema2sql, rschema2sql, type_from_constraints
+from yams import buildobjs as ybo, schema2sql as y2sql
from logilab.common.decorators import clear_cache
+from logilab.common.testlib import mock_object
from cubicweb import ValidationError
from cubicweb.selectors import implements
@@ -254,7 +255,7 @@
# need to create the relation if it has not been already done by
# another event of the same transaction
if not rschema.type in session.transaction_data.get('createdtables', ()):
- tablesql = rschema2sql(rschema)
+ tablesql = y2sql.rschema2sql(rschema)
# create the necessary table
for sql in tablesql.split(';'):
if sql.strip():
@@ -322,13 +323,13 @@
rtype = entity.rtype.name
obj = str(entity.otype.name)
constraints = get_constraints(self.session, entity)
- rdef = RelationDefinition(subj, rtype, obj,
- description=entity.description,
- cardinality=entity.cardinality,
- constraints=constraints,
- order=entity.ordernum,
- eid=entity.eid,
- **kwargs)
+ rdef = ybo.RelationDefinition(subj, rtype, obj,
+ description=entity.description,
+ cardinality=entity.cardinality,
+ constraints=constraints,
+ order=entity.ordernum,
+ eid=entity.eid,
+ **kwargs)
MemSchemaRDefAdd(self.session, rdef)
return rdef
@@ -346,8 +347,8 @@
'internationalizable': entity.internationalizable}
rdef = self.init_rdef(**props)
sysource = session.pool.source('system')
- attrtype = type_from_constraints(sysource.dbhelper, rdef.object,
- rdef.constraints)
+ attrtype = y2sql.type_from_constraints(
+ sysource.dbhelper, rdef.object, rdef.constraints)
# XXX should be moved somehow into lgc.adbh: sqlite doesn't support to
# add a new column with UNIQUE, it should be added after the ALTER TABLE
# using ADD INDEX
@@ -378,12 +379,13 @@
self.error('error while creating index for %s.%s: %s',
table, column, ex)
# final relations are not infered, propagate
+ schema = session.vreg.schema
try:
- eschema = session.vreg.schema.eschema(rdef.subject)
+ eschema = schema.eschema(rdef.subject)
except KeyError:
return # entity type currently being added
# propagate attribute to children classes
- rschema = session.vreg.schema.rschema(rdef.name)
+ rschema = schema.rschema(rdef.name)
# if relation type has been inserted in the same transaction, its final
# attribute is still set to False, so we've to ensure it's False
rschema.final = True
@@ -393,15 +395,19 @@
'cardinality': rdef.cardinality,
'constraints': rdef.constraints,
'permissions': rdef.get_permissions(),
- 'order': rdef.order})
+ 'order': rdef.order,
+ 'infered': False, 'eid': None
+ })
+ cstrtypemap = ss.cstrtype_mapping(session)
groupmap = group_mapping(session)
+ object = schema.eschema(rdef.object)
for specialization in eschema.specialized_by(False):
if (specialization, rdef.object) in rschema.rdefs:
continue
- sperdef = RelationDefinitionSchema(specialization, rschema, rdef.object, props)
- for rql, args in ss.rdef2rql(rschema, str(specialization),
- rdef.object, sperdef, groupmap=groupmap):
- session.execute(rql, args)
+ sperdef = RelationDefinitionSchema(specialization, rschema,
+ object, props)
+ ss.execschemarql(session.execute, sperdef,
+ ss.rdef2rql(sperdef, cstrtypemap, groupmap))
# set default value, using sql for performance and to avoid
# modification_date update
if default:
@@ -450,13 +456,13 @@
rtype in session.transaction_data.get('createdtables', ())):
try:
rschema = schema.rschema(rtype)
- tablesql = rschema2sql(rschema)
+ tablesql = y2sql.rschema2sql(rschema)
except KeyError:
# fake we add it to the schema now to get a correctly
# initialized schema but remove it before doing anything
# more dangerous...
rschema = schema.add_relation_type(rdef)
- tablesql = rschema2sql(rschema)
+ tablesql = y2sql.rschema2sql(rschema)
schema.del_relation_type(rtype)
# create the necessary table
for sql in tablesql.split(';'):
@@ -489,11 +495,11 @@
return
atype = self.rschema.objects(etype)[0]
constraints = self.rschema.rdef(etype, atype).constraints
- coltype = type_from_constraints(adbh, atype, constraints,
- creating=False)
+ coltype = y2sql.type_from_constraints(adbh, atype, constraints,
+ creating=False)
# XXX check self.values['cardinality'][0] actually changed?
- sql = adbh.sql_set_null_allowed(table, column, coltype,
- self.values['cardinality'][0] != '1')
+ notnull = self.values['cardinality'][0] != '1'
+ sql = adbh.sql_set_null_allowed(table, column, coltype, notnull)
session.system_sql(sql)
if 'fulltextindexed' in self.values:
UpdateFTIndexOp(session)
@@ -526,8 +532,8 @@
oldcstr is None or oldcstr.max != newcstr.max):
adbh = self.session.pool.source('system').dbhelper
card = rtype.rdef(subjtype, objtype).cardinality
- coltype = type_from_constraints(adbh, objtype, [newcstr],
- creating=False)
+ coltype = y2sql.type_from_constraints(adbh, objtype, [newcstr],
+ creating=False)
sql = adbh.sql_change_col_type(table, column, coltype, card != '1')
try:
session.system_sql(sql, rollback_on_failure=False)
@@ -796,7 +802,7 @@
if name in CORE_ETYPES:
raise ValidationError(self.entity.eid, {None: self._cw._('can\'t be deleted')})
# delete every entities of this type
- self._cw.unsafe_execute('DELETE %s X' % name)
+ self._cw.execute('DELETE %s X' % name)
DropTable(self._cw, table=SQL_PREFIX + name)
MemSchemaCWETypeDel(self._cw, name)
@@ -828,23 +834,26 @@
return
schema = self._cw.vreg.schema
name = entity['name']
- etype = EntityType(name=name, description=entity.get('description'),
- meta=entity.get('meta')) # don't care about final
+ etype = ybo.EntityType(name=name, description=entity.get('description'),
+ meta=entity.get('meta')) # don't care about final
# fake we add it to the schema now to get a correctly initialized schema
# but remove it before doing anything more dangerous...
schema = self._cw.vreg.schema
eschema = schema.add_entity_type(etype)
# generate table sql and rql to add metadata
- tablesql = eschema2sql(self._cw.pool.source('system').dbhelper, eschema,
- prefix=SQL_PREFIX)
- relrqls = []
+ tablesql = y2sql.eschema2sql(self._cw.pool.source('system').dbhelper,
+ eschema, prefix=SQL_PREFIX)
+ rdefrqls = []
+ gmap = group_mapping(self._cw)
+ cmap = ss.cstrtype_mapping(self._cw)
for rtype in (META_RTYPES - VIRTUAL_RTYPES):
rschema = schema[rtype]
sampletype = rschema.subjects()[0]
desttype = rschema.objects()[0]
- props = rschema.rdef(sampletype, desttype)
- relrqls += list(ss.rdef2rql(rschema, name, desttype, props,
- groupmap=group_mapping(self._cw)))
+ rdef = copy(rschema.rdef(sampletype, desttype))
+ rdef.subject = mock_object(eid=entity.eid)
+ mock = mock_object(eid=None)
+ rdefrqls.append( (mock, tuple(ss.rdef2rql(rdef, cmap, gmap))) )
# now remove it !
schema.del_entity_type(name)
# create the necessary table
@@ -857,8 +866,8 @@
etype.eid = entity.eid
MemSchemaCWETypeAdd(self._cw, etype)
# add meta relations
- for rql, kwargs in relrqls:
- self._cw.execute(rql, kwargs)
+ for rdef, relrqls in rdefrqls:
+ ss.execschemarql(self._cw.execute, rdef, relrqls)
class BeforeUpdateCWETypeHook(DelCWETypeHook):
@@ -915,12 +924,12 @@
def __call__(self):
entity = self.entity
- rtype = RelationType(name=entity.name,
- description=entity.get('description'),
- meta=entity.get('meta', False),
- inlined=entity.get('inlined', False),
- symmetric=entity.get('symmetric', False),
- eid=entity.eid)
+ rtype = ybo.RelationType(name=entity.name,
+ description=entity.get('description'),
+ meta=entity.get('meta', False),
+ inlined=entity.get('inlined', False),
+ symmetric=entity.get('symmetric', False),
+ eid=entity.eid)
MemSchemaCWRTypeAdd(self._cw, rtype)
@@ -974,7 +983,7 @@
if not (subjschema.eid in pendings or objschema.eid in pendings):
session.execute('DELETE X %s Y WHERE X is %s, Y is %s'
% (rschema, subjschema, objschema))
- execute = session.unsafe_execute
+ execute = session.execute
rset = execute('Any COUNT(X) WHERE X is %s, X relation_type R,'
'R eid %%(x)s' % rdeftype, {'x': self.eidto})
lastrel = rset[0][0] == 0
--- a/hooks/test/unittest_syncschema.py Fri Mar 12 16:21:13 2010 +0100
+++ b/hooks/test/unittest_syncschema.py Fri Mar 12 16:23:21 2010 +0100
@@ -3,9 +3,11 @@
from cubicweb import ValidationError
from cubicweb.devtools.testlib import CubicWebTC
from cubicweb.server.sqlutils import SQL_PREFIX
-
+from cubicweb.devtools.repotest import schema_eids_idx, restore_schema_eids_idx
-SCHEMA_EIDS = {}
+def teardown_module(*args):
+ del SchemaModificationHooksTC.schema_eids
+
class SchemaModificationHooksTC(CubicWebTC):
reset_schema = True
@@ -15,29 +17,12 @@
# we have to read schema from the database to get eid for schema entities
config._cubes = None
cls.repo.fill_schema()
- # remember them so we can reread it from the fs instead of the db (too
- # costly) between tests
- for x in cls.repo.schema.entities():
- SCHEMA_EIDS[x] = x.eid
- for x in cls.repo.schema.relations():
- SCHEMA_EIDS[x] = x.eid
- for rdef in x.rdefs.itervalues():
- SCHEMA_EIDS[(rdef.subject, rdef.rtype, rdef.object)] = rdef.eid
+ cls.schema_eids = schema_eids_idx(cls.repo.schema)
@classmethod
def _refresh_repo(cls):
super(SchemaModificationHooksTC, cls)._refresh_repo()
- # rebuild schema eid index
- schema = cls.repo.schema
- for x in schema.entities():
- x.eid = SCHEMA_EIDS[x]
- schema._eid_index[x.eid] = x
- for x in cls.repo.schema.relations():
- x.eid = SCHEMA_EIDS[x]
- schema._eid_index[x.eid] = x
- for rdef in x.rdefs.itervalues():
- rdef.eid = SCHEMA_EIDS[(rdef.subject, rdef.rtype, rdef.object)]
- schema._eid_index[rdef.eid] = rdef
+ restore_schema_eids_idx(cls.repo.schema, cls.schema_eids)
def index_exists(self, etype, attr, unique=False):
self.session.set_pool()
--- a/hooks/workflow.py Fri Mar 12 16:21:13 2010 +0100
+++ b/hooks/workflow.py Fri Mar 12 16:23:21 2010 +0100
@@ -19,8 +19,8 @@
nocheck = session.transaction_data.setdefault('skip-security', set())
nocheck.add((x, 'in_state', oldstate))
nocheck.add((x, 'in_state', newstate))
- # delete previous state first in case we're using a super session,
- # unless in_state isn't stored in the system source
+ # delete previous state first unless in_state isn't stored in the system
+ # source
fromsource = session.describe(x)[1]
if fromsource == 'system' or \
not session.repo.sources_by_uri[fromsource].support_relation('in_state'):
@@ -42,9 +42,7 @@
and entity.current_workflow:
state = entity.current_workflow.initial
if state:
- # use super session to by-pass security checks
- session.super_session.add_relation(entity.eid, 'in_state',
- state.eid)
+ session.add_relation(entity.eid, 'in_state', state.eid)
class _FireAutotransitionOp(hook.Operation):
@@ -122,14 +120,7 @@
msg = session._('exiting from subworkflow %s')
msg %= session._(forentity.current_workflow.name)
session.transaction_data[(forentity.eid, 'subwfentrytr')] = True
- # XXX iirk
- req = forentity._cw
- forentity._cw = session.super_session
- try:
- trinfo = forentity.change_state(tostate, msg, u'text/plain',
- tr=wftr)
- finally:
- forentity._cw = req
+ forentity.change_state(tostate, msg, u'text/plain', tr=wftr)
# hooks ########################################################################
@@ -195,7 +186,8 @@
raise ValidationError(entity.eid, {None: msg})
# True if we are coming back from subworkflow
swtr = session.transaction_data.pop((forentity.eid, 'subwfentrytr'), None)
- cowpowers = session.is_super_session or 'managers' in session.user.groups
+ cowpowers = ('managers' in session.user.groups
+ or not session.write_security)
# no investigate the requested state change...
try:
treid = entity['by_transition']
@@ -266,7 +258,7 @@
class CheckInStateChangeAllowed(WorkflowHook):
- """check state apply, in case of direct in_state change using unsafe_execute
+ """check state apply, in case of direct in_state change using unsafe execute
"""
__regid__ = 'wfcheckinstate'
__select__ = WorkflowHook.__select__ & hook.match_rtype('in_state')
@@ -307,8 +299,7 @@
return
entity = self._cw.entity_from_eid(self.eidfrom)
try:
- entity.set_attributes(modification_date=datetime.now(),
- _cw_unsafe=True)
+ entity.set_attributes(modification_date=datetime.now())
except RepositoryError, ex:
# usually occurs if entity is coming from a read-only source
# (eg ldap user)
--- a/mail.py Fri Mar 12 16:21:13 2010 +0100
+++ b/mail.py Fri Mar 12 16:23:21 2010 +0100
@@ -215,16 +215,9 @@
"""return a list of either 2-uple (email, language) or user entity to
who this email should be sent
"""
- # use super_session when available, we don't want to consider security
- # when selecting recipients_finder
- try:
- req = self._cw.super_session
- except AttributeError:
- req = self._cw
- finder = self._cw.vreg['components'].select('recipients_finder', req,
- rset=self.cw_rset,
- row=self.cw_row or 0,
- col=self.cw_col or 0)
+ finder = self._cw.vreg['components'].select(
+ 'recipients_finder', self._cw, rset=self.cw_rset,
+ row=self.cw_row or 0, col=self.cw_col or 0)
return finder.recipients()
def send_now(self, recipients, msg):
--- a/misc/migration/bootstrapmigration_repository.py Fri Mar 12 16:21:13 2010 +0100
+++ b/misc/migration/bootstrapmigration_repository.py Fri Mar 12 16:23:21 2010 +0100
@@ -20,8 +20,7 @@
if applcubicwebversion == (3, 6, 0) and cubicwebversion >= (3, 6, 0):
_add_relation_definition_no_perms('CWAttribute', 'update_permission', 'CWGroup')
_add_relation_definition_no_perms('CWAttribute', 'update_permission', 'RQLExpression')
- session.set_pool()
- session.unsafe_execute('SET X update_permission Y WHERE X is CWAttribute, X add_permission Y')
+ rql('SET X update_permission Y WHERE X is CWAttribute, X add_permission Y')
drop_relation_definition('CWAttribute', 'add_permission', 'CWGroup')
drop_relation_definition('CWAttribute', 'add_permission', 'RQLExpression')
drop_relation_definition('CWAttribute', 'delete_permission', 'CWGroup')
@@ -29,10 +28,9 @@
elif applcubicwebversion < (3, 6, 0) and cubicwebversion >= (3, 6, 0):
session.set_pool()
- session.execute = session.unsafe_execute
permsdict = ss.deserialize_ertype_permissions(session)
- config.disabled_hooks_categories.add('integrity')
+ changes = session.disable_hook_categories.add('integrity')
for rschema in repo.schema.relations():
rpermsdict = permsdict.get(rschema.eid, {})
for rdef in rschema.rdefs.values():
@@ -72,7 +70,8 @@
for action in ('read', 'add', 'delete'):
drop_relation_definition('CWRType', '%s_permission' % action, 'CWGroup', commit=False)
drop_relation_definition('CWRType', '%s_permission' % action, 'RQLExpression')
- config.disabled_hooks_categories.remove('integrity')
+ if changes:
+ session.enable_hook_categories.add(*changes)
if applcubicwebversion < (3, 4, 0) and cubicwebversion >= (3, 4, 0):
@@ -80,13 +79,11 @@
deactivate_verification_hooks()
add_relation_type('cwuri')
base_url = session.base_url()
- # use an internal session since some entity might forbid modifications to admin
- isession = repo.internal_session()
for eid, in rql('Any X', ask_confirm=False):
type, source, extid = session.describe(eid)
if source == 'system':
- isession.execute('SET X cwuri %(u)s WHERE X eid %(x)s',
- {'x': eid, 'u': base_url + u'eid/%s' % eid})
+ rql('SET X cwuri %(u)s WHERE X eid %(x)s',
+ {'x': eid, 'u': base_url + u'eid/%s' % eid})
isession.commit()
reactivate_verification_hooks()
session.set_shared_data('do-not-insert-cwuri', False)
--- a/misc/migration/postcreate.py Fri Mar 12 16:21:13 2010 +0100
+++ b/misc/migration/postcreate.py Fri Mar 12 16:23:21 2010 +0100
@@ -42,8 +42,8 @@
# need this since we already have at least one user in the database (the default admin)
for user in rql('Any X WHERE X is CWUser').entities():
- session.unsafe_execute('SET X in_state S WHERE X eid %(x)s, S eid %(s)s',
- {'x': user.eid, 's': activated.eid}, 'x')
+ rql('SET X in_state S WHERE X eid %(x)s, S eid %(s)s',
+ {'x': user.eid, 's': activated.eid}, 'x')
# on interactive mode, ask for level 0 persistent options
if interactive_mode:
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/pytestconf.py Fri Mar 12 16:23:21 2010 +0100
@@ -0,0 +1,34 @@
+"""pytest configuration file: we need this to properly remove resources
+cached on test classes, at least until we have proper support for teardown_class
+"""
+import sys
+from os.path import split, splitext
+from logilab.common.pytest import PyTester
+
+from cubicweb.etwist.server import _gc_debug
+
+class CustomPyTester(PyTester):
+ def testfile(self, filename, batchmode=False):
+ try:
+ return super(CustomPyTester, self).testfile(filename, batchmode)
+ finally:
+ modname = splitext(split(filename)[1])[0]
+ try:
+ module = sys.modules[modname]
+ except KeyError:
+ # error during test module import
+ return
+ for cls in vars(module).values():
+ if getattr(cls, '__module__', None) != modname:
+ continue
+ clean_repo_test_cls(cls)
+ #_gc_debug()
+
+def clean_repo_test_cls(cls):
+ if 'repo' in cls.__dict__:
+ if not cls.repo._shutting_down:
+ cls.repo.shutdown()
+ del cls.repo
+ for clsattr in ('cnx', '_orig_cnx', 'config', '_config', 'vreg', 'schema'):
+ if clsattr in cls.__dict__:
+ delattr(cls, clsattr)
--- a/req.py Fri Mar 12 16:21:13 2010 +0100
+++ b/req.py Fri Mar 12 16:23:21 2010 +0100
@@ -23,6 +23,12 @@
CACHE_REGISTRY = {}
+def _check_cw_unsafe(kwargs):
+ if kwargs.pop('_cw_unsafe', False):
+ warn('[3.7] _cw_unsafe argument is deprecated, now unsafe by '
+ 'default, control it using cw_[read|write]_security.',
+ DeprecationWarning, stacklevel=3)
+
class Cache(dict):
def __init__(self):
super(Cache, self).__init__()
@@ -71,7 +77,8 @@
def get_entity(row, col=0, etype=etype, req=self, rset=rset):
return req.vreg.etype_class(etype)(req, rset, row, col)
rset.get_entity = get_entity
- return self.decorate_rset(rset)
+ rset.req = self
+ return rset
def eid_rset(self, eid, etype=None):
"""return a result set for the given eid without doing actual query
@@ -83,14 +90,17 @@
etype = self.describe(eid)[0]
rset = ResultSet([(eid,)], 'Any X WHERE X eid %(x)s', {'x': eid},
[(etype,)])
- return self.decorate_rset(rset)
+ rset.req = self
+ return rset
def empty_rset(self):
"""return a result set for the given eid without doing actual query
(we have the eid, we can suppose it exists and user has access to the
entity)
"""
- return self.decorate_rset(ResultSet([], 'Any X WHERE X eid -1'))
+ rset = ResultSet([], 'Any X WHERE X eid -1')
+ rset.req = self
+ return rset
def entity_from_eid(self, eid, etype=None):
"""return an entity instance for the given eid. No query is done"""
@@ -111,19 +121,18 @@
# XXX move to CWEntityManager or even better as factory method (unclear
# where yet...)
- def create_entity(self, etype, _cw_unsafe=False, **kwargs):
+ def create_entity(self, etype, **kwargs):
"""add a new entity of the given type
Example (in a shell session):
- c = create_entity('Company', name=u'Logilab')
- create_entity('Person', works_for=c, firstname=u'John', lastname=u'Doe')
+ >>> c = create_entity('Company', name=u'Logilab')
+ >>> create_entity('Person', firstname=u'John', lastname=u'Doe',
+ ... works_for=c)
"""
- if _cw_unsafe:
- execute = self.unsafe_execute
- else:
- execute = self.execute
+ _check_cw_unsafe(kwargs)
+ execute = self.execute
rql = 'INSERT %s X' % etype
relations = []
restrictions = set()
@@ -163,7 +172,7 @@
restr = 'X %s Y' % attr
execute('SET %s WHERE X eid %%(x)s, Y eid IN (%s)' % (
restr, ','.join(str(r.eid) for r in values)),
- {'x': created.eid}, 'x')
+ {'x': created.eid}, 'x', build_descr=False)
return created
def ensure_ro_rql(self, rql):
@@ -301,7 +310,7 @@
userinfo['name'] = "cubicweb"
userinfo['email'] = ""
return userinfo
- user = self.actual_session().user
+ user = self.user
userinfo['login'] = user.login
userinfo['name'] = user.name()
userinfo['email'] = user.get_email()
@@ -402,10 +411,6 @@
"""return the root url of the instance"""
raise NotImplementedError
- def decorate_rset(self, rset):
- """add vreg/req (at least) attributes to the given result set """
- raise NotImplementedError
-
def describe(self, eid):
"""return a tuple (type, sourceuri, extid) for the entity with id <eid>"""
raise NotImplementedError
--- a/rset.py Fri Mar 12 16:21:13 2010 +0100
+++ b/rset.py Fri Mar 12 16:23:21 2010 +0100
@@ -50,7 +50,6 @@
# .limit method
self.limited = None
# set by the cursor which returned this resultset
- self.vreg = None
self.req = None
# actions cache
self._rsetactions = None
@@ -83,7 +82,7 @@
try:
return self._rsetactions[key]
except KeyError:
- actions = self.vreg['actions'].poss_visible_objects(
+ actions = self.req.vreg['actions'].poss_visible_objects(
self.req, rset=self, **kwargs)
self._rsetactions[key] = actions
return actions
@@ -115,14 +114,16 @@
# method anymore (syt)
rset = ResultSet(self.rows+rset.rows, self.rql, self.args,
self.description +rset.description)
- return self.req.decorate_rset(rset)
+ rset.req = self.req
+ return rset
def copy(self, rows=None, descr=None):
if rows is None:
rows = self.rows[:]
descr = self.description[:]
rset = ResultSet(rows, self.rql, self.args, descr)
- return self.req.decorate_rset(rset)
+ rset.req = self.req
+ return rset
def transformed_rset(self, transformcb):
""" the result set according to a given column types
@@ -258,8 +259,8 @@
# try to get page boundaries from the navigation component
# XXX we should probably not have a ref to this component here (eg in
# cubicweb)
- nav = self.vreg['components'].select_or_none('navigation', self.req,
- rset=self)
+ nav = self.req.vreg['components'].select_or_none('navigation', self.req,
+ rset=self)
if nav:
start, stop = nav.page_boundaries()
rql = self._limit_offset_rql(stop - start, start)
@@ -389,7 +390,7 @@
"""
etype = self.description[row][col]
try:
- eschema = self.vreg.schema.eschema(etype)
+ eschema = self.req.vreg.schema.eschema(etype)
if eschema.final:
raise NotAnEntity(etype)
except KeyError:
@@ -433,8 +434,8 @@
return entity
# build entity instance
etype = self.description[row][col]
- entity = self.vreg['etypes'].etype_class(etype)(req, rset=self,
- row=row, col=col)
+ entity = self.req.vreg['etypes'].etype_class(etype)(req, rset=self,
+ row=row, col=col)
entity.set_eid(eid)
# cache entity
req.set_entity_cache(entity)
@@ -470,7 +471,7 @@
else:
rql = 'Any Y WHERE Y %s X, X eid %s'
rrset = ResultSet([], rql % (attr, entity.eid))
- req.decorate_rset(rrset)
+ rrset.req = req
else:
rrset = self._build_entity(row, outerselidx).as_rset()
entity.set_related_cache(attr, role, rrset)
@@ -487,10 +488,10 @@
rqlst = self._rqlst.copy()
# to avoid transport overhead when pyro is used, the schema has been
# unset from the syntax tree
- rqlst.schema = self.vreg.schema
- self.vreg.rqlhelper.annotate(rqlst)
+ rqlst.schema = self.req.vreg.schema
+ self.req.vreg.rqlhelper.annotate(rqlst)
else:
- rqlst = self.vreg.parse(self.req, self.rql, self.args)
+ rqlst = self.req.vreg.parse(self.req, self.rql, self.args)
return rqlst
@cached
@@ -530,7 +531,7 @@
etype = self.description[row][col]
# final type, find a better one to locate the correct subquery
# (ambiguous if possible)
- eschema = self.vreg.schema.eschema
+ eschema = self.req.vreg.schema.eschema
if eschema(etype).final:
for select in rqlst.children:
try:
--- a/schema.py Fri Mar 12 16:21:13 2010 +0100
+++ b/schema.py Fri Mar 12 16:23:21 2010 +0100
@@ -704,7 +704,7 @@
rql = 'Any %s WHERE %s' % (self.mainvars, restriction)
if self.distinct_query:
rql = 'DISTINCT ' + rql
- return session.unsafe_execute(rql, args, ck, build_descr=False)
+ return session.execute(rql, args, ck, build_descr=False)
class RQLConstraint(RepoEnforcedRQLConstraintMixIn, RQLVocabularyConstraint):
@@ -830,13 +830,10 @@
return True
return False
if keyarg is None:
- # on the server side, use unsafe_execute, but this is not available
- # on the client side (session is actually a request)
- execute = getattr(session, 'unsafe_execute', session.execute)
kwargs.setdefault('u', session.user.eid)
cachekey = kwargs.keys()
try:
- rset = execute(rql, kwargs, cachekey, build_descr=True)
+ rset = session.execute(rql, kwargs, cachekey, build_descr=True)
except NotImplementedError:
self.critical('cant check rql expression, unsupported rql %s', rql)
if self.eid is not None:
@@ -1084,10 +1081,10 @@
elif form is not None:
cw = form._cw
if cw is not None:
- if hasattr(cw, 'is_super_session'):
+ if hasattr(cw, 'write_security'): # test it's a session and not a request
# cw is a server session
- hasperm = cw.is_super_session or \
- not cw.vreg.config.is_hook_category_activated('integrity') or \
+ hasperm = not cw.write_security or \
+ not cw.is_hook_category_activated('integrity') or \
cw.user.has_permission(PERM_USE_TEMPLATE_FORMAT)
else:
hasperm = cw.user.has_permission(PERM_USE_TEMPLATE_FORMAT)
--- a/selectors.py Fri Mar 12 16:21:13 2010 +0100
+++ b/selectors.py Fri Mar 12 16:23:21 2010 +0100
@@ -23,17 +23,15 @@
You can log the selectors involved for *calendar* by replacing the line
above by::
- # in Python2.5
from cubicweb.selectors import traced_selection
with traced_selection():
self.view('calendar', myrset)
- # in Python2.4
- from cubicweb import selectors
- selectors.TRACED_OIDS = ('calendar',)
- self.view('calendar', myrset)
- selectors.TRACED_OIDS = ()
+With python 2.5, think to add:
+ from __future__ import with_statement
+
+at the top of your module.
:organization: Logilab
:copyright: 2001-2010 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
--- a/server/__init__.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/__init__.py Fri Mar 12 16:23:21 2010 +0100
@@ -8,6 +8,8 @@
:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
"""
+from __future__ import with_statement
+
__docformat__ = "restructuredtext en"
import sys
@@ -197,6 +199,7 @@
cnx.commit()
cnx.close()
session.close()
+ repo.shutdown()
# restore initial configuration
config.creating = False
config.read_instance_schema = read_instance_schema
@@ -208,33 +211,30 @@
def initialize_schema(config, schema, mhandler, event='create'):
from cubicweb.server.schemaserial import serialize_schema
- # deactivate every hooks but those responsible to set metadata
- # so, NO INTEGRITY CHECKS are done, to have quicker db creation
- oldmode = config.set_hooks_mode(config.DENY_ALL)
- changes = config.enable_hook_category('metadata')
+ from cubicweb.server.session import hooks_control
+ session = mhandler.session
paths = [p for p in config.cubes_path() + [config.apphome]
if exists(join(p, 'migration'))]
- # execute cubicweb's pre<event> script
- mhandler.exec_event_script('pre%s' % event)
- # execute cubes pre<event> script if any
- for path in reversed(paths):
- mhandler.exec_event_script('pre%s' % event, path)
- # enter instance'schema into the database
- mhandler.session.set_pool()
- serialize_schema(mhandler.session, schema)
- # execute cubicweb's post<event> script
- mhandler.exec_event_script('post%s' % event)
- # execute cubes'post<event> script if any
- for path in reversed(paths):
- mhandler.exec_event_script('post%s' % event, path)
- # restore hooks config
- if changes:
- config.disable_hook_category(changes)
- config.set_hooks_mode(oldmode)
+ # deactivate all hooks but those responsible for setting metadata
+ # so, NO INTEGRITY CHECKS are done, to have quicker db creation
+ with hooks_control(session, session.HOOKS_DENY_ALL, 'metadata'):
+ # execute cubicweb's pre<event> script
+ mhandler.exec_event_script('pre%s' % event)
+ # execute cubes pre<event> script if any
+ for path in reversed(paths):
+ mhandler.exec_event_script('pre%s' % event, path)
+ # enter the instance's schema into the database
+ session.set_pool()
+ serialize_schema(session, schema)
+ # execute cubicweb's post<event> script
+ mhandler.exec_event_script('post%s' % event)
+ # execute cubes'post<event> script if any
+ for path in reversed(paths):
+ mhandler.exec_event_script('post%s' % event, path)
-# sqlite'stored procedures have to be registered at connexion opening time
-SQL_CONNECT_HOOKS = {}
+# sqlite's stored procedures have to be registered at connection opening time
+from logilab.database import SQL_CONNECT_HOOKS
# add to this set relations which should have their add security checking done
# *BEFORE* adding the actual relation (done after by default)
--- a/server/checkintegrity.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/checkintegrity.py Fri Mar 12 16:23:21 2010 +0100
@@ -6,6 +6,8 @@
:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
"""
+from __future__ import with_statement
+
__docformat__ = "restructuredtext en"
import sys
@@ -15,6 +17,7 @@
from cubicweb.schema import PURE_VIRTUAL_RTYPES
from cubicweb.server.sqlutils import SQL_PREFIX
+from cubicweb.server.session import security_enabled
def has_eid(sqlcursor, eid, eids):
"""return true if the eid is a valid eid"""
@@ -70,15 +73,9 @@
# to be updated due to the reindexation
repo = session.repo
cursor = session.pool['system']
- if not repo.system_source.indexer.has_fti_table(cursor):
- from indexer import get_indexer
+ if not repo.system_source.dbhelper.has_fti_table(cursor):
print 'no text index table'
- indexer = get_indexer(repo.system_source.dbdriver)
- # XXX indexer.init_fti(cursor) once index 0.7 is out
- indexer.init_extensions(cursor)
- cursor.execute(indexer.sql_init_fti())
- repo.config.disabled_hooks_categories.add('metadata')
- repo.config.disabled_hooks_categories.add('integrity')
+ dbhelper.init_fti(cursor)
repo.system_source.do_fti = True # ensure full-text indexation is activated
etypes = set()
for eschema in schema.entities():
@@ -94,9 +91,6 @@
if withpb:
pb = ProgressBar(len(etypes) + 1)
# first monkey patch Entity.check to disable validation
- from cubicweb.entity import Entity
- _check = Entity.check
- Entity.check = lambda self, creation=False: True
# clear fti table first
session.system_sql('DELETE FROM %s' % session.repo.system_source.dbhelper.fti_table)
if withpb:
@@ -106,14 +100,9 @@
source = repo.system_source
for eschema in etypes:
for entity in session.execute('Any X WHERE X is %s' % eschema).entities():
- source.fti_unindex_entity(session, entity.eid)
source.fti_index_entity(session, entity)
if withpb:
pb.update()
- # restore Entity.check
- Entity.check = _check
- repo.config.disabled_hooks_categories.remove('metadata')
- repo.config.disabled_hooks_categories.remove('integrity')
def check_schema(schema, session, eids, fix=1):
@@ -291,9 +280,10 @@
# yo, launch checks
if checks:
eids_cache = {}
- for check in checks:
- check_func = globals()['check_%s' % check]
- check_func(repo.schema, session, eids_cache, fix=fix)
+ with security_enabled(session, read=False): # ensure no read security
+ for check in checks:
+ check_func = globals()['check_%s' % check]
+ check_func(repo.schema, session, eids_cache, fix=fix)
if fix:
cnx.commit()
else:
--- a/server/hook.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/hook.py Fri Mar 12 16:23:21 2010 +0100
@@ -33,6 +33,8 @@
:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
"""
+from __future__ import with_statement
+
__docformat__ = "restructuredtext en"
from warnings import warn
@@ -47,7 +49,7 @@
from cubicweb.selectors import (objectify_selector, lltrace, ExpectedValueSelector,
implements)
from cubicweb.appobject import AppObject
-
+from cubicweb.server.session import security_enabled
ENTITIES_HOOKS = set(('before_add_entity', 'after_add_entity',
'before_update_entity', 'after_update_entity',
@@ -76,13 +78,20 @@
event, obj.__module__, obj.__name__))
super(HooksRegistry, self).register(obj, **kwargs)
- def call_hooks(self, event, req=None, **kwargs):
+ def call_hooks(self, event, session=None, **kwargs):
kwargs['event'] = event
- for hook in sorted(self.possible_objects(req, **kwargs), key=lambda x: x.order):
- if hook.enabled:
+ if session is None:
+ for hook in sorted(self.possible_objects(session, **kwargs),
+ key=lambda x: x.order):
hook()
- else:
- warn('[3.6] %s: enabled is deprecated' % hook.__class__)
+ else:
+ # by default, hooks are executed with security turned off
+ with security_enabled(session, read=False):
+ hooks = sorted(self.possible_objects(session, **kwargs),
+ key=lambda x: x.order)
+ with security_enabled(session, write=False):
+ for hook in hooks:
+ hook()
VRegistry.REGISTRY_FACTORY['hooks'] = HooksRegistry
@@ -104,6 +113,14 @@
@objectify_selector
@lltrace
+def _bw_is_enabled(cls, req, **kwargs):
+ if cls.enabled:
+ return 1
+ warn('[3.6] %s: enabled is deprecated' % cls)
+ return 0
+
+@objectify_selector
+@lltrace
def match_event(cls, req, **kwargs):
if kwargs.get('event') in cls.events:
return 1
@@ -113,19 +130,15 @@
@lltrace
def enabled_category(cls, req, **kwargs):
if req is None:
- # server startup / shutdown event
- config = kwargs['repo'].config
- else:
- config = req.vreg.config
- return config.is_hook_activated(cls)
+ return True # XXX how to deactivate server startup / shutdown event
+ return req.is_hook_activated(cls)
@objectify_selector
@lltrace
-def regular_session(cls, req, **kwargs):
- if req is None or req.is_super_session:
- return 0
- return 1
-
+def from_dbapi_query(cls, req, **kwargs):
+ if req.running_dbapi_query:
+ return 1
+ return 0
class rechain(object):
def __init__(self, *iterators):
@@ -178,7 +191,7 @@
class Hook(AppObject):
__registry__ = 'hooks'
- __select__ = match_event() & enabled_category()
+ __select__ = match_event() & enabled_category() & _bw_is_enabled()
# set this in derivated classes
events = None
category = None
@@ -263,7 +276,7 @@
else:
assert self.rtype in self.object_relations
meid, seid = self.eidto, self.eidfrom
- self._cw.unsafe_execute(
+ self._cw.execute(
'SET E %s P WHERE X %s P, X eid %%(x)s, E eid %%(e)s, NOT E %s P'\
% (self.main_rtype, self.main_rtype, self.main_rtype),
{'x': meid, 'e': seid}, ('x', 'e'))
@@ -281,7 +294,7 @@
def __call__(self):
eschema = self._cw.vreg.schema.eschema(self._cw.describe(self.eidfrom)[0])
- execute = self._cw.unsafe_execute
+ execute = self._cw.execute
for rel in self.subject_relations:
if rel in eschema.subjrels:
execute('SET R %s P WHERE X eid %%(x)s, P eid %%(p)s, '
@@ -306,7 +319,7 @@
def __call__(self):
eschema = self._cw.vreg.schema.eschema(self._cw.describe(self.eidfrom)[0])
- execute = self._cw.unsafe_execute
+ execute = self._cw.execute
for rel in self.subject_relations:
if rel in eschema.subjrels:
execute('DELETE R %s P WHERE X eid %%(x)s, P eid %%(p)s, '
@@ -510,6 +523,6 @@
class RQLPrecommitOperation(Operation):
def precommit_event(self):
- execute = self.session.unsafe_execute
+ execute = self.session.execute
for rql in self.rqls:
execute(*rql)
--- a/server/migractions.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/migractions.py Fri Mar 12 16:23:21 2010 +0100
@@ -25,10 +25,12 @@
import os.path as osp
from datetime import datetime
from glob import glob
+from copy import copy
from warnings import warn
from logilab.common.deprecation import deprecated
from logilab.common.decorators import cached, clear_cache
+from logilab.common.testlib import mock_object
from yams.constraints import SizeConstraint
from yams.schema2sql import eschema2sql, rschema2sql
@@ -240,17 +242,26 @@
@property
def session(self):
if self.config is not None:
- return self.repo._get_session(self.cnx.sessionid)
+ session = self.repo._get_session(self.cnx.sessionid)
+ if session.pool is None:
+ session.set_read_security(False)
+ session.set_write_security(False)
+ session.set_pool()
+ return session
# no access to session on remote instance
return None
def commit(self):
if hasattr(self, '_cnx'):
self._cnx.commit()
+ if self.session:
+ self.session.set_pool()
def rollback(self):
if hasattr(self, '_cnx'):
self._cnx.rollback()
+ if self.session:
+ self.session.set_pool()
def rqlexecall(self, rqliter, cachekey=None, ask_confirm=True):
for rql, kwargs in rqliter:
@@ -282,6 +293,11 @@
"""cached group mapping"""
return ss.group_mapping(self._cw)
+ @cached
+ def cstrtype_mapping(self):
+ """cached constraint types mapping"""
+ return ss.cstrtype_mapping(self._cw)
+
def exec_event_script(self, event, cubepath=None, funcname=None,
*args, **kwargs):
if cubepath:
@@ -305,7 +321,6 @@
self.cmd_reactivate_verification_hooks()
def install_custom_sql_scripts(self, directory, driver):
- self.session.set_pool() # ensure pool is set
for fpath in glob(osp.join(directory, '*.sql.%s' % driver)):
newname = osp.basename(fpath).replace('.sql.%s' % driver,
'.%s.sql' % driver)
@@ -399,14 +414,17 @@
return
self._synchronized.add(rtype)
rschema = self.fs_schema.rschema(rtype)
+ reporschema = self.repo.schema.rschema(rtype)
if syncprops:
- self.rqlexecall(ss.updaterschema2rql(rschema),
+ assert reporschema.eid, reporschema
+ self.rqlexecall(ss.updaterschema2rql(rschema, reporschema.eid),
ask_confirm=self.verbosity>=2)
if syncrdefs:
- reporschema = self.repo.schema.rschema(rtype)
for subj, obj in rschema.rdefs:
if (subj, obj) not in reporschema.rdefs:
continue
+ if rschema in VIRTUAL_RTYPES:
+ continue
self._synchronize_rdef_schema(subj, rschema, obj,
syncprops=syncprops,
syncperms=syncperms)
@@ -439,9 +457,11 @@
'Y is CWEType, Y name %(y)s',
{'x': str(repoeschema), 'y': str(espschema)},
ask_confirm=False)
- self.rqlexecall(ss.updateeschema2rql(eschema),
+ self.rqlexecall(ss.updateeschema2rql(eschema, repoeschema.eid),
ask_confirm=self.verbosity >= 2)
for rschema, targettypes, role in eschema.relation_definitions(True):
+ if rschema in VIRTUAL_RTYPES:
+ continue
if role == 'subject':
if not rschema in repoeschema.subject_relations():
continue
@@ -479,11 +499,11 @@
confirm = self.verbosity >= 2
if syncprops:
# properties
- self.rqlexecall(ss.updaterdef2rql(rschema, subjtype, objtype),
+ rdef = rschema.rdef(subjtype, objtype)
+ repordef = reporschema.rdef(subjtype, objtype)
+ self.rqlexecall(ss.updaterdef2rql(rdef, repordef.eid),
ask_confirm=confirm)
# constraints
- rdef = rschema.rdef(subjtype, objtype)
- repordef = reporschema.rdef(subjtype, objtype)
newconstraints = list(rdef.constraints)
# 1. remove old constraints and update constraints of the same type
# NOTE: don't use rschema.constraint_by_type because it may be
@@ -509,10 +529,10 @@
self.rqlexec('SET X value %(v)s WHERE X eid %(x)s',
values, 'x', ask_confirm=confirm)
# 2. add new constraints
- for newcstr in newconstraints:
- self.rqlexecall(ss.constraint2rql(rschema, subjtype, objtype,
- newcstr),
- ask_confirm=confirm)
+ cstrtype_map = self.cstrtype_mapping()
+ self.rqlexecall(ss.constraints2rql(cstrtype_map, newconstraints,
+ repordef.eid),
+ ask_confirm=confirm)
if syncperms and not rschema in VIRTUAL_RTYPES:
self._synchronize_permissions(rdef, repordef.eid)
@@ -673,18 +693,20 @@
targeted type is known
"""
instschema = self.repo.schema
- if etype in instschema:
- # XXX (syt) plz explain: if we're adding an entity type, it should
- # not be there...
- eschema = instschema[etype]
- if eschema.final:
- instschema.del_entity_type(etype)
- else:
- eschema = self.fs_schema.eschema(etype)
+ assert not etype in instschema
+ # # XXX (syt) plz explain: if we're adding an entity type, it should
+ # # not be there...
+ # eschema = instschema[etype]
+ # if eschema.final:
+ # instschema.del_entity_type(etype)
+ # else:
+ eschema = self.fs_schema.eschema(etype)
confirm = self.verbosity >= 2
groupmap = self.group_mapping()
+ cstrtypemap = self.cstrtype_mapping()
# register the entity into CWEType
- self.rqlexecall(ss.eschema2rql(eschema, groupmap), ask_confirm=confirm)
+ execute = self._cw.execute
+ ss.execschemarql(execute, eschema, ss.eschema2rql(eschema, groupmap))
# add specializes relation if needed
self.rqlexecall(ss.eschemaspecialize2rql(eschema), ask_confirm=confirm)
# register entity's attributes
@@ -697,9 +719,8 @@
# actually in the schema
self.cmd_add_relation_type(rschema.type, False, commit=True)
# register relation definition
- self.rqlexecall(ss.rdef2rql(rschema, etype, attrschema.type,
- groupmap=groupmap),
- ask_confirm=confirm)
+ rdef = self._get_rdef(rschema, eschema, eschema.destination(rschema))
+ ss.execschemarql(execute, rdef, ss.rdef2rql(rdef, cstrtypemap, groupmap),)
# take care to newly introduced base class
# XXX some part of this should probably be under the "if auto" block
for spschema in eschema.specialized_by(recursive=False):
@@ -759,10 +780,12 @@
# remember this two avoid adding twice non symmetric relation
# such as "Emailthread forked_from Emailthread"
added.append((etype, rschema.type, targettype))
- self.rqlexecall(ss.rdef2rql(rschema, etype, targettype,
- groupmap=groupmap),
- ask_confirm=confirm)
+ rdef = self._get_rdef(rschema, eschema, targetschema)
+ ss.execschemarql(execute, rdef,
+ ss.rdef2rql(rdef, cstrtypemap, groupmap))
for rschema in eschema.object_relations():
+ if rschema.type in META_RTYPES:
+ continue
rtypeadded = rschema.type in instschema or rschema.type in added
for targetschema in rschema.subjects(etype):
# ignore relations where the targeted type is not in the
@@ -780,9 +803,9 @@
elif (targettype, rschema.type, etype) in added:
continue
# register relation definition
- self.rqlexecall(ss.rdef2rql(rschema, targettype, etype,
- groupmap=groupmap),
- ask_confirm=confirm)
+ rdef = self._get_rdef(rschema, targetschema, eschema)
+ ss.execschemarql(execute, rdef,
+ ss.rdef2rql(rdef, cstrtypemap, groupmap))
if commit:
self.commit()
@@ -821,15 +844,23 @@
committing depends on the `commit` argument value).
"""
+ reposchema = self.repo.schema
rschema = self.fs_schema.rschema(rtype)
+ execute = self._cw.execute
# register the relation into CWRType and insert necessary relation
# definitions
- self.rqlexecall(ss.rschema2rql(rschema, addrdef=False),
- ask_confirm=self.verbosity>=2)
+ ss.execschemarql(execute, rschema, ss.rschema2rql(rschema, addrdef=False))
if addrdef:
self.commit()
- self.rqlexecall(ss.rdef2rql(rschema, groupmap=self.group_mapping()),
- ask_confirm=self.verbosity>=2)
+ gmap = self.group_mapping()
+ cmap = self.cstrtype_mapping()
+ for rdef in rschema.rdefs.itervalues():
+ if not (reposchema.has_entity(rdef.subject)
+ and reposchema.has_entity(rdef.object)):
+ continue
+ self._set_rdef_eid(rdef)
+ ss.execschemarql(execute, rdef,
+ ss.rdef2rql(rdef, cmap, gmap))
if rtype in META_RTYPES:
# if the relation is in META_RTYPES, ensure we're adding it for
# all entity types *in the persistent schema*, not only those in
@@ -838,15 +869,14 @@
if not etype in self.fs_schema:
# get sample object type and rproperties
objtypes = rschema.objects()
- assert len(objtypes) == 1
+ assert len(objtypes) == 1, objtypes
objtype = objtypes[0]
- props = rschema.rproperties(
- rschema.subjects(objtype)[0], objtype)
- assert props
- self.rqlexecall(ss.rdef2rql(rschema, etype, objtype, props,
- groupmap=self.group_mapping()),
- ask_confirm=self.verbosity>=2)
-
+ rdef = copy(rschema.rdef(rschema.subjects(objtype)[0], objtype))
+ rdef.subject = etype
+ rdef.rtype = self.repo.schema.rschema(rschema)
+ rdef.object = self.repo.schema.rschema(objtype)
+ ss.execschemarql(execute, rdef,
+ ss.rdef2rql(rdef, cmap, gmap))
if commit:
self.commit()
@@ -876,12 +906,25 @@
rschema = self.fs_schema.rschema(rtype)
if not rtype in self.repo.schema:
self.cmd_add_relation_type(rtype, addrdef=False, commit=True)
- self.rqlexecall(ss.rdef2rql(rschema, subjtype, objtype,
- groupmap=self.group_mapping()),
- ask_confirm=self.verbosity>=2)
+ execute = self._cw.execute
+ rdef = self._get_rdef(rschema, subjtype, objtype)
+ ss.execschemarql(execute, rdef,
+ ss.rdef2rql(rdef, self.cstrtype_mapping(),
+ self.group_mapping()))
if commit:
self.commit()
+ def _get_rdef(self, rschema, subjtype, objtype):
+ return self._set_rdef_eid(rschema.rdefs[(subjtype, objtype)])
+
+ def _set_rdef_eid(self, rdef):
+ for attr in ('rtype', 'subject', 'object'):
+ schemaobj = getattr(rdef, attr)
+ if getattr(schemaobj, 'eid', None) is None:
+ schemaobj.eid = self.repo.schema[schemaobj].eid
+ assert schemaobj.eid is not None
+ return rdef
+
def cmd_drop_relation_definition(self, subjtype, rtype, objtype, commit=True):
"""unregister an existing relation definition"""
rschema = self.repo.schema.rschema(rtype)
@@ -1139,7 +1182,6 @@
level actions
"""
if not ask_confirm or self.confirm('Execute sql: %s ?' % sql):
- self.session.set_pool() # ensure pool is set
try:
cu = self.session.system_sql(sql, args)
except:
@@ -1158,10 +1200,7 @@
if not isinstance(rql, (tuple, list)):
rql = ( (rql, kwargs), )
res = None
- try:
- execute = self._cw.unsafe_execute
- except AttributeError:
- execute = self._cw.execute
+ execute = self._cw.execute
for rql, kwargs in rql:
if kwargs:
msg = '%s (%s)' % (rql, kwargs)
@@ -1178,12 +1217,6 @@
def rqliter(self, rql, kwargs=None, ask_confirm=True):
return ForRqlIterator(self, rql, None, ask_confirm)
- def cmd_deactivate_verification_hooks(self):
- self.config.disabled_hooks_categories.add('integrity')
-
- def cmd_reactivate_verification_hooks(self):
- self.config.disabled_hooks_categories.remove('integrity')
-
# broken db commands ######################################################
def cmd_change_attribute_type(self, etype, attr, newtype, commit=True):
@@ -1234,6 +1267,14 @@
if commit:
self.commit()
+ @deprecated("[3.7] use session.disable_hook_categories('integrity')")
+ def cmd_deactivate_verification_hooks(self):
+ self.session.disable_hook_categories('integrity')
+
+ @deprecated("[3.7] use session.enable_hook_categories('integrity')")
+ def cmd_reactivate_verification_hooks(self):
+ self.session.enable_hook_categories('integrity')
+
class ForRqlIterator:
"""specific rql iterator to make the loop skipable"""
--- a/server/querier.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/querier.py Fri Mar 12 16:23:21 2010 +0100
@@ -6,6 +6,8 @@
:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
"""
+from __future__ import with_statement
+
__docformat__ = "restructuredtext en"
from itertools import repeat
@@ -22,9 +24,8 @@
from cubicweb.server.utils import cleanup_solutions
from cubicweb.server.rqlannotation import SQLGenAnnotator, set_qdata
-from cubicweb.server.ssplanner import add_types_restriction
-
-READ_ONLY_RTYPES = set(('eid', 'has_text', 'is', 'is_instance_of', 'identity'))
+from cubicweb.server.ssplanner import READ_ONLY_RTYPES, add_types_restriction
+from cubicweb.server.session import security_enabled
def empty_rset(rql, args, rqlst=None):
"""build an empty result set object"""
@@ -201,8 +202,11 @@
return rqlst to actually execute
"""
noinvariant = set()
- if security and not self.session.is_super_session:
- self._insert_security(union, noinvariant)
+ if security and self.session.read_security:
+ # ensure security is turned off when security is inserted,
+ # else we may loop forever...
+ with security_enabled(self.session, read=False):
+ self._insert_security(union, noinvariant)
self.rqlhelper.simplify(union)
self.sqlannotate(union)
set_qdata(self.schema.rschema, union, noinvariant)
@@ -300,7 +304,6 @@
note: rqlst should not have been simplified at this point
"""
- assert not self.session.is_super_session
user = self.session.user
schema = self.schema
msgs = []
@@ -377,39 +380,6 @@
self._r_obj_index = {}
self._expanded_r_defs = {}
- def relation_definitions(self, rqlst, to_build):
- """add constant values to entity def, mark variables to be selected
- """
- to_select = {}
- for relation in rqlst.main_relations:
- lhs, rhs = relation.get_variable_parts()
- rtype = relation.r_type
- if rtype in READ_ONLY_RTYPES:
- raise QueryError("can't assign to %s" % rtype)
- try:
- edef = to_build[str(lhs)]
- except KeyError:
- # lhs var is not to build, should be selected and added as an
- # object relation
- edef = to_build[str(rhs)]
- to_select.setdefault(edef, []).append((rtype, lhs, 1))
- else:
- if isinstance(rhs, Constant) and not rhs.uid:
- # add constant values to entity def
- value = rhs.eval(self.args)
- eschema = edef.e_schema
- attrtype = eschema.subjrels[rtype].objects(eschema)[0]
- if attrtype == 'Password' and isinstance(value, unicode):
- value = value.encode('UTF8')
- edef[rtype] = value
- elif to_build.has_key(str(rhs)):
- # create a relation between two newly created variables
- self.add_relation_def((edef, rtype, to_build[rhs.name]))
- else:
- to_select.setdefault(edef, []).append( (rtype, rhs, 0) )
- return to_select
-
-
def add_entity_def(self, edef):
"""add an entity definition to build"""
edef.querier_pending_relations = {}
@@ -629,20 +599,20 @@
try:
self.solutions(session, rqlst, args)
except UnknownEid:
- # we want queries such as "Any X WHERE X eid 9999"
- # return an empty result instead of raising UnknownEid
+ # we want queries such as "Any X WHERE X eid 9999" return an
+ # empty result instead of raising UnknownEid
return empty_rset(rql, args, rqlst)
self._rql_cache[cachekey] = rqlst
orig_rqlst = rqlst
if not rqlst.TYPE == 'select':
- if not session.is_super_session:
+ if session.read_security:
check_no_password_selected(rqlst)
- # write query, ensure session's mode is 'write' so connections
- # won't be released until commit/rollback
+ # write query, ensure session's mode is 'write' so connections won't
+ # be released until commit/rollback
session.mode = 'write'
cachekey = None
else:
- if not session.is_super_session:
+ if session.read_security:
for select in rqlst.children:
check_no_password_selected(select)
# on select query, always copy the cached rqlst so we don't have to
--- a/server/repository.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/repository.py Fri Mar 12 16:23:21 2010 +0100
@@ -15,6 +15,8 @@
:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
"""
+from __future__ import with_statement
+
__docformat__ = "restructuredtext en"
import sys
@@ -36,7 +38,7 @@
typed_eid)
from cubicweb import cwvreg, schema, server
from cubicweb.server import utils, hook, pool, querier, sources
-from cubicweb.server.session import Session, InternalSession
+from cubicweb.server.session import Session, InternalSession, security_enabled
class CleanupEidTypeCacheOp(hook.SingleLastOperation):
@@ -80,12 +82,12 @@
this kind of behaviour has to be done in the repository so we don't have
hooks order hazardness
"""
- # XXX now that rql in migraction default to unsafe_execute we don't want to
- # skip that for super session (though we can still skip it for internal
- # sessions). Also we should imo rely on the orm to first fetch existing
- # entity if any then delete it.
+ # skip that for internal session or if integrity explicitly disabled
+ #
+ # XXX we should imo rely on the orm to first fetch existing entity if any
+ # then delete it.
if session.is_internal_session \
- or not session.vreg.config.is_hook_category_activated('integrity'):
+ or not session.is_hook_category_activated('integrity'):
return
card = session.schema_rproperty(rtype, eidfrom, eidto, 'cardinality')
# one may be tented to check for neweids but this may cause more than one
@@ -100,23 +102,15 @@
rschema = session.repo.schema.rschema(rtype)
if card[0] in '1?':
if not rschema.inlined: # inlined relations will be implicitly deleted
- rset = session.unsafe_execute('Any X,Y WHERE X %s Y, X eid %%(x)s, '
- 'NOT Y eid %%(y)s' % rtype,
- {'x': eidfrom, 'y': eidto}, 'x')
- if rset:
- safe_delete_relation(session, rschema, *rset[0])
+ with security_enabled(session, read=False):
+ session.execute('DELETE X %s Y WHERE X eid %%(x)s, '
+ 'NOT Y eid %%(y)s' % rtype,
+ {'x': eidfrom, 'y': eidto}, 'x')
if card[1] in '1?':
- rset = session.unsafe_execute('Any X,Y WHERE X %s Y, Y eid %%(y)s, '
- 'NOT X eid %%(x)s' % rtype,
- {'x': eidfrom, 'y': eidto}, 'y')
- if rset:
- safe_delete_relation(session, rschema, *rset[0])
-
-
-def safe_delete_relation(session, rschema, subject, object):
- if not rschema.has_perm(session, 'delete', fromeid=subject, toeid=object):
- raise Unauthorized()
- session.repo.glob_delete_relation(session, subject, rschema.type, object)
+ with security_enabled(session, read=False):
+ session.execute('DELETE X %s Y WHERE Y eid %%(y)s, '
+ 'NOT X eid %%(x)s' % rtype,
+ {'x': eidfrom, 'y': eidto}, 'y')
class Repository(object):
@@ -327,6 +321,7 @@
"""called on server stop event to properly close opened sessions and
connections
"""
+ assert not self._shutting_down, 'already shutting down'
self._shutting_down = True
if isinstance(self._looping_tasks, tuple): # if tasks have been started
for looptask in self._looping_tasks:
@@ -922,21 +917,22 @@
rql = []
eschema = self.schema.eschema(etype)
pendingrtypes = session.transaction_data.get('pendingrtypes', ())
- for rschema, targetschemas, x in eschema.relation_definitions():
- rtype = rschema.type
- if rtype in schema.VIRTUAL_RTYPES or rtype in pendingrtypes:
- continue
- var = '%s%s' % (rtype.upper(), x.upper())
- if x == 'subject':
- # don't skip inlined relation so they are regularly
- # deleted and so hooks are correctly called
- selection = 'X %s %s' % (rtype, var)
- else:
- selection = '%s %s X' % (var, rtype)
- rql = 'DELETE %s WHERE X eid %%(x)s' % selection
- # unsafe_execute since we suppose that if user can delete the entity,
- # he can delete all its relations without security checking
- session.unsafe_execute(rql, {'x': eid}, 'x', build_descr=False)
+ with security_enabled(session, read=False, write=False):
+ for rschema, targetschemas, x in eschema.relation_definitions():
+ rtype = rschema.type
+ if rtype in schema.VIRTUAL_RTYPES or rtype in pendingrtypes:
+ continue
+ var = '%s%s' % (rtype.upper(), x.upper())
+ if x == 'subject':
+ # don't skip inlined relation so they are regularly
+ # deleted and so hooks are correctly called
+ selection = 'X %s %s' % (rtype, var)
+ else:
+ selection = '%s %s X' % (var, rtype)
+ rql = 'DELETE %s WHERE X eid %%(x)s' % selection
+ # if user can delete the entity, he can delete all its relations
+ # without security checking
+ session.execute(rql, {'x': eid}, 'x', build_descr=False)
def locate_relation_source(self, session, subject, rtype, object):
subjsource = self.source_from_eid(subject, session)
@@ -992,7 +988,8 @@
if not rschema.final: # inlined relation
relations.append((attr, entity[attr]))
entity.set_defaults()
- entity.check(creation=True)
+ if session.is_hook_category_activated('integrity'):
+ entity.check(creation=True)
source.add_entity(session, entity)
if source.uri != 'system':
extid = source.get_extid(entity)
@@ -1039,7 +1036,8 @@
print 'UPDATE entity', etype, entity.eid, \
dict(entity), edited_attributes
entity.edited_attributes = edited_attributes
- entity.check()
+ if session.is_hook_category_activated('integrity'):
+ entity.check()
eschema = entity.e_schema
session.set_entity_cache(entity)
only_inline_rels, need_fti_update = True, False
--- a/server/schemaserial.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/schemaserial.py Fri Mar 12 16:23:21 2010 +0100
@@ -50,6 +50,10 @@
continue
return res
+def cstrtype_mapping(cursor):
+ """cached constraint types mapping"""
+ return dict(cursor.execute('Any T, X WHERE X is CWConstraintType, X name T'))
+
# schema / perms deserialization ##############################################
def deserialize_schema(schema, session):
@@ -214,7 +218,7 @@
if not quiet:
_title = '-> storing the schema in the database '
print _title,
- execute = cursor.unsafe_execute
+ execute = cursor.execute
eschemas = schema.entities()
if not quiet:
pb_size = (len(eschemas + schema.relations())
@@ -229,14 +233,15 @@
eschemas.remove(schema.eschema('CWEType'))
eschemas.insert(0, schema.eschema('CWEType'))
for eschema in eschemas:
- for rql, kwargs in eschema2rql(eschema, groupmap):
- execute(rql, kwargs, build_descr=False)
+ execschemarql(execute, eschema, eschema2rql(eschema, groupmap))
if pb is not None:
pb.update()
# serialize constraint types
+ cstrtypemap = {}
rql = 'INSERT CWConstraintType X: X name %(ct)s'
for cstrtype in CONSTRAINTS:
- execute(rql, {'ct': unicode(cstrtype)}, build_descr=False)
+ cstrtypemap[cstrtype] = execute(rql, {'ct': unicode(cstrtype)},
+ build_descr=False)[0][0]
if pb is not None:
pb.update()
# serialize relations
@@ -246,8 +251,15 @@
if pb is not None:
pb.update()
continue
- for rql, kwargs in rschema2rql(rschema, groupmap=groupmap):
- execute(rql, kwargs, build_descr=False)
+ execschemarql(execute, rschema, rschema2rql(rschema, addrdef=False))
+ if rschema.symmetric:
+ rdefs = [rdef for k, rdef in rschema.rdefs.iteritems()
+ if (rdef.subject, rdef.object) == k]
+ else:
+ rdefs = rschema.rdefs.itervalues()
+ for rdef in rdefs:
+ execschemarql(execute, rdef,
+ rdef2rql(rdef, cstrtypemap, groupmap))
if pb is not None:
pb.update()
for rql, kwargs in specialize2rql(schema):
@@ -258,6 +270,55 @@
print
+# high level serialization functions
+
+def execschemarql(execute, schema, rqls):
+ for rql, kwargs in rqls:
+ kwargs['x'] = schema.eid
+ rset = execute(rql, kwargs, build_descr=False)
+ if schema.eid is None:
+ schema.eid = rset[0][0]
+ else:
+ assert rset
+
+def erschema2rql(erschema, groupmap):
+ if isinstance(erschema, schemamod.EntitySchema):
+ return eschema2rql(erschema, groupmap=groupmap)
+ return rschema2rql(erschema, groupmap=groupmap)
+
+def specialize2rql(schema):
+ for eschema in schema.entities():
+ if eschema.final:
+ continue
+ for rql, kwargs in eschemaspecialize2rql(eschema):
+ yield rql, kwargs
+
+# etype serialization
+
+def eschema2rql(eschema, groupmap=None):
+ """return a list of rql insert statements to enter an entity schema
+ in the database as a CWEType entity
+ """
+ relations, values = eschema_relations_values(eschema)
+ # NOTE: 'specializes' relation can't be inserted here since there's no
+ # way to make sure the parent type is inserted before the child type
+ yield 'INSERT CWEType X: %s' % ','.join(relations) , values
+ # entity permissions
+ if groupmap is not None:
+ for rql, args in _erperms2rql(eschema, groupmap):
+ yield rql, args
+
+def eschema_relations_values(eschema):
+ values = _ervalues(eschema)
+ relations = ['X %s %%(%s)s' % (attr, attr) for attr in sorted(values)]
+ return relations, values
+
+def eschemaspecialize2rql(eschema):
+ specialized_type = eschema.specializes()
+ if specialized_type:
+ values = {'x': eschema.eid, 'et': specialized_type.eid}
+ yield 'SET X specializes ET WHERE X eid %(x)s, ET eid %(et)s', values
+
def _ervalues(erschema):
try:
type_ = unicode(erschema.type)
@@ -273,10 +334,23 @@
'description': desc,
}
-def eschema_relations_values(eschema):
- values = _ervalues(eschema)
- relations = ['X %s %%(%s)s' % (attr, attr) for attr in sorted(values)]
- return relations, values
+# rtype serialization
+
+def rschema2rql(rschema, cstrtypemap=None, addrdef=True, groupmap=None):
+ """return a list of rql insert statements to enter a relation schema
+ in the database as a CWRType entity
+ """
+ if rschema.type == 'has_text':
+ return
+ relations, values = rschema_relations_values(rschema)
+ yield 'INSERT CWRType X: %s' % ','.join(relations), values
+ if addrdef:
+ assert cstrtypemap
+ # sort for testing purpose
+ for rdef in sorted(rschema.rdefs.itervalues(),
+ key=lambda x: (x.subject, x.object)):
+ for rql, values in rdef2rql(rdef, cstrtypemap, groupmap):
+ yield rql, values
def rschema_relations_values(rschema):
values = _ervalues(rschema)
@@ -290,169 +364,58 @@
relations = ['X %s %%(%s)s' % (attr, attr) for attr in sorted(values)]
return relations, values
-def _rdef_values(objtype, props):
- amap = {'order': 'ordernum'}
+# rdef serialization
+
+def rdef2rql(rdef, cstrtypemap, groupmap=None):
+ # don't serialize infered relations
+ if rdef.infered:
+ return
+ relations, values = _rdef_values(rdef)
+ relations.append('X relation_type ER,X from_entity SE,X to_entity OE')
+ values.update({'se': rdef.subject.eid, 'rt': rdef.rtype.eid, 'oe': rdef.object.eid})
+ if rdef.final:
+ etype = 'CWAttribute'
+ else:
+ etype = 'CWRelation'
+ yield 'INSERT %s X: %s WHERE SE eid %%(se)s,ER eid %%(rt)s,OE eid %%(oe)s' % (
+ etype, ','.join(relations), ), values
+ for rql, values in constraints2rql(cstrtypemap, rdef.constraints):
+ yield rql, values
+ # no groupmap means "no security insertion"
+ if groupmap:
+ for rql, args in _erperms2rql(rdef, groupmap):
+ yield rql, args
+
+def _rdef_values(rdef):
+ amap = {'order': 'ordernum', 'default': 'defaultval'}
values = {}
- for prop, default in schemamod.RelationDefinitionSchema.rproperty_defs(objtype).iteritems():
+ for prop, default in rdef.rproperty_defs(rdef.object).iteritems():
if prop in ('eid', 'constraints', 'uid', 'infered', 'permissions'):
continue
- value = props.get(prop, default)
+ value = getattr(rdef, prop)
+ # XXX type cast really necessary?
if prop in ('indexed', 'fulltextindexed', 'internationalizable'):
value = bool(value)
elif prop == 'ordernum':
value = int(value)
elif isinstance(value, str):
value = unicode(value)
+ if value is not None and prop == 'default':
+ if value is False:
+ value = u''
+ if not isinstance(value, unicode):
+ value = unicode(value)
values[amap.get(prop, prop)] = value
- return values
-
-def nfrdef_relations_values(objtype, props):
- values = _rdef_values(objtype, props)
- relations = ['X %s %%(%s)s' % (attr, attr) for attr in sorted(values)]
- return relations, values
-
-def frdef_relations_values(objtype, props):
- values = _rdef_values(objtype, props)
- default = values['default']
- del values['default']
- if default is not None:
- if default is False:
- default = u''
- elif not isinstance(default, unicode):
- default = unicode(default)
- values['defaultval'] = default
relations = ['X %s %%(%s)s' % (attr, attr) for attr in sorted(values)]
return relations, values
-
-def __rdef2rql(genmap, rschema, subjtype=None, objtype=None, props=None,
- groupmap=None):
- if subjtype is None:
- assert objtype is None
- assert props is None
- targets = sorted(rschema.rdefs)
- else:
- assert not objtype is None
- targets = [(subjtype, objtype)]
- # relation schema
- if rschema.final:
- etype = 'CWAttribute'
- else:
- etype = 'CWRelation'
- for subjtype, objtype in targets:
- if props is None:
- _props = rschema.rdef(subjtype, objtype)
- else:
- _props = props
- # don't serialize infered relations
- if _props.get('infered'):
- continue
- gen = genmap[rschema.final]
- for rql, values in gen(rschema, subjtype, objtype, _props):
- yield rql, values
- # no groupmap means "no security insertion"
- if groupmap:
- for rql, args in _erperms2rql(_props, groupmap):
- args['st'] = str(subjtype)
- args['rt'] = str(rschema)
- args['ot'] = str(objtype)
- yield rql + 'X is %s, X from_entity ST, X to_entity OT, '\
- 'X relation_type RT, RT name %%(rt)s, ST name %%(st)s, '\
- 'OT name %%(ot)s' % etype, args
-
-
-def schema2rql(schema, skip=None, allow=None):
- """return a list of rql insert statements to enter the schema in the
- database as CWRType and CWEType entities
- """
- assert not (skip is not None and allow is not None), \
- 'can\'t use both skip and allow'
- all = schema.entities() + schema.relations()
- if skip is not None:
- return chain(*[erschema2rql(schema[t]) for t in all if not t in skip])
- elif allow is not None:
- return chain(*[erschema2rql(schema[t]) for t in all if t in allow])
- return chain(*[erschema2rql(schema[t]) for t in all])
-
-def erschema2rql(erschema, groupmap):
- if isinstance(erschema, schemamod.EntitySchema):
- return eschema2rql(erschema, groupmap=groupmap)
- return rschema2rql(erschema, groupmap=groupmap)
-
-def eschema2rql(eschema, groupmap=None):
- """return a list of rql insert statements to enter an entity schema
- in the database as an CWEType entity
- """
- relations, values = eschema_relations_values(eschema)
- # NOTE: 'specializes' relation can't be inserted here since there's no
- # way to make sure the parent type is inserted before the child type
- yield 'INSERT CWEType X: %s' % ','.join(relations) , values
- # entity permissions
- if groupmap is not None:
- for rql, args in _erperms2rql(eschema, groupmap):
- args['name'] = str(eschema)
- yield rql + 'X is CWEType, X name %(name)s', args
-
-def specialize2rql(schema):
- for eschema in schema.entities():
- for rql, kwargs in eschemaspecialize2rql(eschema):
- yield rql, kwargs
-
-def eschemaspecialize2rql(eschema):
- specialized_type = eschema.specializes()
- if specialized_type:
- values = {'x': eschema.type, 'et': specialized_type.type}
- yield 'SET X specializes ET WHERE X name %(x)s, ET name %(et)s', values
-
-def rschema2rql(rschema, addrdef=True, groupmap=None):
- """return a list of rql insert statements to enter a relation schema
- in the database as an CWRType entity
- """
- if rschema.type == 'has_text':
- return
- relations, values = rschema_relations_values(rschema)
- yield 'INSERT CWRType X: %s' % ','.join(relations), values
- if addrdef:
- for rql, values in rdef2rql(rschema, groupmap=groupmap):
- yield rql, values
-
-def rdef2rql(rschema, subjtype=None, objtype=None, props=None, groupmap=None):
- genmap = {True: frdef2rql, False: nfrdef2rql}
- return __rdef2rql(genmap, rschema, subjtype, objtype, props, groupmap)
-
-
-_LOCATE_RDEF_RQL0 = 'X relation_type ER,X from_entity SE,X to_entity OE'
-_LOCATE_RDEF_RQL1 = 'SE name %(se)s,ER name %(rt)s,OE name %(oe)s'
-
-def frdef2rql(rschema, subjtype, objtype, props):
- relations, values = frdef_relations_values(objtype, props)
- relations.append(_LOCATE_RDEF_RQL0)
- values.update({'se': str(subjtype), 'rt': str(rschema), 'oe': str(objtype)})
- yield 'INSERT CWAttribute X: %s WHERE %s' % (','.join(relations), _LOCATE_RDEF_RQL1), values
- for rql, values in rdefrelations2rql(rschema, subjtype, objtype, props):
- yield rql + ', EDEF is CWAttribute', values
-
-def nfrdef2rql(rschema, subjtype, objtype, props):
- relations, values = nfrdef_relations_values(objtype, props)
- relations.append(_LOCATE_RDEF_RQL0)
- values.update({'se': str(subjtype), 'rt': str(rschema), 'oe': str(objtype)})
- yield 'INSERT CWRelation X: %s WHERE %s' % (','.join(relations), _LOCATE_RDEF_RQL1), values
- for rql, values in rdefrelations2rql(rschema, subjtype, objtype, props):
- yield rql + ', EDEF is CWRelation', values
-
-def rdefrelations2rql(rschema, subjtype, objtype, props):
- iterators = []
- for constraint in props.constraints:
- iterators.append(constraint2rql(rschema, subjtype, objtype, constraint))
- return chain(*iterators)
-
-def constraint2rql(rschema, subjtype, objtype, constraint):
- values = {'ctname': unicode(constraint.type()),
- 'value': unicode(constraint.serialize()),
- 'rt': str(rschema), 'se': str(subjtype), 'oe': str(objtype)}
- yield 'INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE \
-CT name %(ctname)s, EDEF relation_type ER, EDEF from_entity SE, EDEF to_entity OE, \
-ER name %(rt)s, SE name %(se)s, OE name %(oe)s', values
+def constraints2rql(cstrtypemap, constraints, rdefeid=None):
+ for constraint in constraints:
+ values = {'ct': cstrtypemap[constraint.type()],
+ 'value': unicode(constraint.serialize()),
+ 'x': rdefeid} # when not specified, will have to be set by the caller
+ yield 'INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE \
+CT eid %(ct)s, EDEF eid %(x)s', values
def _erperms2rql(erschema, groupmap):
@@ -471,7 +434,7 @@
if isinstance(group_or_rqlexpr, basestring):
# group
try:
- yield ('SET X %s_permission Y WHERE Y eid %%(g)s, ' % action,
+ yield ('SET X %s_permission Y WHERE Y eid %%(g)s, X eid %%(x)s' % action,
{'g': groupmap[group_or_rqlexpr]})
except KeyError:
continue
@@ -479,36 +442,24 @@
# rqlexpr
rqlexpr = group_or_rqlexpr
yield ('INSERT RQLExpression E: E expression %%(e)s, E exprtype %%(t)s, '
- 'E mainvars %%(v)s, X %s_permission E WHERE ' % action,
+ 'E mainvars %%(v)s, X %s_permission E WHERE X eid %%(x)s' % action,
{'e': unicode(rqlexpr.expression),
'v': unicode(rqlexpr.mainvars),
't': unicode(rqlexpr.__class__.__name__)})
+# update functions
-def updateeschema2rql(eschema):
+def updateeschema2rql(eschema, eid):
relations, values = eschema_relations_values(eschema)
- values['et'] = eschema.type
- yield 'SET %s WHERE X is CWEType, X name %%(et)s' % ','.join(relations), values
-
-def updaterschema2rql(rschema):
- relations, values = rschema_relations_values(rschema)
- values['rt'] = rschema.type
- yield 'SET %s WHERE X is CWRType, X name %%(rt)s' % ','.join(relations), values
+ values['x'] = eid
+ yield 'SET %s WHERE X eid %%(x)s' % ','.join(relations), values
-def updaterdef2rql(rschema, subjtype=None, objtype=None, props=None):
- genmap = {True: updatefrdef2rql, False: updatenfrdef2rql}
- return __rdef2rql(genmap, rschema, subjtype, objtype, props)
+def updaterschema2rql(rschema, eid):
+ relations, values = rschema_relations_values(rschema)
+ values['x'] = eid
+ yield 'SET %s WHERE X eid %%(x)s' % ','.join(relations), values
-def updatefrdef2rql(rschema, subjtype, objtype, props):
- relations, values = frdef_relations_values(objtype, props)
- values.update({'se': subjtype, 'rt': str(rschema), 'oe': objtype})
- yield 'SET %s WHERE %s, %s, X is CWAttribute' % (','.join(relations),
- _LOCATE_RDEF_RQL0,
- _LOCATE_RDEF_RQL1), values
-
-def updatenfrdef2rql(rschema, subjtype, objtype, props):
- relations, values = nfrdef_relations_values(objtype, props)
- values.update({'se': subjtype, 'rt': str(rschema), 'oe': objtype})
- yield 'SET %s WHERE %s, %s, X is CWRelation' % (','.join(relations),
- _LOCATE_RDEF_RQL0,
- _LOCATE_RDEF_RQL1), values
+def updaterdef2rql(rdef, eid):
+ relations, values = _rdef_values(rdef)
+ values['x'] = eid
+ yield 'SET %s WHERE X eid %%(x)s' % ','.join(relations), values
--- a/server/serverconfig.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/serverconfig.py Fri Mar 12 16:23:21 2010 +0100
@@ -185,63 +185,6 @@
# check user's state at login time
consider_user_state = True
- # XXX hooks control stuff should probably be on the session, not on the config
-
- # hooks activation configuration
- # all hooks should be activated during normal execution
- disabled_hooks_categories = set()
- enabled_hooks_categories = set()
- ALLOW_ALL = object()
- DENY_ALL = object()
- hooks_mode = ALLOW_ALL
-
- @classmethod
- def set_hooks_mode(cls, mode):
- assert mode is cls.ALLOW_ALL or mode is cls.DENY_ALL
- oldmode = cls.hooks_mode
- cls.hooks_mode = mode
- return oldmode
-
- @classmethod
- def disable_hook_category(cls, *categories):
- changes = set()
- if cls.hooks_mode is cls.DENY_ALL:
- for category in categories:
- if category in cls.enabled_hooks_categories:
- cls.enabled_hooks_categories.remove(category)
- changes.add(category)
- else:
- for category in categories:
- if category not in cls.disabled_hooks_categories:
- cls.disabled_hooks_categories.add(category)
- changes.add(category)
- return changes
-
- @classmethod
- def enable_hook_category(cls, *categories):
- changes = set()
- if cls.hooks_mode is cls.DENY_ALL:
- for category in categories:
- if category not in cls.enabled_hooks_categories:
- cls.enabled_hooks_categories.add(category)
- changes.add(category)
- else:
- for category in categories:
- if category in cls.disabled_hooks_categories:
- cls.disabled_hooks_categories.remove(category)
- changes.add(category)
- return changes
-
- @classmethod
- def is_hook_activated(cls, hook):
- return cls.is_hook_category_activated(hook.category)
-
- @classmethod
- def is_hook_category_activated(cls, category):
- if cls.hooks_mode is cls.DENY_ALL:
- return category in cls.enabled_hooks_categories
- return category not in cls.disabled_hooks_categories
-
# should some hooks be deactivated during [pre|post]create script execution
free_wheel = False
--- a/server/serverctl.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/serverctl.py Fri Mar 12 16:23:21 2010 +0100
@@ -66,14 +66,13 @@
cnx = get_connection(driver, dbhost, dbname, user, password=password,
port=source.get('db-port'),
**extra)
- if not hasattr(cnx, 'logged_user'): # XXX logilab.db compat
- try:
- cnx.logged_user = user
- except AttributeError:
- # C object, __slots__
- from logilab.db import _SimpleConnectionWrapper
- cnx = _SimpleConnectionWrapper(cnx)
- cnx.logged_user = user
+ try:
+ cnx.logged_user = user
+ except AttributeError:
+ # C object, __slots__
+ from logilab.database import _SimpleConnectionWrapper
+ cnx = _SimpleConnectionWrapper(cnx)
+ cnx.logged_user = user
return cnx
def system_source_cnx(source, dbms_system_base=False,
@@ -84,8 +83,8 @@
create/drop the instance database)
"""
if dbms_system_base:
- from logilab.common.adbh import get_adv_func_helper
- system_db = get_adv_func_helper(source['db-driver']).system_database()
+ from logilab.database import get_db_helper
+ system_db = get_db_helper(source['db-driver']).system_database()
return source_cnx(source, system_db, special_privs=special_privs, verbose=verbose)
return source_cnx(source, special_privs=special_privs, verbose=verbose)
@@ -94,11 +93,11 @@
or a database
"""
import logilab.common as lgp
- from logilab.common.adbh import get_adv_func_helper
+ from logilab.database import get_db_helper
lgp.USE_MX_DATETIME = False
special_privs = ''
driver = source['db-driver']
- helper = get_adv_func_helper(driver)
+ helper = get_db_helper(driver)
if user is not None and helper.users_support:
special_privs += '%s USER' % what
if db is not None:
@@ -211,10 +210,10 @@
def cleanup(self):
"""remove instance's configuration and database"""
- from logilab.common.adbh import get_adv_func_helper
+ from logilab.database import get_db_helper
source = self.config.sources()['system']
dbname = source['db-name']
- helper = get_adv_func_helper(source['db-driver'])
+ helper = get_db_helper(source['db-driver'])
if ASK.confirm('Delete database %s ?' % dbname):
user = source['db-user'] or None
cnx = _db_sys_cnx(source, 'DROP DATABASE', user=user)
@@ -294,8 +293,7 @@
)
def run(self, args):
"""run the command with its specific arguments"""
- from logilab.common.adbh import get_adv_func_helper
- from indexer import get_indexer
+ from logilab.database import get_db_helper
verbose = self.get('verbose')
automatic = self.get('automatic')
appid = pop_arg(args, msg='No instance specified !')
@@ -304,7 +302,7 @@
dbname = source['db-name']
driver = source['db-driver']
create_db = self.config.create_db
- helper = get_adv_func_helper(driver)
+ helper = get_db_helper(driver)
if driver == 'sqlite':
if os.path.exists(dbname) and automatic or \
ASK.confirm('Database %s already exists -- do you want to drop it ?' % dbname):
@@ -330,13 +328,8 @@
helper.create_database(cursor, dbname, source['db-user'],
source['db-encoding'])
else:
- try:
- helper.create_database(cursor, dbname,
- encoding=source['db-encoding'])
- except TypeError:
- # logilab.database
- helper.create_database(cursor, dbname,
- dbencoding=source['db-encoding'])
+ helper.create_database(cursor, dbname,
+ dbencoding=source['db-encoding'])
dbcnx.commit()
print '-> database %s created.' % dbname
except:
@@ -344,8 +337,7 @@
raise
cnx = system_source_cnx(source, special_privs='LANGUAGE C', verbose=verbose)
cursor = cnx.cursor()
- indexer = get_indexer(driver)
- indexer.init_extensions(cursor)
+ helper.init_fti_extensions(cursor)
# postgres specific stuff
if driver == 'postgres':
# install plpythonu/plpgsql language if not installed by the cube
--- a/server/session.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/session.py Fri Mar 12 16:23:21 2010 +0100
@@ -5,6 +5,8 @@
:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
"""
+from __future__ import with_statement
+
__docformat__ = "restructuredtext en"
import sys
@@ -42,6 +44,68 @@
return description
+class hooks_control(object):
+ """context manager to control activated hooks categories.
+
+ If mode is session.`HOOKS_DENY_ALL`, given hook categories will
+ be enabled.
+
+ If mode is session.`HOOKS_ALLOW_ALL`, given hook categories will
+ be disabled.
+ """
+ def __init__(self, session, mode, *categories):
+ self.session = session
+ self.mode = mode
+ self.categories = categories
+
+ def __enter__(self):
+ self.oldmode = self.session.set_hooks_mode(self.mode)
+ if self.mode is self.session.HOOKS_DENY_ALL:
+ self.changes = self.session.enable_hook_categories(*self.categories)
+ else:
+ self.changes = self.session.disable_hook_categories(*self.categories)
+
+ def __exit__(self, exctype, exc, traceback):
+ if self.changes:
+ if self.mode is self.session.HOOKS_DENY_ALL:
+ self.session.disable_hook_categories(*self.changes)
+ else:
+ self.session.enable_hook_categories(*self.changes)
+ self.session.set_hooks_mode(self.oldmode)
+
+INDENT = ''
+class security_enabled(object):
+ """context manager to control security w/ session.execute, since by
+ default security is disabled on queries executed on the repository
+ side.
+ """
+ def __init__(self, session, read=None, write=None):
+ self.session = session
+ self.read = read
+ self.write = write
+
+ def __enter__(self):
+# global INDENT
+ if self.read is not None:
+ self.oldread = self.session.set_read_security(self.read)
+# print INDENT + 'read', self.read, self.oldread
+ if self.write is not None:
+ self.oldwrite = self.session.set_write_security(self.write)
+# print INDENT + 'write', self.write, self.oldwrite
+# INDENT += ' '
+
+ def __exit__(self, exctype, exc, traceback):
+# global INDENT
+# INDENT = INDENT[:-2]
+ if self.read is not None:
+ self.session.set_read_security(self.oldread)
+# print INDENT + 'reset read to', self.oldread
+ if self.write is not None:
+ self.session.set_write_security(self.oldwrite)
+# print INDENT + 'reset write to', self.oldwrite
+
+
+
class Session(RequestSessionBase):
"""tie session id, user, connections pool and other session data all
together
@@ -57,7 +121,6 @@
self.creation = time()
self.timestamp = self.creation
self.is_internal_session = False
- self.is_super_session = False
self.default_mode = 'read'
# short cut to querier .execute method
self._execute = repo.querier.execute
@@ -78,19 +141,9 @@
def hijack_user(self, user):
"""return a fake request/session using specified user"""
session = Session(user, self.repo)
- session._threaddata = self.actual_session()._threaddata
+ session._threaddata.pool = self.pool
return session
- def _super_call(self, __cb, *args, **kwargs):
- if self.is_super_session:
- __cb(self, *args, **kwargs)
- return
- self.is_super_session = True
- try:
- __cb(self, *args, **kwargs)
- finally:
- self.is_super_session = False
-
def add_relation(self, fromeid, rtype, toeid):
"""provide direct access to the repository method to add a relation.
@@ -102,14 +155,13 @@
You may use this in hooks when you know both eids of the relation you
want to add.
"""
- if self.vreg.schema[rtype].inlined:
- entity = self.entity_from_eid(fromeid)
- entity[rtype] = toeid
- self._super_call(self.repo.glob_update_entity,
- entity, set((rtype,)))
- else:
- self._super_call(self.repo.glob_add_relation,
- fromeid, rtype, toeid)
+ with security_enabled(self, False, False):
+ if self.vreg.schema[rtype].inlined:
+ entity = self.entity_from_eid(fromeid)
+ entity[rtype] = toeid
+ self.repo.glob_update_entity(self, entity, set((rtype,)))
+ else:
+ self.repo.glob_add_relation(self, fromeid, rtype, toeid)
def delete_relation(self, fromeid, rtype, toeid):
"""provide direct access to the repository method to delete a relation.
@@ -122,14 +174,13 @@
You may use this in hooks when you know both eids of the relation you
want to delete.
"""
- if self.vreg.schema[rtype].inlined:
- entity = self.entity_from_eid(fromeid)
- entity[rtype] = None
- self._super_call(self.repo.glob_update_entity,
- entity, set((rtype,)))
- else:
- self._super_call(self.repo.glob_delete_relation,
- fromeid, rtype, toeid)
+ with security_enabled(self, False, False):
+ if self.vreg.schema[rtype].inlined:
+ entity = self.entity_from_eid(fromeid)
+ entity[rtype] = None
+ self.repo.glob_update_entity(self, entity, set((rtype,)))
+ else:
+ self.repo.glob_delete_relation(self, fromeid, rtype, toeid)
# relations cache handling #################################################
@@ -198,10 +249,6 @@
# resource accessors ######################################################
- def actual_session(self):
- """return the original parent session if any, else self"""
- return self
-
def system_sql(self, sql, args=None, rollback_on_failure=True):
"""return a sql cursor on the system database"""
if not sql.split(None, 1)[0].upper() == 'SELECT':
@@ -251,6 +298,162 @@
rdef = rschema.rdef(subjtype, objtype)
return rdef.get(rprop)
+ # security control #########################################################
+
+ DEFAULT_SECURITY = object() # evaluated to true by design
+
+ @property
+ def read_security(self):
+ """return a boolean telling if read security is activated or not"""
+ try:
+ return self._threaddata.read_security
+ except AttributeError:
+ self._threaddata.read_security = self.DEFAULT_SECURITY
+ return self._threaddata.read_security
+
+ def set_read_security(self, activated):
+ """[de]activate read security, returning the previous value set for
+ later restoration.
+
+ you should usually use the `security_enabled` context manager instead
+ of this to change security settings.
+ """
+ oldmode = self.read_security
+ self._threaddata.read_security = activated
+ # dbapi_query used to detect hooks triggered by a 'dbapi' query (eg not
+ # issued on the session). This is tricky since the execution model of
+ # a (write) user query is:
+ #
+ # repository.execute (security enabled)
+ # \-> querier.execute
+ # \-> repo.glob_xxx (add/update/delete entity/relation)
+ # \-> deactivate security before calling hooks
+ # \-> WE WANT TO CHECK QUERY NATURE HERE
+ # \-> potentially, other calls to querier.execute
+ #
+ # so we can't rely on simply checking session.read_security, but
+ # recalling the first transition from DEFAULT_SECURITY to something
+ # else (False actually) is not perfect but should be enough
+ self._threaddata.dbapi_query = oldmode is self.DEFAULT_SECURITY
+ return oldmode
+
+ @property
+ def write_security(self):
+ """return a boolean telling if write security is activated or not"""
+ try:
+ return self._threaddata.write_security
+ except AttributeError:
+ self._threaddata.write_security = self.DEFAULT_SECURITY
+ return self._threaddata.write_security
+
+ def set_write_security(self, activated):
+ """[de]activate write security, returning the previous value set for
+ later restoration.
+
+ you should usually use the `security_enabled` context manager instead
+ of this to change security settings.
+ """
+ oldmode = self.write_security
+ self._threaddata.write_security = activated
+ return oldmode
+
+ @property
+ def running_dbapi_query(self):
+ """return a boolean telling if it's triggered by a db-api query or by
+ a session query.
+
+ To be used in hooks, else may have a wrong value.
+ """
+ return getattr(self._threaddata, 'dbapi_query', True)
+
+ # hooks activation control #################################################
+ # all hooks should be activated during normal execution
+
+ HOOKS_ALLOW_ALL = object()
+ HOOKS_DENY_ALL = object()
+
+ @property
+ def hooks_mode(self):
+ return getattr(self._threaddata, 'hooks_mode', self.HOOKS_ALLOW_ALL)
+
+ def set_hooks_mode(self, mode):
+ assert mode is self.HOOKS_ALLOW_ALL or mode is self.HOOKS_DENY_ALL
+ oldmode = getattr(self._threaddata, 'hooks_mode', self.HOOKS_ALLOW_ALL)
+ self._threaddata.hooks_mode = mode
+ return oldmode
+
+ @property
+ def disabled_hook_categories(self):
+ try:
+ return getattr(self._threaddata, 'disabled_hook_cats')
+ except AttributeError:
+ cats = self._threaddata.disabled_hook_cats = set()
+ return cats
+
+ @property
+ def enabled_hook_categories(self):
+ try:
+ return getattr(self._threaddata, 'enabled_hook_cats')
+ except AttributeError:
+ cats = self._threaddata.enabled_hook_cats = set()
+ return cats
+
+ def disable_hook_categories(self, *categories):
+ """disable the given hook categories:
+
+ - on HOOKS_DENY_ALL mode, ensure those categories are not enabled
+ - on HOOKS_ALLOW_ALL mode, ensure those categories are disabled
+ """
+ changes = set()
+ if self.hooks_mode is self.HOOKS_DENY_ALL:
+ enablecats = self.enabled_hook_categories
+ for category in categories:
+ if category in enablecats:
+ enablecats.remove(category)
+ changes.add(category)
+ else:
+ disablecats = self.disabled_hook_categories
+ for category in categories:
+ if category not in disablecats:
+ disablecats.add(category)
+ changes.add(category)
+ return tuple(changes)
+
+ def enable_hook_categories(self, *categories):
+ """enable the given hook categories:
+
+ - on HOOKS_DENY_ALL mode, ensure those categories are enabled
+ - on HOOKS_ALLOW_ALL mode, ensure those categories are not disabled
+ """
+ changes = set()
+ if self.hooks_mode is self.HOOKS_DENY_ALL:
+ enablecats = self.enabled_hook_categories
+ for category in categories:
+ if category not in enablecats:
+ enablecats.add(category)
+ changes.add(category)
+ else:
+ disablecats = self.disabled_hook_categories
+ for category in categories:
+ if category in self.disabled_hook_categories:
+ disablecats.remove(category)
+ changes.add(category)
+ return tuple(changes)
+
+ def is_hook_category_activated(self, category):
+ """return a boolean telling if the given category is currently activated
+ or not
+ """
+ if self.hooks_mode is self.HOOKS_DENY_ALL:
+ return category in self.enabled_hook_categories
+ return category not in self.disabled_hook_categories
+
+ def is_hook_activated(self, hook):
+ """return a boolean telling if the given hook class is currently
+ activated or not
+ """
+ return self.is_hook_category_activated(hook.category)
+
# connection management ###################################################
def keep_pool_mode(self, mode):
@@ -408,47 +611,12 @@
"""return the source where the entity with id <eid> is located"""
return self.repo.source_from_eid(eid, self)
- def decorate_rset(self, rset, propagate=False):
- rset.vreg = self.vreg
- rset.req = propagate and self or self.actual_session()
+ def execute(self, rql, kwargs=None, eid_key=None, build_descr=True):
+ """db-api like method directly linked to the querier execute method"""
+ rset = self._execute(self, rql, kwargs, eid_key, build_descr)
+ rset.req = self
return rset
- @property
- def super_session(self):
- try:
- csession = self.childsession
- except AttributeError:
- if isinstance(self, (ChildSession, InternalSession)):
- csession = self
- else:
- csession = ChildSession(self)
- self.childsession = csession
- # need shared pool set
- self.set_pool(checkclosed=False)
- return csession
-
- def unsafe_execute(self, rql, kwargs=None, eid_key=None, build_descr=True,
- propagate=False):
- """like .execute but with security checking disabled (this method is
- internal to the server, it's not part of the db-api)
-
- if `propagate` is true, the super_session will be attached to the result
- set instead of the parent session, hence further query done through
- entities fetched from this result set will bypass security as well
- """
- return self.super_session.execute(rql, kwargs, eid_key, build_descr,
- propagate)
-
- def execute(self, rql, kwargs=None, eid_key=None, build_descr=True,
- propagate=False):
- """db-api like method directly linked to the querier execute method
-
- Becare that unlike actual cursor.execute, `build_descr` default to
- false
- """
- rset = self._execute(self, rql, kwargs, eid_key, build_descr)
- return self.decorate_rset(rset, propagate)
-
def _clear_thread_data(self):
"""remove everything from the thread local storage, except pool
which is explicitly removed by reset_pool, and mode which is set anyway
@@ -472,58 +640,60 @@
return
if self.commit_state:
return
- # on rollback, an operation should have the following state
- # information:
- # - processed by the precommit/commit event or not
- # - if processed, is it the failed operation
- try:
- for trstate in ('precommit', 'commit'):
- processed = []
- self.commit_state = trstate
- try:
- while self.pending_operations:
- operation = self.pending_operations.pop(0)
- operation.processed = trstate
- processed.append(operation)
+ # by default, operations are executed with security turned off
+ with security_enabled(self, False, False):
+ # on rollback, an operation should have the following state
+ # information:
+ # - processed by the precommit/commit event or not
+ # - if processed, is it the failed operation
+ try:
+ for trstate in ('precommit', 'commit'):
+ processed = []
+ self.commit_state = trstate
+ try:
+ while self.pending_operations:
+ operation = self.pending_operations.pop(0)
+ operation.processed = trstate
+ processed.append(operation)
+ operation.handle_event('%s_event' % trstate)
+ self.pending_operations[:] = processed
+ self.debug('%s session %s done', trstate, self.id)
+ except:
+ self.exception('error while %sing', trstate)
+ # if error on [pre]commit:
+ #
+ # * set .failed = True on the operation causing the failure
+ # * call revert<event>_event on processed operations
+ # * call rollback_event on *all* operations
+ #
+ # that seems more natural than not calling rollback_event
+ # for processed operations, and allow generic rollback
+ # instead of having to implement rollback, revertprecommit
+ # and revertcommit, that will be enough in most cases.
+ operation.failed = True
+ for operation in processed:
+ operation.handle_event('revert%s_event' % trstate)
+ # XXX use slice notation since self.pending_operations is a
+ # read-only property.
+ self.pending_operations[:] = processed + self.pending_operations
+ self.rollback(reset_pool)
+ raise
+ self.pool.commit()
+ self.commit_state = trstate = 'postcommit'
+ while self.pending_operations:
+ operation = self.pending_operations.pop(0)
+ operation.processed = trstate
+ try:
operation.handle_event('%s_event' % trstate)
- self.pending_operations[:] = processed
- self.debug('%s session %s done', trstate, self.id)
- except:
- self.exception('error while %sing', trstate)
- # if error on [pre]commit:
- #
- # * set .failed = True on the operation causing the failure
- # * call revert<event>_event on processed operations
- # * call rollback_event on *all* operations
- #
- # that seems more natural than not calling rollback_event
- # for processed operations, and allow generic rollback
- # instead of having to implements rollback, revertprecommit
- # and revertcommit, that will be enough in mont case.
- operation.failed = True
- for operation in processed:
- operation.handle_event('revert%s_event' % trstate)
- # XXX use slice notation since self.pending_operations is a
- # read-only property.
- self.pending_operations[:] = processed + self.pending_operations
- self.rollback(reset_pool)
- raise
- self.pool.commit()
- self.commit_state = trstate = 'postcommit'
- while self.pending_operations:
- operation = self.pending_operations.pop(0)
- operation.processed = trstate
- try:
- operation.handle_event('%s_event' % trstate)
- except:
- self.critical('error while %sing', trstate,
- exc_info=sys.exc_info())
- self.info('%s session %s done', trstate, self.id)
- finally:
- self._clear_thread_data()
- self._touch()
- if reset_pool:
- self.reset_pool(ignoremode=True)
+ except:
+ self.critical('error while %sing', trstate,
+ exc_info=sys.exc_info())
+ self.info('%s session %s done', trstate, self.id)
+ finally:
+ self._clear_thread_data()
+ self._touch()
+ if reset_pool:
+ self.reset_pool(ignoremode=True)
def rollback(self, reset_pool=True):
"""rollback the current session's transaction"""
@@ -533,21 +703,23 @@
self._touch()
self.debug('rollback session %s done (no db activity)', self.id)
return
- try:
- while self.pending_operations:
- try:
- operation = self.pending_operations.pop(0)
- operation.handle_event('rollback_event')
- except:
- self.critical('rollback error', exc_info=sys.exc_info())
- continue
- self.pool.rollback()
- self.debug('rollback for session %s done', self.id)
- finally:
- self._clear_thread_data()
- self._touch()
- if reset_pool:
- self.reset_pool(ignoremode=True)
+ # by default, operations are executed with security turned off
+ with security_enabled(self, False, False):
+ try:
+ while self.pending_operations:
+ try:
+ operation = self.pending_operations.pop(0)
+ operation.handle_event('rollback_event')
+ except:
+ self.critical('rollback error', exc_info=sys.exc_info())
+ continue
+ self.pool.rollback()
+ self.debug('rollback for session %s done', self.id)
+ finally:
+ self._clear_thread_data()
+ self._touch()
+ if reset_pool:
+ self.reset_pool(ignoremode=True)
def close(self):
"""do not close pool on session close, since they are shared now"""
@@ -592,10 +764,10 @@
def add_operation(self, operation, index=None):
"""add an observer"""
assert self.commit_state != 'commit'
- if index is not None:
+ if index is None:
+ self.pending_operations.append(operation)
+ else:
self.pending_operations.insert(index, operation)
- else:
- self.pending_operations.append(operation)
# querier helpers #########################################################
@@ -671,6 +843,25 @@
# deprecated ###############################################################
+ @deprecated("[3.7] control security with session.[read|write]_security")
+ def unsafe_execute(self, rql, kwargs=None, eid_key=None, build_descr=True,
+ propagate=False):
+ """like .execute but with security checking disabled (this method is
+ internal to the server, it's not part of the db-api)
+ """
+ return self.execute(rql, kwargs, eid_key, build_descr)
+
+ @property
+ @deprecated("[3.7] is_super_session is deprecated, test "
+ "session.read_security and/or session.write_security")
+ def is_super_session(self):
+ return not self.read_security or not self.write_security
+
+ @deprecated("[3.7] session is actual session")
+ def actual_session(self):
+ """return the original parent session if any, else self"""
+ return self
+
@property
@deprecated("[3.6] use session.vreg.schema")
def schema(self):
@@ -697,84 +888,6 @@
return self.entity_from_eid(eid)
-class ChildSession(Session):
- """child (or internal) session are used to hijack the security system
- """
- cnxtype = 'inmemory'
-
- def __init__(self, parent_session):
- self.id = None
- self.is_internal_session = False
- self.is_super_session = True
- # session which has created this one
- self.parent_session = parent_session
- self.user = InternalManager()
- self.user.req = self # XXX remove when "vreg = user.req.vreg" hack in entity.py is gone
- self.repo = parent_session.repo
- self.vreg = parent_session.vreg
- self.data = parent_session.data
- self.encoding = parent_session.encoding
- self.lang = parent_session.lang
- self._ = self.__ = parent_session._
- # short cut to querier .execute method
- self._execute = self.repo.querier.execute
-
- @property
- def super_session(self):
- return self
-
- def get_mode(self):
- return self.parent_session.mode
- def set_mode(self, value):
- self.parent_session.set_mode(value)
- mode = property(get_mode, set_mode)
-
- def get_commit_state(self):
- return self.parent_session.commit_state
- def set_commit_state(self, value):
- self.parent_session.set_commit_state(value)
- commit_state = property(get_commit_state, set_commit_state)
-
- @property
- def pool(self):
- return self.parent_session.pool
- @property
- def pending_operations(self):
- return self.parent_session.pending_operations
- @property
- def transaction_data(self):
- return self.parent_session.transaction_data
-
- def set_pool(self):
- """the session need a pool to execute some queries"""
- self.parent_session.set_pool()
-
- def reset_pool(self):
- """the session has no longer using its pool, at least for some time
- """
- self.parent_session.reset_pool()
-
- def actual_session(self):
- """return the original parent session if any, else self"""
- return self.parent_session
-
- def commit(self, reset_pool=True):
- """commit the current session's transaction"""
- self.parent_session.commit(reset_pool)
-
- def rollback(self, reset_pool=True):
- """rollback the current session's transaction"""
- self.parent_session.rollback(reset_pool)
-
- def close(self):
- """do not close pool on session close, since they are shared now"""
- self.parent_session.close()
-
- def user_data(self):
- """returns a dictionnary with this user's information"""
- return self.parent_session.user_data()
-
-
class InternalSession(Session):
"""special session created internaly by the repository"""
@@ -784,11 +897,7 @@
self.user.req = self # XXX remove when "vreg = user.req.vreg" hack in entity.py is gone
self.cnxtype = 'inmemory'
self.is_internal_session = True
- self.is_super_session = True
-
- @property
- def super_session(self):
- return self
+ self.disable_hook_categories('integrity')
class InternalManager(object):
--- a/server/sources/extlite.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/sources/extlite.py Fri Mar 12 16:23:21 2010 +0100
@@ -20,12 +20,6 @@
self.source = source
self._cnx = None
- @property
- def logged_user(self):
- if self._cnx is None:
- self._cnx = self.source._sqlcnx
- return self._cnx.logged_user
-
def cursor(self):
if self._cnx is None:
self._cnx = self.source._sqlcnx
--- a/server/sources/native.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/sources/native.py Fri Mar 12 16:23:21 2010 +0100
@@ -21,10 +21,8 @@
from logilab.common.cache import Cache
from logilab.common.decorators import cached, clear_cache
from logilab.common.configuration import Method
-from logilab.common.adbh import get_adv_func_helper
from logilab.common.shellutils import getlogin
-
-from indexer import get_indexer
+from logilab.database import get_db_helper
from cubicweb import UnknownEid, AuthenticationError, Binary, server
from cubicweb.cwconfig import CubicWebNoAppConfiguration
@@ -149,20 +147,11 @@
self.authentifiers = [LoginPasswordAuthentifier(self)]
AbstractSource.__init__(self, repo, appschema, source_config,
*args, **kwargs)
+ # sql generator
+ self._rql_sqlgen = self.sqlgen_class(appschema, self.dbhelper,
+ ATTR_MAP.copy())
# full text index helper
self.do_fti = not repo.config['delay-full-text-indexation']
- if self.do_fti:
- self.indexer = get_indexer(self.dbdriver, self.encoding)
- # XXX should go away with logilab.db
- self.dbhelper.fti_uid_attr = self.indexer.uid_attr
- self.dbhelper.fti_table = self.indexer.table
- self.dbhelper.fti_restriction_sql = self.indexer.restriction_sql
- self.dbhelper.fti_need_distinct_query = self.indexer.need_distinct
- else:
- self.dbhelper.fti_need_distinct_query = False
- # sql generator
- self._rql_sqlgen = self.sqlgen_class(appschema, self.dbhelper,
- self.encoding, ATTR_MAP.copy())
# sql queries cache
self._cache = Cache(repo.config['rql-cache-size'])
self._temp_table_data = {}
@@ -209,7 +198,7 @@
pool.pool_set()
# check full text index availibility
if self.do_fti:
- if not self.indexer.has_fti_table(pool['system']):
+ if not self.dbhelper.has_fti_table(pool['system']):
if not self.repo.config.creating:
self.critical('no text index table')
self.do_fti = False
@@ -323,8 +312,7 @@
assert isinstance(sql, basestring), repr(sql)
try:
cursor = self.doexec(session, sql, args)
- except (self.dbapi_module.OperationalError,
- self.dbapi_module.InterfaceError):
+ except (self.OperationalError, self.InterfaceError):
# FIXME: better detection of deconnection pb
self.info("request failed '%s' ... retry with a new cursor", sql)
session.pool.reconnect(self)
@@ -344,7 +332,7 @@
prefix='ON THE FLY temp data insertion into %s from' % table)
# generate sql queries if we are able to do so
sql, query_args = self._rql_sqlgen.generate(union, args, varmap)
- query = 'INSERT INTO %s %s' % (table, sql.encode(self.encoding))
+ query = 'INSERT INTO %s %s' % (table, sql.encode(self._dbencoding))
self.doexec(session, query, self.merge_args(args, query_args))
def manual_insert(self, results, table, session):
@@ -361,7 +349,7 @@
row = tuple(row)
for index, cell in enumerate(row):
if isinstance(cell, Binary):
- cell = self.binary(cell.getvalue())
+ cell = self._binary(cell.getvalue())
kwargs[str(index)] = cell
kwargs_list.append(kwargs)
self.doexecmany(session, query, kwargs_list)
@@ -616,7 +604,7 @@
index
"""
try:
- self.indexer.cursor_unindex_object(eid, session.pool['system'])
+ self.dbhelper.cursor_unindex_object(eid, session.pool['system'])
except Exception: # let KeyboardInterrupt / SystemExit propagate
self.exception('error while unindexing %s', eid)
@@ -627,8 +615,8 @@
try:
# use cursor_index_object, not cursor_reindex_object since
# unindexing done in the FTIndexEntityOp
- self.indexer.cursor_index_object(entity.eid, entity,
- session.pool['system'])
+ self.dbhelper.cursor_index_object(entity.eid, entity,
+ session.pool['system'])
except Exception: # let KeyboardInterrupt / SystemExit propagate
self.exception('error while reindexing %s', entity)
@@ -661,7 +649,7 @@
def sql_schema(driver):
- helper = get_adv_func_helper(driver)
+ helper = get_db_helper(driver)
tstamp_col_type = helper.TYPE_MAPPING['Datetime']
schema = """
/* Create the repository's system database */
@@ -694,7 +682,7 @@
def sql_drop_schema(driver):
- helper = get_adv_func_helper(driver)
+ helper = get_db_helper(driver)
return """
%s
DROP TABLE entities;
--- a/server/sources/rql2sql.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/sources/rql2sql.py Fri Mar 12 16:23:21 2010 +0100
@@ -332,16 +332,16 @@
protected by a lock
"""
- def __init__(self, schema, dbms_helper, dbencoding='UTF-8', attrmap=None):
+ def __init__(self, schema, dbms_helper, attrmap=None):
self.schema = schema
self.dbms_helper = dbms_helper
- self.dbencoding = dbencoding
+ self.dbencoding = dbms_helper.dbencoding
self.keyword_map = {'NOW' : self.dbms_helper.sql_current_timestamp,
'TODAY': self.dbms_helper.sql_current_date,
}
if not self.dbms_helper.union_parentheses_support:
self.union_sql = self.noparen_union_sql
- if self.dbms_helper.fti_need_distinct_query:
+ if self.dbms_helper.fti_need_distinct:
self.__union_sql = self.union_sql
self.union_sql = self.has_text_need_distinct_union_sql
self._lock = threading.Lock()
@@ -986,10 +986,9 @@
def visit_function(self, func):
"""generate SQL name for a function"""
- # function_description will check function is supported by the backend
- sqlname = self.dbms_helper.func_sqlname(func.name)
- return '%s(%s)' % (sqlname, ', '.join(c.accept(self)
- for c in func.children))
+ # func_sql_call will check function is supported by the backend
+ return self.dbms_helper.func_as_sql(func.name,
+ [c.accept(self) for c in func.children])
def visit_constant(self, constant):
"""generate SQL name for a constant"""
--- a/server/sources/storages.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/sources/storages.py Fri Mar 12 16:23:21 2010 +0100
@@ -92,9 +92,8 @@
cu = sysource.doexec(entity._cw,
'SELECT cw_%s FROM cw_%s WHERE cw_eid=%s' % (
attr, entity.__regid__, entity.eid))
- dbmod = sysource.dbapi_module
- return dbmod.process_value(cu.fetchone()[0], [None, dbmod.BINARY],
- binarywrap=str)
+ return sysource._process_value(cu.fetchone()[0], [None, sysource.dbhelper.dbapi_module.BINARY],
+ binarywrap=str)
class AddFileOp(Operation):
--- a/server/sqlutils.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/sqlutils.py Fri Mar 12 16:23:21 2010 +0100
@@ -11,21 +11,17 @@
import subprocess
from datetime import datetime, date
-import logilab.common as lgc
-from logilab.common import db
+from logilab import database as db, common as lgc
from logilab.common.shellutils import ProgressBar
-from logilab.common.adbh import get_adv_func_helper
-from logilab.common.sqlgen import SQLGenerator
from logilab.common.date import todate, todatetime
-
-from indexer import get_indexer
+from logilab.database.sqlgen import SQLGenerator
from cubicweb import Binary, ConfigurationError
from cubicweb.uilib import remove_html_tags
from cubicweb.schema import PURE_VIRTUAL_RTYPES
from cubicweb.server import SQL_CONNECT_HOOKS
from cubicweb.server.utils import crypt_password
-
+from rql.utils import RQL_FUNCTIONS_REGISTRY
lgc.USE_MX_DATETIME = False
SQL_PREFIX = 'cw_'
@@ -77,8 +73,8 @@
w(native.grant_schema(user, set_owner))
w('')
if text_index:
- indexer = get_indexer(driver)
- w(indexer.sql_grant_user(user))
+ dbhelper = db.get_db_helper(driver)
+ w(dbhelper.sql_grant_user_on_fti(user))
w('')
w(grant_schema(schema, user, set_owner, skip_entities=skip_entities, prefix=SQL_PREFIX))
return '\n'.join(output)
@@ -96,11 +92,10 @@
w = output.append
w(native.sql_schema(driver))
w('')
+ dbhelper = db.get_db_helper(driver)
if text_index:
- indexer = get_indexer(driver)
- w(indexer.sql_init_fti())
+ w(dbhelper.sql_init_fti())
w('')
- dbhelper = get_adv_func_helper(driver)
w(schema2sql(dbhelper, schema, prefix=SQL_PREFIX,
skip_entities=skip_entities, skip_relations=skip_relations))
if dbhelper.users_support and user:
@@ -120,8 +115,8 @@
w(native.sql_drop_schema(driver))
w('')
if text_index:
- indexer = get_indexer(driver)
- w(indexer.sql_drop_fti())
+ dbhelper = db.get_db_helper(driver)
+ w(dbhelper.sql_drop_fti())
w('')
w(dropschema2sql(schema, prefix=SQL_PREFIX,
skip_entities=skip_entities,
@@ -137,65 +132,44 @@
def __init__(self, source_config):
try:
self.dbdriver = source_config['db-driver'].lower()
- self.dbname = source_config['db-name']
+ dbname = source_config['db-name']
except KeyError:
raise ConfigurationError('missing some expected entries in sources file')
- self.dbhost = source_config.get('db-host')
+ dbhost = source_config.get('db-host')
port = source_config.get('db-port')
- self.dbport = port and int(port) or None
- self.dbuser = source_config.get('db-user')
- self.dbpasswd = source_config.get('db-password')
- self.encoding = source_config.get('db-encoding', 'UTF-8')
- self.dbapi_module = db.get_dbapi_compliant_module(self.dbdriver)
- self.dbdriver_extra_args = source_config.get('db-extra-arguments')
- self.binary = self.dbapi_module.Binary
- self.dbhelper = self.dbapi_module.adv_func_helper
+ dbport = port and int(port) or None
+ dbuser = source_config.get('db-user')
+ dbpassword = source_config.get('db-password')
+ dbencoding = source_config.get('db-encoding', 'UTF-8')
+ dbextraargs = source_config.get('db-extra-arguments')
+ self.dbhelper = db.get_db_helper(self.dbdriver)
+ self.dbhelper.record_connection_info(dbname, dbhost, dbport, dbuser,
+ dbpassword, dbextraargs,
+ dbencoding)
self.sqlgen = SQLGenerator()
+ # copy back some commonly accessed attributes
+ dbapi_module = self.dbhelper.dbapi_module
+ self.OperationalError = dbapi_module.OperationalError
+ self.InterfaceError = dbapi_module.InterfaceError
+ self._binary = dbapi_module.Binary
+ self._process_value = dbapi_module.process_value
+ self._dbencoding = dbencoding
- def get_connection(self, user=None, password=None):
+ def get_connection(self):
"""open and return a connection to the database"""
- if user or self.dbuser:
- self.info('connecting to %s@%s for user %s', self.dbname,
- self.dbhost or 'localhost', user or self.dbuser)
- else:
- self.info('connecting to %s@%s', self.dbname,
- self.dbhost or 'localhost')
- extra = {}
- if self.dbdriver_extra_args:
- extra = {'extra_args': self.dbdriver_extra_args}
- cnx = self.dbapi_module.connect(self.dbhost, self.dbname,
- user or self.dbuser,
- password or self.dbpasswd,
- port=self.dbport,
- **extra)
- init_cnx(self.dbdriver, cnx)
- #self.dbapi_module.type_code_test(cnx.cursor())
- return cnx
+ return self.dbhelper.get_connection()
def backup_to_file(self, backupfile, confirm):
- for cmd in self.dbhelper.backup_commands(backupfile=backupfile,
- keepownership=False,
- dbname=self.dbname,
- dbhost=self.dbhost,
- dbuser=self.dbuser,
- dbport=self.dbport):
+ for cmd in self.dbhelper.backup_commands(backupfile,
+ keepownership=False):
if _run_command(cmd):
if not confirm(' [Failed] Continue anyway?', default='n'):
raise Exception('Failed command: %s' % cmd)
def restore_from_file(self, backupfile, confirm, drop=True):
- if 'dbencoding' in self.dbhelper.restore_commands.im_func.func_code.co_varnames:
- kwargs = {'dbencoding': self.encoding}
- else:
- kwargs = {'encoding': self.encoding}
- for cmd in self.dbhelper.restore_commands(backupfile=backupfile,
+ for cmd in self.dbhelper.restore_commands(backupfile,
keepownership=False,
- drop=drop,
- dbname=self.dbname,
- dbhost=self.dbhost,
- dbuser=self.dbuser,
- dbport=self.dbport,
- **kwargs):
+ drop=drop):
if _run_command(cmd):
if not confirm(' [Failed] Continue anyway?', default='n'):
raise Exception('Failed command: %s' % cmd)
@@ -206,7 +180,7 @@
for key, val in args.iteritems():
# convert cubicweb binary into db binary
if isinstance(val, Binary):
- val = self.binary(val.getvalue())
+ val = self._binary(val.getvalue())
newargs[key] = val
# should not collide
newargs.update(query_args)
@@ -216,10 +190,12 @@
def process_result(self, cursor):
"""return a list of CubicWeb compliant values from data in the given cursor
"""
+ # begin bind to locals for optimization
descr = cursor.description
- encoding = self.encoding
- process_value = self.dbapi_module.process_value
+ encoding = self._dbencoding
+ process_value = self._process_value
binary = Binary
+ # /end
results = cursor.fetchall()
for i, line in enumerate(results):
result = []
@@ -250,14 +226,14 @@
value = value.getvalue()
else:
value = crypt_password(value)
- value = self.binary(value)
+ value = self._binary(value)
# XXX needed for sqlite but I don't think it is for other backends
elif atype == 'Datetime' and isinstance(value, date):
value = todatetime(value)
elif atype == 'Date' and isinstance(value, datetime):
value = todate(value)
elif isinstance(value, Binary):
- value = self.binary(value.getvalue())
+ value = self._binary(value.getvalue())
attrs[SQL_PREFIX+str(attr)] = value
return attrs
@@ -267,12 +243,8 @@
set_log_methods(SQLAdapterMixIn, getLogger('cubicweb.sqladapter'))
def init_sqlite_connexion(cnx):
- # XXX should not be publicly exposed
- #def comma_join(strings):
- # return ', '.join(strings)
- #cnx.create_function("COMMA_JOIN", 1, comma_join)
- class concat_strings(object):
+ class group_concat(object):
def __init__(self):
self.values = []
def step(self, value):
@@ -280,10 +252,7 @@
self.values.append(value)
def finalize(self):
return ', '.join(self.values)
- # renamed to GROUP_CONCAT in cubicweb 2.45, keep old name for bw compat for
- # some time
- cnx.create_aggregate("CONCAT_STRINGS", 1, concat_strings)
- cnx.create_aggregate("GROUP_CONCAT", 1, concat_strings)
+ cnx.create_aggregate("GROUP_CONCAT", 1, group_concat)
def _limit_size(text, maxsize, format='text/plain'):
if len(text) < maxsize:
@@ -301,9 +270,9 @@
def limit_size2(text, maxsize):
return _limit_size(text, maxsize)
cnx.create_function("TEXT_LIMIT_SIZE", 2, limit_size2)
+
import yams.constraints
- if hasattr(yams.constraints, 'patch_sqlite_decimal'):
- yams.constraints.patch_sqlite_decimal()
+ yams.constraints.patch_sqlite_decimal()
def fspath(eid, etype, attr):
try:
@@ -328,10 +297,5 @@
raise
cnx.create_function('_fsopen', 1, _fsopen)
-
sqlite_hooks = SQL_CONNECT_HOOKS.setdefault('sqlite', [])
sqlite_hooks.append(init_sqlite_connexion)
-
-def init_cnx(driver, cnx):
- for hook in SQL_CONNECT_HOOKS.get(driver, ()):
- hook(cnx)
--- a/server/ssplanner.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/ssplanner.py Fri Mar 12 16:23:21 2010 +0100
@@ -5,16 +5,114 @@
:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
"""
+from __future__ import with_statement
+
__docformat__ = "restructuredtext en"
from copy import copy
from rql.stmts import Union, Select
-from rql.nodes import Constant
+from rql.nodes import Constant, Relation
from cubicweb import QueryError, typed_eid
from cubicweb.schema import VIRTUAL_RTYPES
from cubicweb.rqlrewrite import add_types_restriction
+from cubicweb.server.session import security_enabled
+
+READ_ONLY_RTYPES = set(('eid', 'has_text', 'is', 'is_instance_of', 'identity'))
+
+_CONSTANT = object()
+_FROM_SUBSTEP = object()
+
+def _extract_const_attributes(plan, rqlst, to_build):
+ """add constant values to entity def, mark variables to be selected
+ """
+ to_select = {}
+ for relation in rqlst.main_relations:
+ lhs, rhs = relation.get_variable_parts()
+ rtype = relation.r_type
+ if rtype in READ_ONLY_RTYPES:
+ raise QueryError("can't assign to %s" % rtype)
+ try:
+ edef = to_build[str(lhs)]
+ except KeyError:
+ # lhs var is not to build, should be selected and added as an
+ # object relation
+ edef = to_build[str(rhs)]
+ to_select.setdefault(edef, []).append((rtype, lhs, 1))
+ else:
+ if isinstance(rhs, Constant) and not rhs.uid:
+ # add constant values to entity def
+ value = rhs.eval(plan.args)
+ eschema = edef.e_schema
+ attrtype = eschema.subjrels[rtype].objects(eschema)[0]
+ if attrtype == 'Password' and isinstance(value, unicode):
+ value = value.encode('UTF8')
+ edef[rtype] = value
+ elif to_build.has_key(str(rhs)):
+ # create a relation between two newly created variables
+ plan.add_relation_def((edef, rtype, to_build[rhs.name]))
+ else:
+ to_select.setdefault(edef, []).append( (rtype, rhs, 0) )
+ return to_select
+
+def _extract_eid_consts(plan, rqlst):
+ """return a dict mapping rqlst variable object to their eid if specified in
+ the syntax tree
+ """
+ session = plan.session
+ if rqlst.where is None:
+ return {}
+ eidconsts = {}
+ neweids = session.transaction_data.get('neweids', ())
+ checkread = session.read_security
+ eschema = session.vreg.schema.eschema
+ for rel in rqlst.where.get_nodes(Relation):
+ if rel.r_type == 'eid' and not rel.neged(strict=True):
+ lhs, rhs = rel.get_variable_parts()
+ if isinstance(rhs, Constant):
+ eid = typed_eid(rhs.eval(plan.args))
+ # check read permission here since it may not be done by
+                # the generated select substep if not emitted (eg nothing
+ # to be selected)
+ if checkread and eid not in neweids:
+ with security_enabled(session, read=False):
+ eschema(session.describe(eid)[0]).check_perm(
+ session, 'read', eid=eid)
+ eidconsts[lhs.variable] = eid
+ return eidconsts
+
+def _build_substep_query(select, origrqlst):
+ """Finalize substep select query that should be executed to get proper
+ selection of stuff to insert/update.
+
+ Return None when no query actually needed, else the given select node that
+ will be used as substep query.
+
+ When select has nothing selected, search in origrqlst for restriction that
+ should be considered.
+ """
+ if select.selection:
+ if origrqlst.where is not None:
+ select.set_where(origrqlst.where.copy(select))
+ return select
+ if origrqlst.where is None:
+ return
+ for rel in origrqlst.where.iget_nodes(Relation):
+ # search for a relation which is neither a type restriction (is) nor an
+        # eid specification (not neged eid with a constant node)
+ if rel.neged(strict=True) or not (
+ rel.is_types_restriction() or
+ (rel.r_type == 'eid'
+ and isinstance(rel.get_variable_parts()[1], Constant))):
+ break
+ else:
+ return
+ select.set_where(origrqlst.where.copy(select))
+ if not select.selection:
+ # no selection, append one randomly
+ select.append_selected(rel.children[0].copy(select))
+ return select
class SSPlanner(object):
@@ -56,34 +154,37 @@
to_build[var.name] = etype_class(etype)(session)
plan.add_entity_def(to_build[var.name])
# add constant values to entity def, mark variables to be selected
- to_select = plan.relation_definitions(rqlst, to_build)
+ to_select = _extract_const_attributes(plan, rqlst, to_build)
# add necessary steps to add relations and update attributes
step = InsertStep(plan) # insert each entity and its relations
- step.children += self._compute_relation_steps(plan, rqlst.solutions,
- rqlst.where, to_select)
+ step.children += self._compute_relation_steps(plan, rqlst, to_select)
return (step,)
- def _compute_relation_steps(self, plan, solutions, restriction, to_select):
+ def _compute_relation_steps(self, plan, rqlst, to_select):
"""handle the selection of relations for an insert query"""
+ eidconsts = _extract_eid_consts(plan, rqlst)
for edef, rdefs in to_select.items():
# create a select rql st to fetch needed data
select = Select()
eschema = edef.e_schema
- for i in range(len(rdefs)):
- rtype, term, reverse = rdefs[i]
- select.append_selected(term.copy(select))
+ for i, (rtype, term, reverse) in enumerate(rdefs):
+ if getattr(term, 'variable', None) in eidconsts:
+ value = eidconsts[term.variable]
+ else:
+ select.append_selected(term.copy(select))
+ value = _FROM_SUBSTEP
if reverse:
- rdefs[i] = rtype, RelationsStep.REVERSE_RELATION
+ rdefs[i] = (rtype, InsertRelationsStep.REVERSE_RELATION, value)
else:
rschema = eschema.subjrels[rtype]
if rschema.final or rschema.inlined:
- rdefs[i] = rtype, RelationsStep.FINAL
+ rdefs[i] = (rtype, InsertRelationsStep.FINAL, value)
else:
- rdefs[i] = rtype, RelationsStep.RELATION
- if restriction is not None:
- select.set_where(restriction.copy(select))
- step = RelationsStep(plan, edef, rdefs)
- step.children += self._select_plan(plan, select, solutions)
+ rdefs[i] = (rtype, InsertRelationsStep.RELATION, value)
+ step = InsertRelationsStep(plan, edef, rdefs)
+ select = _build_substep_query(select, rqlst)
+ if select is not None:
+ step.children += self._select_plan(plan, select, rqlst.solutions)
yield step
def build_delete_plan(self, plan, rqlst):
@@ -127,37 +228,61 @@
def build_set_plan(self, plan, rqlst):
"""get an execution plan from an SET RQL query"""
- select = Select()
- # extract variables to add to the selection
- selected_index = {}
- index = 0
- relations, attrrelations = [], []
getrschema = self.schema.rschema
- for relation in rqlst.main_relations:
+ select = Select() # potential substep query
+ selectedidx = {} # local state
+ attributes = set() # edited attributes
+ updatedefs = [] # definition of update attributes/relations
+ selidx = residx = 0 # substep selection / resulting rset indexes
+ # search for eid const in the WHERE clause
+ eidconsts = _extract_eid_consts(plan, rqlst)
+ # build `updatedefs` describing things to update and add necessary
+ # variables to the substep selection
+ for i, relation in enumerate(rqlst.main_relations):
if relation.r_type in VIRTUAL_RTYPES:
raise QueryError('can not assign to %r relation'
% relation.r_type)
lhs, rhs = relation.get_variable_parts()
- if not lhs.as_string('utf-8') in selected_index:
- select.append_selected(lhs.copy(select))
- selected_index[lhs.as_string('utf-8')] = index
- index += 1
- if not rhs.as_string('utf-8') in selected_index:
- select.append_selected(rhs.copy(select))
- selected_index[rhs.as_string('utf-8')] = index
- index += 1
+ lhskey = lhs.as_string('utf-8')
+ if not lhskey in selectedidx:
+ if lhs.variable in eidconsts:
+ eid = eidconsts[lhs.variable]
+ lhsinfo = (_CONSTANT, eid, residx)
+ else:
+ select.append_selected(lhs.copy(select))
+ lhsinfo = (_FROM_SUBSTEP, selidx, residx)
+ selidx += 1
+ residx += 1
+ selectedidx[lhskey] = lhsinfo
+ else:
+ lhsinfo = selectedidx[lhskey][:-1] + (None,)
+ rhskey = rhs.as_string('utf-8')
+ if not rhskey in selectedidx:
+ if isinstance(rhs, Constant):
+ rhsinfo = (_CONSTANT, rhs.eval(plan.args), residx)
+ elif getattr(rhs, 'variable', None) in eidconsts:
+ eid = eidconsts[rhs.variable]
+ rhsinfo = (_CONSTANT, eid, residx)
+ else:
+ select.append_selected(rhs.copy(select))
+ rhsinfo = (_FROM_SUBSTEP, selidx, residx)
+ selidx += 1
+ residx += 1
+ selectedidx[rhskey] = rhsinfo
+ else:
+ rhsinfo = selectedidx[rhskey][:-1] + (None,)
rschema = getrschema(relation.r_type)
+ updatedefs.append( (lhsinfo, rhsinfo, rschema) )
if rschema.final or rschema.inlined:
- attrrelations.append(relation)
- else:
- relations.append(relation)
- # add step necessary to fetch all selected variables values
- if rqlst.where is not None:
- select.set_where(rqlst.where.copy(select))
- # set distinct to avoid potential duplicate key error
- select.distinct = True
- step = UpdateStep(plan, attrrelations, relations, selected_index)
- step.children += self._select_plan(plan, select, rqlst.solutions)
+ attributes.add(relation.r_type)
+ # the update step
+ step = UpdateStep(plan, updatedefs, attributes)
+ # when necessary add substep to fetch yet unknown values
+ select = _build_substep_query(select, rqlst)
+ if select is not None:
+ # set distinct to avoid potential duplicate key error
+ select.distinct = True
+ step.children += self._select_plan(plan, select, rqlst.solutions)
return (step,)
# internal methods ########################################################
@@ -308,7 +433,7 @@
# UPDATE/INSERT/DELETE steps ##################################################
-class RelationsStep(Step):
+class InsertRelationsStep(Step):
"""step consisting in adding attributes/relations to entity defs from a
previous FetchStep
@@ -334,33 +459,38 @@
"""execute this step"""
base_edef = self.edef
edefs = []
- result = self.execute_child()
+ if self.children:
+ result = self.execute_child()
+ else:
+ result = [[]]
for row in result:
# get a new entity definition for this row
edef = copy(base_edef)
# complete this entity def using row values
- for i in range(len(self.rdefs)):
- rtype, rorder = self.rdefs[i]
- if rorder == RelationsStep.FINAL:
- edef[rtype] = row[i]
- elif rorder == RelationsStep.RELATION:
- self.plan.add_relation_def( (edef, rtype, row[i]) )
- edef.querier_pending_relations[(rtype, 'subject')] = row[i]
+ index = 0
+ for rtype, rorder, value in self.rdefs:
+ if value is _FROM_SUBSTEP:
+ value = row[index]
+ index += 1
+ if rorder == InsertRelationsStep.FINAL:
+ edef[rtype] = value
+ elif rorder == InsertRelationsStep.RELATION:
+ self.plan.add_relation_def( (edef, rtype, value) )
+ edef.querier_pending_relations[(rtype, 'subject')] = value
else:
- self.plan.add_relation_def( (row[i], rtype, edef) )
- edef.querier_pending_relations[(rtype, 'object')] = row[i]
+ self.plan.add_relation_def( (value, rtype, edef) )
+ edef.querier_pending_relations[(rtype, 'object')] = value
edefs.append(edef)
self.plan.substitute_entity_def(base_edef, edefs)
return result
-
class InsertStep(Step):
"""step consisting in inserting new entities / relations"""
def execute(self):
"""execute this step"""
for step in self.children:
- assert isinstance(step, RelationsStep)
+ assert isinstance(step, InsertRelationsStep)
step.plan = self.plan
step.execute()
# insert entities first
@@ -408,40 +538,46 @@
definitions and from results fetched in previous step
"""
- def __init__(self, plan, attribute_relations, relations, selected_index):
+ def __init__(self, plan, updatedefs, attributes):
Step.__init__(self, plan)
- self.attribute_relations = attribute_relations
- self.relations = relations
- self.selected_index = selected_index
+ self.updatedefs = updatedefs
+ self.attributes = attributes
def execute(self):
"""execute this step"""
- plan = self.plan
session = self.plan.session
repo = session.repo
edefs = {}
# insert relations
- attributes = set([relation.r_type for relation in self.attribute_relations])
- result = self.execute_child()
- for row in result:
- for relation in self.attribute_relations:
- lhs, rhs = relation.get_variable_parts()
- eid = typed_eid(row[self.selected_index[str(lhs)]])
- try:
- edef = edefs[eid]
- except KeyError:
- edefs[eid] = edef = session.entity_from_eid(eid)
- if isinstance(rhs, Constant):
- # add constant values to entity def
- value = rhs.eval(plan.args)
- edef[relation.r_type] = value
+ if self.children:
+ result = self.execute_child()
+ else:
+ result = [[]]
+ for i, row in enumerate(result):
+ newrow = []
+ for (lhsinfo, rhsinfo, rschema) in self.updatedefs:
+ lhsval = _handle_relterm(lhsinfo, row, newrow)
+ rhsval = _handle_relterm(rhsinfo, row, newrow)
+ if rschema.final or rschema.inlined:
+ eid = typed_eid(lhsval)
+ try:
+ edef = edefs[eid]
+ except KeyError:
+ edefs[eid] = edef = session.entity_from_eid(eid)
+ edef[str(rschema)] = rhsval
else:
- edef[relation.r_type] = row[self.selected_index[str(rhs)]]
- for relation in self.relations:
- subj = row[self.selected_index[str(relation.children[0])]]
- obj = row[self.selected_index[str(relation.children[1])]]
- repo.glob_add_relation(session, subj, relation.r_type, obj)
+ repo.glob_add_relation(session, lhsval, str(rschema), rhsval)
+ result[i] = newrow
# update entities
for eid, edef in edefs.iteritems():
- repo.glob_update_entity(session, edef, attributes)
+ repo.glob_update_entity(session, edef, self.attributes)
return result
+
+def _handle_relterm(info, row, newrow):
+ if info[0] is _CONSTANT:
+ val = info[1]
+ else: # _FROM_SUBSTEP
+ val = row[info[1]]
+ if info[-1] is not None:
+ newrow.append(val)
+ return val
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/server/test/data/site_cubicweb.py Fri Mar 12 16:23:21 2010 +0100
@@ -0,0 +1,23 @@
+"""
+
+:organization: Logilab
+:copyright: 2001-2010 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
+:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
+:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
+"""
+
+from logilab.database import FunctionDescr
+from logilab.database.sqlite import register_sqlite_pyfunc
+from rql.utils import register_function
+
+try:
+ class DUMB_SORT(FunctionDescr):
+ supported_backends = ('sqlite',)
+
+ register_function(DUMB_SORT)
+ def dumb_sort(something):
+ return something
+ register_sqlite_pyfunc(dumb_sort)
+except:
+ # already registered
+ pass
--- a/server/test/data/site_erudi.py Fri Mar 12 16:21:13 2010 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,27 +0,0 @@
-"""
-
-:organization: Logilab
-:copyright: 2001-2010 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
-:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
-:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
-"""
-from logilab.common.adbh import FunctionDescr
-from rql.utils import register_function
-
-try:
- class DUMB_SORT(FunctionDescr):
- supported_backends = ('sqlite',)
-
- register_function(DUMB_SORT)
-
-
- def init_sqlite_connexion(cnx):
- def dumb_sort(something):
- return something
- cnx.create_function("DUMB_SORT", 1, dumb_sort)
-
- from cubicweb.server import sqlutils
- sqlutils.SQL_CONNECT_HOOKS['sqlite'].append(init_sqlite_connexion)
-except:
- # already registered
- pass
--- a/server/test/unittest_checkintegrity.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/test/unittest_checkintegrity.py Fri Mar 12 16:23:21 2010 +0100
@@ -13,10 +13,9 @@
from cubicweb.server.checkintegrity import check
-repo, cnx = init_test_database()
-
class CheckIntegrityTC(TestCase):
def test(self):
+ repo, cnx = init_test_database()
sys.stderr = sys.stdout = StringIO()
try:
check(repo, cnx, ('entities', 'relations', 'text_index', 'metadata'),
@@ -24,6 +23,7 @@
finally:
sys.stderr = sys.__stderr__
sys.stdout = sys.__stdout__
+ repo.shutdown()
if __name__ == '__main__':
unittest_main()
--- a/server/test/unittest_hook.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/test/unittest_hook.py Fri Mar 12 16:23:21 2010 +0100
@@ -69,6 +69,10 @@
config.bootstrap_cubes()
schema = config.load_schema()
+def teardown_module(*args):
+ global config, schema
+ del config, schema
+
class AddAnyHook(hook.Hook):
__regid__ = 'addany'
category = 'cat1'
@@ -104,13 +108,19 @@
def test_call_hook(self):
self.o.register(AddAnyHook)
- cw = mock_object(vreg=self.vreg)
- self.assertRaises(HookCalled, self.o.call_hooks, 'before_add_entity', cw)
+ dis = set()
+ cw = mock_object(vreg=self.vreg,
+ set_read_security=lambda *a,**k: None,
+ set_write_security=lambda *a,**k: None,
+ is_hook_activated=lambda x, cls: cls.category not in dis)
+ self.assertRaises(HookCalled,
+ self.o.call_hooks, 'before_add_entity', cw)
self.o.call_hooks('before_delete_entity', cw) # nothing to call
- config.disabled_hooks_categories.add('cat1')
+ dis.add('cat1')
self.o.call_hooks('before_add_entity', cw) # disabled hooks category, not called
- config.disabled_hooks_categories.remove('cat1')
- self.assertRaises(HookCalled, self.o.call_hooks, 'before_add_entity', cw)
+ dis.remove('cat1')
+ self.assertRaises(HookCalled,
+ self.o.call_hooks, 'before_add_entity', cw)
self.o.unregister(AddAnyHook)
self.o.call_hooks('before_add_entity', cw) # nothing to call
--- a/server/test/unittest_ldapuser.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/test/unittest_ldapuser.py Fri Mar 12 16:23:21 2010 +0100
@@ -370,6 +370,11 @@
LDAPUserSourceTC._init_repo()
repo = LDAPUserSourceTC.repo
+def teardown_module(*args):
+ global repo
+ del repo
+ del RQL2LDAPFilterTC.schema
+
class RQL2LDAPFilterTC(RQLGeneratorTC):
schema = repo.schema
--- a/server/test/unittest_migractions.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/test/unittest_migractions.py Fri Mar 12 16:23:21 2010 +0100
@@ -14,6 +14,11 @@
from cubicweb.server.sqlutils import SQL_PREFIX
from cubicweb.server.migractions import *
+migrschema = None
+def teardown_module(*args):
+ global migrschema
+ del migrschema
+ del MigrationCommandsTC.origschema
class MigrationCommandsTC(CubicWebTC):
@@ -35,6 +40,13 @@
def _refresh_repo(cls):
super(MigrationCommandsTC, cls)._refresh_repo()
cls.repo.set_schema(deepcopy(cls.origschema), resetvreg=False)
+ # reset migration schema eids
+ for eschema in migrschema.entities():
+ eschema.eid = None
+ for rschema in migrschema.relations():
+ rschema.eid = None
+ for rdef in rschema.rdefs.values():
+ rdef.eid = None
def setUp(self):
CubicWebTC.setUp(self)
@@ -44,7 +56,6 @@
assert self.cnx is self.mh._cnx
assert self.session is self.mh.session, (self.session.id, self.mh.session.id)
-
def test_add_attribute_int(self):
self.failIf('whatever' in self.schema)
self.request().create_entity('Note')
--- a/server/test/unittest_msplanner.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/test/unittest_msplanner.py Fri Mar 12 16:23:21 2010 +0100
@@ -60,6 +60,11 @@
# keep cnx so it's not garbage collected and the associated session is closed
repo, cnx = init_test_database()
+def teardown_module(*args):
+ global repo, cnx
+ del repo, cnx
+
+
class BaseMSPlannerTC(BasePlannerTC):
"""test planner related feature on a 3-sources repository:
@@ -87,10 +92,10 @@
self.add_source(FakeCardSource, 'cards')
def tearDown(self):
- super(BaseMSPlannerTC, self).tearDown()
# restore hijacked security
self.restore_orig_affaire_security()
self.restore_orig_cwuser_security()
+ super(BaseMSPlannerTC, self).tearDown()
def restore_orig_affaire_security(self):
affreadperms = list(self.schema['Affaire'].permissions['read'])
@@ -1517,15 +1522,11 @@
repo._type_source_cache[999999] = ('Note', 'cards', 999999)
repo._type_source_cache[999998] = ('State', 'system', None)
self._test('INSERT Note X: X in_state S, X type T WHERE S eid %(s)s, N eid %(n)s, N type T',
- [('FetchStep', [('Any T WHERE N eid 999999, N type T, N is Note',
- [{'N': 'Note', 'T': 'String'}])],
- [self.cards], None, {'N.type': 'table0.C0', 'T': 'table0.C0'}, []),
- ('InsertStep',
- [('RelationsStep',
- [('OneFetchStep', [('Any 999998,T WHERE N type T, N is Note',
+ [('InsertStep',
+ [('InsertRelationsStep',
+ [('OneFetchStep', [('Any T WHERE N eid 999999, N type T, N is Note',
[{'N': 'Note', 'T': 'String'}])],
- None, None, [self.system],
- {'N.type': 'table0.C0', 'T': 'table0.C0'}, [])])
+ None, None, [self.cards], {}, [])])
])
],
{'n': 999999, 's': 999998})
@@ -1534,15 +1535,11 @@
repo._type_source_cache[999999] = ('Note', 'cards', 999999)
repo._type_source_cache[999998] = ('State', 'system', None)
self._test('INSERT Note X: X in_state S, X type T, X migrated_from N WHERE S eid %(s)s, N eid %(n)s, N type T',
- [('FetchStep', [('Any T,N WHERE N eid 999999, N type T, N is Note',
- [{'N': 'Note', 'T': 'String'}])],
- [self.cards], None, {'N': 'table0.C1', 'N.type': 'table0.C0', 'T': 'table0.C0'}, []),
- ('InsertStep',
- [('RelationsStep',
- [('OneFetchStep', [('Any 999998,T,N WHERE N type T, N is Note',
+ [('InsertStep',
+ [('InsertRelationsStep',
+ [('OneFetchStep', [('Any T WHERE N eid 999999, N type T, N is Note',
[{'N': 'Note', 'T': 'String'}])],
- None, None, [self.system],
- {'N': 'table0.C1', 'N.type': 'table0.C0', 'T': 'table0.C0'}, [])
+ None, None, [self.cards], {}, [])
])
])
],
@@ -1553,8 +1550,8 @@
repo._type_source_cache[999998] = ('State', 'cards', 999998)
self._test('INSERT Note X: X in_state S, X type T WHERE S eid %(s)s, N eid %(n)s, N type T',
[('InsertStep',
- [('RelationsStep',
- [('OneFetchStep', [('Any 999998,T WHERE N eid 999999, N type T, N is Note',
+ [('InsertRelationsStep',
+ [('OneFetchStep', [('Any T WHERE N eid 999999, N type T, N is Note',
[{'N': 'Note', 'T': 'String'}])],
None, None, [self.cards], {}, [])]
)]
@@ -1566,10 +1563,7 @@
repo._type_source_cache[999998] = ('State', 'system', None)
self._test('INSERT Note X: X in_state S, X type "bla", X migrated_from N WHERE S eid %(s)s, N eid %(n)s',
[('InsertStep',
- [('RelationsStep',
- [('OneFetchStep', [('Any 999998,999999', [{}])],
- None, None, [self.system], {}, [])]
- )]
+ [('InsertRelationsStep', [])]
)],
{'n': 999999, 's': 999998})
@@ -1578,12 +1572,14 @@
repo._type_source_cache[999998] = ('State', 'system', None)
self._test('INSERT Note X: X in_state S, X type "bla", X migrated_from N WHERE S eid %(s)s, N eid %(n)s, A concerne N',
[('InsertStep',
- [('RelationsStep',
- [('OneFetchStep', [('Any 999998,999999 WHERE A concerne 999999, A is Affaire',
- [{'A': 'Affaire'}])],
- None, None, [self.system], {}, [])]
- )]
- )],
+ [('InsertRelationsStep',
+ [('OneFetchStep',
+ [('Any A WHERE A concerne 999999, A is Affaire',
+ [{'A': 'Affaire'}])],
+ None, None, [self.system], {}, []),
+ ]),
+ ])
+ ],
{'n': 999999, 's': 999998})
def test_delete_relation1(self):
@@ -1664,7 +1660,7 @@
# source, states should only be searched in the system source as well
self._test('SET X in_state S WHERE X eid %(x)s, S name "deactivated"',
[('UpdateStep', [
- ('OneFetchStep', [('DISTINCT Any 5,S WHERE S name "deactivated", S is State',
+ ('OneFetchStep', [('DISTINCT Any S WHERE S name "deactivated", S is State',
[{'S': 'State'}])],
None, None, [self.system], {}, []),
]),
@@ -1814,7 +1810,7 @@
[('FetchStep', [('Any Y WHERE Y multisource_rel 999998, Y is Note', [{'Y': 'Note'}])],
[self.cards], None, {'Y': u'table0.C0'}, []),
('UpdateStep',
- [('OneFetchStep', [('DISTINCT Any 999999,Y WHERE Y migrated_from 999998, Y is Note',
+ [('OneFetchStep', [('DISTINCT Any Y WHERE Y migrated_from 999998, Y is Note',
[{'Y': 'Note'}])],
None, None, [self.system],
{'Y': u'table0.C0'}, [])])],
@@ -1841,14 +1837,9 @@
def test_nonregr11(self):
repo._type_source_cache[999999] = ('Bookmark', 'system', 999999)
self._test('SET X bookmarked_by Y WHERE X eid %(x)s, Y login "hop"',
- [('FetchStep',
- [('Any Y WHERE Y login "hop", Y is CWUser', [{'Y': 'CWUser'}])],
- [self.ldap, self.system],
- None, {'Y': 'table0.C0'}, []),
- ('UpdateStep',
- [('OneFetchStep', [('DISTINCT Any 999999,Y WHERE Y is CWUser', [{'Y': 'CWUser'}])],
- None, None, [self.system], {'Y': 'table0.C0'},
- [])]
+ [('UpdateStep',
+ [('OneFetchStep', [('DISTINCT Any Y WHERE Y login "hop", Y is CWUser', [{'Y': 'CWUser'}])],
+ None, None, [self.ldap, self.system], {}, [])]
)],
{'x': 999999})
--- a/server/test/unittest_multisources.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/test/unittest_multisources.py Fri Mar 12 16:23:21 2010 +0100
@@ -48,7 +48,12 @@
def teardown_module(*args):
PyroRQLSource.get_connection = PyroRQLSource_get_connection
Connection.close = Connection_close
-
+ global repo2, cnx2, repo3, cnx3
+ repo2.shutdown()
+ repo3.shutdown()
+ del repo2, cnx2, repo3, cnx3
+ #del TwoSourcesTC.config.vreg
+ #del TwoSourcesTC.config
class TwoSourcesTC(CubicWebTC):
config = TwoSourcesConfiguration('data')
@@ -130,7 +135,7 @@
cu = cnx.cursor()
rset = cu.execute('Any X WHERE X has_text "card"')
self.assertEquals(len(rset), 5, zip(rset.rows, rset.description))
- cnx.close()
+ Connection_close(cnx)
def test_synchronization(self):
cu = cnx2.cursor()
--- a/server/test/unittest_querier.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/test/unittest_querier.py Fri Mar 12 16:23:21 2010 +0100
@@ -35,7 +35,7 @@
SQL_CONNECT_HOOKS['sqlite'].append(init_sqlite_connexion)
-from logilab.common.adbh import _GenericAdvFuncHelper
+from logilab.database import _GenericAdvFuncHelper
TYPEMAP = _GenericAdvFuncHelper.TYPE_MAPPING
class MakeSchemaTC(TestCase):
@@ -48,6 +48,11 @@
repo, cnx = init_test_database()
+def teardown_module(*args):
+ global repo, cnx
+ cnx.close()
+ repo.shutdown()
+ del repo, cnx
class UtilsTC(BaseQuerierTC):
@@ -392,6 +397,18 @@
rset = self.execute('Note X WHERE NOT Y evaluee X')
self.assertEquals(len(rset.rows), 1, rset.rows)
+ def test_select_date_extraction(self):
+ self.execute("INSERT Personne X: X nom 'foo', X datenaiss %(d)s",
+ {'d': datetime(2001, 2,3, 12,13)})
+ test_data = [('YEAR', 2001), ('MONTH', 2), ('DAY', 3),
+ ('HOUR', 12), ('MINUTE', 13)]
+ for funcname, result in test_data:
+ rset = self.execute('Any %s(D) WHERE X is Personne, X datenaiss D'
+ % funcname)
+ self.assertEquals(len(rset.rows), 1)
+ self.assertEquals(rset.rows[0][0], result)
+ self.assertEquals(rset.description, [('Int',)])
+
def test_select_aggregat_count(self):
rset = self.execute('Any COUNT(X)')
self.assertEquals(len(rset.rows), 1)
@@ -425,7 +442,7 @@
self.assertEquals(rset.description, [('Int',)])
def test_select_custom_aggregat_concat_string(self):
- rset = self.execute('Any CONCAT_STRINGS(N) WHERE X is CWGroup, X name N')
+ rset = self.execute('Any GROUP_CONCAT(N) WHERE X is CWGroup, X name N')
self.failUnless(rset)
self.failUnlessEqual(sorted(rset[0][0].split(', ')), ['guests', 'managers',
'owners', 'users'])
@@ -1023,6 +1040,10 @@
{'x': str(eid1), 'y': str(eid2)})
rset = self.execute('Any X, Y WHERE X travaille Y')
self.assertEqual(len(rset.rows), 1)
+        # test add of an existing relation but with NOT X rel Y protection
+ self.failIf(self.execute("SET X travaille Y WHERE X eid %(x)s, Y eid %(y)s,"
+ "NOT X travaille Y",
+ {'x': str(eid1), 'y': str(eid2)}))
def test_update_2ter(self):
rset = self.execute("INSERT Personne X, Societe Y: X nom 'bidule', Y nom 'toto'")
--- a/server/test/unittest_repository.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/test/unittest_repository.py Fri Mar 12 16:23:21 2010 +0100
@@ -21,7 +21,7 @@
from cubicweb import (BadConnectionId, RepositoryError, ValidationError,
UnknownEid, AuthenticationError)
from cubicweb.schema import CubicWebSchema, RQLConstraint
-from cubicweb.dbapi import connect, repo_connect, multiple_connections_unfix
+from cubicweb.dbapi import connect, multiple_connections_unfix
from cubicweb.devtools.testlib import CubicWebTC
from cubicweb.devtools.repotest import tuplify
from cubicweb.server import repository, hook
@@ -38,25 +38,29 @@
"""
def test_fill_schema(self):
- self.repo.schema = CubicWebSchema(self.repo.config.appid)
- self.repo.config._cubes = None # avoid assertion error
- self.repo.config.repairing = True # avoid versions checking
- self.repo.fill_schema()
- table = SQL_PREFIX + 'CWEType'
- namecol = SQL_PREFIX + 'name'
- finalcol = SQL_PREFIX + 'final'
- self.session.set_pool()
- cu = self.session.system_sql('SELECT %s FROM %s WHERE %s is NULL' % (
- namecol, table, finalcol))
- self.assertEquals(cu.fetchall(), [])
- cu = self.session.system_sql('SELECT %s FROM %s WHERE %s=%%(final)s ORDER BY %s'
- % (namecol, table, finalcol, namecol), {'final': 'TRUE'})
- self.assertEquals(cu.fetchall(), [(u'Boolean',), (u'Bytes',),
- (u'Date',), (u'Datetime',),
- (u'Decimal',),(u'Float',),
- (u'Int',),
- (u'Interval',), (u'Password',),
- (u'String',), (u'Time',)])
+ origshema = self.repo.schema
+ try:
+ self.repo.schema = CubicWebSchema(self.repo.config.appid)
+ self.repo.config._cubes = None # avoid assertion error
+ self.repo.config.repairing = True # avoid versions checking
+ self.repo.fill_schema()
+ table = SQL_PREFIX + 'CWEType'
+ namecol = SQL_PREFIX + 'name'
+ finalcol = SQL_PREFIX + 'final'
+ self.session.set_pool()
+ cu = self.session.system_sql('SELECT %s FROM %s WHERE %s is NULL' % (
+ namecol, table, finalcol))
+ self.assertEquals(cu.fetchall(), [])
+ cu = self.session.system_sql('SELECT %s FROM %s WHERE %s=%%(final)s ORDER BY %s'
+ % (namecol, table, finalcol, namecol), {'final': 'TRUE'})
+ self.assertEquals(cu.fetchall(), [(u'Boolean',), (u'Bytes',),
+ (u'Date',), (u'Datetime',),
+ (u'Decimal',),(u'Float',),
+ (u'Int',),
+ (u'Interval',), (u'Password',),
+ (u'String',), (u'Time',)])
+ finally:
+ self.repo.set_schema(origshema)
def test_schema_has_owner(self):
repo = self.repo
@@ -180,7 +184,9 @@
repo = self.repo
cnxid = repo.connect(self.admlogin, password=self.admpassword)
# rollback state change which trigger TrInfo insertion
- user = repo._get_session(cnxid).user
+ session = repo._get_session(cnxid)
+ session.set_pool()
+ user = session.user
user.fire_transition('deactivate')
rset = repo.execute(cnxid, 'TrInfo T WHERE T wf_info_for X, X eid %(x)s', {'x': user.eid})
self.assertEquals(len(rset), 1)
@@ -263,6 +269,8 @@
self.fail('something went wrong, thread still alive')
finally:
repository.pyro_unregister(self.repo.config)
+ from logilab.common import pyro_ext
+ pyro_ext._DAEMONS.clear()
def _pyro_client(self, done):
cnx = connect(self.repo.config.appid, u'admin', password='gingkow')
@@ -470,13 +478,6 @@
u'system.version.tag'])
CALLED = []
-class EcritParHook(hook.Hook):
- __regid__ = 'inlinedrelhook'
- __select__ = hook.Hook.__select__ & hook.match_rtype('ecrit_par')
- events = ('before_add_relation', 'after_add_relation',
- 'before_delete_relation', 'after_delete_relation')
- def __call__(self):
- CALLED.append((self.event, self.eidfrom, self.rtype, self.eidto))
class InlineRelHooksTC(CubicWebTC):
"""test relation hooks are called for inlined relations
@@ -491,6 +492,14 @@
def test_inline_relation(self):
"""make sure <event>_relation hooks are called for inlined relation"""
+ class EcritParHook(hook.Hook):
+ __regid__ = 'inlinedrelhook'
+ __select__ = hook.Hook.__select__ & hook.match_rtype('ecrit_par')
+ events = ('before_add_relation', 'after_add_relation',
+ 'before_delete_relation', 'after_delete_relation')
+ def __call__(self):
+ CALLED.append((self.event, self.eidfrom, self.rtype, self.eidto))
+
self.hm.register(EcritParHook)
eidp = self.execute('INSERT Personne X: X nom "toto"')[0][0]
eidn = self.execute('INSERT Note X: X type "T"')[0][0]
--- a/server/test/unittest_rql2sql.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/test/unittest_rql2sql.py Fri Mar 12 16:23:21 2010 +0100
@@ -13,7 +13,6 @@
from logilab.common.testlib import TestCase, unittest_main, mock_object
from rql import BadRQLQuery
-from indexer import get_indexer
#from cubicweb.server.sources.native import remove_unused_solutions
from cubicweb.server.sources.rql2sql import SQLGenerator, remove_unused_solutions
@@ -37,6 +36,10 @@
schema['state_of'].inlined = False
schema['comments'].inlined = False
+def teardown_module(*args):
+ global config, schema
+ del config, schema
+
PARSER = [
(r"Personne P WHERE P nom 'Zig\'oto';",
'''SELECT _P.cw_eid
@@ -1068,7 +1071,7 @@
WHERE rel_is0.eid_to=2'''),
]
-from logilab.common.adbh import get_adv_func_helper
+from logilab.database import get_db_helper
class CWRQLTC(RQLGeneratorTC):
schema = schema
@@ -1102,12 +1105,7 @@
#capture = True
def setUp(self):
RQLGeneratorTC.setUp(self)
- indexer = get_indexer('postgres', 'utf8')
- dbms_helper = get_adv_func_helper('postgres')
- dbms_helper.fti_uid_attr = indexer.uid_attr
- dbms_helper.fti_table = indexer.table
- dbms_helper.fti_restriction_sql = indexer.restriction_sql
- dbms_helper.fti_need_distinct_query = indexer.need_distinct
+ dbms_helper = get_db_helper('postgres')
self.o = SQLGenerator(schema, dbms_helper)
def _norm_sql(self, sql):
@@ -1208,6 +1206,13 @@
FROM cw_CWUser AS _X
WHERE _X.cw_login IS NULL''')
+
+ def test_date_extraction(self):
+ self._check("Any MONTH(D) WHERE P is Personne, P creation_date D",
+ '''SELECT CAST(EXTRACT(MONTH from _P.cw_creation_date) AS INTEGER)
+FROM cw_Personne AS _P''')
+
+
def test_parser_parse(self):
for t in self._parse(PARSER):
yield t
@@ -1405,17 +1410,17 @@
def setUp(self):
RQLGeneratorTC.setUp(self)
- indexer = get_indexer('sqlite', 'utf8')
- dbms_helper = get_adv_func_helper('sqlite')
- dbms_helper.fti_uid_attr = indexer.uid_attr
- dbms_helper.fti_table = indexer.table
- dbms_helper.fti_restriction_sql = indexer.restriction_sql
- dbms_helper.fti_need_distinct_query = indexer.need_distinct
+ dbms_helper = get_db_helper('sqlite')
self.o = SQLGenerator(schema, dbms_helper)
def _norm_sql(self, sql):
return sql.strip().replace(' ILIKE ', ' LIKE ').replace('\nINTERSECT ALL\n', '\nINTERSECT\n')
+ def test_date_extraction(self):
+ self._check("Any MONTH(D) WHERE P is Personne, P creation_date D",
+ '''SELECT MONTH(_P.cw_creation_date)
+FROM cw_Personne AS _P''')
+
def test_union(self):
for t in self._parse((
('(Any N ORDERBY 1 WHERE X name N, X is State)'
@@ -1513,12 +1518,7 @@
def setUp(self):
RQLGeneratorTC.setUp(self)
- indexer = get_indexer('mysql', 'utf8')
- dbms_helper = get_adv_func_helper('mysql')
- dbms_helper.fti_uid_attr = indexer.uid_attr
- dbms_helper.fti_table = indexer.table
- dbms_helper.fti_restriction_sql = indexer.restriction_sql
- dbms_helper.fti_need_distinct_query = indexer.need_distinct
+ dbms_helper = get_db_helper('mysql')
self.o = SQLGenerator(schema, dbms_helper)
def _norm_sql(self, sql):
@@ -1533,6 +1533,11 @@
latest = firstword
return '\n'.join(newsql)
+ def test_date_extraction(self):
+ self._check("Any MONTH(D) WHERE P is Personne, P creation_date D",
+ '''SELECT EXTRACT(MONTH from _P.cw_creation_date)
+FROM cw_Personne AS _P''')
+
def test_from_clause_needed(self):
queries = [("Any 1 WHERE EXISTS(T is CWGroup, T name 'managers')",
'''SELECT 1
--- a/server/test/unittest_rqlannotation.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/test/unittest_rqlannotation.py Fri Mar 12 16:23:21 2010 +0100
@@ -8,6 +8,11 @@
repo, cnx = init_test_database()
+def teardown_module(*args):
+ global repo, cnx
+ del repo, cnx
+
+
class SQLGenAnnotatorTC(BaseQuerierTC):
repo = repo
--- a/server/test/unittest_schemaserial.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/test/unittest_schemaserial.py Fri Mar 12 16:23:21 2010 +0100
@@ -15,9 +15,19 @@
config.bootstrap_cubes()
schema = loader.load(config)
+def teardown_module(*args):
+ global schema, config, loader
+ del schema, config, loader
+
from cubicweb.server.schemaserial import *
from cubicweb.server.schemaserial import _erperms2rql as erperms2rql
+cstrtypemap = {'RQLConstraint': 'RQLConstraint_eid',
+ 'SizeConstraint': 'SizeConstraint_eid',
+ 'StaticVocabularyConstraint': 'StaticVocabularyConstraint_eid',
+ 'FormatConstraint': 'FormatConstraint_eid',
+ }
+
class Schema2RQLTC(TestCase):
def test_eschema2rql1(self):
@@ -34,104 +44,124 @@
{'description': u'', 'final': True, 'name': u'String'})])
def test_eschema2rql_specialization(self):
+        # x: None since each eschema.eid is None
self.assertListEquals(sorted(specialize2rql(schema)),
- [('SET X specializes ET WHERE X name %(x)s, ET name %(et)s',
- {'et': 'BaseTransition', 'x': 'Transition'}),
- ('SET X specializes ET WHERE X name %(x)s, ET name %(et)s',
- {'et': 'BaseTransition', 'x': 'WorkflowTransition'}),
- ('SET X specializes ET WHERE X name %(x)s, ET name %(et)s',
- {'et': 'Division', 'x': 'SubDivision'}),
- # ('SET X specializes ET WHERE X name %(x)s, ET name %(et)s',
+ [('SET X specializes ET WHERE X eid %(x)s, ET eid %(et)s',
+ {'et': None, 'x': None}),
+ ('SET X specializes ET WHERE X eid %(x)s, ET eid %(et)s',
+ {'et': None, 'x': None}),
+ ('SET X specializes ET WHERE X eid %(x)s, ET eid %(et)s',
+ {'et': None, 'x': None}),
+ # ('SET X specializes ET WHERE X eid %(x)s, ET eid %(et)s',
# {'et': 'File', 'x': 'Image'}),
- ('SET X specializes ET WHERE X name %(x)s, ET name %(et)s',
- {'et': 'Societe', 'x': 'Division'})])
+ ('SET X specializes ET WHERE X eid %(x)s, ET eid %(et)s',
+ {'et': None, 'x': None})])
def test_rschema2rql1(self):
- self.assertListEquals(list(rschema2rql(schema.rschema('relation_type'))),
+ self.assertListEquals(list(rschema2rql(schema.rschema('relation_type'), cstrtypemap)),
[
('INSERT CWRType X: X description %(description)s,X final %(final)s,X fulltext_container %(fulltext_container)s,X inlined %(inlined)s,X name %(name)s,X symmetric %(symmetric)s',
{'description': u'link a relation definition to its relation type', 'symmetric': False, 'name': u'relation_type', 'final' : False, 'fulltext_container': None, 'inlined': True}),
- ('INSERT CWRelation X: X cardinality %(cardinality)s,X composite %(composite)s,X description %(description)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE name %(se)s,ER name %(rt)s,OE name %(oe)s',
- {'rt': 'relation_type', 'description': u'', 'composite': u'object', 'oe': 'CWRType',
- 'ordernum': 1, 'cardinality': u'1*', 'se': 'CWAttribute'}),
- ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE CT name %(ctname)s, EDEF relation_type ER, EDEF from_entity SE, EDEF to_entity OE, ER name %(rt)s, SE name %(se)s, OE name %(oe)s, EDEF is CWRelation',
- {'rt': 'relation_type', 'oe': 'CWRType', 'ctname': u'RQLConstraint', 'se': 'CWAttribute', 'value': u';O;O final TRUE\n'}),
+ ('INSERT CWRelation X: X cardinality %(cardinality)s,X composite %(composite)s,X description %(description)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE eid %(se)s,ER eid %(rt)s,OE eid %(oe)s',
+ {'se': None, 'rt': None, 'oe': None,
+ 'description': u'', 'composite': u'object', 'cardinality': u'1*',
+ 'ordernum': 1}),
+ ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE CT eid %(ct)s, EDEF eid %(x)s',
+ {'x': None, 'ct': u'RQLConstraint_eid', 'value': u';O;O final TRUE\n'}),
- ('INSERT CWRelation X: X cardinality %(cardinality)s,X composite %(composite)s,X description %(description)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE name %(se)s,ER name %(rt)s,OE name %(oe)s',
- {'rt': 'relation_type', 'description': u'', 'composite': u'object', 'oe': 'CWRType',
- 'ordernum': 1, 'cardinality': u'1*', 'se': 'CWRelation'}),
- ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE CT name %(ctname)s, EDEF relation_type ER, EDEF from_entity SE, EDEF to_entity OE, ER name %(rt)s, SE name %(se)s, OE name %(oe)s, EDEF is CWRelation',
- {'rt': 'relation_type', 'oe': 'CWRType', 'ctname': u'RQLConstraint', 'se': 'CWRelation', 'value': u';O;O final FALSE\n'}),
+ ('INSERT CWRelation X: X cardinality %(cardinality)s,X composite %(composite)s,X description %(description)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE eid %(se)s,ER eid %(rt)s,OE eid %(oe)s',
+ {'se': None, 'rt': None, 'oe': None,
+ 'description': u'', 'composite': u'object',
+ 'ordernum': 1, 'cardinality': u'1*'}),
+ ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE CT eid %(ct)s, EDEF eid %(x)s',
+ {'x': None, 'ct': u'RQLConstraint_eid', 'value': u';O;O final FALSE\n'}),
])
def test_rschema2rql2(self):
- self.assertListEquals(list(rschema2rql(schema.rschema('add_permission'))),
+ self.assertListEquals(list(rschema2rql(schema.rschema('add_permission'), cstrtypemap)),
[
('INSERT CWRType X: X description %(description)s,X final %(final)s,X fulltext_container %(fulltext_container)s,X inlined %(inlined)s,X name %(name)s,X symmetric %(symmetric)s', {'description': u'', 'symmetric': False, 'name': u'add_permission', 'final': False, 'fulltext_container': None, 'inlined': False}),
- ('INSERT CWRelation X: X cardinality %(cardinality)s,X composite %(composite)s,X description %(description)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE name %(se)s,ER name %(rt)s,OE name %(oe)s',
- {'rt': 'add_permission', 'description': u'groups allowed to add entities/relations of this type', 'composite': None, 'oe': 'CWGroup', 'ordernum': 9999, 'cardinality': u'**', 'se': 'CWEType'}),
- ('INSERT CWRelation X: X cardinality %(cardinality)s,X composite %(composite)s,X description %(description)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE name %(se)s,ER name %(rt)s,OE name %(oe)s',
- {'rt': 'add_permission', 'description': u'rql expression allowing to add entities/relations of this type', 'composite': 'subject', 'oe': 'RQLExpression', 'ordernum': 9999, 'cardinality': u'*?', 'se': 'CWEType'}),
+ ('INSERT CWRelation X: X cardinality %(cardinality)s,X composite %(composite)s,X description %(description)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE eid %(se)s,ER eid %(rt)s,OE eid %(oe)s',
+ {'se': None, 'rt': None, 'oe': None,
+ 'description': u'groups allowed to add entities/relations of this type', 'composite': None, 'ordernum': 9999, 'cardinality': u'**'}),
+ ('INSERT CWRelation X: X cardinality %(cardinality)s,X composite %(composite)s,X description %(description)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE eid %(se)s,ER eid %(rt)s,OE eid %(oe)s',
+ {'se': None, 'rt': None, 'oe': None,
+ 'description': u'rql expression allowing to add entities/relations of this type', 'composite': 'subject', 'ordernum': 9999, 'cardinality': u'*?'}),
- ('INSERT CWRelation X: X cardinality %(cardinality)s,X composite %(composite)s,X description %(description)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE name %(se)s,ER name %(rt)s,OE name %(oe)s',
- {'rt': 'add_permission', 'description': u'groups allowed to add entities/relations of this type', 'composite': None, 'oe': 'CWGroup', 'ordernum': 9999, 'cardinality': u'**', 'se': 'CWRelation'}),
- ('INSERT CWRelation X: X cardinality %(cardinality)s,X composite %(composite)s,X description %(description)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE name %(se)s,ER name %(rt)s,OE name %(oe)s',
- {'rt': 'add_permission', 'description': u'rql expression allowing to add entities/relations of this type', 'composite': 'subject', 'oe': 'RQLExpression', 'ordernum': 9999, 'cardinality': u'*?', 'se': 'CWRelation'}),
+ ('INSERT CWRelation X: X cardinality %(cardinality)s,X composite %(composite)s,X description %(description)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE eid %(se)s,ER eid %(rt)s,OE eid %(oe)s',
+ {'se': None, 'rt': None, 'oe': None,
+ 'description': u'groups allowed to add entities/relations of this type', 'composite': None, 'ordernum': 9999, 'cardinality': u'**'}),
+ ('INSERT CWRelation X: X cardinality %(cardinality)s,X composite %(composite)s,X description %(description)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE eid %(se)s,ER eid %(rt)s,OE eid %(oe)s',
+ {'se': None, 'rt': None, 'oe': None,
+ 'description': u'rql expression allowing to add entities/relations of this type', 'composite': 'subject', 'ordernum': 9999, 'cardinality': u'*?'}),
])
def test_rschema2rql3(self):
- self.assertListEquals(list(rschema2rql(schema.rschema('cardinality'))),
+ self.assertListEquals(list(rschema2rql(schema.rschema('cardinality'), cstrtypemap)),
[
('INSERT CWRType X: X description %(description)s,X final %(final)s,X fulltext_container %(fulltext_container)s,X inlined %(inlined)s,X name %(name)s,X symmetric %(symmetric)s',
{'description': u'', 'symmetric': False, 'name': u'cardinality', 'final': True, 'fulltext_container': None, 'inlined': False}),
- ('INSERT CWAttribute X: X cardinality %(cardinality)s,X defaultval %(defaultval)s,X description %(description)s,X fulltextindexed %(fulltextindexed)s,X indexed %(indexed)s,X internationalizable %(internationalizable)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE name %(se)s,ER name %(rt)s,OE name %(oe)s',
- {'rt': 'cardinality', 'description': u'subject/object cardinality', 'internationalizable': True, 'fulltextindexed': False, 'ordernum': 5, 'defaultval': None, 'indexed': False, 'cardinality': u'?1', 'oe': 'String', 'se': 'CWAttribute'}),
- ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE CT name %(ctname)s, EDEF relation_type ER, EDEF from_entity SE, EDEF to_entity OE, ER name %(rt)s, SE name %(se)s, OE name %(oe)s, EDEF is CWAttribute',
- {'rt': 'cardinality', 'oe': 'String', 'ctname': u'SizeConstraint', 'se': 'CWAttribute', 'value': u'max=2'}),
- ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE CT name %(ctname)s, EDEF relation_type ER, EDEF from_entity SE, EDEF to_entity OE, ER name %(rt)s, SE name %(se)s, OE name %(oe)s, EDEF is CWAttribute',
- {'rt': 'cardinality', 'oe': 'String', 'ctname': u'StaticVocabularyConstraint', 'se': 'CWAttribute', 'value': u"u'?1', u'11'"}),
+ ('INSERT CWAttribute X: X cardinality %(cardinality)s,X defaultval %(defaultval)s,X description %(description)s,X fulltextindexed %(fulltextindexed)s,X indexed %(indexed)s,X internationalizable %(internationalizable)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE eid %(se)s,ER eid %(rt)s,OE eid %(oe)s',
+ {'se': None, 'rt': None, 'oe': None,
+ 'description': u'subject/object cardinality', 'internationalizable': True, 'fulltextindexed': False, 'ordernum': 5, 'defaultval': None, 'indexed': False, 'cardinality': u'?1'}),
+ ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE CT eid %(ct)s, EDEF eid %(x)s',
+ {'x': None, 'ct': u'SizeConstraint_eid', 'value': u'max=2'}),
+ ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE CT eid %(ct)s, EDEF eid %(x)s',
+ {'x': None, 'ct': u'StaticVocabularyConstraint_eid', 'value': u"u'?1', u'11'"}),
- ('INSERT CWAttribute X: X cardinality %(cardinality)s,X defaultval %(defaultval)s,X description %(description)s,X fulltextindexed %(fulltextindexed)s,X indexed %(indexed)s,X internationalizable %(internationalizable)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE name %(se)s,ER name %(rt)s,OE name %(oe)s',
- {'rt': 'cardinality', 'description': u'subject/object cardinality', 'internationalizable': True, 'fulltextindexed': False, 'ordernum': 5, 'defaultval': None, 'indexed': False, 'cardinality': u'?1', 'oe': 'String', 'se': 'CWRelation'}),
- ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE CT name %(ctname)s, EDEF relation_type ER, EDEF from_entity SE, EDEF to_entity OE, ER name %(rt)s, SE name %(se)s, OE name %(oe)s, EDEF is CWAttribute',
- {'rt': 'cardinality', 'oe': 'String', 'ctname': u'SizeConstraint', 'se': 'CWRelation', 'value': u'max=2'}),
- ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE CT name %(ctname)s, EDEF relation_type ER, EDEF from_entity SE, EDEF to_entity OE, ER name %(rt)s, SE name %(se)s, OE name %(oe)s, EDEF is CWAttribute',
- {'rt': 'cardinality', 'oe': 'String', 'ctname': u'StaticVocabularyConstraint', 'se': 'CWRelation', 'value': u"u'?*', u'1*', u'+*', u'**', u'?+', u'1+', u'++', u'*+', u'?1', u'11', u'+1', u'*1', u'??', u'1?', u'+?', u'*?'"}),
+ ('INSERT CWAttribute X: X cardinality %(cardinality)s,X defaultval %(defaultval)s,X description %(description)s,X fulltextindexed %(fulltextindexed)s,X indexed %(indexed)s,X internationalizable %(internationalizable)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE eid %(se)s,ER eid %(rt)s,OE eid %(oe)s',
+ {'se': None, 'rt': None, 'oe': None,
+ 'description': u'subject/object cardinality', 'internationalizable': True, 'fulltextindexed': False, 'ordernum': 5, 'defaultval': None, 'indexed': False, 'cardinality': u'?1'}),
+ ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE CT eid %(ct)s, EDEF eid %(x)s',
+ {'x': None, 'ct': u'SizeConstraint_eid', 'value': u'max=2'}),
+ ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE CT eid %(ct)s, EDEF eid %(x)s',
+ {'x': None, 'ct': u'StaticVocabularyConstraint_eid', 'value': u"u'?*', u'1*', u'+*', u'**', u'?+', u'1+', u'++', u'*+', u'?1', u'11', u'+1', u'*1', u'??', u'1?', u'+?', u'*?'"}),
])
+ def test_rdef2rql(self):
+ self.assertListEquals(list(rdef2rql(schema['description_format'].rdefs[('CWRType', 'String')], cstrtypemap)),
+ [
+ ('INSERT CWAttribute X: X cardinality %(cardinality)s,X defaultval %(defaultval)s,X description %(description)s,X fulltextindexed %(fulltextindexed)s,X indexed %(indexed)s,X internationalizable %(internationalizable)s,X ordernum %(ordernum)s,X relation_type ER,X from_entity SE,X to_entity OE WHERE SE eid %(se)s,ER eid %(rt)s,OE eid %(oe)s',
+ {'se': None, 'rt': None, 'oe': None,
+ 'description': u'', 'internationalizable': True, 'fulltextindexed': False, 'ordernum': 7, 'defaultval': u'text/plain', 'indexed': False, 'cardinality': u'?1'}),
+ ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE CT eid %(ct)s, EDEF eid %(x)s',
+ {'x': None, 'value': u'None', 'ct': 'FormatConstraint_eid'}),
+ ('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE CT eid %(ct)s, EDEF eid %(x)s',
+ {'x': None, 'value': u'max=50', 'ct': 'SizeConstraint_eid'})])
+
def test_updateeschema2rql1(self):
- self.assertListEquals(list(updateeschema2rql(schema.eschema('CWAttribute'))),
- [('SET X description %(description)s,X final %(final)s,X name %(name)s WHERE X is CWEType, X name %(et)s',
- {'description': u'define a final relation: link a final relation type from a non final entity to a final entity type. used to build the instance schema', 'et': 'CWAttribute', 'final': False, 'name': u'CWAttribute'}),
+ self.assertListEquals(list(updateeschema2rql(schema.eschema('CWAttribute'), 1)),
+ [('SET X description %(description)s,X final %(final)s,X name %(name)s WHERE X eid %(x)s',
+ {'description': u'define a final relation: link a final relation type from a non final entity to a final entity type. used to build the instance schema', 'x': 1, 'final': False, 'name': u'CWAttribute'}),
])
def test_updateeschema2rql2(self):
- self.assertListEquals(list(updateeschema2rql(schema.eschema('String'))),
- [('SET X description %(description)s,X final %(final)s,X name %(name)s WHERE X is CWEType, X name %(et)s',
- {'description': u'', 'et': 'String', 'final': True, 'name': u'String'})
+ self.assertListEquals(list(updateeschema2rql(schema.eschema('String'), 1)),
+ [('SET X description %(description)s,X final %(final)s,X name %(name)s WHERE X eid %(x)s',
+ {'description': u'', 'x': 1, 'final': True, 'name': u'String'})
])
def test_updaterschema2rql1(self):
- self.assertListEquals(list(updaterschema2rql(schema.rschema('relation_type'))),
+ self.assertListEquals(list(updaterschema2rql(schema.rschema('relation_type'), 1)),
[
- ('SET X description %(description)s,X final %(final)s,X fulltext_container %(fulltext_container)s,X inlined %(inlined)s,X name %(name)s,X symmetric %(symmetric)s WHERE X is CWRType, X name %(rt)s',
- {'rt': 'relation_type', 'symmetric': False,
+ ('SET X description %(description)s,X final %(final)s,X fulltext_container %(fulltext_container)s,X inlined %(inlined)s,X name %(name)s,X symmetric %(symmetric)s WHERE X eid %(x)s',
+ {'x': 1, 'symmetric': False,
'description': u'link a relation definition to its relation type',
'final': False, 'fulltext_container': None, 'inlined': True, 'name': u'relation_type'})
])
def test_updaterschema2rql2(self):
expected = [
- ('SET X description %(description)s,X final %(final)s,X fulltext_container %(fulltext_container)s,X inlined %(inlined)s,X name %(name)s,X symmetric %(symmetric)s WHERE X is CWRType, X name %(rt)s',
- {'rt': 'add_permission', 'symmetric': False,
+ ('SET X description %(description)s,X final %(final)s,X fulltext_container %(fulltext_container)s,X inlined %(inlined)s,X name %(name)s,X symmetric %(symmetric)s WHERE X eid %(x)s',
+ {'x': 1, 'symmetric': False,
'description': u'', 'final': False, 'fulltext_container': None,
'inlined': False, 'name': u'add_permission'})
]
- for i, (rql, args) in enumerate(updaterschema2rql(schema.rschema('add_permission'))):
+ for i, (rql, args) in enumerate(updaterschema2rql(schema.rschema('add_permission'), 1)):
yield self.assertEquals, (rql, args), expected[i]
class Perms2RQLTC(TestCase):
@@ -144,29 +174,29 @@
def test_eperms2rql1(self):
self.assertListEquals([(rql, kwargs) for rql, kwargs in erperms2rql(schema.eschema('CWEType'), self.GROUP_MAPPING)],
- [('SET X read_permission Y WHERE Y eid %(g)s, ', {'g': 0}),
- ('SET X read_permission Y WHERE Y eid %(g)s, ', {'g': 1}),
- ('SET X read_permission Y WHERE Y eid %(g)s, ', {'g': 2}),
- ('SET X add_permission Y WHERE Y eid %(g)s, ', {'g': 0}),
- ('SET X update_permission Y WHERE Y eid %(g)s, ', {'g': 0}),
- ('SET X delete_permission Y WHERE Y eid %(g)s, ', {'g': 0}),
+ [('SET X read_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 0}),
+ ('SET X read_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 1}),
+ ('SET X read_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 2}),
+ ('SET X add_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 0}),
+ ('SET X update_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 0}),
+ ('SET X delete_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 0}),
])
def test_rperms2rql2(self):
self.assertListEquals([(rql, kwargs) for rql, kwargs in erperms2rql(schema.rschema('read_permission').rdef('CWEType', 'CWGroup'), self.GROUP_MAPPING)],
- [('SET X read_permission Y WHERE Y eid %(g)s, ', {'g': 0}),
- ('SET X read_permission Y WHERE Y eid %(g)s, ', {'g': 1}),
- ('SET X read_permission Y WHERE Y eid %(g)s, ', {'g': 2}),
- ('SET X add_permission Y WHERE Y eid %(g)s, ', {'g': 0}),
- ('SET X delete_permission Y WHERE Y eid %(g)s, ', {'g': 0}),
+ [('SET X read_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 0}),
+ ('SET X read_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 1}),
+ ('SET X read_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 2}),
+ ('SET X add_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 0}),
+ ('SET X delete_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 0}),
])
def test_rperms2rql3(self):
self.assertListEquals([(rql, kwargs) for rql, kwargs in erperms2rql(schema.rschema('name').rdef('CWEType', 'String'), self.GROUP_MAPPING)],
- [('SET X read_permission Y WHERE Y eid %(g)s, ', {'g': 0}),
- ('SET X read_permission Y WHERE Y eid %(g)s, ', {'g': 1}),
- ('SET X read_permission Y WHERE Y eid %(g)s, ', {'g': 2}),
- ('SET X update_permission Y WHERE Y eid %(g)s, ', {'g': 0}),
+ [('SET X read_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 0}),
+ ('SET X read_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 1}),
+ ('SET X read_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 2}),
+ ('SET X update_permission Y WHERE Y eid %(g)s, X eid %(x)s', {'g': 0}),
])
#def test_perms2rql(self):
--- a/server/test/unittest_security.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/test/unittest_security.py Fri Mar 12 16:23:21 2010 +0100
@@ -456,8 +456,8 @@
rset = cu.execute('CWUser X WHERE X eid %(x)s', {'x': anon.eid}, 'x')
self.assertEquals(rset.rows, [[anon.eid]])
# but can't modify it
- cu.execute('SET X login "toto" WHERE X eid %(x)s', {'x': anon.eid})
- self.assertRaises(Unauthorized, cnx.commit)
+ self.assertRaises(Unauthorized,
+ cu.execute, 'SET X login "toto" WHERE X eid %(x)s', {'x': anon.eid})
def test_in_group_relation(self):
cnx = self.login('iaminusersgrouponly')
--- a/server/test/unittest_sqlutils.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/test/unittest_sqlutils.py Fri Mar 12 16:23:21 2010 +0100
@@ -20,13 +20,13 @@
def test_init(self):
o = SQLAdapterMixIn(BASE_CONFIG)
- self.assertEquals(o.encoding, 'UTF-8')
+ self.assertEquals(o.dbhelper.dbencoding, 'UTF-8')
def test_init_encoding(self):
config = BASE_CONFIG.copy()
config['db-encoding'] = 'ISO-8859-1'
o = SQLAdapterMixIn(config)
- self.assertEquals(o.encoding, 'ISO-8859-1')
+ self.assertEquals(o.dbhelper.dbencoding, 'ISO-8859-1')
if __name__ == '__main__':
unittest_main()
--- a/server/test/unittest_ssplanner.py Fri Mar 12 16:21:13 2010 +0100
+++ b/server/test/unittest_ssplanner.py Fri Mar 12 16:23:21 2010 +0100
@@ -12,6 +12,10 @@
# keep cnx so it's not garbage collected and the associated session closed
repo, cnx = init_test_database()
+def teardown_module(*args):
+ global repo, cnx
+ del repo, cnx
+
class SSPlannerTC(BasePlannerTC):
repo = repo
_test = test_plan
--- a/sobjects/notification.py Fri Mar 12 16:21:13 2010 +0100
+++ b/sobjects/notification.py Fri Mar 12 16:23:21 2010 +0100
@@ -33,11 +33,9 @@
def recipients(self):
mode = self._cw.vreg.config['default-recipients-mode']
if mode == 'users':
- # use unsafe execute else we may don't have the right to see users
- # to notify...
- execute = self._cw.unsafe_execute
+ execute = self._cw.execute
dests = [(u.get_email(), u.property_value('ui.language'))
- for u in execute(self.user_rql, build_descr=True, propagate=True).entities()]
+ for u in execute(self.user_rql, build_descr=True).entities()]
elif mode == 'default-dest-addrs':
lang = self._cw.vreg.property_value('ui.language')
dests = zip(self._cw.vreg.config['default-dest-addrs'], repeat(lang))
@@ -158,7 +156,8 @@
if not rdef.has_perm(self._cw, 'read', eid=self.cw_rset[0][0]):
continue
# XXX suppose it's a subject relation...
- elif not rschema.has_perm(self._cw, 'read', fromeid=self.cw_rset[0][0]): # XXX toeid
+ elif not rschema.has_perm(self._cw, 'read',
+ fromeid=self.cw_rset[0][0]):
continue
if attr in self.no_detailed_change_attrs:
msg = _('%s updated') % _(attr)
--- a/sobjects/supervising.py Fri Mar 12 16:21:13 2010 +0100
+++ b/sobjects/supervising.py Fri Mar 12 16:23:21 2010 +0100
@@ -92,7 +92,7 @@
return self._cw._('[%s supervision] changes summary') % self._cw.vreg.config.appid
def call(self, changes):
- user = self._cw.actual_session().user
+ user = self._cw.user
self.w(self._cw._('user %s has made the following change(s):\n\n')
% user.login)
for event, changedescr in filter_changes(changes):
@@ -129,17 +129,16 @@
self.w(u' %s' % entity.absolute_url())
def _relation_context(self, changedescr):
- _ = self._cw._
- session = self._cw.actual_session()
+ session = self._cw
def describe(eid):
try:
- return _(session.describe(eid)[0]).lower()
+ return session._(session.describe(eid)[0]).lower()
except UnknownEid:
# may occurs when an entity has been deleted from an external
# source and we're cleaning its relation
- return _('unknown external entity')
+ return session._('unknown external entity')
eidfrom, rtype, eidto = changedescr.eidfrom, changedescr.rtype, changedescr.eidto
- return {'rtype': _(rtype),
+ return {'rtype': session._(rtype),
'eidfrom': eidfrom,
'frometype': describe(eidfrom),
'eidto': eidto,
--- a/test/unittest_dbapi.py Fri Mar 12 16:21:13 2010 +0100
+++ b/test/unittest_dbapi.py Fri Mar 12 16:23:21 2010 +0100
@@ -35,8 +35,8 @@
self.assertEquals(cnx.user(None).login, 'anon')
self.assertEquals(cnx.describe(1), (u'CWGroup', u'system', None))
self.restore_connection() # proper way to close cnx
- self.assertRaises(ConnectionError, cnx.user, None)
- self.assertRaises(ConnectionError, cnx.describe, 1)
+ self.assertRaises(ProgrammingError, cnx.user, None)
+ self.assertRaises(ProgrammingError, cnx.describe, 1)
def test_session_data_api(self):
cnx = self.login('anon')
@@ -64,9 +64,9 @@
cnx.set_shared_data('data', 4)
self.assertEquals(cnx.get_shared_data('data'), 4)
self.restore_connection() # proper way to close cnx
- self.assertRaises(ConnectionError, cnx.check)
- self.assertRaises(ConnectionError, cnx.set_shared_data, 'data', 0)
- self.assertRaises(ConnectionError, cnx.get_shared_data, 'data')
+ self.assertRaises(ProgrammingError, cnx.check)
+ self.assertRaises(ProgrammingError, cnx.set_shared_data, 'data', 0)
+ self.assertRaises(ProgrammingError, cnx.get_shared_data, 'data')
if __name__ == '__main__':
from logilab.common.testlib import unittest_main
--- a/test/unittest_entity.py Fri Mar 12 16:21:13 2010 +0100
+++ b/test/unittest_entity.py Fri Mar 12 16:23:21 2010 +0100
@@ -436,7 +436,7 @@
def test_complete_relation(self):
session = self.session
- eid = session.unsafe_execute(
+ eid = session.execute(
'INSERT TrInfo X: X comment "zou", X wf_info_for U, X from_state S1, X to_state S2 '
'WHERE U login "admin", S1 name "activated", S2 name "deactivated"')[0][0]
trinfo = self.entity('Any X WHERE X eid %(x)s', {'x': eid}, 'x')
--- a/test/unittest_rset.py Fri Mar 12 16:21:13 2010 +0100
+++ b/test/unittest_rset.py Fri Mar 12 16:23:21 2010 +0100
@@ -11,7 +11,7 @@
from rql import parse
-from logilab.common.testlib import TestCase, unittest_main
+from logilab.common.testlib import TestCase, unittest_main, mock_object
from cubicweb.devtools.testlib import CubicWebTC
from cubicweb.rset import NotAnEntity, ResultSet, attr_desc_iterator
@@ -60,7 +60,7 @@
self.rset = ResultSet([[12, 'adim'], [13, 'syt']],
'Any U,L where U is CWUser, U login L',
description=[['CWUser', 'String'], ['Bar', 'String']])
- self.rset.vreg = self.vreg
+ self.rset.req = mock_object(vreg=self.vreg)
def compare_urls(self, url1, url2):
info1 = urlsplit(url1)
--- a/utils.py Fri Mar 12 16:21:13 2010 +0100
+++ b/utils.py Fri Mar 12 16:23:21 2010 +0100
@@ -11,35 +11,29 @@
import decimal
import datetime
import random
+from uuid import uuid4
+from warnings import warn
from logilab.mtconverter import xml_escape
from logilab.common.deprecation import deprecated
+_MARKER = object()
+
# initialize random seed from current time
random.seed()
-if sys.version_info[:2] < (2, 5):
+def make_uid(key=None):
+ """Return a unique identifier string.
- from time import time
- from md5 import md5
- from random import randint
+    If specified, `key` is used to prefix the generated uid so it can be used
+    for instance as a DOM id or as SQL table names.
- def make_uid(key):
- """forge a unique identifier
- XXX not that unique on win32
- """
- key = str(key)
- msg = key + "%.10f" % time() + str(randint(0, 1000000))
- return key + md5(msg).hexdigest()
-
-else:
-
- from uuid import uuid4
-
- def make_uid(key):
- # remove dash, generated uid are used as identifier sometimes (sql table
- # names at least)
- return str(key) + str(uuid4()).replace('-', '')
+ See uuid.uuid4 documentation for the shape of the generated identifier, but
+    this is basically a 32-character hexadecimal string.
+ """
+ if key is None:
+ return uuid4().hex
+ return str(key) + uuid4().hex
def dump_class(cls, clsname):
@@ -52,14 +46,9 @@
# type doesn't accept unicode name
# return type.__new__(type, str(clsname), (cls,), {})
# __autogenerated__ attribute is just a marker
- return type(str(clsname), (cls,), {'__autogenerated__': True})
-
-
-def merge_dicts(dict1, dict2):
- """update a copy of `dict1` with `dict2`"""
- dict1 = dict(dict1)
- dict1.update(dict2)
- return dict1
+ return type(str(clsname), (cls,), {'__autogenerated__': True,
+ '__doc__': cls.__doc__,
+ '__module__': cls.__module__})
# use networkX instead ?
@@ -166,15 +155,12 @@
def add_post_inline_script(self, content):
self.post_inlined_scripts.append(content)
- def add_onload(self, jscode, jsoncall=False):
- if jsoncall:
- self.add_post_inline_script(u"""jQuery(CubicWeb).one('ajax-loaded', function(event) {
-%s
+ def add_onload(self, jscode, jsoncall=_MARKER):
+ if jsoncall is not _MARKER:
+ warn('[3.7] specifying jsoncall is not needed anymore',
+ DeprecationWarning, stacklevel=2)
+ self.add_post_inline_script(u"""jQuery(CubicWeb).one('server-response', function(event) {
});""" % jscode)
- else:
- self.add_post_inline_script(u"""jQuery(document).ready(function () {
- %s
- });""" % jscode)
def add_js(self, jsfile):
@@ -310,6 +296,7 @@
__answer[0] = True
return True
+
try:
# may not be there if cubicweb-web not installed
from simplejson import dumps, JSONEncoder
@@ -337,6 +324,14 @@
# just return None in those cases.
return None
+
+@deprecated('[3.7] merge_dicts is deprecated')
+def merge_dicts(dict1, dict2):
+ """update a copy of `dict1` with `dict2`"""
+ dict1 = dict(dict1)
+ dict1.update(dict2)
+ return dict1
+
from logilab.common import date
_THIS_MOD_NS = globals()
for funcname in ('date_range', 'todate', 'todatetime', 'datetime2ticks',
--- a/web/application.py Fri Mar 12 16:21:13 2010 +0100
+++ b/web/application.py Fri Mar 12 16:23:21 2010 +0100
@@ -393,7 +393,7 @@
self.exception(repr(ex))
req.set_header('Cache-Control', 'no-cache')
req.remove_header('Etag')
- req.message = None
+ req.reset_message()
req.reset_headers()
if req.json_request:
raise RemoteCallFailed(unicode(ex))
--- a/web/component.py Fri Mar 12 16:21:13 2010 +0100
+++ b/web/component.py Fri Mar 12 16:23:21 2010 +0100
@@ -14,7 +14,6 @@
from logilab.mtconverter import xml_escape
from cubicweb import role
-from cubicweb.utils import merge_dicts
from cubicweb.view import Component
from cubicweb.selectors import (
paginated_rset, one_line_rset, primary_view, match_context_prop,
@@ -116,8 +115,9 @@
del params[self.stop_param]
def page_url(self, path, params, start, stop):
- params = merge_dicts(params, {self.start_param : start,
- self.stop_param : stop,})
+ params = dict(params)
+ params.update({self.start_param : start,
+ self.stop_param : stop,})
if path == 'json':
rql = params.pop('rql', self.cw_rset.printable_rql())
# latest 'true' used for 'swap' mode
--- a/web/controller.py Fri Mar 12 16:21:13 2010 +0100
+++ b/web/controller.py Fri Mar 12 16:23:21 2010 +0100
@@ -8,6 +8,8 @@
"""
__docformat__ = "restructuredtext en"
+from logilab.mtconverter import xml_escape
+
from cubicweb.selectors import yes
from cubicweb.appobject import AppObject
from cubicweb.web import LOGGER, Redirect, RequestError
@@ -79,19 +81,6 @@
self.cw_rset = pp.process_query(rql)
return self.cw_rset
- def check_expected_params(self, params):
- """check that the given list of parameters are specified in the form
- dictionary
- """
- missing = []
- for param in params:
- if not self._cw.form.get(param):
- missing.append(param)
- if missing:
- raise RequestError('missing required parameter(s): %s'
- % ','.join(missing))
-
-
def notify_edited(self, entity):
"""called by edit_entity() to notify which entity is edited"""
# NOTE: we can't use entity.rest_path() at this point because
@@ -100,31 +89,10 @@
if not self._edited_entity:
self._edited_entity = entity
- # XXX move to EditController (only customer)
- def delete_entities(self, eidtypes):
- """delete entities from the repository"""
- redirect_info = set()
- eidtypes = tuple(eidtypes)
- for eid, etype in eidtypes:
- entity = self._cw.entity_from_eid(eid, etype)
- path, params = entity.after_deletion_path()
- redirect_info.add( (path, tuple(params.iteritems())) )
- entity.delete()
- if len(redirect_info) > 1:
- # In the face of ambiguity, refuse the temptation to guess.
- self._after_deletion_path = 'view', ()
- else:
- self._after_deletion_path = iter(redirect_info).next()
- if len(eidtypes) > 1:
- self._cw.set_message(self._cw._('entities deleted'))
- else:
- self._cw.set_message(self._cw._('entity deleted'))
-
def validate_cache(self, view):
view.set_http_cache_headers()
self._cw.validate_cache()
- # XXX is that used AT ALL ?
def reset(self):
"""reset form parameters and redirect to a view determinated by given
parameters
@@ -132,7 +100,7 @@
newparams = {}
# sets message if needed
if self._cw.message:
- newparams['__message'] = self._cw.message
+ newparams['_cwmsgid'] = self._cw.set_redirect_message(self._cw.message)
if self._cw.form.has_key('__action_apply'):
self._return_to_edition_view(newparams)
if self._cw.form.has_key('__action_cancel'):
@@ -140,8 +108,6 @@
else:
self._return_to_original_view(newparams)
-
- # XXX is that used AT ALL ?
def _return_to_original_view(self, newparams):
"""validate-button case"""
# transforms __redirect[*] parameters into regular form parameters
@@ -156,10 +122,13 @@
elif '__redirectpath' in self._cw.form:
# if redirect path was explicitly specified in the form, use it
path = self._cw.form['__redirectpath']
- if self._edited_entity and path != self._edited_entity.rest_path():
- # XXX may be here on modification? if yes the message should be
- # modified where __createdpath is detected (cw.web.request)
- newparams['__createdpath'] = self._edited_entity.rest_path()
+ if (self._edited_entity and path != self._edited_entity.rest_path()
+ and '_cwmsgid' in newparams):
+ # XXX may be here on modification?
+ msg = u'(<a href="%s">%s</a>)' % (
+ xml_escape(self._edited_entity.absolute_url()),
+ self._cw._('click here to see created entity'))
+ self._cw.append_to_redirect_message(msg)
elif self._after_deletion_path:
# else it should have been set during form processing
path, params = self._after_deletion_path
@@ -174,7 +143,6 @@
url = append_url_params(url, self._cw.form.get('__redirectparams'))
raise Redirect(url)
- # XXX is that used AT ALL ?
def _return_to_edition_view(self, newparams):
"""apply-button case"""
form = self._cw.form
@@ -186,7 +154,7 @@
path = 'view'
newparams['rql'] = form['rql']
else:
- self.warning("the edited data seems inconsistent")
+ self.warning('the edited data seems inconsistent')
path = 'view'
# pick up the correction edition view
if form.get('__form_id'):
@@ -198,7 +166,6 @@
raise Redirect(self._cw.build_url(path, **newparams))
- # XXX is that used AT ALL ?
def _return_to_lastpage(self, newparams):
"""cancel-button case: in this case we are always expecting to go back
where we came from, and this is not easy. Currently we suppose that
--- a/web/data/cubicweb.ajax.js Fri Mar 12 16:21:13 2010 +0100
+++ b/web/data/cubicweb.ajax.js Fri Mar 12 16:23:21 2010 +0100
@@ -92,12 +92,9 @@
setFormsTarget(node);
}
loadDynamicFragments(node);
- // XXX simulates document.ready, but the former
- // only runs once, this one potentially many times
- // we probably need to unbind the fired events
- // When this is done, jquery.treeview.js (for instance)
- // can be unpatched.
- jQuery(CubicWeb).trigger('ajax-loaded');
+    // XXX [3.7] jQuery.one is now used instead of jQuery.bind,
+ // jquery.treeview.js can be unpatched accordingly.
+ jQuery(CubicWeb).trigger('server-response', [true, node]);
}
/* cubicweb loadxhtml plugin to make jquery handle xhtml response
--- a/web/data/cubicweb.python.js Fri Mar 12 16:21:13 2010 +0100
+++ b/web/data/cubicweb.python.js Fri Mar 12 16:23:21 2010 +0100
@@ -394,4 +394,13 @@
}
};
+jQuery(document).ready(function() {
+ jQuery(CubicWeb).trigger('server-response', [false, document]);
+});
+
+jQuery(CubicWeb).bind('ajax-loaded', function() {
+ log('[3.7] "ajax-loaded" event is deprecated, use "server-response" instead');
+ jQuery(CubicWeb).trigger('server-response', [false, document]);
+});
+
CubicWeb.provide('python.js');
--- a/web/request.py Fri Mar 12 16:21:13 2010 +0100
+++ b/web/request.py Fri Mar 12 16:23:21 2010 +0100
@@ -68,7 +68,6 @@
def __init__(self, vreg, https, form=None):
super(CubicWebRequestBase, self).__init__(vreg)
- self.message = None
self.authmode = vreg.config['auth-mode']
self.https = https
# raw html headers that can be added from any view
@@ -126,35 +125,24 @@
"""
super(CubicWebRequestBase, self).set_connection(cnx, user)
# set request language
- try:
- vreg = self.vreg
- if self.user:
- try:
- # 1. user specified language
- lang = vreg.typed_value('ui.language',
- self.user.properties['ui.language'])
+ vreg = self.vreg
+ if self.user:
+ try:
+ # 1. user specified language
+ lang = vreg.typed_value('ui.language',
+ self.user.properties['ui.language'])
+ self.set_language(lang)
+ return
+ except KeyError:
+ pass
+ if vreg.config['language-negociation']:
+ # 2. http negociated language
+ for lang in self.header_accept_language():
+ if lang in self.translations:
self.set_language(lang)
return
- except KeyError:
- pass
- if vreg.config['language-negociation']:
- # 2. http negociated language
- for lang in self.header_accept_language():
- if lang in self.translations:
- self.set_language(lang)
- return
- # 3. default language
- self.set_default_language(vreg)
- finally:
- # XXX code smell
- # have to be done here because language is not yet set in setup_params
- #
- # special key for created entity, added in controller's reset method
- # if no message set, we don't want this neither
- if '__createdpath' in self.form and self.message:
- self.message += ' (<a href="%s">%s</a>)' % (
- self.build_url(self.form.pop('__createdpath')),
- self._('click here to see created entity'))
+ # 3. default language
+ self.set_default_language(vreg)
def set_language(self, lang):
gettext, self.pgettext = self.translations[lang]
@@ -179,26 +167,27 @@
subclasses should overrides to
"""
+ self.form = {}
if params is None:
- params = {}
- self.form = params
+ return
encoding = self.encoding
- for k, v in params.items():
- if isinstance(v, (tuple, list)):
- v = [unicode(x, encoding) for x in v]
- if len(v) == 1:
- v = v[0]
- if k in self.no_script_form_params:
- v = self.no_script_form_param(k, value=v)
- if isinstance(v, str):
- v = unicode(v, encoding)
- if k == '__message':
- self.set_message(v)
- del self.form[k]
+ for param, val in params.iteritems():
+ if isinstance(val, (tuple, list)):
+ val = [unicode(x, encoding) for x in val]
+ if len(val) == 1:
+ val = val[0]
+ elif isinstance(val, str):
+ val = unicode(val, encoding)
+ if param in self.no_script_form_params and val:
+ val = self.no_script_form_param(param, val)
+ if param == '_cwmsgid':
+ self.set_message_id(val)
+ elif param == '__message':
+ self.set_message(val)
else:
- self.form[k] = v
+ self.form[param] = val
- def no_script_form_param(self, param, default=None, value=None):
+ def no_script_form_param(self, param, value):
"""ensure there is no script in a user form param
by default return a cleaned string instead of raising a security
@@ -208,16 +197,12 @@
that are at some point inserted in a generated html page to protect
against script kiddies
"""
- if value is None:
- value = self.form.get(param, default)
- if not value is default and value:
- # safety belt for strange urls like http://...?vtitle=yo&vtitle=yo
- if isinstance(value, (list, tuple)):
- self.error('no_script_form_param got a list (%s). Who generated the URL ?',
- repr(value))
- value = value[0]
- return remove_html_tags(value)
- return value
+ # safety belt for strange urls like http://...?vtitle=yo&vtitle=yo
+ if isinstance(value, (list, tuple)):
+ self.error('no_script_form_param got a list (%s). Who generated the URL ?',
+ repr(value))
+ value = value[0]
+ return remove_html_tags(value)
def list_form_param(self, param, form=None, pop=False):
"""get param from form parameters and return its value as a list,
@@ -245,9 +230,48 @@
# web state helpers #######################################################
+ @property
+ def message(self):
+ try:
+ return self.get_session_data(self._msgid, default=u'', pop=True)
+ except AttributeError:
+ try:
+ return self._msg
+ except AttributeError:
+ return None
+
def set_message(self, msg):
assert isinstance(msg, unicode)
- self.message = msg
+ self._msg = msg
+
+ def set_message_id(self, msgid):
+ self._msgid = msgid
+
+ @cached
+ def redirect_message_id(self):
+ return make_uid()
+
+ def set_redirect_message(self, msg):
+ assert isinstance(msg, unicode)
+ msgid = self.redirect_message_id()
+ self.set_session_data(msgid, msg)
+ return msgid
+
+ def append_to_redirect_message(self, msg):
+ msgid = self.redirect_message_id()
+ currentmsg = self.get_session_data(msgid)
+ if currentmsg is not None:
+ currentmsg = '%s %s' % (currentmsg, msg)
+ else:
+ currentmsg = msg
+ self.set_session_data(msgid, currentmsg)
+ return msgid
+
+ def reset_message(self):
+ if hasattr(self, '_msg'):
+ del self._msg
+ if hasattr(self, '_msgid'):
+ del self._msgid
def update_search_state(self):
"""update the current search state"""
@@ -481,7 +505,7 @@
# high level methods for HTML headers management ##########################
def add_onload(self, jscode):
- self.html_headers.add_onload(jscode, self.json_request)
+ self.html_headers.add_onload(jscode)
def add_js(self, jsfiles, localfile=True):
"""specify a list of JS files to include in the HTML headers
--- a/web/views/basecontrollers.py Fri Mar 12 16:21:13 2010 +0100
+++ b/web/views/basecontrollers.py Fri Mar 12 16:23:21 2010 +0100
@@ -17,9 +17,10 @@
from logilab.common.decorators import cached
from logilab.common.date import strptime
-from cubicweb import NoSelectableObject, ValidationError, ObjectNotFound, typed_eid
+from cubicweb import (NoSelectableObject, ValidationError, ObjectNotFound,
+ typed_eid)
from cubicweb.utils import CubicWebJsonEncoder
-from cubicweb.selectors import yes, match_user_groups
+from cubicweb.selectors import authenticated_user, match_form_params
from cubicweb.mail import format_mail
from cubicweb.web import ExplicitLogin, Redirect, RemoteCallFailed, json_dumps
from cubicweb.web.controller import Controller
@@ -548,7 +549,7 @@
class SendMailController(Controller):
__regid__ = 'sendmail'
- __select__ = match_user_groups('managers', 'users')
+ __select__ = authenticated_user() & match_form_params('recipient', 'mailbody', 'subject')
def recipients(self):
"""returns an iterator on email's recipients as entities"""
@@ -596,7 +597,7 @@
class MailBugReportController(SendMailController):
__regid__ = 'reportbug'
- __select__ = yes()
+ __select__ = match_form_params('description')
def publish(self, rset=None):
body = self._cw.form['description']
--- a/web/views/editcontroller.py Fri Mar 12 16:21:13 2010 +0100
+++ b/web/views/editcontroller.py Fri Mar 12 16:23:21 2010 +0100
@@ -250,6 +250,25 @@
for reid in seteids:
self.relations_rql.append((rql, {'x': eid, 'y': reid}, ('x', 'y')))
+ def delete_entities(self, eidtypes):
+ """delete entities from the repository"""
+ redirect_info = set()
+ eidtypes = tuple(eidtypes)
+ for eid, etype in eidtypes:
+ entity = self._cw.entity_from_eid(eid, etype)
+ path, params = entity.after_deletion_path()
+ redirect_info.add( (path, tuple(params.iteritems())) )
+ entity.delete()
+ if len(redirect_info) > 1:
+ # In the face of ambiguity, refuse the temptation to guess.
+ self._after_deletion_path = 'view', ()
+ else:
+ self._after_deletion_path = iter(redirect_info).next()
+ if len(eidtypes) > 1:
+ self._cw.set_message(self._cw._('entities deleted'))
+ else:
+ self._cw.set_message(self._cw._('entity deleted'))
+
def _action_apply(self):
self._default_publish()
self.reset()
--- a/web/views/treeview.py Fri Mar 12 16:21:13 2010 +0100
+++ b/web/views/treeview.py Fri Mar 12 16:23:21 2010 +0100
@@ -46,8 +46,7 @@
self._cw.add_css('jquery.treeview.css')
self._cw.add_js(('cubicweb.ajax.js', 'cubicweb.widgets.js', 'jquery.treeview.js'))
self._cw.html_headers.add_onload(u"""
-jQuery("#tree-%s").treeview({toggle: toggleTree, prerendered: true});""" % treeid,
- jsoncall=toplevel_thru_ajax)
+jQuery("#tree-%s").treeview({toggle: toggleTree, prerendered: true});""" % treeid)
def call(self, subvid=None, treeid=None,
initial_load=True, initial_thru_ajax=False, **morekwargs):