# HG changeset patch
# User Sylvain Thénault
# Date 1311178961 -7200
# Node ID 134613d3b353e291ceb4c09756761735d654df87
# Parent  6397a9051f65f08ccd71c6e3fc6ad612755c8533
# Parent  8d9c732ad30ed85447a9c0f8afc90fd91bcb38b7
stable is now 3.13

diff -r 6397a9051f65 -r 134613d3b353 .hgtags
--- a/.hgtags	Wed Jul 20 14:09:42 2011 +0200
+++ b/.hgtags	Wed Jul 20 18:22:41 2011 +0200
@@ -204,9 +204,15 @@
 6dfe78a0797ccc34962510f8c2a57f63d65ce41e cubicweb-debian-version-3.12.5-1
 a18dac758150fe9c1f9e4958d898717c32a8f679 cubicweb-version-3.12.6
 105767487c7075dbcce36474f1af0485985cbf2c cubicweb-debian-version-3.12.6-1
+b661ef475260ca7d9ea5c36ba2cc86e95e5b17d3 cubicweb-version-3.13.0
+a96137858f571711678954477da6f7f435870cea cubicweb-debian-version-3.13.0-1
 628fe57ce746c1dac87fb1b078b2026057df894e cubicweb-version-3.12.7
 a07517985136bbbfa6610c428a1b42cd04cd530b cubicweb-debian-version-3.12.7-1
 50122a47ce4fb2ecbf3cf20ed2777f4276c93609 cubicweb-version-3.12.8
 cf49ed55685a810d8d73585330ad1a57cc76260d cubicweb-debian-version-3.12.8-1
 cb2990aaa63cbfe593bcf3afdbb9071e4c76815a cubicweb-version-3.12.9
 92464e39134c70e4ddbe6cd78a6e3338a3b88b05 cubicweb-debian-version-3.12.9-1
+7d84317ef185a10c5eb78e6086f2297d2f4bd1e3 cubicweb-version-3.13.1
+cc0578049cbe8b1d40009728e36c17e45da1fc6b cubicweb-debian-version-3.13.1-1
+f9227b9d61835f03163b8133a96da35db37a0c8d cubicweb-version-3.13.2
+9ad5411199e00b2611366439b82f35d7d3285423 cubicweb-debian-version-3.13.2-1
diff -r 6397a9051f65 -r 134613d3b353 __pkginfo__.py
--- a/__pkginfo__.py	Wed Jul 20 14:09:42 2011 +0200
+++ b/__pkginfo__.py	Wed Jul 20 18:22:41 2011 +0200
@@ -22,7 +22,7 @@
 
 modname = distname = "cubicweb"
 
-numversion = (3, 12, 9)
+numversion = (3, 13, 2)
 version = '.'.join(str(num) for num in numversion)
 
 description = "a repository of entities / relations for knowledge management"
diff -r 6397a9051f65 -r 134613d3b353 appobject.py
--- a/appobject.py	Wed Jul 20 14:09:42 2011 +0200
+++ b/appobject.py	Wed Jul 20 18:22:41 2011 +0200
@@ -180,12 +180,13 @@
         return self.__class__.__name__
 
     def search_selector(self, selector):
-        """search for the given selector or selector instance in the selectors
-        tree. Return it of None if not found
+        """search for the given selector, selector instance or tuple of
+        selectors in the selectors tree. Return None if not found.
         """
         if self is selector:
             return self
-        if isinstance(selector, type) and isinstance(self, selector):
+        if (isinstance(selector, type) or isinstance(selector, tuple)) and \
+           isinstance(self, selector):
            return self
         return None
 
@@ -250,8 +251,8 @@
         return merged_selectors
 
     def search_selector(self, selector):
-        """search for the given selector or selector instance in the selectors
-        tree. Return it of None if not found
+        """search for the given selector or selector instance (or tuple of
+        selectors) in the selectors tree. Return None if not found
         """
         for childselector in self.selectors:
             if childselector is selector:
@@ -259,7 +260,8 @@
             found = childselector.search_selector(selector)
             if found is not None:
                 return found
-        return None
+        # if not found in children, maybe we are looking for self?
+ return super(MultiSelector, self).search_selector(selector) class AndSelector(MultiSelector): diff -r 6397a9051f65 -r 134613d3b353 cwctl.py --- a/cwctl.py Wed Jul 20 14:09:42 2011 +0200 +++ b/cwctl.py Wed Jul 20 18:22:41 2011 +0200 @@ -728,11 +728,9 @@ config = cwcfg.config_for(appid) config.repairing = True # notice we're not starting the server config.verbosity = self.config.verbosity - try: - config.set_sources_mode(self.config.ext_sources or ('migration',)) - except AttributeError: - # not a server config - pass + set_sources_mode = getattr(config, 'set_sources_mode', None) + if set_sources_mode is not None: + set_sources_mode(self.config.ext_sources or ('migration',)) # get instance and installed versions for the server and the componants mih = config.migration_handler() repo = mih.repo_connect() @@ -802,6 +800,28 @@ return False return True + +class ListVersionsInstanceCommand(InstanceCommand): + """List versions used by an instance. + + ... + identifiers of the instances to list versions for. + """ + name = 'versions' + + def versions_instance(self, appid): + from logilab.common.changelog import Version + config = cwcfg.config_for(appid) + # should not raise error if db versions don't match fs versions + config.repairing = True + if hasattr(config, 'set_sources_mode'): + config.set_sources_mode(('migration',)) + repo = config.migration_handler().repo_connect() + vcconf = repo.get_versions() + for key in sorted(vcconf): + print key+': %s.%s.%s' % vcconf[key] + + class ShellCommand(Command): """Run an interactive migration shell on an instance. This is a python shell with enhanced migration commands predefined in the namespace. An additional @@ -964,6 +984,7 @@ StartInstanceCommand, StopInstanceCommand, RestartInstanceCommand, ReloadConfigurationCommand, StatusCommand, UpgradeInstanceCommand, + ListVersionsInstanceCommand, ShellCommand, RecompileInstanceCatalogsCommand, ListInstancesCommand, ListCubesCommand, diff -r 6397a9051f65 -r 134613d3b353 cwvreg.py --- a/cwvreg.py Wed Jul 20 14:09:42 2011 +0200 +++ b/cwvreg.py Wed Jul 20 18:22:41 2011 +0200 @@ -194,17 +194,18 @@ _ = unicode from warnings import warn +from datetime import datetime, date, time, timedelta from logilab.common.decorators import cached, clear_cache from logilab.common.deprecation import deprecated, class_deprecated from logilab.common.modutils import cleanup_sys_modules from rql import RQLHelper +from yams.constraints import BASE_CONVERTERS from cubicweb import (ETYPE_NAME_MAP, Binary, UnknownProperty, UnknownEid, ObjectNotFound, NoSelectableObject, RegistryNotFound, CW_EVENT_MANAGER) -from cubicweb.utils import dump_class from cubicweb.vregistry import VRegistry, Registry, class_regid, classid from cubicweb.rtags import RTAGS @@ -368,7 +369,10 @@ # make a copy event if cls.__regid__ == etype, else we may have pb for # client application using multiple connections to different # repositories (eg shingouz) - cls = dump_class(cls, etype) + # __autogenerated__ attribute is just a marker + cls = type(str(etype), (cls,), {'__autogenerated__': True, + '__doc__': cls.__doc__, + '__module__': cls.__module__}) cls.__regid__ = etype cls.__initialize__(self.schema) return cls @@ -412,10 +416,8 @@ if not isinstance(view, class_deprecated)] try: view = self._select_best(views, req, rset=rset, **kwargs) - if view.linkable(): + if view is not None and view.linkable(): yield view - except NoSelectableObject: - continue except Exception: self.exception('error while trying to select %s view for %s', vid, rset) @@ -849,24 +851,15 @@ 
return self['views'].select(__vid, req, rset=rset, **kwargs) -import decimal -from datetime import datetime, date, time, timedelta - -YAMS_TO_PY = { # XXX unify with yams.constraints.BASE_CONVERTERS? - 'String' : unicode, - 'Bytes': Binary, - 'Password': str, - - 'Boolean': bool, - 'Int': int, - 'Float': float, - 'Decimal': decimal.Decimal, - +# XXX unify with yams.constraints.BASE_CONVERTERS? +YAMS_TO_PY = BASE_CONVERTERS.copy() +YAMS_TO_PY.update({ + 'Bytes': Binary, 'Date': date, 'Datetime': datetime, 'TZDatetime': datetime, 'Time': time, 'TZTime': time, 'Interval': timedelta, - } + }) diff -r 6397a9051f65 -r 134613d3b353 dataimport.py --- a/dataimport.py Wed Jul 20 14:09:42 2011 +0200 +++ b/dataimport.py Wed Jul 20 18:22:41 2011 +0200 @@ -445,14 +445,14 @@ ObjectStore.__init__(self) if session is None: sys.exit('please provide a session of run this script with cubicweb-ctl shell and pass cnx as session') - if not hasattr(session, 'set_pool'): + if not hasattr(session, 'set_cnxset'): # connection cnx = session session = session.request() - session.set_pool = lambda : None + session.set_cnxset = lambda : None commit = commit or cnx.commit else: - session.set_pool() + session.set_cnxset() self.session = session self._commit = commit or session.commit @@ -462,7 +462,7 @@ def commit(self): txuuid = self._commit() - self.session.set_pool() + self.session.set_cnxset() return txuuid def rql(self, *args): @@ -642,7 +642,9 @@ for k, v in kwargs.iteritems(): kwargs[k] = getattr(v, 'eid', v) entity, rels = self.metagen.base_etype_dicts(etype) + # make a copy to keep cached entity pristine entity = copy(entity) + entity.cw_edited = copy(entity.cw_edited) entity.cw_clear_relation_cache() self.metagen.init_entity(entity) entity.cw_edited.update(kwargs, skipsec=False) diff -r 6397a9051f65 -r 134613d3b353 dbapi.py --- a/dbapi.py Wed Jul 20 14:09:42 2011 +0200 +++ b/dbapi.py Wed Jul 20 18:22:41 2011 +0200 @@ -347,9 +347,9 @@ # server session compat layer ############################################# - def describe(self, eid): + def describe(self, eid, asdict=False): """return a tuple (type, sourceuri, extid) for the entity with id """ - return self.cnx.describe(eid) + return self.cnx.describe(eid, asdict) def source_defs(self): """return the definition of sources used by the repository.""" @@ -483,7 +483,7 @@ def check_not_closed(func): def decorator(self, *args, **kwargs): if self._closed is not None: - raise ProgrammingError('Closed connection') + raise ProgrammingError('Closed connection %s' % self.sessionid) return func(self, *args, **kwargs) return decorator @@ -675,8 +675,15 @@ return self._repo.get_option_value(option, foreid) @check_not_closed - def describe(self, eid): - return self._repo.describe(self.sessionid, eid, **self._txid()) + def describe(self, eid, asdict=False): + metas = self._repo.describe(self.sessionid, eid, **self._txid()) + if len(metas) == 3: # backward compat + metas = list(metas) + metas.append(metas[1]) + if asdict: + return dict(zip(('type', 'source', 'extid', 'asource'), metas)) + # XXX :-1 for cw compat, use asdict=True for full information + return metas[:-1] # db-api like interface #################################################### diff -r 6397a9051f65 -r 134613d3b353 debian/changelog --- a/debian/changelog Wed Jul 20 14:09:42 2011 +0200 +++ b/debian/changelog Wed Jul 20 18:22:41 2011 +0200 @@ -1,9 +1,27 @@ +cubicweb (3.13.2-1) unstable; urgency=low + + * new upstream release + + -- Sylvain Thénault Wed, 20 Jul 2011 17:15:22 +0200 + +cubicweb (3.13.1-1) unstable; 
urgency=low + + * new upstream release + + -- Sylvain Thénault Tue, 12 Jul 2011 12:23:54 +0200 + cubicweb (3.12.9-1) unstable; urgency=low * new upstream release -- Sylvain Thénault Tue, 12 Jul 2011 11:30:10 +0200 +cubicweb (3.13.0-1) unstable; urgency=low + + * new upstream release + + -- Sylvain Thénault Thu, 09 Jun 2011 20:18:41 +0200 + cubicweb (3.12.8-1) unstable; urgency=low * new upstream release diff -r 6397a9051f65 -r 134613d3b353 devtools/__init__.py --- a/devtools/__init__.py Wed Jul 20 14:09:42 2011 +0200 +++ b/devtools/__init__.py Wed Jul 20 18:22:41 2011 +0200 @@ -28,15 +28,17 @@ import pickle import glob import warnings +import hashlib from datetime import timedelta from os.path import (abspath, join, exists, basename, dirname, normpath, split, isfile, isabs, splitext, isdir, expanduser) from functools import partial -import hashlib from logilab.common.date import strptime from logilab.common.decorators import cached, clear_cache -from cubicweb import CW_SOFTWARE_ROOT, ConfigurationError, schema, cwconfig, BadConnectionId + +from cubicweb import ConfigurationError, ExecutionError, BadConnectionId +from cubicweb import CW_SOFTWARE_ROOT, schema, cwconfig from cubicweb.server.serverconfig import ServerConfiguration from cubicweb.etwist.twconfig import TwistedConfiguration @@ -91,7 +93,7 @@ """ Idea: this is less costly than a full re-creation of the repo object. off: * session are closed, - * pools are closed + * cnxsets are closed * system source is shutdown """ if not repo._needs_refresh: @@ -102,8 +104,8 @@ repo.close(sessionid) except BadConnectionId: #this is strange ? thread issue ? print 'XXX unknown session', sessionid - for pool in repo.pools: - pool.close(True) + for cnxset in repo.cnxsets: + cnxset.close(True) repo.system_source.shutdown() repo._needs_refresh = True repo._has_started = False @@ -111,12 +113,12 @@ def turn_repo_on(repo): """Idea: this is less costly than a full re-creation of the repo object. on: - * pools are connected + * cnxsets are connected * cache are cleared """ if repo._needs_refresh: - for pool in repo.pools: - pool.reconnect() + for cnxset in repo.cnxsets: + cnxset.reconnect() repo._type_source_cache = {} repo._extid_cache = {} repo.querier._rql_cache = {} @@ -197,7 +199,10 @@ directory from wich tests are launched or by specifying an alternative sources file using self.sourcefile. 
""" - sources = super(TestServerConfiguration, self).sources() + try: + sources = super(TestServerConfiguration, self).sources() + except ExecutionError: + sources = {} if not sources: sources = DEFAULT_SOURCES if 'admin' not in sources: @@ -207,9 +212,6 @@ # web config methods needed here for cases when we use this config as a web # config - def instance_md5_version(self): - return '' - def default_base_url(self): return BASE_URL @@ -475,12 +477,11 @@ repo = self.get_repo(startup=True) cnx = self.get_cnx() session = repo._sessions[cnx.sessionid] - session.set_pool() + session.set_cnxset() _commit = session.commit - def always_pooled_commit(): - _commit() - session.set_pool() - session.commit = always_pooled_commit + def keep_cnxset_commit(): + _commit(free_cnxset=False) + session.commit = keep_cnxset_commit pre_setup_func(session, self.config) session.commit() cnx.close() diff -r 6397a9051f65 -r 134613d3b353 devtools/fake.py --- a/devtools/fake.py Wed Jul 20 14:09:42 2011 +0200 +++ b/devtools/fake.py Wed Jul 20 18:22:41 2011 +0200 @@ -138,13 +138,15 @@ class FakeSession(RequestSessionBase): - read_security = write_security = True - set_read_security = set_write_security = lambda *args, **kwargs: None - def __init__(self, repo=None, user=None): + def __init__(self, repo=None, user=None, vreg=None): self.repo = repo - self.vreg = getattr(self.repo, 'vreg', CubicWebVRegistry(FakeConfig(), initlog=False)) - self.pool = FakePool() + if vreg is None: + vreg = getattr(self.repo, 'vreg', None) + if vreg is None: + vreg = CubicWebVRegistry(FakeConfig(), initlog=False) + self.vreg = vreg + self.cnxset = FakeConnectionsSet() self.user = user or FakeUser() self.is_internal_session = False self.transaction_data = {} @@ -162,6 +164,13 @@ def set_entity_cache(self, entity): pass + # for use with enabled_security context manager + read_security = write_security = True + def init_security(self, *args): + return None, None + def reset_security(self, *args): + return + class FakeRepo(object): querier = None def __init__(self, schema, vreg=None, config=None): @@ -201,6 +210,6 @@ self.uri = uri -class FakePool(object): +class FakeConnectionsSet(object): def source(self, uri): return FakeSource(uri) diff -r 6397a9051f65 -r 134613d3b353 devtools/fill.py --- a/devtools/fill.py Wed Jul 20 14:09:42 2011 +0200 +++ b/devtools/fill.py Wed Jul 20 18:22:41 2011 +0200 @@ -275,9 +275,6 @@ :param choice_func: a function that takes an entity type, an attrname and returns acceptable values for this attribute """ - # XXX HACK, remove or fix asap - if etype in set(('String', 'Int', 'Float', 'Boolean', 'Date', 'CWGroup', 'CWUser')): - return [] queries = [] for index in xrange(entity_num): restrictions = [] diff -r 6397a9051f65 -r 134613d3b353 devtools/repotest.py --- a/devtools/repotest.py Wed Jul 20 14:09:42 2011 +0200 +++ b/devtools/repotest.py Wed Jul 20 18:22:41 2011 +0200 @@ -205,7 +205,7 @@ self.ueid = self.session.user.eid assert self.ueid != -1 self.repo._type_source_cache = {} # clear cache - self.pool = self.session.set_pool() + self.cnxset = self.session.set_cnxset() self.maxeid = self.get_max_eid() do_monkey_patch() self._dumb_sessions = [] @@ -213,7 +213,7 @@ def get_max_eid(self): return self.session.execute('Any MAX(X)')[0][0] def cleanup(self): - self.session.set_pool() + self.session.set_cnxset() self.session.execute('DELETE Any X WHERE X eid > %s' % self.maxeid) def tearDown(self): @@ -225,7 +225,7 @@ for session in self._dumb_sessions: session.rollback() session.close() - self.repo._free_pool(self.pool) + 
self.repo._free_cnxset(self.cnxset)
         assert self.session.user.eid != -1
 
     def set_debug(self, debug):
@@ -263,7 +263,8 @@
         u = self.repo._build_user(self.session, self.session.user.eid)
         u._groups = set(groups)
         s = Session(u, self.repo)
-        s._threaddata.pool = self.pool
+        s._threaddata.cnxset = self.cnxset
+        s._threaddata.ctx_count = 1
         # register session to ensure it gets closed
         self._dumb_sessions.append(s)
         return s
@@ -273,7 +274,7 @@
 
     def commit(self):
         self.session.commit()
-        self.session.set_pool()
+        self.session.set_cnxset()
 
 
 class BasePlannerTC(BaseQuerierTC):
@@ -287,7 +288,7 @@
         # XXX source_defs
         self.o = self.repo.querier
         self.session = self.repo._sessions.values()[0]
-        self.pool = self.session.set_pool()
+        self.cnxset = self.session.set_cnxset()
         self.schema = self.o.schema
         self.sources = self.o._repo.sources
         self.system = self.sources[-1]
@@ -311,7 +312,7 @@
             del self.repo.sources_by_uri[source.uri]
         undo_monkey_patch()
         for session in self._dumb_sessions:
-            session._threaddata.pool = None
+            session._threaddata.cnxset = None
             session.close()
 
     def _prepare_plan(self, rql, kwargs=None):
diff -r 6397a9051f65 -r 134613d3b353 devtools/testlib.py
--- a/devtools/testlib.py	Wed Jul 20 14:09:42 2011 +0200
+++ b/devtools/testlib.py	Wed Jul 20 18:22:41 2011 +0200
@@ -274,7 +274,7 @@
     def session(self):
         """return current server side session (using default manager account)"""
         session = self.repo._sessions[self.cnx.sessionid]
-        session.set_pool()
+        session.set_cnxset()
         return session
 
     @property
@@ -458,7 +458,7 @@
         try:
             return self.cnx.commit()
         finally:
-            self.session.set_pool() # ensure pool still set after commit
+            self.session.set_cnxset() # ensure cnxset still set after commit
 
     @nocoverage
     def rollback(self):
@@ -467,7 +467,7 @@
         except dbapi.ProgrammingError:
             pass # connection closed
         finally:
-            self.session.set_pool() # ensure pool still set after commit
+            self.session.set_cnxset() # ensure cnxset still set after commit
 
     # server side db api #######################################################
@@ -475,7 +475,7 @@
         if eid_key is not None:
             warn('[3.8] eid_key is deprecated, you can safely remove this argument',
                  DeprecationWarning, stacklevel=2)
-        self.session.set_pool()
+        self.session.set_cnxset()
         return self.session.execute(rql, args)
 
     # other utilities #########################################################
@@ -500,6 +500,10 @@
             it2 = set(getattr(x, 'eid', x) for x in it2)
         super(CubicWebTC, self).assertItemsEqual(it1, it2, *args, **kwargs)
 
+    def assertMessageEqual(self, req, params, msg):
+        storedmsg = req.session.data[params['_cwmsgid']]
+        self.assertEqual(storedmsg, msg)
+
     # workflow utilities #######################################################
 
     def assertPossibleTransitions(self, entity, expected):
@@ -568,6 +572,8 @@
         if views:
             try:
                 view = viewsvreg._select_best(views, req, rset=rset)
+                if view is None:
+                    raise NoSelectableObject((req,), {'rset':rset}, views)
                 if view.linkable():
                     yield view
                 else:
@@ -722,7 +728,7 @@
         self.assertEqual(session.login, origsession.login)
         self.assertEqual(session.anonymous_session, False)
         self.assertEqual(path, 'view')
-        self.assertEqual(params, {'__message': 'welcome %s !' % req.user.login})
+        self.assertMessageEqual(req, params, 'welcome %s !' % req.user.login)
 
     def assertAuthFailure(self, req, nbsessions=0):
         self.app.connect(req)
diff -r 6397a9051f65 -r 134613d3b353 doc/book/en/admin/index.rst
--- a/doc/book/en/admin/index.rst	Wed Jul 20 14:09:42 2011 +0200
+++ b/doc/book/en/admin/index.rst	Wed Jul 20 18:22:41 2011 +0200
@@ -23,7 +23,6 @@
    multisources
    ldap
    pyro
-   gae
    migration
    additional-tips
    rql-logs
diff -r 6397a9051f65 -r 134613d3b353 doc/book/en/admin/instance-config.rst
--- a/doc/book/en/admin/instance-config.rst	Wed Jul 20 14:09:42 2011 +0200
+++ b/doc/book/en/admin/instance-config.rst	Wed Jul 20 18:22:41 2011 +0200
@@ -43,18 +43,33 @@
 use apache (for example) for redirection and the variable `main.https-url`
 of configuration file.
 
+For this to work you have to activate the following apache modules :
+
+* rewrite
+* proxy
+* proxy_http
+
+The command on Debian based systems for that is ::
+
+  a2enmod rewrite proxy proxy_http
+  /etc/init.d/apache2 restart
+
 :Example:
 
    For an apache redirection of a site accessible via `http://localhost/demo`
   and `https://localhost/demo` and actually running on port 8080, it
    takes to the http:::
 
+     ProxyPreserveHost On
+     RewriteEngine On
      RewriteCond %{REQUEST_URI} ^/demo
      RewriteRule ^/demo$ /demo/
      RewriteRule ^/demo/(.*) http://127.0.0.1:8080/$1 [L,P]
 
    and for the https:::
 
+     ProxyPreserveHost On
+     RewriteEngine On
      RewriteCond %{REQUEST_URI} ^/demo
      RewriteRule ^/demo$ /demo/
      RewriteRule ^/demo/(.*) http://127.0.0.1:8080/https/$1 [L,P]
 
@@ -65,6 +80,11 @@
    base-url = http://localhost/demo
    https-url = https://localhost/demo
 
+Notice that if you simply want a site accessible through https, not *both* http
+and https, simply set `base-url` to the https url and use the first section in
+your apache configuration (as you would have to do for an http configuration
+with an apache front-end).
+
 Setting up the web client
 -------------------------
 :`web.embed-allowed`:
diff -r 6397a9051f65 -r 134613d3b353 doc/book/en/admin/ldap.rst
--- a/doc/book/en/admin/ldap.rst	Wed Jul 20 14:09:42 2011 +0200
+++ b/doc/book/en/admin/ldap.rst	Wed Jul 20 18:22:41 2011 +0200
@@ -27,7 +27,7 @@
 Credential checks are _always_ done against the LDAP server.
 
 The base functionality for this is in
-cubicweb/server/sources/ldapuser.py.
+:file:`cubicweb/server/sources/ldapuser.py`.
 
 Configurations options
 ----------------------
@@ -39,14 +39,14 @@
 
 LDAP server connection options:
 
-* host: may contain port information using : notation.
-* protocol (choices are ldap, ldaps, ldapi)
-* auth-mode (choices are simple, cram_md5, digest_md5, gssapi, support
+* `host`, may contain port information using : notation.
+* `protocol`, choices are ldap, ldaps, ldapi +* `auth-mode`, (choices are simple, cram_md5, digest_md5, gssapi, support for the later being partial as of now) -* auth-realm, realm to use when using gssapi/kerberos authentication -* data-cnx-dn, user dn to use to open data connection to the ldap (eg +* `auth-realm`, realm to use when using gssapi/kerberos authentication +* `data-cnx-dn`, user dn to use to open data connection to the ldap (eg used to respond to rql queries) -* data-cnx-password, password to use to open data connection to the +* `data-cnx-password`, password to use to open data connection to the ldap (eg used to respond to rql queries) If the LDAP server accepts anonymous binds, then it is possible to @@ -55,16 +55,30 @@ LDAP schema mapping: -* user-base-dn, base DN to lookup for users -* user-scope, user search scope -* user-classes, classes of user -* user-attrs-map, map from ldap user attributes to cubicweb attributes -* user-login-attr, attribute used as login on authentication +* `user-base-dn`, base DN to lookup for users +* `user-scope`, user search scope +* `user-classes`, classes of user +* `user-attrs-map`, map from ldap user attributes to cubicweb attributes +* `user-login-attr`, attribute used as login on authentication LDAP source internal configuration: -* user-default-group, name of a group in which ldap users will be by +* `user-default-group`, name of a group in which ldap users will be by default. You can set multiple groups by separating them by a comma -* synchronization-interval, interval between synchronization with the +* `synchronization-interval`, interval between synchronization with the ldap directory in seconds (default to once a day) -* life time of query cache in minutes (default to two hours). +* `cache-life-time`, life time of query cache in minutes (default to two hours). + +Other notes +----------- + +* Yes, cubicweb is able to start if ldap cannot be reached, even on c-c start, + though that will slow down the instance, since it will indefinitly attempt + to connect to the ldap on each query on users. + +* Changing the name of the ldap server in your script is fine, changing the base + DN isn't since it's used to identify already known users from others + +* You can use the :class:`CWSourceHostConfig` to have variants for a source + configuration according to the host the instance is running on. To do so go on + the source's view from the sources management view. diff -r 6397a9051f65 -r 134613d3b353 doc/book/en/admin/setup-windows.rst --- a/doc/book/en/admin/setup-windows.rst Wed Jul 20 14:09:42 2011 +0200 +++ b/doc/book/en/admin/setup-windows.rst Wed Jul 20 18:22:41 2011 +0200 @@ -8,13 +8,12 @@ Setting up a Windows development environment is not too complicated but it requires a series of small steps. -We proposed an example of a typical |cubicweb| installation on Windows +We propose an example of a typical |cubicweb| installation on Windows from sources. We assume everything goes into ``C:\\`` and for any package, without version specification, "the latest is the greatest". -Take into the mind that adjusting the installation drive should be -straightforward. +Mind that adjusting the installation drive should be straightforward. @@ -24,9 +23,9 @@ |cubicweb| requires some base elements that must be installed to run correctly. So, first of all, you must install them : -* python >= 2.5 and < 3 - (`Download Python `_). - You can also consider the Python(x,y) distribution +* python >= 2.5 and < 3 + (`Download Python `_). 
+ You can also consider the Python(x,y) distribution (`Download Python(x,y) `_) as it makes things easier for Windows user by wrapping in a single installer python 2.5 plus numerous useful third-party modules and @@ -34,24 +33,24 @@ IDE for Python under Windows). * `Twisted `_ is an event-driven - networking engine + networking engine (`Download Twisted `_) * `lxml `_ library - (version >=2.2.1) allows working with XML and HTML + (version >=2.2.1) allows working with XML and HTML (`Download lxml `_) -* `Postgresql 8.4 `_, - an object-relational database system - (`Download Postgresql `_) - and its python drivers +* `Postgresql 8.4 `_, + an object-relational database system + (`Download Postgresql `_) + and its python drivers (`Download psycopg `_) -* A recent version of `gettext` +* A recent version of `gettext` (`Download gettext `_). -* `rql `_, - the recent version of the Relationship Query Language parser +* `rql `_, + the recent version of the Relationship Query Language parser (`Download rql `_). Install optional elements @@ -60,22 +59,22 @@ We recommend you to install the following elements. They are not mandatory but they activate very interesting features in |cubicweb|: -* `Simplejson `_ - must be installed if you have python <= 2.5 +* `Simplejson `_ + must be installed if you have python <= 2.5 (`Download simplejson `_). It is included in the Standard library from Python >= 2.6. -* `Pyro `_ +* `Pyro `_ enables remote access to cubicweb repository instances. It also allows the client and the server not running on the same machine (`Download Pyro `_). -* `python-ldap `_ +* `python-ldap `_ provides access to LDAP/Active directory directories (`Download python-ldap `_). -* `graphviz `_ - which allow schema drawings. +* `graphviz `_ + which allow schema drawings. (`Download graphviz `_). It is quite recommended (albeit not mandatory). @@ -88,28 +87,27 @@ Some additional tools could be useful to develop :ref:`cubes ` with the framework. -* `mercurial `_ and its standard - windows GUI (`TortoiseHG `_) - allow you to get the source code of |cubicweb| from control version - repositories. So you will be able to get the latest development - version in an easy way +* `mercurial `_ and its standard windows GUI + (`TortoiseHG `_) allow you to get the source + code of |cubicweb| from control version repositories. So you will be able to + get the latest development version and pre-release bugfixes in an easy way (`Download mercurial `_). * You can also consider the ssh client `Putty` in order to peruse mercurial over ssh (`Download `_). * If you are an Eclipse user, mercurial can be integrated using the - `MercurialEclipse` plugin + `MercurialEclipse` plugin (`Home page `_). Getting the sources ------------------- -There are tow ways to get the sources of |cubicweb| and its +There are two ways to get the sources of |cubicweb| and its :ref:`cubes `: -* download the latest release (:ref:`SourceInstallation`) -* get the development version using Mercurial +* download the latest release (:ref:`SourceInstallation`) +* get the development version using Mercurial (:ref:`MercurialInstallation`) Environment variables @@ -123,8 +121,8 @@ it. That opens a small window allowing edition of user-related and system-wide variables. -We will consider only user variables. First, the ``PATH`` variable. Assuming -you are logged as user *Jane*, add the following paths, separated by +We will consider only user variables. First, the ``PATH`` variable. 
Assuming +you are logged as user *Jane*, add the following paths, separated by semi-colons:: C:\Documents and Settings\Jane\My Documents\Python\cubicweb\cubicweb\bin @@ -154,3 +152,6 @@ Then start the service with:: net start cubicweb-my_instance + +In case this does not work, you should be able to see error reports in +the application log, using the windows event log viewer. diff -r 6397a9051f65 -r 134613d3b353 doc/book/en/admin/setup.rst --- a/doc/book/en/admin/setup.rst Wed Jul 20 14:09:42 2011 +0200 +++ b/doc/book/en/admin/setup.rst Wed Jul 20 18:22:41 2011 +0200 @@ -91,10 +91,10 @@ :ref:`cubicweb with other database ` using the following virtual packages : - * `cubicweb-postgresql-support` contains the necessary dependency for + * `cubicweb-postgresql-support` contains the necessary dependencies for using :ref:`cubicweb with postgresql datatabase ` - * `cubicweb-mysql-support` contains the necessary dependency for using + * `cubicweb-mysql-support` contains the necessary dependencies for using :ref:`cubicweb with mysql database `. .. _`list of sources`: http://wiki.debian.org/SourcesList @@ -144,9 +144,9 @@ .. _`virtualenv`: http://virtualenv.openplans.org/ A working compilation chain is needed to build the modules that include C -extensions. If you definitively wont, installing `Lxml -`_, `Twisted `_ and -`libgecode `_ will help. +extensions. If you definitively wont, installing `Lxml `_, +`Twisted Web `_ and `libgecode +`_ will help. To install |cubicweb| and its dependencies, just run:: diff -r 6397a9051f65 -r 134613d3b353 doc/book/en/annexes/rql/implementation.rst --- a/doc/book/en/annexes/rql/implementation.rst Wed Jul 20 14:09:42 2011 +0200 +++ b/doc/book/en/annexes/rql/implementation.rst Wed Jul 20 18:22:41 2011 +0200 @@ -11,7 +11,7 @@ expression. :: - statement:: = (select | delete | insert | update) ';' + statement ::= (select | delete | insert | update) ';' # select specific rules @@ -130,18 +130,7 @@ with an OR. I do not think that the negation is supported on this type of relation (XXX to be confirmed). -- Relations defining the variables must be left to those using them. For - example:: - - Point P where P abs X, P ord Y, P value X+Y - - is valid, but:: - - Point P where P abs X, P value X+Y, P ord Y - - is not. - -- missing proper explicit type conversion, COALESCE and certainly other things... +- missing COALESCE and certainly other things... - writing an rql query requires knowledge of the used schema (with real relation names and entities, not those viewed in the user interface). On the other diff -r 6397a9051f65 -r 134613d3b353 doc/book/en/annexes/rql/language.rst --- a/doc/book/en/annexes/rql/language.rst Wed Jul 20 14:09:42 2011 +0200 +++ b/doc/book/en/annexes/rql/language.rst Wed Jul 20 18:22:41 2011 +0200 @@ -5,104 +5,188 @@ RQL syntax ---------- +.. _RQLKeywords: + Reserved keywords ~~~~~~~~~~~~~~~~~ -The keywords are not case sensitive. :: AND, ASC, BEING, DELETE, DESC, DISTINCT, EXISTS, FALSE, GROUPBY, - HAVING, ILIKE, IN, INSERT, LIKE, LIMIT, NOT, NOW, NULL, OFFSET, + HAVING, ILIKE, INSERT, LIKE, LIMIT, NOT, NOW, NULL, OFFSET, OR, ORDERBY, SET, TODAY, TRUE, UNION, WHERE, WITH +The keywords are not case sensitive. You should not use them when defining your +schema, or as RQL variable names. -Variables and Typing + +.. _RQLCase: + +Case +~~~~ + +* Variables should be all upper-cased. + +* Relation should be all lower-cased and match exactly names of relations defined + in the schema. 
+ +* Entity types should start with an upper cased letter and be followed by at least + a lower cased latter. + + +.. _RQLVariables: + +Variables and typing ~~~~~~~~~~~~~~~~~~~~ -With RQL, we do not distinguish between entities and attributes. The -value of an attribute is considered an entity of a particular type (see -below), linked to one (real) entity by a relation called the name of -the attribute. +Entities and values to browse and/or select are represented in the query by +*variables* that must be written in capital letters. + +With RQL, we do not distinguish between entities and attributes. The value of an +attribute is considered as an entity of a particular type (see below), linked to +one (real) entity by a relation called the name of the attribute, where the +entity is the subject and the attribute the object. + +The possible type(s) for each variable is derived from the schema according to +the constraints expressed above and thanks to the relations between each +variable. -Entities and values to browse and/or select are represented in -the query by *variables* that must be written in capital letters. +We can restrict the possible types for a variable using the special relation +**is** in the restrictions. + -There is a special type **Any**, referring to a non specific type. +Virtual relations +~~~~~~~~~~~~~~~~~ -We can restrict the possible types for a variable using the -special relation **is** in the constraints. +Those relations may only be used in RQL query but are not actual attributes of +your entities. + +* `has_text`: relation to use to query the full text index (only for entities + having fulltextindexed attributes). -The possible type(s) for each variable is derived from the schema -according to the constraints expressed above and thanks to the relations -between each variable. +* `identity`: relation to use to tell that a RQL variable is the same as another + when you've to use two different variables for querying purpose. On the + opposite it's also useful together with the :ref:`NOT` operator to tell that two + variables should not identify the same entity + -Built-in types -`````````````` +.. _RQLLiterals: + +Literal expressions +~~~~~~~~~~~~~~~~~~~ + +Bases types supported by RQL are those supported by yams schema. Literal values +are expressed as explained below: -The base types supported are string (between double or single quotes), -integers or floats (the separator is '.'), dates and -boolean. We expect to receive a schema in which types String, -Int, Float, Date and Boolean are defined. +* string should be between double or single quotes. If the value contains a + quote, it should be preceded by a backslash '\' + +* floats separator is dot '.' + +* boolean values are :keyword:`TRUE` and :keyword:`FALSE` keywords -* `String` (literal: between double or single quotes). -* `Int`, `Float` (separator being'.'). -* `Date`, `Datetime`, `Time` (literal: string YYYY/MM/DD [hh:mm] or keywords - `TODAY` and `NOW`). -* `Boolean` (keywords `TRUE` and `FALSE`). -* `Keyword` NULL. +* date and time should be expressed as a string with ISO notation : YYYY/MM/DD + [hh:mm], or using keywords :keyword:`TODAY` and :keyword:`NOW` +You may also use the :keyword:`NULL` keyword, meaning 'unspecified'. + + +.. _RQLOperators: Operators ~~~~~~~~~ -Logical Operators +.. _RQLLogicalOperators: + +Logical operators ````````````````` :: AND, OR, NOT, ',' - ',' is equivalent to 'AND' but with the smallest among the priority - of logical operators (see :ref:`PriorityOperators`). 
+',' is equivalent to 'AND' but with the smallest among the priority of logical +operators (see :ref:`RQLOperatorsPriority`). -Mathematical Operators +.. _RQLMathematicalOperators: + +Mathematical operators `````````````````````` :: +, -, *, / +Those should behave as you expect. + + +.. _RQLComparisonOperators: + Comparison operators ```````````````````` -:: + :: - =, <, <=, >=, >, ~=, IN, LIKE, ILIKE - -* Syntax to use comparison operator: + =, !=, <, <=, >=, >, IN - `VARIABLE relation operator VALUE` -* The operator `=` is the default operator and can be omitted. +The syntax to use comparison operators is: -* `relation` name is always attended + `VARIABLE attribute VALUE` -* The operator `LIKE` equivalent to `~=` can be used with the - special character `%` in a string to indicate that the chain - must start or finish by a prefix/suffix: - :: +The `=` operator is the default operator and can be omitted, i.e. : + + `VARIABLE attribute = VALUE` - Any X WHERE X name ~= 'Th%' - Any X WHERE X name LIKE '%lt' +is equivalent to -* The operator `ILIKE` is the case insensitive version of `LIKE`. + `VARIABLE attribute VALUE` -* The operator `IN` provides a list of possible values: - :: + +The operator `IN` provides a list of possible values: :: Any X WHERE X name IN ('chauvat', 'fayolle', 'di mascio', 'thenault') -.. XXX nico: "A trick <> 'bar'" wouldn't it be more convenient than "NOT A trick 'bar'" ? +.. _RQLStringOperators: + +String operators +```````````````` +:: + + LIKE, ILIKE, ~=, REGEXP + +The :keyword:`LIKE` string operator can be used with the special character `%` in +a string as wild-card: :: + + # match every entity whose name starts with 'Th' + Any X WHERE X name ~= 'Th%' + # match every entity whose name endswith 'lt' + Any X WHERE X name LIKE '%lt' + # match every entity whose name contains a 'l' and a 't' + Any X WHERE X name LIKE '%l%t%' -.. _PriorityOperators: +:keyword:`ILIKE` is the case insensitive version of :keyword:`LIKE`. It's not +available on all backend (e.g. sqlite doesn't support it). If not available for +your backend, :keyword:`ILIKE` will behave like :keyword:`LIKE`. + +`~=` is a shortcut version of :keyword:`ILIKE`, or of :keyword:`LIKE` when the +former is not available on the back-end. + + +The :keyword:`REGEXP` is an alternative to :keyword:`LIKE` that supports POSIX +regular expressions:: + + # match entities whose title starts with a digit + Any X WHERE X title REGEXP "^[0-9].*" + + +The underlying SQL operator used is back-end-dependent : + +- the ``~`` operator is used for postgresql, +- the ``REGEXP`` operator for mysql and sqlite. + +Other back-ends are not supported yet. + + +.. _RQLOperatorsPriority: Operators priority `````````````````` @@ -116,177 +200,339 @@ #. ',' +.. _RQLSearchQuery: + Search Query ~~~~~~~~~~~~ - [ `DISTINCT`] V1 (, V2) \ * - [ `GROUPBY` V1 (V2) \*] [ `ORDERBY` ] +Simplified grammar of search query: :: + + [ `DISTINCT`] `Any` V1 (, V2) \* + [ `GROUPBY` V1 (, V2) \*] [ `ORDERBY` ] [ `LIMIT` ] [ `OFFSET` ] - [ `WHERE` ] - [ `WITH` V1 (, V2) \ * BEING ()] - [ `HAVING` ] + [ `WHERE` ] + [ `WITH` V1 (, V2)\* BEING ()] + [ `HAVING` ] [ `UNION` ] -:entity type: - Type of selected variables. - The special type `Any` is equivalent to not specify a type. -:restriction: - list of conditions to test successively - `V1 relation V2 | ` -:orderterms: - Definition of the selection order: variable or column number followed by - sorting method ( `ASC`, `DESC`), ASC is the default. 
-:note for grouped queries:
-   For grouped queries (e.g., a clause `GROUPBY`), all
-   selected variables must be aggregated or grouped.
 
+Selection
+`````````
+
+The first occurring clause is the selection of terms that should be in the result
+set. Terms may be variables, literals, function calls, arithmetic, etc., and each
+term is separated by a comma.
+
+There will be as many columns in the result set as terms in this clause,
+respecting order.
+
+Syntax for function call is somewhat intuitive, for instance: ::
+
+    Any UPPER(N) WHERE P firstname N
+
+
+Grouping and aggregating
+````````````````````````
+
+The :keyword:`GROUPBY` keyword is followed by a list of terms on which results
+should be grouped. They are usually used with aggregate functions, responsible
+for aggregating values for each group (see :ref:`RQLAggregateFunctions`).
+
+For grouped queries, all selected variables must be either aggregated (i.e. used
+by an aggregate function) or grouped (i.e. listed in the :keyword:`GROUPBY`
+clause).
+
+
+Sorting
+```````
+
+The :keyword:`ORDERBY` keyword is followed by the definition of the selection
+order: variable or column number followed by sorting method (:keyword:`ASC`,
+:keyword:`DESC`), :keyword:`ASC` being the default.
+
+
+Pagination
+``````````
+
+The :keyword:`LIMIT` and :keyword:`OFFSET` keywords may be respectively used to
+limit the number of results and to tell from which result line to start (for
+instance, use `LIMIT 20` to get the first 20 results, then `LIMIT 20 OFFSET 20`
+to get the next 20).
 
-Sorting and groups
-``````````````````
 
+Restrictions
+````````````
+
+The :keyword:`WHERE` keyword introduces one of the "main" parts of the query,
+where you "define" variables and add some restrictions telling what you're
+interested in.
+
+It's a list of triplets "subject relation object", e.g. `V1 relation
+(V2 | <static value>)`. Triplets are separated using :ref:`RQLLogicalOperators`.
 
-- For grouped queries (e.g. with a GROUPBY clause), all
-  selected variables should be grouped at the right of the keyword.
 
+.. Note:
+
+   About the negation operator (:keyword:`NOT`):
+
+   * "NOT X relation Y" is equivalent to "NOT EXISTS(X relation Y)"
+
+   * `Any X WHERE NOT X owned_by U` means "entities that have no relation
+     `owned_by`".
+
+   * `Any X WHERE NOT X owned_by U, U login "syt"` means "the entity has no
+     relation `owned_by` with the user syt". They may have a relation "owned_by"
+     with another user.
 
-- If the sorting method (SORT_METHOD) is not specified, then the sorting is
-  ascendant (`ASC`).
 
+In this clause, you can also use :keyword:`EXISTS` when you want to know if some
+expression is true and do not need the complete set of elements that make it
+true. Testing for existence is much faster than fetching the complete set of
+results, especially when you think about using `OR` against several expressions.
+For instance, if you want to retrieve versions which are in state "ready" or
+tagged by "priority", you should write :
+
+::
+
+    Any X ORDERBY PN,N
+    WHERE X num N, X version_of P, P name PN,
+          EXISTS(X in_state S, S name "ready")
+          OR EXISTS(T tags X, T name "priority")
+
+not
+
+::
+
+    Any X ORDERBY PN,N
+    WHERE X num N, X version_of P, P name PN,
+          (X in_state S, S name "ready")
+          OR (T tags X, T name "priority")
 
-- Aggregate Functions: COUNT, MIN, MAX, AVG, SUM, GROUP_CONCAT
-
-.. note::
-   Aggregate functions will return None if there is no result row.
+Both queries aren't at all equivalent :
+
+* the former will retrieve all versions, then check for each one whether it is
+  in the matching state or tagged by the expected tag,
+
+* the latter will retrieve all versions, states and tags (cartesian product!),
+  compute the join and then exclude each row which isn't in the matching state
+  nor tagged by the expected tag. This implies that you won't get any result
+  if the `in_state` or `tags` relation has no match at all.
 
-Having
-```````
 
+You can also use the question mark (`?`) to mark optional relations which allow
+you to select entities related **or not** to another. It is a concept similar
+to the `Left outer join`_:
+
+  the result of a left outer join (or simply left join) for table A and B
+  always contains all records of the "left" table (A), even if the
+  join-condition does not find any matching record in the "right" table (B).
+
+You must use the `?` behind a variable to specify that the relation toward it
+is optional. For instance:
+
+- Anomalies of a project attached or not to a version ::
+
+    Any X, V WHERE X concerns P, P eid 42, X corrected_in V?
+
+  You will get a result set containing all the project's tickets, with either the
+  version in which it's corrected or None for tickets not related to a version.
+
+
+- All cards and the project they document if any ::
+
+    Any C, P WHERE C is Card, P? documented_by C
+
+
+Having restrictions
+```````````````````
+
+The :keyword:`HAVING` clause, as in SQL, may be used to restrict a query
 according to value returned by an aggregate function, e.g.::
 
    Any X GROUPBY X WHERE X relation Y HAVING COUNT(Y) > 10
 
-It may however be used for something else...
-
-In the WHERE clause, we are limited to 3-expression, such thing can't be
-expressed directly as in the SQL's way. But this can be expressed using HAVING
-comparison expression.
-
-For instance, let's say you want to get people whose uppercased first name equals
-to another person uppercased first name::
-
-    Person X WHERE X firstname XFN, Y firstname YFN HAVING X > Y, UPPER(XFN) = UPPER(YFN)
-
-This open some new possibilities. Another example::
-
-    Person X WHERE X birthday XB HAVING YEAR(XB) = 2000
-
-That lets you use transformation functions not only in selection but for
-restriction as well and to by-pass limitation of the WHERE clause, which was the
-major flaw in the RQL language.
+It may however be used for something else: in the :keyword:`WHERE` clause, we
+are limited to triplet expressions, so some things can't be expressed there.
+Let's take an example: if you want to get people whose upper-cased first name
+equals another person's upper-cased first name, there is no proper way to
+express this using triplets, so you should use something like: ::
+
+    Any X WHERE X firstname XFN, Y firstname YFN, NOT X identity Y HAVING UPPER(XFN) = UPPER(YFN)
+
+Another example: imagine you want people born in 2000: ::
+
+    Any X WHERE X birthday XB HAVING YEAR(XB) = 2000
 
 Notice that while we would like this to work without the HAVING clause, this
 currently can't be done because it introduces an ambiguity in RQL's grammar
 that can't be handled by Yapps_, the parser's generator we're using.
 
-Negation
-````````
 
+Sub-queries
+```````````
+
+The :keyword:`WITH` keyword introduces the sub-queries clause. Each sub-query
+has the form:
+
+  V1(,V2) BEING (rql query)
 
-* A query such as `Document X WHERE NOT X owned_by U` means "the documents have
-  no relation `owned_by`".
+Variables at the left of the :keyword:`BEING` keyword define into which
+variables results from the sub-query will be mapped in the outer query.
+Sub-queries are separated from each other using a comma.
 
-But the query `Document X WHERE NOT X owned_by U, U login "syt"` means "the
-documents have no relation `owned_by` with the user syt". They may have a
-relation "owned_by" with another user.
 
+Let's say we want to retrieve for each project its number of versions and its
+number of tickets. Due to the nature of relational algebra behind the scenes,
+this can't be achieved using a single query. You have to write something along
+the lines of: ::
+
+    Any X, VC, TC WHERE X identity XX
+    WITH X, VC BEING (Any X, COUNT(V) GROUPBY X WHERE V version_of X),
+         XX, TC BEING (Any X, COUNT(T) GROUPBY X WHERE T ticket_of X)
 
-Identity
-````````
 
+Notice that we can't reuse the same variable name as alias for two different
+sub-queries, hence the usage of 'X' and 'XX' in this example, which are then
+unified using the special `identity` relation (see :ref:`XXX`).
 
-You can use the special relation `identity` in a query to
-add an identity constraint between two variables. This is equivalent
-to ``is`` in python::
-
-   Any A WHERE A comments B, A identity B
-
-return all objects that comment themselves. The relation `identity` is
-especially useful when defining the rules for securities with `RQLExpressions`.
 
+.. Warning:
+
+   Sub-queries define a new variable scope, so even if a variable has the same
+   name in the outer query and in the sub-query, they technically **aren't** the
+   same variable. So ::
+
+      Any W, REF WITH W, REF BEING
+          (Any W, REF WHERE W is Workcase, W ref REF,
+           W concerned_by D, D name "Logilab")
+
+   could be written:
+
+      Any W, REF WITH W, REF BEING
+          (Any W1, REF1 WHERE W1 is Workcase, W1 ref REF1,
+           W1 concerned_by D, D name "Logilab")
 
-Limit / offset
-``````````````
-::
-
-   Any P ORDERBY N LIMIT 5 OFFSET 10 WHERE P is Person, P firstname N
 
+   Also, when a variable is coming from a sub-query, you currently can't
+   reference its attributes or inlined relations in the outer query, you have
+   to fetch them in the sub-query. For instance, let's say we want to sort by
+   project name in our first example, we would have to write ::
+
+      Any X, VC, TC ORDERBY XN WHERE X identity XX
+      WITH X, XN, VC BEING (Any X, COUNT(V) GROUPBY X,XN WHERE V version_of X, X name XN),
+           XX, TC BEING (Any X, COUNT(T) GROUPBY X WHERE T ticket_of X)
 
-Exists
-```````
 
+   instead of ::
+
+      Any X, VC, TC ORDERBY XN WHERE X identity XX, X name XN,
+      WITH X, XN, VC BEING (Any X, COUNT(V) GROUPBY X WHERE V version_of X),
+           XX, TC BEING (Any X, COUNT(T) GROUPBY X WHERE T ticket_of X)
+
+   which would result in a SQL execution error.
 
-You can use `EXISTS` when you want to know if some expression is true and do not
-need the complete set of elements that make it true. Testing for existence is
-much faster than fetching the complete set of results.
-
-::
-
-    Any X ORDERBY PN,N
-    WHERE X num N, X version_of P, P name PN,
-          EXISTS(X in_state S, S name IN ("dev", "ready"))
-          OR EXISTS(T tags X, T name "priority")
-
-Optional relations
-``````````````````
-
-It is a similar concept that the `Left outer join`_:
-
-    the result of a left outer join (or simply left join) for table A and B
-    always contains all records of the "left" table (A), even if the
-    join-condition does not find any matching record in the "right" table (B).
-
-* They allow you to select entities related or not to another.
-
-* You must use the `?` behind the variable to specify that the relation
-  toward it is optional:
-
-  - Anomalies of a project attached or not to a version ::
-
-       Any X, V WHERE X concerns P, P eid 42, X corrected_in V?
-
-  - All cards and the project they document if necessary ::
-
-       Any C, P WHERE C is Card, P? documented_by C
-
-  Any T,P,V WHERE T is Ticket, T concerns P, T done_in V?
 
-Subqueries
-``````````
-::
 
+Union
+`````
+
+You may get a result set containing the concatenation of several queries using
+:keyword:`UNION`. The selection of each query should have the same number of
+columns. ::
+
+    (Any X, XN WHERE X is Person, X surname XN) UNION (Any X, XN WHERE X is Company, X name XN)
 
-    (Any X WHERE X is Person) UNION (Any X WHERE X is Company)
 
+.. _RQLFunctions:
+
+Available functions
+~~~~~~~~~~~~~~~~~~~
+
+Below is the list of aggregate and transformation functions that are supported
+natively by the framework. Notice that cubes may define additional functions.
+
+.. _RQLAggregateFunctions:
+
+Aggregate functions
+```````````````````
+
++--------------------+-----------------------------------------------------------+
+| :func:`COUNT`      | return the number of rows                                 |
++--------------------+-----------------------------------------------------------+
+| :func:`MIN`        | return the minimum value                                  |
++--------------------+-----------------------------------------------------------+
+| :func:`MAX`        | return the maximum value                                  |
++--------------------+-----------------------------------------------------------+
+| :func:`AVG`        | return the average value                                  |
++--------------------+-----------------------------------------------------------+
+| :func:`SUM`        | return the sum of values                                  |
++--------------------+-----------------------------------------------------------+
+| :func:`COMMA_JOIN` | return each value separated by a comma (for string only) |
++--------------------+-----------------------------------------------------------+
+
+All aggregate functions above take a single argument. Take care: some aggregate
+functions (e.g. :keyword:`MAX`, :keyword:`MIN`) may return `None` if there is no
+result row.
+
+.. _RQLStringFunctions:
+
+String transformation functions
+```````````````````````````````
+
++-------------------------+-----------------------------------------------------------------+
+| :func:`UPPER(String)`   | upper case the string                                           |
++-------------------------+-----------------------------------------------------------------+
+| :func:`LOWER(String)`   | lower case the string                                           |
++-------------------------+-----------------------------------------------------------------+
+| :func:`LENGTH(String)`  | return the length of the string                                 |
++-------------------------+-----------------------------------------------------------------+
+| :func:`SUBSTRING(       | extract from the string a string starting at given index and   |
+|  String, start, length)`| of given length                                                 |
++-------------------------+-----------------------------------------------------------------+
+| :func:`LIMIT_SIZE(      | if the length of the string is greater than given max size,    |
+|  String, max size)`     | strip it and add ellipsis ("..."). The resulting string will   |
+|                         | hence have max size + 3 characters                              |
++-------------------------+-----------------------------------------------------------------+
+| :func:`TEXT_LIMIT_SIZE( | similar to the above, but allows specifying the MIME type of   |
+|  String, format,        | the text contained by the string. Supported formats are        |
+|  max size)`             | text/html, text/xhtml and text/xml. All others will be         |
+|                         | considered as plain text. For non-plain-text formats, SGML     |
+|                         | tags will be first removed before limiting the string.         |
++-------------------------+-----------------------------------------------------------------+
+
+.. _RQLDateFunctions:
+
+Date extraction functions
+`````````````````````````
+
++--------------------------+-----------------------------------------+
+| :func:`YEAR(Date)`       | return the year of a date or datetime   |
++--------------------------+-----------------------------------------+
+| :func:`MONTH(Date)`      | return the month of a date or datetime  |
++--------------------------+-----------------------------------------+
+| :func:`DAY(Date)`        | return the day of a date or datetime    |
++--------------------------+-----------------------------------------+
+| :func:`HOUR(Datetime)`   | return the hour of a datetime           |
++--------------------------+-----------------------------------------+
+| :func:`MINUTE(Datetime)` | return the minute of a datetime         |
++--------------------------+-----------------------------------------+
+| :func:`SECOND(Datetime)` | return the second of a datetime         |
++--------------------------+-----------------------------------------+
+
+.. _RQLOtherFunctions:
+
+Other functions
+```````````````
+
++-----------------------+--------------------------------------------------------------------+
+| :func:`ABS(num)`      | return the absolute value of a number                              |
++-----------------------+--------------------------------------------------------------------+
+| :func:`RANDOM()`      | return a pseudo-random value from 0.0 to 1.0                       |
++-----------------------+--------------------------------------------------------------------+
+| :func:`FSPATH(X)`     | expect X to be an attribute whose value is stored in a             |
+|                       | :class:`BFSStorage` and return its path on the file system         |
++-----------------------+--------------------------------------------------------------------+
+| :func:`FTIRANK(X)`    | expect X to be an entity used in a has_text relation, and return a |
+|                       | number corresponding to the rank order of each resulting entity    |
++-----------------------+--------------------------------------------------------------------+
+| :func:`CAST(Type, X)` | expect X to be an attribute and return it cast into the given      |
+|                       | final type                                                         |
++-----------------------+--------------------------------------------------------------------+
 
-    DISTINCT Any W, REF
-          WITH W, REF BEING
-              (
-               (Any W, REF WHERE W is Workcase, W ref REF,
-                W concerned_by D, D name "Logilab")
-               UNION
-               (Any W, REF WHERE W is Workcase, W ref REF, '
-                W split_into WP, WP name "WP1")
-              )
-
-Function calls
-``````````````
-::
-
-    Any UPPER(N) WHERE P firstname N
-    Any LOWER(N) WHERE P firstname N
-
-Functions available on string: `UPPER`, `LOWER`
-
-.. XXX retrieve available function automatically
-
-For a performance issue, you can enrich the RQL dialect by RDMS (Relational
-database management system) functions.
 
+.. _RQLExamples:
 
 Examples
 ~~~~~~~~
@@ -349,6 +595,8 @@
 
    Any X where X is in (FirstType, SecondType)
 
+..
_RQLInsertQuery: + Insertion query ~~~~~~~~~~~~~~~ @@ -380,6 +628,8 @@ INSERT Person X: X name 'foo', X friend Y WHERE name 'nice' +.. _RQLSetQuery: + Update and relation creation queries ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -401,6 +651,8 @@ SET X know Y WHERE X friend Y +.. _RQLDeleteQuery: + Deletion query ~~~~~~~~~~~~~~ @@ -421,22 +673,6 @@ DELETE X friend Y WHERE X is Person, X name 'foo' -Virtual RQL relations -~~~~~~~~~~~~~~~~~~~~~ - -Those relations may only be used in RQL query and are not actual -attributes of your entities. - -* `has_text`: relation to use to query the full text index (only for - entities having fulltextindexed attributes). - -* `identity`: `Identity`_ relation to use to tell that a RQL variable should be - the same as another (but you've to use two different rql variables - for querying purpose) - -* `is`: relation to enforce possible types for a variable - - - .. _Yapps: http://theory.stanford.edu/~amitp/yapps/ .. _Left outer join: http://en.wikipedia.org/wiki/Join_(SQL)#Left_outer_join + diff -r 6397a9051f65 -r 134613d3b353 doc/book/en/devrepo/datamodel/define-workflows.rst --- a/doc/book/en/devrepo/datamodel/define-workflows.rst Wed Jul 20 14:09:42 2011 +0200 +++ b/doc/book/en/devrepo/datamodel/define-workflows.rst Wed Jul 20 18:22:41 2011 +0200 @@ -13,7 +13,7 @@ possible transitions from one state to another state. We will define a simple workflow for a blog, with only the following two states: -`submitted` and `published`. You may want to take a look at :ref:`_TutosBase` if +`submitted` and `published`. You may want to take a look at :ref:`TutosBase` if you want to quickly setup an instance running a blog. Setting up a workflow diff -r 6397a9051f65 -r 134613d3b353 doc/book/en/devweb/edition/dissection.rst --- a/doc/book/en/devweb/edition/dissection.rst Wed Jul 20 14:09:42 2011 +0200 +++ b/doc/book/en/devweb/edition/dissection.rst Wed Jul 20 18:22:41 2011 +0200 @@ -1,8 +1,8 @@ .. _form_dissection: -Dissection of a form --------------------- +Dissection of an entity form +---------------------------- This is done (again) with a vanilla instance of the `tracker`_ cube. We will populate the database with a bunch of entities and see @@ -10,44 +10,6 @@ .. _`tracker`: http://www.cubicweb.org/project/cubicweb-tracker -Patching the session object -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In order to play interactively with web side application objects, we -have to cheat a bit: we will decorate the session object with some -missing artifacts that should belong to a web request object. With -that we can instantiate and render forms interactively. - -The function below does the minimum to allow going through this -exercice. Some attributes or methods may be missing for other -purposes. It is nevertheless not complicated to enhance it if need -arises. - -.. 
-.. sourcecode:: python
-
-    def monkey_patch_session(session):
-        """ useful to use the cw shell session object
-        with web appobjects, which expect more than a plain
-        data repository session
-        """
-        # for autoform selection
-        session.json_request = False
-        session.url = lambda: u'http://perdu.com'
-        session.session = session
-        session.form = {}
-        session.list_form_param = lambda *args: []
-        # for render
-        session.use_fckeditor = lambda: False
-        session._ressources = []
-        session.add_js = session.add_css = lambda *args: session._ressources.append(args)
-        session.external_resource = lambda x:{}
-        session._tabcount = 0
-        def next_tabindex():
-            session._tabcount += 1
-            return session._tabcount
-        session.next_tabindex = next_tabindex
-        return session
-
 Populating the database
 ~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -71,10 +33,17 @@
 
 .. sourcecode:: python
 
-    >>> monkey_patch_session(session)
-    >>> form = session.vreg['forms'].select('edition', session, rset=rql('Ticket T'))
+    >>> cnx.use_web_compatible_requests('http://fakeurl.com')
+    >>> req = cnx.request()
+    >>> form = req.vreg['forms'].select('edition', req, rset=rql('Ticket T'))
     >>> html = form.render()
 
+.. note::
+
+   In order to play interactively with web-side application objects, we have to
+   cheat a bit so that the request object looks like an HTTP request object;
+   this is what calling :meth:`use_web_compatible_requests()` on the connection
+   does.
+
 This creates an automatic entity form. The ``.render()`` call yields
 an html (unicode) string. The html output is shown below (with
 internal fieldset omitted).
diff -r 6397a9051f65 -r 134613d3b353 doc/book/en/devweb/edition/form.rst
--- a/doc/book/en/devweb/edition/form.rst	Wed Jul 20 14:09:42 2011 +0200
+++ b/doc/book/en/devweb/edition/form.rst	Wed Jul 20 18:22:41 2011 +0200
@@ -48,9 +48,10 @@
      'sparql': []}
 
 
-The two most important form families here (for all pracitcal purposes)
-are `base` and `edition`. Most of the time one wants alterations of
-the AutomaticEntityForm (from the `edition` category).
+The two most important form families here (for all practical purposes) are `base`
+and `edition`. Most of the time one wants alterations of the
+:class:`AutomaticEntityForm` to generate custom forms to handle edition of an
+entity.
 
 The Automatic Entity Form
 ~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -214,6 +215,158 @@
 unpublished versions defined in the project (sorted by number) for
 which the current user is allowed to establish the relation.
 
+
+Building a self-posted form with custom fields/widgets
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Sometimes you want a form that is not related to entity edition. For those,
+you'll have to handle form posting by yourself. Here is a complete example of
+how to achieve this (and more).
+
+Imagine you want a form that selects a month period. There is no proper
+field/widget to handle this in CubicWeb, so let's start by defining our own:
+
+.. sourcecode:: python
+
+    # let's have the whole import list at the beginning, even those necessary for
+    # subsequent snippets
+    from logilab.common import date
+    from logilab.mtconverter import xml_escape
+    from cubicweb.view import View
+    from cubicweb.selectors import match_kwargs
+    from cubicweb.web import RequestError, ProcessFormError
+    from cubicweb.web import formfields as fields, formwidgets as wdgs
+    from cubicweb.web.views import forms, calendar
+
+    class MonthSelect(wdgs.Select):
+        """Custom widget to display month and year. Expect value to be given as a
+        date instance.
+        """
+
+        def format_value(self, form, field, value):
+            return u'%s/%s' % (value.year, value.month)
+
+        def process_field_data(self, form, field):
+            val = super(MonthSelect, self).process_field_data(form, field)
+            try:
+                year, month = val.split('/')
+                year = int(year)
+                month = int(month)
+                return date.date(year, month, 1)
+            except ValueError:
+                raise ProcessFormError(
+                    form._cw._('badly formatted date string %s') % val)
+
+
+    class MonthPeriodField(fields.CompoundField):
+        """custom field composed of two subfields, 'begin_month' and 'end_month'.
+
+        It expects to be used on a form that has 'mindate' and 'maxdate' in its
+        extra arguments, telling the range of months to display.
+        """
+
+        def __init__(self, *args, **kwargs):
+            kwargs.setdefault('widget', wdgs.IntervalWidget())
+            super(MonthPeriodField, self).__init__(
+                [fields.StringField(name='begin_month',
+                                    choices=self.get_range, sort=False,
+                                    value=self.get_mindate,
+                                    widget=MonthSelect()),
+                 fields.StringField(name='end_month',
+                                    choices=self.get_range, sort=False,
+                                    value=self.get_maxdate,
+                                    widget=MonthSelect())], *args, **kwargs)
+
+        @staticmethod
+        def get_range(form, field):
+            mindate = date.todate(form.cw_extra_kwargs['mindate'])
+            maxdate = date.todate(form.cw_extra_kwargs['maxdate'])
+            assert mindate <= maxdate
+            _ = form._cw._
+            months = []
+            while mindate <= maxdate:
+                label = '%s %s' % (_(calendar.MONTHNAMES[mindate.month - 1]),
+                                   mindate.year)
+                value = field.widget.format_value(form, field, mindate)
+                months.append( (label, value) )
+                mindate = date.next_month(mindate)
+            return months
+
+        @staticmethod
+        def get_mindate(form, field):
+            return form.cw_extra_kwargs['mindate']
+
+        @staticmethod
+        def get_maxdate(form, field):
+            return form.cw_extra_kwargs['maxdate']
+
+        def process_posted(self, form):
+            for field, value in super(MonthPeriodField, self).process_posted(form):
+                if field.name == 'end_month':
+                    value = date.last_day(value)
+                yield field, value
+
+
+Here we first define a widget that will be used to select the beginning and the
+end of the period, displaying months like '<month name> YYYY' but using
+'YYYY/mm' as the actual value.
+
+We then define a field that will actually hold two fields, one for the beginning
+and another for the end of the period. Each subfield uses the widget we defined
+earlier, and the outer field itself uses the standard
+:class:`IntervalWidget`. The field adds some logic:
+
+* a vocabulary generation function `get_range`, used to populate each sub-field
+
+* two 'value' functions `get_mindate` and `get_maxdate`, used to tell
+  subfields which value they should consider on form initialization
+
+* overriding of `process_posted`, called when the form is being posted, so that
+  the end of the period is properly set to the last day of the month.
+
+Now, we can define a very simple form:
+
+.. sourcecode:: python
+
+    class MonthPeriodSelectorForm(forms.FieldsForm):
+        __regid__ = 'myform'
+        __select__ = match_kwargs('mindate', 'maxdate')
+
+        form_buttons = [wdgs.SubmitButton()]
+        form_renderer_id = 'onerowtable'
+        period = MonthPeriodField()
+
+
+where we simply add our field, set a submit button and use a very simple
+renderer (try others!). Also we specify a selector that ensures the form will
+have the arguments necessary to our field.
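+
+Before wiring the form into a view, the widget's value round-trip can be
+sanity-checked interactively. A minimal illustrative snippet, not part of the
+example itself (the `form` and `field` arguments are unused by
+:meth:`format_value`, hence the `None` placeholders):
+
+.. sourcecode:: python
+
+    from datetime import date as pydate  # stdlib date, not logilab's helper
+
+    widget = MonthSelect()
+    assert widget.format_value(None, None, pydate(2011, 7, 1)) == u'2011/7'
+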
+
+Now, we need a view that will wrap the form and handle the post when it occurs,
+simply displaying posted values in the page:
+
+.. sourcecode:: python
+
+    class SelfPostingForm(View):
+        __regid__ = 'myformview'
+
+        def call(self):
+            mindate, maxdate = date.date(2010, 1, 1), date.date(2012, 1, 1)
+            form = self._cw.vreg['forms'].select(
+                'myform', self._cw, mindate=mindate, maxdate=maxdate, action='')
+            try:
+                posted = form.process_posted()
+                self.w(u'<p>posted values %s</p>' % xml_escape(repr(posted)))
+            except RequestError: # no specified period asked
+                pass
+            form.render(w=self.w, formvalues=self._cw.form)
+
+
+Notice the usage of the :meth:`process_posted` method, which returns a
+dictionary of typed values (because they have been processed by the field). In
+our case, when the form is posted you should see a dictionary with
+'begin_month' and 'end_month' as keys, with the selected dates (python `date`
+objects) as values.
+
+
 APIs
 ~~~~
diff -r 6397a9051f65 -r 134613d3b353 doc/book/en/devweb/resource.rst
--- a/doc/book/en/devweb/resource.rst	Wed Jul 20 14:09:42 2011 +0200
+++ b/doc/book/en/devweb/resource.rst	Wed Jul 20 18:22:41 2011 +0200
@@ -8,7 +8,7 @@
 Static files handling
 ---------------------
 
-.. automethod:: cubicweb.web.webconfig.WebConfiguration.static_directory
+.. autoattribute:: cubicweb.web.webconfig.WebConfiguration.static_directory
 .. automethod:: cubicweb.web.webconfig.WebConfiguration.static_file_exists
 .. automethod:: cubicweb.web.webconfig.WebConfiguration.static_file_open
 .. automethod:: cubicweb.web.webconfig.WebConfiguration.static_file_add
diff -r 6397a9051f65 -r 134613d3b353 doc/book/en/intro/concepts.rst
--- a/doc/book/en/intro/concepts.rst	Wed Jul 20 14:09:42 2011 +0200
+++ b/doc/book/en/intro/concepts.rst	Wed Jul 20 18:22:41 2011 +0200
@@ -1,4 +1,3 @@
-
 .. -*- coding: utf-8 -*-
 
 .. _Concepts:
@@ -31,7 +30,7 @@
 
 .. note:: The command :command:`cubicweb-ctl list` displays the list of cubes
-installed on your system.
+   installed on your system.
 
 .. _`CubicWeb.org Forge`: http://www.cubicweb.org/project/
 .. _`cubicweb-blog`: http://www.cubicweb.org/project/cubicweb-blog
diff -r 6397a9051f65 -r 134613d3b353 entities/test/unittest_wfobjs.py
--- a/entities/test/unittest_wfobjs.py	Wed Jul 20 14:09:42 2011 +0200
+++ b/entities/test/unittest_wfobjs.py	Wed Jul 20 18:22:41 2011 +0200
@@ -165,7 +165,7 @@
         user = self.user()
         iworkflowable = user.cw_adapt_to('IWorkflowable')
         iworkflowable.fire_transition('deactivate', comment=u'deactivate user')
-        user.clear_all_caches()
+        user.cw_clear_all_caches()
         self.assertEqual(iworkflowable.state, 'deactivated')
         self._test_manager_deactivate(user)
         trinfo = self._test_manager_deactivate(user)
@@ -192,7 +192,7 @@
         self.commit()
         iworkflowable.fire_transition('wake up')
         self.commit()
-        user.clear_all_caches()
+        user.cw_clear_all_caches()
         self.assertEqual(iworkflowable.state, 'deactivated')
         # XXX test managers can change state without matching transition
@@ -274,14 +274,14 @@
         self.assertEqual(iworkflowable.subworkflow_input_transition(), None)
         iworkflowable.fire_transition('swftr1', u'go')
         self.commit()
-        group.clear_all_caches()
+        group.cw_clear_all_caches()
         self.assertEqual(iworkflowable.current_state.eid, swfstate1.eid)
         self.assertEqual(iworkflowable.current_workflow.eid, swf.eid)
         self.assertEqual(iworkflowable.main_workflow.eid, mwf.eid)
         self.assertEqual(iworkflowable.subworkflow_input_transition().eid, swftr1.eid)
         iworkflowable.fire_transition('tr1', u'go')
         self.commit()
-        group.clear_all_caches()
+        group.cw_clear_all_caches()
         self.assertEqual(iworkflowable.current_state.eid, state2.eid)
         self.assertEqual(iworkflowable.current_workflow.eid, mwf.eid)
         self.assertEqual(iworkflowable.main_workflow.eid, mwf.eid)
@@ -295,10 +295,10 @@
         # force back to state1
         iworkflowable.change_state('state1', u'gadget')
         iworkflowable.fire_transition('swftr1', u'au')
-        group.clear_all_caches()
+        group.cw_clear_all_caches()
         iworkflowable.fire_transition('tr2', u'chapeau')
         self.commit()
-        group.clear_all_caches()
+
group.cw_clear_all_caches() self.assertEqual(iworkflowable.current_state.eid, state3.eid) self.assertEqual(iworkflowable.current_workflow.eid, mwf.eid) self.assertEqual(iworkflowable.main_workflow.eid, mwf.eid) @@ -390,7 +390,7 @@ ): iworkflowable.fire_transition(trans) self.commit() - group.clear_all_caches() + group.cw_clear_all_caches() self.assertEqual(iworkflowable.state, nextstate) @@ -408,11 +408,11 @@ wf.add_state('asleep', initial=True) self.execute('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s', {'wf': wf.eid, 'x': self.member.eid}) - self.member.clear_all_caches() + self.member.cw_clear_all_caches() iworkflowable = self.member.cw_adapt_to('IWorkflowable') self.assertEqual(iworkflowable.state, 'activated')# no change before commit self.commit() - self.member.clear_all_caches() + self.member.cw_clear_all_caches() self.assertEqual(iworkflowable.current_workflow.eid, wf.eid) self.assertEqual(iworkflowable.state, 'asleep') self.assertEqual(iworkflowable.workflow_history, ()) @@ -429,7 +429,7 @@ self.execute('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s', {'wf': wf.eid, 'x': self.member.eid}) self.commit() - self.member.clear_all_caches() + self.member.cw_clear_all_caches() self.assertEqual(iworkflowable.current_workflow.eid, wf.eid) self.assertEqual(iworkflowable.state, 'asleep') self.assertEqual(parse_hist(iworkflowable.workflow_history), @@ -472,10 +472,10 @@ self.commit() self.execute('DELETE X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s', {'wf': wf.eid, 'x': self.member.eid}) - self.member.clear_all_caches() + self.member.cw_clear_all_caches() self.assertEqual(iworkflowable.state, 'asleep')# no change before commit self.commit() - self.member.clear_all_caches() + self.member.cw_clear_all_caches() self.assertEqual(iworkflowable.current_workflow.name, "default user workflow") self.assertEqual(iworkflowable.state, 'activated') self.assertEqual(parse_hist(iworkflowable.workflow_history), @@ -504,13 +504,13 @@ self.execute('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s', {'wf': wf.eid, 'x': user.eid}) self.commit() - user.clear_all_caches() + user.cw_clear_all_caches() self.assertEqual(iworkflowable.state, 'asleep') self.assertEqual([t.name for t in iworkflowable.possible_transitions()], ['rest']) iworkflowable.fire_transition('rest') self.commit() - user.clear_all_caches() + user.cw_clear_all_caches() self.assertEqual(iworkflowable.state, 'asleep') self.assertEqual([t.name for t in iworkflowable.possible_transitions()], ['rest']) @@ -520,7 +520,7 @@ self.commit() iworkflowable.fire_transition('rest') self.commit() - user.clear_all_caches() + user.cw_clear_all_caches() self.assertEqual(iworkflowable.state, 'dead') self.assertEqual(parse_hist(iworkflowable.workflow_history), [('asleep', 'asleep', 'rest', None), @@ -557,7 +557,7 @@ def setUp(self): CubicWebTC.setUp(self) self.wf = self.session.user.cw_adapt_to('IWorkflowable').current_workflow - self.session.set_pool() + self.session.set_cnxset() self.s_activated = self.wf.state_by_name('activated').eid self.s_deactivated = self.wf.state_by_name('deactivated').eid self.s_dummy = self.wf.add_state(u'dummy').eid @@ -629,13 +629,13 @@ iworkflowable = user.cw_adapt_to('IWorkflowable') iworkflowable.fire_transition('deactivate') cnx.commit() - session.set_pool() + session.set_cnxset() with self.assertRaises(ValidationError) as cm: iworkflowable.fire_transition('deactivate') self.assertEqual(self._cleanup_msg(cm.exception.errors['by_transition-subject']), u"transition isn't allowed from") 
cnx.rollback() - session.set_pool() + session.set_cnxset() # get back now iworkflowable.fire_transition('activate') cnx.commit() diff -r 6397a9051f65 -r 134613d3b353 entities/wfobjs.py --- a/entities/wfobjs.py Wed Jul 20 14:09:42 2011 +0200 +++ b/entities/wfobjs.py Wed Jul 20 18:22:41 2011 +0200 @@ -326,8 +326,8 @@ result[ep.subwf_state.eid] = ep.destination and ep.destination.eid return result - def clear_all_caches(self): - super(WorkflowTransition, self).clear_all_caches() + def cw_clear_all_caches(self): + super(WorkflowTransition, self).cw_clear_all_caches() clear_cache(self, 'exit_points') diff -r 6397a9051f65 -r 134613d3b353 entity.py --- a/entity.py Wed Jul 20 14:09:42 2011 +0200 +++ b/entity.py Wed Jul 20 18:22:41 2011 +0200 @@ -395,8 +395,10 @@ @cached def cw_metainformation(self): - res = dict(zip(('type', 'source', 'extid'), self._cw.describe(self.eid))) - res['source'] = self._cw.source_defs()[res['source']] + res = self._cw.describe(self.eid, asdict=True) + # use 'asource' and not 'source' since this is the actual source, + # while 'source' is the physical source (where it's stored) + res['source'] = self._cw.source_defs()[res.pop('asource')] return res def cw_check_perm(self, action): @@ -431,9 +433,11 @@ use_ext_id = False if 'base_url' not in kwargs and \ getattr(self._cw, 'search_state', ('normal',))[0] == 'normal': - baseurl = self.cw_metainformation()['source'].get('base-url') - if baseurl: - kwargs['base_url'] = baseurl + sourcemeta = self.cw_metainformation()['source'] + if sourcemeta.get('use-cwuri-as-url'): + return self.cwuri # XXX consider kwargs? + if sourcemeta.get('base-url'): + kwargs['base_url'] = sourcemeta['base-url'] use_ext_id = True if method in (None, 'view'): try: @@ -942,7 +946,7 @@ assert role self._cw_related_cache.pop('%s_%s' % (rtype, role), None) - def clear_all_caches(self): # XXX cw_clear_all_caches + def cw_clear_all_caches(self): """flush all caches on this entity. Further attributes/relations access will triggers new database queries to get back values. @@ -1024,6 +1028,10 @@ # deprecated stuff ######################################################### + @deprecated('[3.13] use entity.cw_clear_all_caches()') + def clear_all_caches(self): + return self.cw_clear_all_caches() + @deprecated('[3.9] use entity.cw_attr_value(attr)') def get_value(self, name): return self.cw_attr_value(name) diff -r 6397a9051f65 -r 134613d3b353 etwist/server.py --- a/etwist/server.py Wed Jul 20 14:09:42 2011 +0200 +++ b/etwist/server.py Wed Jul 20 18:22:41 2011 +0200 @@ -1,4 +1,4 @@ -# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved. +# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved. # contact http://www.logilab.fr/ -- mailto:contact@logilab.fr # # This file is part of CubicWeb. @@ -17,14 +17,19 @@ # with CubicWeb. If not, see . 
"""twisted server for CubicWeb web instances""" +from __future__ import with_statement + __docformat__ = "restructuredtext en" import sys import os +import os.path as osp import select import errno import traceback import threading +import re +import hashlib from os.path import join from time import mktime from datetime import date, timedelta @@ -41,7 +46,8 @@ from logilab.common.decorators import monkeypatch -from cubicweb import AuthenticationError, ConfigurationError, CW_EVENT_MANAGER +from cubicweb import (AuthenticationError, ConfigurationError, + CW_EVENT_MANAGER, CubicWebException) from cubicweb.utils import json_dumps from cubicweb.web import Redirect, DirectResponse, StatusResponse, LogOut from cubicweb.web.application import CubicWebPublisher @@ -70,13 +76,84 @@ code=http.FORBIDDEN, stream='Access forbidden') -class File(static.File): - """Prevent from listing directories""" + +class NoListingFile(static.File): + + def set_expires(self, request): + if not self.config.debugmode: + # XXX: Don't provide additional resource information to error responses + # + # the HTTP RFC recommands not going further than 1 year ahead + expires = date.today() + timedelta(days=6*30) + request.setHeader('Expires', generateDateTime(mktime(expires.timetuple()))) + def directoryListing(self): return ForbiddenDirectoryLister() -class LongTimeExpiringFile(File): +class DataLookupDirectory(NoListingFile): + def __init__(self, config, path): + self.md5_version = config.instance_md5_version() + NoListingFile.__init__(self, path) + self.config = config + self.here = path + self._defineChildResources() + if self.config.debugmode: + self.data_modconcat_basepath = '/data/??' + else: + self.data_modconcat_basepath = '/data/%s/??' % self.md5_version + + def _defineChildResources(self): + self.putChild(self.md5_version, self) + + def getChild(self, path, request): + if not path: + uri = request.uri + if uri.startswith('/https/'): + uri = uri[6:] + if uri.startswith(self.data_modconcat_basepath): + resource_relpath = uri[len(self.data_modconcat_basepath):] + if resource_relpath: + paths = resource_relpath.split(',') + try: + self.set_expires(request) + return ConcatFiles(self.config, paths) + except ConcatFileNotFoundError: + return self.childNotFound + return self.directoryListing() + childpath = join(self.here, path) + dirpath, rid = self.config.locate_resource(childpath) + if dirpath is None: + # resource not found + return self.childNotFound + filepath = os.path.join(dirpath, rid) + if os.path.isdir(filepath): + resource = DataLookupDirectory(self.config, childpath) + # cache resource for this segment path to avoid recomputing + # directory lookup + self.putChild(path, resource) + return resource + else: + self.set_expires(request) + return NoListingFile(filepath) + + +class FCKEditorResource(NoListingFile): + def __init__(self, config, path): + NoListingFile.__init__(self, path) + self.config = config + + def getChild(self, path, request): + pre_path = request.path.split('/')[1:] + if pre_path[0] == 'https': + pre_path.pop(0) + uiprops = self.config.https_uiprops + else: + uiprops = self.config.uiprops + return static.File(osp.join(uiprops['FCKEDITOR_PATH'], path)) + + +class LongTimeExpiringFile(DataLookupDirectory): """overrides static.File and sets a far future ``Expires`` date on the resouce. @@ -88,28 +165,84 @@ etc. 
""" - def render(self, request): - # XXX: Don't provide additional resource information to error responses - # - # the HTTP RFC recommands not going further than 1 year ahead - expires = date.today() + timedelta(days=6*30) - request.setHeader('Expires', generateDateTime(mktime(expires.timetuple()))) - return File.render(self, request) + def _defineChildResources(self): + pass + + +class ConcatFileNotFoundError(CubicWebException): + pass + + +class ConcatFiles(LongTimeExpiringFile): + def __init__(self, config, paths): + _, ext = osp.splitext(paths[0]) + self._resources = {} + # create a unique / predictable filename. We don't consider cubes + # version since uicache is cleared at server startup, and file's dates + # are checked in debug mode + fname = 'cache_concat_' + hashlib.md5(';'.join(paths)).hexdigest() + ext + filepath = osp.join(config.appdatahome, 'uicache', fname) + LongTimeExpiringFile.__init__(self, config, filepath) + self._concat_cached_filepath(filepath, paths) + + def _resource(self, path): + try: + return self._resources[path] + except KeyError: + self._resources[path] = self.config.locate_resource(path) + return self._resources[path] + + def _concat_cached_filepath(self, filepath, paths): + if not self._up_to_date(filepath, paths): + with open(filepath, 'wb') as f: + for path in paths: + dirpath, rid = self._resource(path) + if rid is None: + # In production mode log an error, do not return a 404 + # XXX the erroneous content is cached anyway + LOGGER.error('concatenated data url error: %r file ' + 'does not exist', path) + if self.config.debugmode: + raise ConcatFileNotFoundError(path) + else: + for line in open(osp.join(dirpath, rid)): + f.write(line) + f.write('\n') + + def _up_to_date(self, filepath, paths): + """ + The concat-file is considered up-to-date if it exists. 
+ In debug mode, an additional check is performed to make sure that + concat-file is more recent than all concatenated files + """ + if not osp.isfile(filepath): + return False + if self.config.debugmode: + concat_lastmod = os.stat(filepath).st_mtime + for path in paths: + dirpath, rid = self._resource(path) + if rid is None: + raise ConcatFileNotFoundError(path) + path = osp.join(dirpath, rid) + if os.stat(path).st_mtime > concat_lastmod: + return False + return True class CubicWebRootResource(resource.Resource): def __init__(self, config, vreg=None): + resource.Resource.__init__(self) self.config = config # instantiate publisher here and not in init_publisher to get some # checks done before daemonization (eg versions consistency) self.appli = CubicWebPublisher(config, vreg=vreg) self.base_url = config['base-url'] self.https_url = config['https-url'] - self.children = {} - self.static_directories = set(('data%s' % config.instance_md5_version(), - 'data', 'static', 'fckeditor')) global MAX_POST_LENGTH MAX_POST_LENGTH = config['max-post-length'] + self.putChild('static', NoListingFile(config.static_directory)) + self.putChild('fckeditor', FCKEditorResource(self.config, '')) + self.putChild('data', DataLookupDirectory(self.config, '')) def init_publisher(self): config = self.config @@ -152,38 +285,6 @@ def getChild(self, path, request): """Indicate which resource to use to process down the URL's path""" - pre_path = request.path.split('/')[1:] - if pre_path[0] == 'https': - pre_path.pop(0) - uiprops = self.config.https_uiprops - else: - uiprops = self.config.uiprops - directory = pre_path[0] - # Anything in data/, static/, fckeditor/ and the generated versioned - # data directory is treated as static files - if directory in self.static_directories: - # take care fckeditor may appears as root directory or as a data - # subdirectory - if directory == 'static': - return File(self.config.static_directory) - if directory == 'fckeditor': - return File(uiprops['FCKEDITOR_PATH']) - if directory != 'data': - # versioned directory, use specific file with http cache - # headers so their are cached for a very long time - cls = LongTimeExpiringFile - else: - cls = File - if path == 'fckeditor': - return cls(uiprops['FCKEDITOR_PATH']) - if path == directory: # recurse - return self - datadir, path = self.config.locate_resource(path) - if datadir is None: - return self # recurse - self.debug('static file %s from %s', path, datadir) - return cls(join(datadir, path)) - # Otherwise we use this single resource return self def render(self, request): @@ -409,6 +510,7 @@ # serve it via standard HTTP on port set in the configuration port = config['port'] or 8080 interface = config['interface'] + reactor.suggestThreadPoolSize(config['webserver-threadpool-size']) reactor.listenTCP(port, website, interface=interface) if not config.debugmode: if sys.platform == 'win32': diff -r 6397a9051f65 -r 134613d3b353 etwist/test/unittest_server.py --- a/etwist/test/unittest_server.py Wed Jul 20 14:09:42 2011 +0200 +++ b/etwist/test/unittest_server.py Wed Jul 20 18:22:41 2011 +0200 @@ -15,8 +15,12 @@ # # You should have received a copy of the GNU Lesser General Public License along # with CubicWeb. If not, see . 
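+
+# Reminder when reading the cache tests below (a sketch of the naming scheme
+# implemented by ConcatFiles.__init__ in etwist/server.py above, not a new API):
+#
+#   _, ext = osp.splitext(paths[0])
+#   fname = 'cache_concat_' + hashlib.md5(';'.join(paths)).hexdigest() + ext
+#   filepath = osp.join(config.appdatahome, 'uicache', fname)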
+ +import os, os.path as osp, glob + from cubicweb.devtools.testlib import CubicWebTC -from cubicweb.etwist.server import host_prefixed_baseurl +from cubicweb.etwist.server import (host_prefixed_baseurl, ConcatFiles, + ConcatFileNotFoundError) class HostPrefixedBaseURLTC(CubicWebTC): @@ -50,3 +54,30 @@ self._check('http://localhost:8080/hg/', 'code.cubicweb.org', 'http://localhost:8080/hg/') + +class ConcatFilesTC(CubicWebTC): + + def tearDown(self): + super(ConcatFilesTC, self).tearDown() + self._cleanup_concat_cache() + self.config.debugmode = False + + def _cleanup_concat_cache(self): + uicachedir = osp.join(self.config.apphome, 'uicache') + for fname in glob.glob(osp.join(uicachedir, 'cache_concat_*')): + os.unlink(osp.join(uicachedir, fname)) + + def test_cache(self): + concat = ConcatFiles(self.config, ('cubicweb.ajax.js', 'jquery.js')) + self.failUnless(osp.isfile(concat.path)) + + def test_404(self): + # when not in debug mode, should not crash + ConcatFiles(self.config, ('cubicweb.ajax.js', 'dummy.js')) + # in debug mode, raise error + self.config.debugmode = True + try: + self.assertRaises(ConcatFileNotFoundError, ConcatFiles, self.config, + ('cubicweb.ajax.js', 'dummy.js')) + finally: + self.config.debugmode = False diff -r 6397a9051f65 -r 134613d3b353 etwist/twconfig.py --- a/etwist/twconfig.py Wed Jul 20 14:09:42 2011 +0200 +++ b/etwist/twconfig.py Wed Jul 20 18:22:41 2011 +0200 @@ -90,6 +90,13 @@ 'help': 'run a pyro server', 'group': 'main', 'level': 1, }), + ('webserver-threadpool-size', + {'type': 'int', + 'default': 4, + 'help': "size of twisted's reactor threadpool. It should probably be not too \ +much greater than connection-poolsize", + 'group': 'web', 'level': 3, + }), ) + WebConfiguration.options) def server_file(self): diff -r 6397a9051f65 -r 134613d3b353 etwist/twctl.py --- a/etwist/twctl.py Wed Jul 20 14:09:42 2011 +0200 +++ b/etwist/twctl.py Wed Jul 20 18:22:41 2011 +0200 @@ -17,6 +17,10 @@ # with CubicWeb. If not, see . 
"""cubicweb-clt handlers for twisted""" +from os.path import join + +from logilab.common.shellutils import rm + from cubicweb.toolsutils import CommandHandler from cubicweb.web.webctl import WebCreateHandler @@ -32,6 +36,9 @@ def start_server(self, config): from cubicweb.etwist import server + config.info('clear ui caches') + for cachedir in ('uicache', 'uicachehttps'): + rm(join(config.appdatahome, cachedir, '*')) return server.run(config) class TWStopHandler(CommandHandler): diff -r 6397a9051f65 -r 134613d3b353 ext/rest.py --- a/ext/rest.py Wed Jul 20 14:09:42 2011 +0200 +++ b/ext/rest.py Wed Jul 20 18:22:41 2011 +0200 @@ -200,7 +200,7 @@ context = state.document.settings.context context._cw.add_css('pygments.css') except AttributeError: - # used outside cubicweb + # used outside cubicweb XXX use hasattr instead pass return [nodes.raw('', parsed, format='html')] diff -r 6397a9051f65 -r 134613d3b353 hooks/__init__.py --- a/hooks/__init__.py Wed Jul 20 14:09:42 2011 +0200 +++ b/hooks/__init__.py Wed Jul 20 18:22:41 2011 +0200 @@ -46,28 +46,26 @@ session.commit() finally: session.close() - self.repo.looping_task(60*60*24, cleanup_old_transactions, self.repo) + if self.repo.config['undo-support']: + self.repo.looping_task(60*60*24, cleanup_old_transactions, + self.repo) def update_feeds(repo): - session = repo.internal_session() - try: - # don't iter on repo.sources which doesn't include copy based - # sources (the one we're looking for) - for source in repo.sources_by_eid.itervalues(): - if (not source.copy_based_source - or not repo.config.source_enabled(source) - or not source.config['synchronize']): - continue - try: - stats = source.pull_data(session) - if stats.get('created'): - source.info('added %s entities', len(stats['created'])) - if stats.get('updated'): - source.info('updated %s entities', len(stats['updated'])) - session.commit() - except Exception, exc: - session.exception('while trying to update feed %s', source) - session.rollback() - session.set_pool() - finally: - session.close() + # don't iter on repo.sources which doesn't include copy based + # sources (the one we're looking for) + for source in repo.sources_by_eid.itervalues(): + if (not source.copy_based_source + or not repo.config.source_enabled(source) + or not source.config['synchronize']): + continue + session = repo.internal_session() + try: + stats = source.pull_data(session) + if stats.get('created'): + source.info('added %s entities', len(stats['created'])) + if stats.get('updated'): + source.info('updated %s entities', len(stats['updated'])) + except Exception, exc: + session.exception('while trying to update feed %s', source) + finally: + session.close() self.repo.looping_task(60, update_feeds, self.repo) diff -r 6397a9051f65 -r 134613d3b353 hooks/metadata.py --- a/hooks/metadata.py Wed Jul 20 14:09:42 2011 +0200 +++ b/hooks/metadata.py Wed Jul 20 18:22:41 2011 +0200 @@ -23,6 +23,7 @@ from cubicweb.selectors import is_instance from cubicweb.server import hook +from cubicweb.server.edition import EditedEntity class MetaDataHook(hook.Hook): @@ -68,8 +69,9 @@ def precommit_event(self): session = self.session relations = [(eid, session.user.eid) for eid in self.get_data() - # don't consider entities that have been created and - # deleted in the same transaction + # don't consider entities that have been created and deleted in + # the same transaction, nor ones where created_by has been + # explicitly set if not session.deleted_in_transaction(eid) and \ not session.entity_from_eid(eid).created_by] 
session.add_relations([('created_by', relations)])
@@ -141,3 +143,76 @@
         session.repo.system_source.index_entity(
             session, session.entity_from_eid(self.eidto))
+
+
+# entity source handling #######################################################
+
+class ChangeEntityUpdateCaches(hook.Operation):
+    def postcommit_event(self):
+        self.oldsource.reset_caches()
+        repo = self.session.repo
+        entity = self.entity
+        extid = entity.cw_metainformation()['extid']
+        repo._type_source_cache[entity.eid] = (
+            entity.__regid__, self.newsource.uri, None, self.newsource.uri)
+        if self.oldsource.copy_based_source:
+            uri = 'system'
+        else:
+            uri = self.oldsource.uri
+        repo._extid_cache[(extid, uri)] = -entity.eid
+
+class ChangeEntitySourceDeleteHook(MetaDataHook):
+    """support for moving an entity from an external source by watching 'Any
+    cw_source CWSource' relation
+    """
+
+    __regid__ = 'cw.metadata.source-change'
+    __select__ = MetaDataHook.__select__ & hook.match_rtype('cw_source')
+    events = ('before_delete_relation',)
+
+    def __call__(self):
+        if (self._cw.deleted_in_transaction(self.eidfrom)
+            or self._cw.deleted_in_transaction(self.eidto)):
+            return
+        schange = self._cw.transaction_data.setdefault('cw_source_change', {})
+        schange[self.eidfrom] = self.eidto
+
+
+class ChangeEntitySourceAddHook(MetaDataHook):
+    __regid__ = 'cw.metadata.source-change'
+    __select__ = MetaDataHook.__select__ & hook.match_rtype('cw_source')
+    events = ('before_add_relation',)
+
+    def __call__(self):
+        schange = self._cw.transaction_data.get('cw_source_change')
+        if schange is not None and self.eidfrom in schange:
+            newsource = self._cw.entity_from_eid(self.eidto)
+            if newsource.name != 'system':
+                raise Exception('changing source to something else than the '
+                                'system source is unsupported')
+            syssource = newsource.repo_source
+            oldsource = self._cw.entity_from_eid(schange[self.eidfrom])
+            entity = self._cw.entity_from_eid(self.eidfrom)
+            # copy entity if necessary
+            if not oldsource.repo_source.copy_based_source:
+                entity.complete(skip_bytes=False)
+                entity.cw_edited = EditedEntity(entity, **entity.cw_attr_cache)
+                syssource.add_entity(self._cw, entity)
+            # we don't want the moved entity to be reimported later. To
+            # distinguish this state, the trick is to change the associated
+            # record in the 'entities' system table with eid=-eid while leaving
+            # other fields unchanged, and to add a new record with eid=eid,
+            # source='system'. External sources will then have to consider the
+            # case where `extid2eid` returns a negative eid as 'this entity was
+            # known but has been moved, ignore it'.
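+            # Illustration with hypothetical values: for an entity with eid
+            # 1234 moved from a source named 'myfeed', the UPDATE below turns
+            # the existing 'entities' row into eid=-1234 (source set to the
+            # new source's name), while the INSERT adds a fresh row with
+            # eid=1234, source='system', asource='system' and extid=None.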
+ self._cw.system_sql('UPDATE entities SET eid=-eid,source=%(source)s ' + 'WHERE eid=%(eid)s', + {'eid': self.eidfrom, 'source': newsource.name}) + attrs = {'type': entity.__regid__, 'eid': entity.eid, 'extid': None, + 'source': 'system', 'asource': 'system', + 'mtime': datetime.now()} + self._cw.system_sql(syssource.sqlgen.insert('entities', attrs), attrs) + # register an operation to update repository/sources caches + ChangeEntityUpdateCaches(self._cw, entity=entity, + oldsource=oldsource.repo_source, + newsource=syssource) diff -r 6397a9051f65 -r 134613d3b353 hooks/syncschema.py --- a/hooks/syncschema.py Wed Jul 20 14:09:42 2011 +0200 +++ b/hooks/syncschema.py Wed Jul 20 18:22:41 2011 +0200 @@ -42,12 +42,15 @@ TYPE_CONVERTER = { # XXX 'Boolean': bool, 'Int': int, + 'BigInt': int, 'Float': float, 'Password': str, 'String': unicode, 'Date' : unicode, 'Datetime' : unicode, 'Time' : unicode, + 'TZDatetime' : unicode, + 'TZTime' : unicode, } # core entity and relation types which can't be removed @@ -92,7 +95,7 @@ # create index before alter table which may expectingly fail during test # (sqlite) while index creation should never fail (test for index existence # is done by the dbhelper) - session.pool.source('system').create_index(session, table, column) + session.cnxset.source('system').create_index(session, table, column) session.info('added index on %s(%s)', table, column) @@ -252,7 +255,7 @@ description=entity.description) eschema = schema.add_entity_type(etype) # create the necessary table - tablesql = y2sql.eschema2sql(session.pool.source('system').dbhelper, + tablesql = y2sql.eschema2sql(session.cnxset.source('system').dbhelper, eschema, prefix=SQL_PREFIX) for sql in tablesql.split(';'): if sql.strip(): @@ -289,7 +292,7 @@ self.session.vreg.schema.rename_entity_type(oldname, newname) # we need sql to operate physical changes on the system database sqlexec = self.session.system_sql - dbhelper= self.session.pool.source('system').dbhelper + dbhelper= self.session.cnxset.source('system').dbhelper sql = dbhelper.sql_rename_table(SQL_PREFIX+oldname, SQL_PREFIX+newname) sqlexec(sql) @@ -433,7 +436,7 @@ # update the in-memory schema first rdefdef = self.init_rdef(**props) # then make necessary changes to the system source database - syssource = session.pool.source('system') + syssource = session.cnxset.source('system') attrtype = y2sql.type_from_constraints( syssource.dbhelper, rdefdef.object, rdefdef.constraints) # XXX should be moved somehow into lgdb: sqlite doesn't support to @@ -606,7 +609,7 @@ self.oldvalues = dict( (attr, getattr(rdef, attr)) for attr in self.values) rdef.update(self.values) # then make necessary changes to the system source database - syssource = session.pool.source('system') + syssource = session.cnxset.source('system') if 'indexed' in self.values: syssource.update_rdef_indexed(session, rdef) self.indexed_changed = True @@ -624,7 +627,7 @@ # revert changes on in memory schema self.rdef.update(self.oldvalues) # revert changes on database - syssource = self.session.pool.source('system') + syssource = self.session.cnxset.source('system') if self.indexed_changed: syssource.update_rdef_indexed(self.session, self.rdef) if self.null_allowed_changed: @@ -652,7 +655,7 @@ rdef.constraints.remove(self.oldcstr) # then update database: alter the physical schema on size/unique # constraint changes - syssource = session.pool.source('system') + syssource = session.cnxset.source('system') cstrtype = self.oldcstr.type() if cstrtype == 'SizeConstraint': 
syssource.update_rdef_column(session, rdef) @@ -668,7 +671,7 @@ if self.oldcstr is not None: self.rdef.constraints.append(self.oldcstr) # revert changes on database - syssource = self.session.pool.source('system') + syssource = self.session.cnxset.source('system') if self.size_cstr_changed: syssource.update_rdef_column(self.session, self.rdef) if self.unique_changed: @@ -699,7 +702,7 @@ rdef.constraints.append(newcstr) # then update database: alter the physical schema on size/unique # constraint changes - syssource = session.pool.source('system') + syssource = session.cnxset.source('system') if cstrtype == 'SizeConstraint' and (oldcstr is None or oldcstr.max != newcstr.max): syssource.update_rdef_column(session, rdef) @@ -716,7 +719,7 @@ prefix = SQL_PREFIX table = '%s%s' % (prefix, self.entity.constraint_of[0].name) cols = ['%s%s' % (prefix, r.name) for r in self.entity.relations] - dbhelper= session.pool.source('system').dbhelper + dbhelper= session.cnxset.source('system').dbhelper sqls = dbhelper.sqls_create_multicol_unique_index(table, cols) for sql in sqls: session.system_sql(sql) @@ -736,7 +739,7 @@ session = self.session prefix = SQL_PREFIX table = '%s%s' % (prefix, self.entity.type) - dbhelper= session.pool.source('system').dbhelper + dbhelper= session.cnxset.source('system').dbhelper cols = ['%s%s' % (prefix, c) for c in self.cols] sqls = dbhelper.sqls_drop_multicol_unique_index(table, cols) for sql in sqls: @@ -785,7 +788,7 @@ """ def precommit_event(self): - """the observed connections pool has been commited""" + """the observed connections.cnxset has been commited""" try: erschema = self.session.vreg.schema.schema_by_eid(self.eid) except KeyError: @@ -814,7 +817,7 @@ """ def precommit_event(self): - """the observed connections pool has been commited""" + """the observed connections set has been commited""" try: erschema = self.session.vreg.schema.schema_by_eid(self.eid) except KeyError: @@ -1228,7 +1231,7 @@ source.fti_index_entities(session, [container]) if to_reindex: # Transaction has already been committed - session.pool.commit() + session.cnxset.commit() diff -r 6397a9051f65 -r 134613d3b353 hooks/syncsession.py --- a/hooks/syncsession.py Wed Jul 20 14:09:42 2011 +0200 +++ b/hooks/syncsession.py Wed Jul 20 18:22:41 2011 +0200 @@ -56,7 +56,7 @@ class _DeleteGroupOp(_GroupOperation): """synchronize user when a in_group relation has been deleted""" def postcommit_event(self): - """the observed connections pool has been commited""" + """the observed connections set has been commited""" groups = self.cnxuser.groups try: groups.remove(self.group) @@ -67,7 +67,7 @@ class _AddGroupOp(_GroupOperation): """synchronize user when a in_group relation has been added""" def postcommit_event(self): - """the observed connections pool has been commited""" + """the observed connections set has been commited""" groups = self.cnxuser.groups if self.group in groups: self.warning('user %s already in group %s', self.cnxuser, @@ -97,7 +97,7 @@ hook.Operation.__init__(self, session) def postcommit_event(self): - """the observed connections pool has been commited""" + """the observed connections set has been commited""" try: self.session.repo.close(self.cnxid) except BadConnectionId: @@ -122,7 +122,7 @@ """a user's custom properties has been deleted""" def postcommit_event(self): - """the observed connections pool has been commited""" + """the observed connections set has been commited""" try: del self.cwpropdict[self.key] except KeyError: @@ -133,7 +133,7 @@ """a user's custom properties has been 
added/changed""" def postcommit_event(self): - """the observed connections pool has been commited""" + """the observed connections set has been commited""" self.cwpropdict[self.key] = self.value @@ -141,7 +141,7 @@ """a user's custom properties has been added/changed""" def postcommit_event(self): - """the observed connections pool has been commited""" + """the observed connections set has been commited""" cwprop = self.cwprop if not cwprop.for_user: self.session.vreg['propertyvalues'][cwprop.pkey] = cwprop.value diff -r 6397a9051f65 -r 134613d3b353 hooks/syncsources.py --- a/hooks/syncsources.py Wed Jul 20 14:09:42 2011 +0200 +++ b/hooks/syncsources.py Wed Jul 20 18:22:41 2011 +0200 @@ -30,6 +30,8 @@ category = 'cw.sources' +# repo sources synchronization ################################################# + class SourceAddedOp(hook.Operation): def postcommit_event(self): self.session.repo.add_source(self.entity) @@ -51,7 +53,7 @@ class SourceRemovedOp(hook.Operation): - def precommit_event(self): + def postcommit_event(self): self.session.repo.remove_source(self.uri) class SourceRemovedHook(SourceHook): @@ -69,8 +71,9 @@ def precommit_event(self): self.__processed = [] for source in self.get_data(): - conf = source.repo_source.check_config(source) - self.__processed.append( (source, conf) ) + if not self.session.deleted_in_transaction(source.eid): + conf = source.repo_source.check_config(source) + self.__processed.append( (source, conf) ) def postcommit_event(self): for source, conf in self.__processed: @@ -100,8 +103,10 @@ pass -# source mapping synchronization. Expect cw_for_source/cw_schema are immutable -# relations (i.e. can't change from a source or schema to another). +# source mapping synchronization ############################################### +# +# Expect cw_for_source/cw_schema are immutable relations (i.e. can't change from +# a source or schema to another). 
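+# For instance (an assumption drawn from the immutability requirement stated
+# above): once a mapping entity ties a schema element to a source through
+# cw_for_source / cw_schema, it is expected to keep pointing to them for its
+# whole life; remapping means deleting the mapping entity and creating a new
+# one, not updating it in place.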
class SourceMappingDeleteHook(SourceHook): """check cw_for_source and cw_schema are immutable relations diff -r 6397a9051f65 -r 134613d3b353 hooks/test/unittest_syncschema.py --- a/hooks/test/unittest_syncschema.py Wed Jul 20 14:09:42 2011 +0200 +++ b/hooks/test/unittest_syncschema.py Wed Jul 20 18:22:41 2011 +0200 @@ -36,9 +36,9 @@ self.__class__.schema_eids = schema_eids_idx(self.repo.schema) def index_exists(self, etype, attr, unique=False): - self.session.set_pool() - dbhelper = self.session.pool.source('system').dbhelper - sqlcursor = self.session.pool['system'] + self.session.set_cnxset() + dbhelper = self.session.cnxset.source('system').dbhelper + sqlcursor = self.session.cnxset['system'] return dbhelper.index_exists(sqlcursor, SQL_PREFIX + etype, SQL_PREFIX + attr, unique=unique) def _set_perms(self, eid): @@ -57,9 +57,9 @@ def test_base(self): schema = self.repo.schema - self.session.set_pool() - dbhelper = self.session.pool.source('system').dbhelper - sqlcursor = self.session.pool['system'] + self.session.set_cnxset() + dbhelper = self.session.cnxset.source('system').dbhelper + sqlcursor = self.session.cnxset['system'] self.failIf(schema.has_entity('Societe2')) self.failIf(schema.has_entity('concerne2')) # schema should be update on insertion (after commit) @@ -170,9 +170,9 @@ # schema modification hooks tests ######################################### def test_uninline_relation(self): - self.session.set_pool() - dbhelper = self.session.pool.source('system').dbhelper - sqlcursor = self.session.pool['system'] + self.session.set_cnxset() + dbhelper = self.session.cnxset.source('system').dbhelper + sqlcursor = self.session.cnxset['system'] self.failUnless(self.schema['state_of'].inlined) try: self.execute('SET X inlined FALSE WHERE X name "state_of"') @@ -195,9 +195,9 @@ self.assertEqual(len(rset), 2) def test_indexed_change(self): - self.session.set_pool() - dbhelper = self.session.pool.source('system').dbhelper - sqlcursor = self.session.pool['system'] + self.session.set_cnxset() + dbhelper = self.session.cnxset.source('system').dbhelper + sqlcursor = self.session.cnxset['system'] try: self.execute('SET X indexed FALSE WHERE X relation_type R, R name "name"') self.failUnless(self.schema['name'].rdef('Workflow', 'String').indexed) @@ -214,9 +214,9 @@ self.failUnless(self.index_exists('Workflow', 'name')) def test_unique_change(self): - self.session.set_pool() - dbhelper = self.session.pool.source('system').dbhelper - sqlcursor = self.session.pool['system'] + self.session.set_cnxset() + dbhelper = self.session.cnxset.source('system').dbhelper + sqlcursor = self.session.cnxset['system'] try: self.execute('INSERT CWConstraint X: X cstrtype CT, DEF constrained_by X ' 'WHERE CT name "UniqueConstraint", DEF relation_type RT, DEF from_entity E,' diff -r 6397a9051f65 -r 134613d3b353 hooks/workflow.py --- a/hooks/workflow.py Wed Jul 20 14:09:42 2011 +0200 +++ b/hooks/workflow.py Wed Jul 20 18:22:41 2011 +0200 @@ -148,7 +148,7 @@ class WorkflowHook(hook.Hook): __abstract__ = True - category = 'workflow' + category = 'metadata' class SetInitialStateHook(WorkflowHook): @@ -160,21 +160,15 @@ _SetInitialStateOp(self._cw, entity=self.entity) -class PrepareStateChangeHook(WorkflowHook): - """record previous state information""" - __regid__ = 'cwdelstate' - __select__ = WorkflowHook.__select__ & hook.match_rtype('in_state') - events = ('before_delete_relation',) +class FireTransitionHook(WorkflowHook): + """check the transition is allowed and add missing information into the + TrInfo entity. 
- def __call__(self): - self._cw.transaction_data.setdefault('pendingrelations', []).append( - (self.eidfrom, self.rtype, self.eidto)) - - -class FireTransitionHook(WorkflowHook): - """check the transition is allowed, add missing information. Expect that: + Expect that: * wf_info_for inlined relation is set * by_transition or to_state (managers only) inlined relation is set + + Check for automatic transition to be fired at the end """ __regid__ = 'wffiretransition' __select__ = WorkflowHook.__select__ & is_instance('TrInfo') @@ -273,7 +267,7 @@ class FiredTransitionHook(WorkflowHook): - """change related entity state""" + """change related entity state and handle exit of subworkflow""" __regid__ = 'wffiretransition' __select__ = WorkflowHook.__select__ & is_instance('TrInfo') events = ('after_add_entity',) @@ -296,6 +290,7 @@ __regid__ = 'wfcheckinstate' __select__ = WorkflowHook.__select__ & hook.match_rtype('in_state') events = ('before_add_relation',) + category = 'integrity' def __call__(self): session = self._cw diff -r 6397a9051f65 -r 134613d3b353 i18n/de.po --- a/i18n/de.po Wed Jul 20 14:09:42 2011 +0200 +++ b/i18n/de.po Wed Jul 20 18:22:41 2011 +0200 @@ -162,6 +162,11 @@ msgstr "" #, python-format +msgid "" +"'%s' action for in_state relation should at least have 'linkattr=name' option" +msgstr "" + +#, python-format msgid "'%s' action requires 'linkattr' option" msgstr "" @@ -256,6 +261,12 @@ msgid "BaseTransition_plural" msgstr "Übergänge (abstrakt)" +msgid "BigInt" +msgstr "" + +msgid "BigInt_plural" +msgstr "" + msgid "Bookmark" msgstr "Lesezeichen" @@ -1153,6 +1164,10 @@ msgid "allowed transitions from this state" msgstr "erlaubte Übergänge von diesem Zustand" +#, python-format +msgid "allowed values for \"action\" are %s" +msgstr "" + msgid "allowed_transition" msgstr "erlaubter Übergang" @@ -1787,10 +1802,10 @@ msgid "ctxcomponents_edit_box_description" msgstr "Box mit verfügbaren Aktionen für die angezeigten Daten" -msgid "ctxcomponents_facet.filters" +msgid "ctxcomponents_facet.filterbox" msgstr "Filter" -msgid "ctxcomponents_facet.filters_description" +msgid "ctxcomponents_facet.filterbox_description" msgstr "Box mit Filter für aktuelle Suchergebnis-Funktionalität" msgid "ctxcomponents_logo" @@ -1852,6 +1867,9 @@ msgid "ctxtoolbar" msgstr "Werkzeugleiste" +msgid "currently in synchronization" +msgstr "" + msgid "custom_workflow" msgstr "angepasster Workflow" @@ -2388,6 +2406,9 @@ msgid "external page" msgstr "externe Seite" +msgid "facet-loading-msg" +msgstr "" + msgid "facet.filters" msgstr "" @@ -2572,9 +2593,6 @@ "generische Relation, die anzeigt, dass eine Entität mit einer anderen Web-" "Ressource identisch ist (siehe http://www.w3.org/TR/owl-ref/#sameAs-def)." -msgid "go back to the index page" -msgstr "Zurück zur Index-Seite" - msgid "granted to groups" msgstr "an Gruppen gewährt" @@ -3184,6 +3202,15 @@ msgid "no associated permissions" msgstr "keine entsprechende Berechtigung" +msgid "no content next link" +msgstr "" + +msgid "no content prev link" +msgstr "" + +msgid "no edited fields specified" +msgstr "" + #, python-format msgid "no edited fields specified for entity %s" msgstr "kein Eingabefeld spezifiziert Für Entität %s" @@ -3722,9 +3749,6 @@ msgid "siteinfo" msgstr "" -msgid "some errors occurred:" -msgstr "Einige Fehler sind aufgetreten" - msgid "some later transaction(s) touch entity, undo them first" msgstr "" "Eine oder mehrere frühere Transaktion(en) betreffen die Tntität. 
Machen Sie " @@ -3897,6 +3921,13 @@ msgid "synchronization-interval must be greater than 1 minute" msgstr "" +msgid "synchronizing" +msgstr "" + +msgctxt "CWSource" +msgid "synchronizing" +msgstr "" + msgid "table" msgstr "Tabelle" @@ -3935,6 +3966,12 @@ msgstr "" "Der Wert \"%s\" wird bereits benutzt, bitte verwenden Sie einen anderen Wert" +msgid "there is no next page" +msgstr "" + +msgid "there is no previous page" +msgstr "" + msgid "this action is not reversible!" msgstr "Achtung! Diese Aktion ist unumkehrbar." @@ -4035,7 +4072,7 @@ msgstr "" msgid "transaction undone" -msgstr "Transaktion rückgängig gemacht" +msgstr "" #, python-format msgid "transition %(tr)s isn't allowed from %(st)s" @@ -4319,30 +4356,30 @@ msgid "value" msgstr "Wert" +#, python-format +msgid "value %(value)s must be %(op)s %(boundary)s" +msgstr "" + +#, python-format +msgid "value %(value)s must be <= %(boundary)s" +msgstr "" + +#, python-format +msgid "value %(value)s must be >= %(boundary)s" +msgstr "" + msgid "value associated to this key is not editable manually" msgstr "" "Der mit diesem Schlüssele verbundene Wert kann n icht manuell geändert " "werden." #, python-format -msgid "value must be %(op)s %(boundary)s" -msgstr "Der Wert muss %(op)s %(boundary)s sein." - -#, python-format -msgid "value must be <= %(boundary)s" -msgstr "Der Wert muss <= %(boundary)s sein." +msgid "value should have maximum size of %s but found %s" +msgstr "" #, python-format -msgid "value must be >= %(boundary)s" -msgstr "Der Wert muss >= %(boundary)s sein." - -#, python-format -msgid "value should have maximum size of %s" -msgstr "Der Wert darf höchstens %s betragen." - -#, python-format -msgid "value should have minimum size of %s" -msgstr "Der Wert muss mindestens %s betragen." +msgid "value should have minimum size of %s but found %s" +msgstr "" msgid "vcard" msgstr "VCard" @@ -4488,76 +4525,3 @@ #, python-format msgid "you should un-inline relation %s which is supported and may be crossed " msgstr "" - -#~ msgid "Attributes with non default permissions:" -#~ msgstr "Attribute mit nicht-standard-Berechtigungen" - -#~ msgid "Entity types" -#~ msgstr "Entitätstypen" - -#~ msgid "Index" -#~ msgstr "Index" - -#~ msgid "Permissions for entity types" -#~ msgstr "Berechtigungen für Entitätstypen" - -#~ msgid "Permissions for relations" -#~ msgstr "Berechtigungen für Relationen" - -#~ msgid "Relation types" -#~ msgstr "Relationstypen" - -#~ msgid "am/pm calendar (month)" -#~ msgstr "am/pm Kalender (Monat)" - -#~ msgid "am/pm calendar (semester)" -#~ msgstr "am/pm Kalender (Halbjahr)" - -#~ msgid "am/pm calendar (week)" -#~ msgstr "am/pm Kalender (Woche)" - -#~ msgid "am/pm calendar (year)" -#~ msgstr "am/pm Kalender (Jahr)" - -#~ msgid "application entities" -#~ msgstr "Anwendungs-Entitäten" - -#~ msgid "calendar (month)" -#~ msgstr "Kalender (monatlich)" - -#~ msgid "calendar (semester)" -#~ msgstr "Kalender (halbjährlich)" - -#~ msgid "calendar (week)" -#~ msgstr "Kalender (wöchentlich)" - -#~ msgid "calendar (year)" -#~ msgstr "Kalender (jährlich)" - -#~ msgid "" -#~ "can't set inlined=%(inlined)s, %(stype)s %(rtype)s %(otype)s has " -#~ "cardinality=%(card)s" -#~ msgstr "" -#~ "Kann 'inlined' = %(inlined)s nicht zuweisen, %(stype)s %(rtype)s %(otype)" -#~ "s hat die Kardinalität %(card)s" - -#~ msgid "create an index page" -#~ msgstr "Eine Index-Seite anlegen" - -#~ msgid "edit the index page" -#~ msgstr "Index-Seite bearbeiten" - -#~ msgid "schema entities" -#~ msgstr "Entitäten, die das Schema definieren" - -#~ msgid 
"schema-security" -#~ msgstr "Rechte" - -#~ msgid "system entities" -#~ msgstr "System-Entitäten" - -#~ msgid "timestamp of the latest source synchronization." -#~ msgstr "Zeitstempel der letzten Synchronisierung mit der Quelle." - -#~ msgid "up" -#~ msgstr "nach oben" diff -r 6397a9051f65 -r 134613d3b353 i18n/en.po --- a/i18n/en.po Wed Jul 20 14:09:42 2011 +0200 +++ b/i18n/en.po Wed Jul 20 18:22:41 2011 +0200 @@ -5,7 +5,7 @@ msgstr "" "Project-Id-Version: 2.0\n" "POT-Creation-Date: 2006-01-12 17:35+CET\n" -"PO-Revision-Date: 2010-09-15 14:55+0200\n" +"PO-Revision-Date: 2011-04-29 12:57+0200\n" "Last-Translator: Sylvain Thenault \n" "Language-Team: English \n" "Language: en\n" @@ -154,6 +154,11 @@ msgstr "" #, python-format +msgid "" +"'%s' action for in_state relation should at least have 'linkattr=name' option" +msgstr "" + +#, python-format msgid "'%s' action requires 'linkattr' option" msgstr "" @@ -245,6 +250,12 @@ msgid "BaseTransition_plural" msgstr "Transitions (abstract)" +msgid "BigInt" +msgstr "Big integer" + +msgid "BigInt_plural" +msgstr "Big integers" + msgid "Bookmark" msgstr "Bookmark" @@ -503,7 +514,7 @@ msgstr "Interval" msgid "IntervalBoundConstraint" -msgstr "interval constraint" +msgstr "Interval constraint" msgid "Interval_plural" msgstr "Intervals" @@ -1111,6 +1122,10 @@ msgid "allowed transitions from this state" msgstr "" +#, python-format +msgid "allowed values for \"action\" are %s" +msgstr "" + msgid "allowed_transition" msgstr "allowed transition" @@ -1742,10 +1757,10 @@ msgid "ctxcomponents_edit_box_description" msgstr "box listing the applicable actions on the displayed data" -msgid "ctxcomponents_facet.filters" +msgid "ctxcomponents_facet.filterbox" msgstr "facets box" -msgid "ctxcomponents_facet.filters_description" +msgid "ctxcomponents_facet.filterbox_description" msgstr "box providing filter within current search results functionality" msgid "ctxcomponents_logo" @@ -1807,6 +1822,9 @@ msgid "ctxtoolbar" msgstr "toolbar" +msgid "currently in synchronization" +msgstr "" + msgid "custom_workflow" msgstr "custom workflow" @@ -2330,6 +2348,9 @@ msgid "external page" msgstr "" +msgid "facet-loading-msg" +msgstr "processing, please wait" + msgid "facet.filters" msgstr "filter" @@ -2512,9 +2533,6 @@ "object as a local one: http://www.w3.org/TR/owl-ref/#sameAs-def" msgstr "" -msgid "go back to the index page" -msgstr "" - msgid "granted to groups" msgstr "" @@ -3095,6 +3113,15 @@ msgid "no associated permissions" msgstr "" +msgid "no content next link" +msgstr "" + +msgid "no content prev link" +msgstr "" + +msgid "no edited fields specified" +msgstr "" + #, python-format msgid "no edited fields specified for entity %s" msgstr "" @@ -3624,9 +3651,6 @@ msgid "siteinfo" msgstr "site information" -msgid "some errors occurred:" -msgstr "" - msgid "some later transaction(s) touch entity, undo them first" msgstr "" @@ -3793,6 +3817,13 @@ msgid "synchronization-interval must be greater than 1 minute" msgstr "" +msgid "synchronizing" +msgstr "" + +msgctxt "CWSource" +msgid "synchronizing" +msgstr "" + msgid "table" msgstr "" @@ -3830,6 +3861,12 @@ msgid "the value \"%s\" is already used, use another one" msgstr "" +msgid "there is no next page" +msgstr "" + +msgid "there is no previous page" +msgstr "" + msgid "this action is not reversible!" 
msgstr "" @@ -4205,27 +4242,27 @@ msgid "value" msgstr "" +#, python-format +msgid "value %(value)s must be %(op)s %(boundary)s" +msgstr "" + +#, python-format +msgid "value %(value)s must be <= %(boundary)s" +msgstr "" + +#, python-format +msgid "value %(value)s must be >= %(boundary)s" +msgstr "" + msgid "value associated to this key is not editable manually" msgstr "" #, python-format -msgid "value must be %(op)s %(boundary)s" -msgstr "" - -#, python-format -msgid "value must be <= %(boundary)s" +msgid "value should have maximum size of %s but found %s" msgstr "" #, python-format -msgid "value must be >= %(boundary)s" -msgstr "" - -#, python-format -msgid "value should have maximum size of %s" -msgstr "" - -#, python-format -msgid "value should have minimum size of %s" +msgid "value should have minimum size of %s but found %s" msgstr "" msgid "vcard" @@ -4370,10 +4407,3 @@ #, python-format msgid "you should un-inline relation %s which is supported and may be crossed " msgstr "" - -#~ msgctxt "CWAttribute" -#~ msgid "relations_object" -#~ msgstr "constrained by" - -#~ msgid "schema-security" -#~ msgstr "permissions" diff -r 6397a9051f65 -r 134613d3b353 i18n/es.po --- a/i18n/es.po Wed Jul 20 14:09:42 2011 +0200 +++ b/i18n/es.po Wed Jul 20 18:22:41 2011 +0200 @@ -163,6 +163,11 @@ msgstr "la acción '%s' no acepta opciones" #, python-format +msgid "" +"'%s' action for in_state relation should at least have 'linkattr=name' option" +msgstr "" + +#, python-format msgid "'%s' action requires 'linkattr' option" msgstr "la acción '%s' requiere una opción 'linkattr'" @@ -257,6 +262,12 @@ msgid "BaseTransition_plural" msgstr "Transiciones (abstractas)" +msgid "BigInt" +msgstr "" + +msgid "BigInt_plural" +msgstr "" + msgid "Bookmark" msgstr "Favorito" @@ -1163,6 +1174,10 @@ msgid "allowed transitions from this state" msgstr "transiciones autorizadas desde este estado" +#, python-format +msgid "allowed values for \"action\" are %s" +msgstr "" + msgid "allowed_transition" msgstr "transiciones autorizadas" @@ -1812,10 +1827,10 @@ msgid "ctxcomponents_edit_box_description" msgstr "Muestra las acciones posibles a ejecutar para los datos seleccionados" -msgid "ctxcomponents_facet.filters" +msgid "ctxcomponents_facet.filterbox" msgstr "Filtros" -msgid "ctxcomponents_facet.filters_description" +msgid "ctxcomponents_facet.filterbox_description" msgstr "Muestra los filtros aplicables a una búsqueda realizada" msgid "ctxcomponents_logo" @@ -1881,6 +1896,9 @@ msgid "ctxtoolbar" msgstr "Barra de herramientas" +msgid "currently in synchronization" +msgstr "" + msgid "custom_workflow" msgstr "Workflow específico" @@ -2431,6 +2449,9 @@ msgid "external page" msgstr "Página externa" +msgid "facet-loading-msg" +msgstr "" + msgid "facet.filters" msgstr "Filtros" @@ -2615,9 +2636,6 @@ "Relación genérica que indicar que una entidad es idéntica a otro recurso web " "(ver http://www.w3.org/TR/owl-ref/#sameAs-def)." 
-msgid "go back to the index page" -msgstr "Regresar a la página de inicio" - msgid "granted to groups" msgstr "Otorgado a los grupos" @@ -3152,11 +3170,11 @@ msgctxt "CWSource" msgid "name" -msgstr "nombre" +msgstr "" msgctxt "State" msgid "name" -msgstr "Nombre" +msgstr "nombre" msgctxt "Transition" msgid "name" @@ -3225,6 +3243,15 @@ msgid "no associated permissions" msgstr "No existe permiso asociado" +msgid "no content next link" +msgstr "" + +msgid "no content prev link" +msgstr "" + +msgid "no edited fields specified" +msgstr "" + #, python-format msgid "no edited fields specified for entity %s" msgstr "Ningún campo editable especificado para la entidad %s" @@ -3771,9 +3798,6 @@ msgid "siteinfo" msgstr "información" -msgid "some errors occurred:" -msgstr "Algunos errores encontrados :" - msgid "some later transaction(s) touch entity, undo them first" msgstr "" "Las transacciones más recientes modificaron esta entidad, anúlelas primero" @@ -3947,6 +3971,13 @@ msgid "synchronization-interval must be greater than 1 minute" msgstr "synchronization-interval debe ser mayor a 1 minuto" +msgid "synchronizing" +msgstr "" + +msgctxt "CWSource" +msgid "synchronizing" +msgstr "" + msgid "table" msgstr "Tabla" @@ -3985,6 +4016,12 @@ msgid "the value \"%s\" is already used, use another one" msgstr "El valor \"%s\" ya esta en uso, favor de utilizar otro" +msgid "there is no next page" +msgstr "" + +msgid "there is no previous page" +msgstr "" + msgid "this action is not reversible!" msgstr "Esta acción es irreversible!." @@ -4085,7 +4122,7 @@ msgstr "n° de transición" msgid "transaction undone" -msgstr "Transacciones Anuladas" +msgstr "" #, python-format msgid "transition %(tr)s isn't allowed from %(st)s" @@ -4369,28 +4406,28 @@ msgid "value" msgstr "Vampr" +#, python-format +msgid "value %(value)s must be %(op)s %(boundary)s" +msgstr "" + +#, python-format +msgid "value %(value)s must be <= %(boundary)s" +msgstr "" + +#, python-format +msgid "value %(value)s must be >= %(boundary)s" +msgstr "" + msgid "value associated to this key is not editable manually" msgstr "El valor asociado a este elemento no es editable manualmente" #, python-format -msgid "value must be %(op)s %(boundary)s" -msgstr "El valor debe ser %(op)s %(boundary)s" - -#, python-format -msgid "value must be <= %(boundary)s" -msgstr "El valor debe ser <= %(boundary)s" +msgid "value should have maximum size of %s but found %s" +msgstr "" #, python-format -msgid "value must be >= %(boundary)s" -msgstr "El valor debe ser >= %(boundary)s" - -#, python-format -msgid "value should have maximum size of %s" -msgstr "El valor no debe exceder de %s" - -#, python-format -msgid "value should have minimum size of %s" -msgstr "El valor no puede ser menor a %s" +msgid "value should have minimum size of %s but found %s" +msgstr "" msgid "vcard" msgstr "vcard" @@ -4539,6 +4576,3 @@ msgstr "" "usted debe quitar la puesta en línea de la relación %s que es aceptada y " "puede ser cruzada" - -#~ msgid "add a %s" -#~ msgstr "agregar un %s" diff -r 6397a9051f65 -r 134613d3b353 i18n/fr.po --- a/i18n/fr.po Wed Jul 20 14:09:42 2011 +0200 +++ b/i18n/fr.po Wed Jul 20 18:22:41 2011 +0200 @@ -162,6 +162,11 @@ msgstr "l'action '%s' ne prend pas d'option" #, python-format +msgid "" +"'%s' action for in_state relation should at least have 'linkattr=name' option" +msgstr "" + +#, python-format msgid "'%s' action requires 'linkattr' option" msgstr "l'action '%s' nécessite une option 'linkattr'" @@ -255,6 +260,12 @@ msgid "BaseTransition_plural" msgstr "Transitions 
(abstraites)" +msgid "BigInt" +msgstr "Entier long" + +msgid "BigInt_plural" +msgstr "Entiers longs" + msgid "Bookmark" msgstr "Signet" @@ -1161,6 +1172,10 @@ msgid "allowed transitions from this state" msgstr "transitions autorisées depuis cet état" +#, python-format +msgid "allowed values for \"action\" are %s" +msgstr "" + msgid "allowed_transition" msgstr "transitions autorisées" @@ -1815,10 +1830,10 @@ msgstr "" "boîte affichant les différentes actions possibles sur les données affichées" -msgid "ctxcomponents_facet.filters" +msgid "ctxcomponents_facet.filterbox" msgstr "boîte à facettes" -msgid "ctxcomponents_facet.filters_description" +msgid "ctxcomponents_facet.filterbox_description" msgstr "" "boîte permettant de filtrer parmi les résultats d'une recherche à l'aide de " "facettes" @@ -1884,6 +1899,9 @@ msgid "ctxtoolbar" msgstr "barre d'outils" +msgid "currently in synchronization" +msgstr "en cours de synchronisation" + msgid "custom_workflow" msgstr "workflow spécifique" @@ -2429,6 +2447,9 @@ msgid "external page" msgstr "page externe" +msgid "facet-loading-msg" +msgstr "en cours de traitement, merci de patienter" + msgid "facet.filters" msgstr "facettes" @@ -2613,9 +2634,6 @@ "relation générique permettant d'indiquer qu'une entité est identique à une " "autre ressource web (voir http://www.w3.org/TR/owl-ref/#sameAs-def)." -msgid "go back to the index page" -msgstr "retourner sur la page d'accueil" - msgid "granted to groups" msgstr "accordée aux groupes" @@ -3224,6 +3242,15 @@ msgid "no associated permissions" msgstr "aucune permission associée" +msgid "no content next link" +msgstr "" + +msgid "no content prev link" +msgstr "" + +msgid "no edited fields specified" +msgstr "aucun champ à éditer spécifié" + #, python-format msgid "no edited fields specified for entity %s" msgstr "aucun champ à éditer spécifié pour l'entité %s" @@ -3771,9 +3798,6 @@ msgid "siteinfo" msgstr "informations" -msgid "some errors occurred:" -msgstr "des erreurs sont survenues" - msgid "some later transaction(s) touch entity, undo them first" msgstr "" "des transactions plus récentes modifient cette entité, annulez les d'abord" @@ -3948,6 +3972,13 @@ msgid "synchronization-interval must be greater than 1 minute" msgstr "synchronization-interval doit être supérieur à 1 minute" +msgid "synchronizing" +msgstr "synchronisation" + +msgctxt "CWSource" +msgid "synchronizing" +msgstr "synchronisation" + msgid "table" msgstr "table" @@ -3985,6 +4016,12 @@ msgid "the value \"%s\" is already used, use another one" msgstr "la valeur \"%s\" est déjà utilisée, veuillez utiliser une autre valeur" +msgid "there is no next page" +msgstr "" + +msgid "there is no previous page" +msgstr "" + msgid "this action is not reversible!" msgstr "" "Attention ! Cette opération va détruire les données de façon irréversible." 
@@ -4368,28 +4405,28 @@ msgid "value" msgstr "valeur" +#, python-format +msgid "value %(value)s must be %(op)s %(boundary)s" +msgstr "la valeur %(value)s doit être %(op)s %(boundary)s" + +#, python-format +msgid "value %(value)s must be <= %(boundary)s" +msgstr "la valeur %(value)s doit être <= %(boundary)s" + +#, python-format +msgid "value %(value)s must be >= %(boundary)s" +msgstr "la valeur %(value)s doit être >= %(boundary)s" + msgid "value associated to this key is not editable manually" msgstr "la valeur associée à cette clé n'est pas éditable manuellement" #, python-format -msgid "value must be %(op)s %(boundary)s" -msgstr "la valeur doit être %(op)s %(boundary)s" - -#, python-format -msgid "value must be <= %(boundary)s" -msgstr "la valeur doit être <= %(boundary)s" +msgid "value should have maximum size of %s but found %s" +msgstr "la taille maximum est %s mais cette valeur est de taille %s" #, python-format -msgid "value must be >= %(boundary)s" -msgstr "la valeur doit être >= %(boundary)s" - -#, python-format -msgid "value should have maximum size of %s" -msgstr "la valeur doit être de taille %s au maximum" - -#, python-format -msgid "value should have minimum size of %s" -msgstr "la valeur doit être de taille %s au minimum" +msgid "value should have minimum size of %s but found %s" +msgstr "la taille minimum est %s mais cette valeur est de taille %s" msgid "vcard" msgstr "vcard" @@ -4539,66 +4576,3 @@ msgstr "" "vous devriez enlevé la mise en ligne de la relation %s qui est supportée et " "peut-être croisée" - -#~ msgid "Attributes with non default permissions:" -#~ msgstr "Attributs ayant des permissions non-standard" - -#~ msgid "Entity types" -#~ msgstr "Types d'entités" - -#~ msgid "Permissions for entity types" -#~ msgstr "Permissions pour les types d'entités" - -#~ msgid "Permissions for relations" -#~ msgstr "Permissions pour les relations" - -#~ msgid "Relation types" -#~ msgstr "Types de relation" - -#~ msgid "add a %s" -#~ msgstr "ajouter un %s" - -#~ msgid "am/pm calendar (month)" -#~ msgstr "calendrier am/pm (mois)" - -#~ msgid "am/pm calendar (semester)" -#~ msgstr "calendrier am/pm (semestre)" - -#~ msgid "am/pm calendar (week)" -#~ msgstr "calendrier am/pm (semaine)" - -#~ msgid "am/pm calendar (year)" -#~ msgstr "calendrier am/pm (année)" - -#~ msgid "application entities" -#~ msgstr "entités applicatives" - -#~ msgid "calendar (month)" -#~ msgstr "calendrier (mensuel)" - -#~ msgid "calendar (semester)" -#~ msgstr "calendrier (semestriel)" - -#~ msgid "calendar (week)" -#~ msgstr "calendrier (hebdo)" - -#~ msgid "calendar (year)" -#~ msgstr "calendrier (annuel)" - -#~ msgid "create an index page" -#~ msgstr "créer une page d'accueil" - -#~ msgid "edit the index page" -#~ msgstr "éditer la page d'accueil" - -#~ msgid "schema entities" -#~ msgstr "entités définissant le schéma" - -#~ msgid "schema-security" -#~ msgstr "permissions" - -#~ msgid "system entities" -#~ msgstr "entités systèmes" - -#~ msgid "timestamp of the latest source synchronization." -#~ msgstr "date de la dernière synchronisation avec la source." 
diff -r 6397a9051f65 -r 134613d3b353 misc/migration/3.12.9_Any.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/misc/migration/3.12.9_Any.py Wed Jul 20 18:22:41 2011 +0200 @@ -0,0 +1,1 @@ +sync_schema_props_perms('cw_source') diff -r 6397a9051f65 -r 134613d3b353 misc/migration/3.13.0_Any.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/misc/migration/3.13.0_Any.py Wed Jul 20 18:22:41 2011 +0200 @@ -0,0 +1,4 @@ +sync_schema_props_perms('cw_source', syncprops=False) +add_attribute('CWSource', 'synchronizing') +if schema['BigInt'].eid is None: + add_entity_type('BigInt') diff -r 6397a9051f65 -r 134613d3b353 misc/migration/bootstrapmigration_repository.py --- a/misc/migration/bootstrapmigration_repository.py Wed Jul 20 14:09:42 2011 +0200 +++ b/misc/migration/bootstrapmigration_repository.py Wed Jul 20 18:22:41 2011 +0200 @@ -35,6 +35,12 @@ ss.execschemarql(rql, rdef, ss.rdef2rql(rdef, CSTRMAP, groupmap=None)) commit(ask_confirm=False) +if applcubicwebversion <= (3, 13, 0) and cubicwebversion >= (3, 13, 1): + sql('ALTER TABLE entities ADD COLUMN asource VARCHAR(64)') + sql('UPDATE entities SET asource=cw_name ' + 'FROM cw_CWSource, cw_source_relation ' + 'WHERE entities.eid=cw_source_relation.eid_from AND cw_source_relation.eid_to=cw_CWSource.cw_eid') + if applcubicwebversion == (3, 6, 0) and cubicwebversion >= (3, 6, 0): CSTRMAP = dict(rql('Any T, X WHERE X is CWConstraintType, X name T', ask_confirm=False)) @@ -49,7 +55,7 @@ elif applcubicwebversion < (3, 6, 0) and cubicwebversion >= (3, 6, 0): CSTRMAP = dict(rql('Any T, X WHERE X is CWConstraintType, X name T', ask_confirm=False)) - session.set_pool() + session.set_cnxset() permsdict = ss.deserialize_ertype_permissions(session) with hooks_control(session, session.HOOKS_ALLOW_ALL, 'integrity'): diff -r 6397a9051f65 -r 134613d3b353 misc/scripts/drop_external_entities.py --- a/misc/scripts/drop_external_entities.py Wed Jul 20 14:09:42 2011 +0200 +++ b/misc/scripts/drop_external_entities.py Wed Jul 20 18:22:41 2011 +0200 @@ -3,7 +3,7 @@ sql("DELETE FROM entities WHERE type='Int'") -ecnx = session.pool.connection(source) +ecnx = session.cnxset.connection(source) for e in rql('Any X WHERE X cw_source S, S name %(name)s', {'name': source}).entities(): meta = e.cw_metainformation() assert meta['source']['uri'] == source diff -r 6397a9051f65 -r 134613d3b353 mttransforms.py --- a/mttransforms.py Wed Jul 20 14:09:42 2011 +0200 +++ b/mttransforms.py Wed Jul 20 18:22:41 2011 +0200 @@ -99,10 +99,10 @@ def patch_convert(cls): def _convert(self, trdata, origconvert=cls._convert): - try: - trdata.appobject._cw.add_css('pygments.css') - except AttributeError: # session has no add_css, only http request - pass + # session has no add_css, only http request + add_css = getattr(trdata.appobject._cw, 'add_css', None) + if add_css is not None: + add_css('pygments.css') return origconvert(self, trdata) cls._convert = _convert patch_convert(pygmentstransforms.PygmentsHTMLTransform) diff -r 6397a9051f65 -r 134613d3b353 req.py --- a/req.py Wed Jul 20 14:09:42 2011 +0200 +++ b/req.py Wed Jul 20 18:22:41 2011 +0200 @@ -409,7 +409,7 @@ # abstract methods to override according to the web front-end ############# - def describe(self, eid): + def describe(self, eid, asdict=False): """return a tuple (type, sourceuri, extid) for the entity with id <eid> """ raise NotImplementedError diff -r 6397a9051f65 -r 134613d3b353 rset.py --- a/rset.py Wed Jul 20 14:09:42 2011 +0200 +++ b/rset.py Wed Jul 20 18:22:41 2011 +0200 @@ -475,43 +475,57 @@ entity.eid = eid # cache entity
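# A sketch, not part of the changeset: the 3.13.1 asource backfill above
# uses PostgreSQL's UPDATE ... FROM join syntax. Assuming the same tables
# and the sql() migration helper, an equivalent correlated-subquery form
# (for backends lacking UPDATE ... FROM) would be:
sql('UPDATE entities SET asource=('
    'SELECT cw_name FROM cw_CWSource, cw_source_relation '
    'WHERE entities.eid=cw_source_relation.eid_from '
    'AND cw_source_relation.eid_to=cw_CWSource.cw_eid)')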
req.set_entity_cache(entity) - eschema = entity.e_schema # try to complete the entity if there are some additional columns if len(rowvalues) > 1: - rqlst = self.syntax_tree() - if rqlst.TYPE == 'select': - # UNION query, find the subquery from which this entity has been - # found - select, col = rqlst.locate_subquery(col, etype, self.args) + eschema = entity.e_schema + eid_col, attr_cols, rel_cols = self._rset_structure(eschema, col) + entity.eid = rowvalues[eid_col] + for attr, col_idx in attr_cols.items(): + entity.cw_attr_cache[attr] = rowvalues[col_idx] + for (rtype, role), col_idx in rel_cols.items(): + value = rowvalues[col_idx] + if value is None: + if role == 'subject': + rql = 'Any Y WHERE X %s Y, X eid %s' + else: + rql = 'Any Y WHERE Y %s X, X eid %s' + rrset = ResultSet([], rql % (rtype, entity.eid)) + rrset.req = req + else: + rrset = self._build_entity(row, col_idx).as_rset() + entity.cw_set_relation_cache(rtype, role, rrset) + return entity + + @cached + def _rset_structure(self, eschema, entity_col): + eid_col = col = entity_col + rqlst = self.syntax_tree() + attr_cols = {} + rel_cols = {} + if rqlst.TYPE == 'select': + # UNION query, find the subquery from which this entity has been + # found + select, col = rqlst.locate_subquery(entity_col, eschema.type, self.args) + else: + select = rqlst + # take care, due to outer join support, we may find None + # values for non final relation + for i, attr, role in attr_desc_iterator(select, col, entity_col): + if role == 'subject': + rschema = eschema.subjrels[attr] else: - select = rqlst - # take care, due to outer join support, we may find None - # values for non final relation - for i, attr, role in attr_desc_iterator(select, col, entity.cw_col): - if role == 'subject': - rschema = eschema.subjrels[attr] - if rschema.final: - if attr == 'eid': - entity.eid = rowvalues[i] - else: - entity.cw_attr_cache[attr] = rowvalues[i] - continue + rschema = eschema.objrels[attr] + if rschema.final: + if attr == 'eid': + eid_col = i else: - rschema = eschema.objrels[attr] + attr_cols[attr] = i + else: rdef = eschema.rdef(attr, role) # only keep value if it can't be multivalued if rdef.role_cardinality(role) in '1?': - if rowvalues[i] is None: - if role == 'subject': - rql = 'Any Y WHERE X %s Y, X eid %s' - else: - rql = 'Any Y WHERE Y %s X, X eid %s' - rrset = ResultSet([], rql % (attr, entity.eid)) - rrset.req = req - else: - rrset = self._build_entity(row, i).as_rset() - entity.cw_set_relation_cache(attr, role, rrset) - return entity + rel_cols[(attr, role)] = i + return eid_col, attr_cols, rel_cols @cached def syntax_tree(self): diff -r 6397a9051f65 -r 134613d3b353 schema.py --- a/schema.py Wed Jul 20 14:09:42 2011 +0200 +++ b/schema.py Wed Jul 20 18:22:41 2011 +0200 @@ -544,10 +544,11 @@ rschema = self.add_relation_type(ybo.RelationType('identity')) rschema.final = False + etype_name_re = r'[A-Z][A-Za-z0-9]*[a-z]+[A-Za-z0-9]*$' def add_entity_type(self, edef): edef.name = edef.name.encode() edef.name = bw_normalize_etype(edef.name) - if not re.match(r'[A-Z][A-Za-z0-9]*[a-z]+[0-9]*$', edef.name): + if not re.match(self.etype_name_re, edef.name): raise BadSchemaDefinition( '%r is not a valid name for an entity type. 
It should start ' 'with an upper cased letter and be followed by at least a ' diff -r 6397a9051f65 -r 134613d3b353 schemas/base.py --- a/schemas/base.py Wed Jul 20 14:09:42 2011 +0200 +++ b/schemas/base.py Wed Jul 20 18:22:41 2011 +0200 @@ -21,7 +21,8 @@ _ = unicode from yams.buildobjs import (EntityType, RelationType, RelationDefinition, - SubjectRelation, String, Datetime, Password, Interval) + SubjectRelation, + String, Datetime, Password, Interval, Boolean) from cubicweb.schema import ( RQLConstraint, WorkflowableEntityType, ERQLExpression, RRQLExpression, PUB_SYSTEM_ENTITY_PERMS, PUB_SYSTEM_REL_PERMS, PUB_SYSTEM_ATTR_PERMS) @@ -265,7 +266,8 @@ url = String(description=_('URLs from which content will be imported. You can put one url per line')) parser = String(description=_('parser to use to extract entities from content retrieved at given URLs.')) latest_retrieval = Datetime(description=_('latest synchronization time')) - + synchronizing = Boolean(description=_('currently in synchronization'), + default=False) ENTITY_MANAGERS_PERMISSIONS = { 'read': ('managers',), @@ -307,8 +309,8 @@ class cw_source(RelationDefinition): __permissions__ = { 'read': ('managers', 'users', 'guests'), - 'add': (), - 'delete': (), + 'add': ('managers',), + 'delete': ('managers',), } subject = '*' object = 'CWSource' diff -r 6397a9051f65 -r 134613d3b353 selectors.py --- a/selectors.py Wed Jul 20 14:09:42 2011 +0200 +++ b/selectors.py Wed Jul 20 18:22:41 2011 +0200 @@ -255,12 +255,19 @@ * if `entity` is specified, return score for this entity's class - * elif `row` is specified, return score for the class of the entity - found in the specified cell, using column specified by `col` or 0 + * elif `rset`, `select` and `filtered_variable` are specified, return score + for the possible classes of the variable in the given rql :class:`Select` + node + + * elif `rset` and `row` are specified, return score for the class of the + entity found in the specified cell, using column specified by `col` or 0 - * else return the sum of scores for each entity class found in the column - specified specified by the `col` argument or in column 0 if not specified, - unless: + * elif `rset` is specified, return score for each entity class found in the + column specified by the `col` argument or in column 0 if not + specified + + When there are several classes to be evaluated, return the sum of scores for + each entity class unless: - `once_is_enough` is False (the default) and some entity class is scored to 0, in which case 0 is returned @@ -276,32 +283,37 @@ self.accept_none = accept_none @lltrace - def __call__(self, cls, req, rset=None, row=None, col=0, accept_none=None, + def __call__(self, cls, req, rset=None, row=None, col=0, + entity=None, + select=None, filtered_variable=None, + accept_none=None, **kwargs): - if kwargs.get('entity'): - return self.score_class(kwargs['entity'].__class__, req) + if entity is not None: + return self.score_class(entity.__class__, req) if not rset: return 0 - score = 0 - if row is None: + if select is not None and filtered_variable is not None: + etypes = set(sol[filtered_variable.name] for sol in select.solutions) + elif row is None: if accept_none is None: accept_none = self.accept_none - if not accept_none: - if any(rset[i][col] is None for i in xrange(len(rset))): - return 0 - for etype in rset.column_types(col): - if etype is None: # outer join - return 0 - escore = self.score(cls, req, etype) - if not escore and not self.once_is_enough: - return 0 - elif self.once_is_enough: - return
escore - score += escore + if not accept_none and \ + any(rset[i][col] is None for i in xrange(len(rset))): + return 0 + etypes = rset.column_types(col) else: etype = rset.description[row][col] - if etype is not None: - score = self.score(cls, req, etype) + # may have None in rset.description on outer join + if etype is None: + return 0 + etypes = (etype,) + score = 0 + for etype in etypes: + escore = self.score(cls, req, etype) + if not escore and not self.once_is_enough: + return 0 + elif self.once_is_enough: + return escore + score += escore return score def score(self, cls, req, etype): @@ -909,6 +921,7 @@ # hack hack hack def __call__(self, cls, req, **kwargs): + # hack hack hack if self.strict: return EntitySelector.__call__(self, cls, req, **kwargs) return EClassSelector.__call__(self, cls, req, **kwargs) diff -r 6397a9051f65 -r 134613d3b353 server/__init__.py --- a/server/__init__.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/__init__.py Wed Jul 20 18:22:41 2011 +0200 @@ -245,7 +245,7 @@ for path in reversed(paths): mhandler.exec_event_script('pre%s' % event, path) # enter instance'schema into the database - session.set_pool() + session.set_cnxset() serialize_schema(session, schema) # execute cubicweb's post script mhandler.exec_event_script('post%s' % event) diff -r 6397a9051f65 -r 134613d3b353 server/checkintegrity.py --- a/server/checkintegrity.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/checkintegrity.py Wed Jul 20 18:22:41 2011 +0200 @@ -101,7 +101,7 @@ # deactivate modification_date hook since we don't want them # to be updated due to the reindexation repo = session.repo - cursor = session.pool['system'] + cursor = session.cnxset['system'] dbhelper = session.repo.system_source.dbhelper if not dbhelper.has_fti_table(cursor): print 'no text index table' @@ -356,7 +356,7 @@ using given user and password to locally connect to the repository (no running cubicweb server needed) """ - session = repo._get_session(cnx.sessionid, setpool=True) + session = repo._get_session(cnx.sessionid, setcnxset=True) # yo, launch checks if checks: eids_cache = {} @@ -372,6 +372,6 @@ print 'WARNING: Diagnostic run, nothing has been corrected' if reindex: cnx.rollback() - session.set_pool() + session.set_cnxset() reindex_entities(repo.schema, session, withpb=withpb) cnx.commit() diff -r 6397a9051f65 -r 134613d3b353 server/edition.py --- a/server/edition.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/edition.py Wed Jul 20 18:22:41 2011 +0200 @@ -68,6 +68,11 @@ super(EditedEntity, self).__delitem__(attr) self.entity.cw_attr_cache.pop(attr, None) + def __copy__(self): + # default copy protocol fails in EditedEntity.__setitem__ because + # copied entity has no skip_security attribute at this point + return EditedEntity(self.entity, **self) + def pop(self, attr, *args): # don't update skip_security by design (think to storage api) assert not self.saved, 'too late to modify edited attributes' diff -r 6397a9051f65 -r 134613d3b353 server/hook.py --- a/server/hook.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/hook.py Wed Jul 20 18:22:41 2011 +0200 @@ -69,12 +69,19 @@ ~~~~~~~~~~ Operations are subclasses of the :class:`~cubicweb.server.hook.Operation` class -that may be created by hooks and scheduled to happen just before (or after) the -`precommit`, `postcommit` or `rollback` event. Hooks are being fired immediately -on data operations, and it is sometime necessary to delay the actual work down -to a time where all other hooks have run. 
Also while the order of execution of -hooks is data dependant (and thus hard to predict), it is possible to force an -order on operations. +that may be created by hooks and scheduled to happen on `precommit`, +`postcommit` or `rollback` event (i.e. respectivly before/after a commit or +before a rollback of a transaction). + +Hooks are being fired immediately on data operations, and it is sometime +necessary to delay the actual work down to a time where we can expect all +information to be there, or when all other hooks have run (though take case +since operations may themselves trigger hooks). Also while the order of +execution of hooks is data dependant (and thus hard to predict), it is possible +to force an order on operations. + +So, for such case where you may miss some information that may be set later in +the transaction, you should instantiate an operation in the hook. Operations may be used to: @@ -248,7 +255,7 @@ from logging import getLogger from itertools import chain -from logilab.common.decorators import classproperty +from logilab.common.decorators import classproperty, cached from logilab.common.deprecation import deprecated, class_renamed from logilab.common.logging_ext import set_log_methods @@ -257,7 +264,7 @@ from cubicweb.cwvreg import CWRegistry, VRegistry from cubicweb.selectors import (objectify_selector, lltrace, ExpectedValueSelector, is_instance) -from cubicweb.appobject import AppObject +from cubicweb.appobject import AppObject, NotSelector, OrSelector from cubicweb.server.session import security_enabled ENTITIES_HOOKS = set(('before_add_entity', 'after_add_entity', @@ -318,15 +325,83 @@ else: entities = [] eids_from_to = [] + pruned = self.get_pruned_hooks(session, event, + entities, eids_from_to, kwargs) # by default, hooks are executed with security turned off with security_enabled(session, read=False): for _kwargs in _iter_kwargs(entities, eids_from_to, kwargs): - hooks = sorted(self.possible_objects(session, **_kwargs), + hooks = sorted(self.filtered_possible_objects(pruned, session, **_kwargs), key=lambda x: x.order) with security_enabled(session, write=False): for hook in hooks: - #print hook.category, hook.__regid__ - hook() + hook() + + def get_pruned_hooks(self, session, event, entities, eids_from_to, kwargs): + """return a set of hooks that should not be considered by filtered_possible objects + + the idea is to make a first pass over all the hooks in the + registry and to mark put some of them in a pruned list. The + pruned hooks are the one which: + + * are disabled at the session level + * have a match_rtype or an is_instance selector which does not + match the rtype / etype of the relations / entities for + which we are calling the hooks. This works because the + repository calls the hooks grouped by rtype or by etype when + using the entities or eids_to_from keyword arguments + + Only hooks with a simple selector or an AndSelector of simple + selectors are considered for disabling. + + """ + if 'entity' in kwargs: + entities = [kwargs['entity']] + if len(entities): + look_for_selector = is_instance + etype = entities[0].__regid__ + elif 'rtype' in kwargs: + look_for_selector = match_rtype + etype = None + else: # nothing to prune, how did we get there ??? 
+ return set() + cache_key = (event, kwargs.get('rtype'), etype) + pruned = session.pruned_hooks_cache.get(cache_key) + if pruned is not None: + return pruned + pruned = set() + session.pruned_hooks_cache[cache_key] = pruned + if look_for_selector is not None: + for id, hooks in self.iteritems(): + for hook in hooks: + enabled_cat, main_filter = hook.filterable_selectors() + if enabled_cat is not None: + if not enabled_cat(hook, session): + pruned.add(hook) + continue + if main_filter is not None: + if isinstance(main_filter, match_rtype) and \ + (main_filter.frometypes is not None or \ + main_filter.toetypes is not None): + continue + first_kwargs = _iter_kwargs(entities, eids_from_to, kwargs).next() + if not main_filter(hook, session, **first_kwargs): + pruned.add(hook) + return pruned + + + def filtered_possible_objects(self, pruned, *args, **kwargs): + for appobjects in self.itervalues(): + if pruned: + filtered_objects = [obj for obj in appobjects if obj not in pruned] + if not filtered_objects: + continue + else: + filtered_objects = appobjects + obj = self._select_best(filtered_objects, + *args, **kwargs) + if obj is None: + continue + yield obj class HooksManager(object): def __init__(self, vreg): @@ -464,6 +539,15 @@ # stop pylint from complaining about missing attributes in Hooks classes eidfrom = eidto = entity = rtype = None + @classmethod + @cached + def filterable_selectors(cls): + search = cls.__select__.search_selector + if search((NotSelector, OrSelector)): + return None, None + enabled_cat = search(enabled_category) + main_filter = search((is_instance, match_rtype)) + return enabled_cat, main_filter @classmethod def check_events(cls): @@ -653,8 +737,8 @@ operation. These keyword arguments will be accessible as attributes from the operation instance. - An operation is triggered on connections pool events related to - commit / rollback transations. Possible events are: + An operation is triggered on connections set events related to commit / + rollback transactions.
Possible events are: * `precommit`: @@ -728,7 +812,7 @@ getattr(self, event)() def precommit_event(self): - """the observed connections pool is preparing a commit""" + """the observed connections set is preparing a commit""" def revertprecommit_event(self): """an error went when pre-commiting this operation or a later one @@ -738,14 +822,13 @@ """ def rollback_event(self): - """the observed connections pool has been rollbacked + """the observed connections set has been rolled back - do nothing by default, the operation will just be removed from the pool - operation list + do nothing by default """ def postcommit_event(self): - """the observed connections pool has committed""" + """the observed connections set has committed""" @property @deprecated('[3.6] use self.session.user') @@ -1028,7 +1111,7 @@ data_key = 'neweids' def rollback_event(self): - """the observed connections pool has been rollbacked, + """the observed connections set has been rolled back, remove inserted eid from repository type/source cache """ try: @@ -1042,7 +1125,7 @@ """ data_key = 'pendingeids' def postcommit_event(self): - """the observed connections pool has been rollbacked, + """the observed connections set has committed, remove inserted eid from repository type/source cache """ try: diff -r 6397a9051f65 -r 134613d3b353 server/migractions.py --- a/server/migractions.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/migractions.py Wed Jul 20 18:22:41 2011 +0200 @@ -201,7 +201,6 @@ versions = repo.get_versions() for cube, version in versions.iteritems(): version_file.write('%s %s\n' % (cube, version)) - if not failed: bkup = tarfile.open(backupfile, 'w|gz') for filename in os.listdir(tmpdir): @@ -242,7 +241,7 @@ written_format = format_file.readline().strip() if written_format in ('portable', 'native'): format = written_format - self.config.open_connections_pools = False + self.config.init_cnxset_pool = False repo = self.repo_connect() for source in repo.sources: if systemonly and source.uri != 'system': @@ -255,7 +254,7 @@ raise SystemExit(1) shutil.rmtree(tmpdir) # call hooks - repo.open_connections_pools() + repo.init_cnxset_pool() repo.hm.call_hooks('server_restore', repo=repo, timestamp=backupfile) print '-> database restored.' @@ -288,7 +287,7 @@ except (KeyboardInterrupt, EOFError): print 'aborting...'
sys.exit(0) - self.session.keep_pool_mode('transaction') + self.session.keep_cnxset_mode('transaction') self.session.data['rebuild-infered'] = False return self._cnx @@ -296,10 +295,10 @@ def session(self): if self.config is not None: session = self.repo._get_session(self.cnx.sessionid) - if session.pool is None: + if session.cnxset is None: session.set_read_security(False) session.set_write_security(False) - session.set_pool() + session.set_cnxset() return session # no access to session on remote instance return None @@ -308,13 +307,13 @@ if hasattr(self, '_cnx'): self._cnx.commit() if self.session: - self.session.set_pool() + self.session.set_cnxset() def rollback(self): if hasattr(self, '_cnx'): self._cnx.rollback() if self.session: - self.session.set_pool() + self.session.set_cnxset() def rqlexecall(self, rqliter, ask_confirm=False): for rql, kwargs in rqliter: @@ -374,18 +373,21 @@ self.cmd_reactivate_verification_hooks() def install_custom_sql_scripts(self, directory, driver): + sql_scripts = [] for fpath in glob(osp.join(directory, '*.sql.%s' % driver)): newname = osp.basename(fpath).replace('.sql.%s' % driver, '.%s.sql' % driver) warn('[3.5.6] rename %s into %s' % (fpath, newname), DeprecationWarning) + sql_scripts.append(fpath) + sql_scripts += glob(osp.join(directory, '*.%s.sql' % driver)) + for fpath in sql_scripts: print '-> installing', fpath - sqlexec(open(fpath).read(), self.session.system_sql, False, - delimiter=';;') - for fpath in glob(osp.join(directory, '*.%s.sql' % driver)): - print '-> installing', fpath - sqlexec(open(fpath).read(), self.session.system_sql, False, - delimiter=';;') + try: + sqlexec(open(fpath).read(), self.session.system_sql, False, + delimiter=';;') + except Exception, exc: + print '-> ERROR:', exc, ', skipping', fpath # schema synchronization internals ######################################## @@ -1375,7 +1377,7 @@ def _cw(self): session = self.session if session is not None: - session.set_pool() + session.set_cnxset() return session return self.cnx.request() diff -r 6397a9051f65 -r 134613d3b353 server/msplanner.py --- a/server/msplanner.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/msplanner.py Wed Jul 20 18:22:41 2011 +0200 @@ -370,7 +370,7 @@ eid = const.eval(self.plan.args) source = self._session.source_from_eid(eid) if (source is self.system_source - or (hasrel and + or (hasrel and varobj._q_invariant and not any(source.support_relation(r.r_type) for r in varobj.stinfo['relations'] if not r is rel))): diff -r 6397a9051f65 -r 134613d3b353 server/pool.py --- a/server/pool.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/pool.py Wed Jul 20 18:22:41 2011 +0200 @@ -1,4 +1,4 @@ -# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved. +# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved. # contact http://www.logilab.fr/ -- mailto:contact@logilab.fr # # This file is part of CubicWeb. @@ -15,19 +15,18 @@ # # You should have received a copy of the GNU Lesser General Public License along # with CubicWeb. If not, see <http://www.gnu.org/licenses/>. -"""CubicWeb server connections pool : the repository has a limited number of -connections pools, each of them dealing with a set of connections on each source -used by the repository. A connections pools (`ConnectionsPool`) is an -abstraction for a group of connection to each source. +"""CubicWeb server connections set: the repository has a limited number of +:class:`ConnectionsSet` (defined in configuration, defaulting to 4). Each of them +holds a connection for each source used by the repository.
""" __docformat__ = "restructuredtext en" import sys -class ConnectionsPool(object): +class ConnectionsSet(object): """handle connections on a set of sources, at some point associated to a - user session + :class:`Session` """ def __init__(self, sources): @@ -81,9 +80,9 @@ self.reconnect(source) def close(self, i_know_what_i_do=False): - """close all connections in the pool""" + """close all connections in the set""" if i_know_what_i_do is not True: # unexpected closing safety belt - raise RuntimeError('pool shouldn\'t be closed') + raise RuntimeError('connections set shouldn\'t be closed') for cu in self._cursors.values(): try: cu.close() @@ -97,17 +96,17 @@ # internals ############################################################### - def pool_set(self): - """pool is being set""" + def cnxset_set(self): + """connections set is being set on a session""" self.check_connections() - def pool_reset(self): - """pool is being reseted""" + def cnxset_freed(self): + """connections set is being freed from a session""" for source, cnx in self.source_cnxs.values(): - source.pool_reset(cnx) + source.cnxset_freed(cnx) def sources(self): - """return the source objects handled by this pool""" + """return the source objects handled by this connections set""" # implementation details of flying insert requires the system source # first yield self.source_cnxs['system'][0] diff -r 6397a9051f65 -r 134613d3b353 server/querier.py --- a/server/querier.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/querier.py Wed Jul 20 18:22:41 2011 +0200 @@ -169,7 +169,7 @@ # session executing the query self.session = session # quick reference to the system source - self.syssource = session.pool.source('system') + self.syssource = session.cnxset.source('system') # execution steps self.steps = [] # index of temporary tables created during execution @@ -734,8 +734,8 @@ # transaction must been rollbacked # # notes: - # * we should not reset the pool here, since we don't want the - # session to loose its pool during processing + # * we should not reset the connections set here, since we don't want the + # session to loose it during processing # * don't rollback if we're in the commit process, will be handled # by the session if session.commit_state is None: diff -r 6397a9051f65 -r 134613d3b353 server/repository.py --- a/server/repository.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/repository.py Wed Jul 20 18:22:41 2011 +0200 @@ -154,13 +154,13 @@ self.sources_by_uri = {'system': self.system_source} # querier helper, need to be created after sources initialization self.querier = querier.QuerierHelper(self, self.schema) - # cache eid -> type / source + # cache eid -> (type, physical source, extid, actual source) self._type_source_cache = {} # cache (extid, source uri) -> eid self._extid_cache = {} - # open some connections pools - if config.open_connections_pools: - self.open_connections_pools() + # open some connections set + if config.init_cnxset_pool: + self.init_cnxset_pool() @onevent('after-registry-reload', self) def fix_user_classes(self): usercls = self.vreg['etypes'].etype_class('CWUser') @@ -168,10 +168,10 @@ if not isinstance(session.user, InternalManager): session.user.__class__ = usercls - def open_connections_pools(self): + def init_cnxset_pool(self): config = self.config - self._available_pools = Queue.Queue() - self._available_pools.put_nowait(pool.ConnectionsPool(self.sources)) + self._cnxsets_pool = Queue.Queue() + self._cnxsets_pool.put_nowait(pool.ConnectionsSet(self.sources)) if config.quick_start: # quick 
start, usually only to get a minimal repository to get cubes # information (eg dump/restore/...) @@ -213,14 +213,14 @@ # configurate tsearch according to postgres version for source in self.sources: source.init_creating() - # close initialization pool and reopen fresh ones for proper + # close initialization connetions set and reopen fresh ones for proper # initialization now that we know cubes - self._get_pool().close(True) - # list of available pools (we can't iterate on Queue instance) - self.pools = [] + self._get_cnxset().close(True) + # list of available_cnxsets (we can't iterate on Queue instance) + self.cnxsets = [] for i in xrange(config['connections-pool-size']): - self.pools.append(pool.ConnectionsPool(self.sources)) - self._available_pools.put_nowait(self.pools[-1]) + self.cnxsets.append(pool.ConnectionsSet(self.sources)) + self._cnxsets_pool.put_nowait(self.cnxsets[-1]) if config.quick_start: config.init_cubes(self.get_cubes()) self.hm = hook.HooksManager(self.vreg) @@ -244,7 +244,7 @@ self.sources_by_eid[sourceent.eid] = self.system_source self.system_source.init(True, sourceent) continue - self.add_source(sourceent, add_to_pools=False) + self.add_source(sourceent, add_to_cnxsets=False) finally: session.close() @@ -253,7 +253,7 @@ 'can_cross_relation', 'rel_type_sources'): clear_cache(self, cache) - def add_source(self, sourceent, add_to_pools=True): + def add_source(self, sourceent, add_to_cnxsets=True): source = self.get_source(sourceent.type, sourceent.name, sourceent.host_config, sourceent.eid) self.sources_by_eid[sourceent.eid] = source @@ -261,15 +261,15 @@ if self.config.source_enabled(source): # call source's init method to complete their initialisation if # needed (for instance looking for persistent configuration using an - # internal session, which is not possible until pools have been + # internal session, which is not possible until connections sets have been # initialized) source.init(True, sourceent) if not source.copy_based_source: self.sources.append(source) self.querier.set_planner() - if add_to_pools: - for pool in self.pools: - pool.add_source(source) + if add_to_cnxsets: + for cnxset in self.cnxsets: + cnxset.add_source(source) else: source.init(False, sourceent) self._clear_planning_caches() @@ -280,8 +280,8 @@ if self.config.source_enabled(source) and not source.copy_based_source: self.sources.remove(source) self.querier.set_planner() - for pool in self.pools: - pool.remove_source(source) + for cnxset in self.cnxsets: + cnxset.remove_source(source) self._clear_planning_caches() def get_source(self, type, uri, source_config, eid=None): @@ -368,25 +368,25 @@ t.start() #@locked - def _get_pool(self): + def _get_cnxset(self): try: - return self._available_pools.get(True, timeout=5) + return self._cnxsets_pool.get(True, timeout=5) except Queue.Empty: - raise Exception('no pool available after 5 secs, probably either a ' + raise Exception('no connections set available after 5 secs, probably either a ' 'bug in code (too many uncommited/rollbacked ' 'connections) or too much load on the server (in ' 'which case you can try to set a bigger ' - 'connections pools size)') + 'connections pool size)') - def _free_pool(self, pool): - self._available_pools.put_nowait(pool) + def _free_cnxset(self, cnxset): + self._cnxsets_pool.put_nowait(cnxset) def pinfo(self): - # XXX: session.pool is accessed from a local storage, would be interesting - # to see if there is a pool set in any thread specific data) - return '%s: %s (%s)' % (self._available_pools.qsize(), + # XXX: 
session.cnxset is accessed from a local storage, would be interesting + # to see if there is a cnxset set in any thread specific data) + return '%s: %s (%s)' % (self._cnxsets_pool.qsize(), ','.join(session.user.login for session in self._sessions.values() - if session.pool), + if session.cnxset), threading.currentThread()) def shutdown(self): """called on server stop event to properly close opened sessions and @@ -409,12 +409,12 @@ or self.config.quick_start): self.hm.call_hooks('server_shutdown', repo=self) self.close_sessions() - while not self._available_pools.empty(): - pool = self._available_pools.get_nowait() + while not self._cnxsets_pool.empty(): + cnxset = self._cnxsets_pool.get_nowait() try: - pool.close(True) + cnxset.close(True) except: - self.exception('error while closing %s' % pool) + self.exception('error while closing %s' % cnxset) continue if self.pyro_registered: if self._use_pyrons(): @@ -496,7 +496,7 @@ results['nb_open_sessions'] = len(self._sessions) results['nb_active_threads'] = threading.activeCount() results['looping_tasks'] = ', '.join(str(t) for t in self._looping_tasks) - results['available_pools'] = self._available_pools.qsize() + results['available_cnxsets'] = self._cnxsets_pool.qsize() results['threads'] = ', '.join(sorted(str(t) for t in threading.enumerate())) return results @@ -535,12 +535,12 @@ # XXX we may want to check we don't give sensible information if foreid is None: return self.config[option] - _, sourceuri, extid = self.type_and_source_from_eid(foreid) + _, sourceuri, extid, _ = self.type_and_source_from_eid(foreid) if sourceuri == 'system': return self.config[option] - pool = self._get_pool() + cnxset = self._get_cnxset() try: - cnx = pool.connection(sourceuri) + cnx = cnxset.connection(sourceuri) # needed to check connection is valid and usable by the current # thread newcnx = self.sources_by_uri[sourceuri].check_connection(cnx) @@ -548,7 +548,7 @@ cnx = newcnx return cnx.get_option_value(option, extid) finally: - self._free_pool(pool) + self._free_cnxset(cnxset) @cached def get_versions(self, checkversions=False): @@ -721,7 +721,7 @@ * build_descr is a flag indicating if the description should be built on select queries """ - session = self._get_session(sessionid, setpool=True, txid=txid) + session = self._get_session(sessionid, setcnxset=True, txid=txid) try: try: rset = self.querier.execute(session, rqlstring, args, @@ -747,21 +747,23 @@ self.exception('unexpected error while executing %s with %s', rqlstring, args) raise finally: - session.reset_pool() + session.free_cnxset() def describe(self, sessionid, eid, txid=None): - """return a tuple (type, source, extid) for the entity with id """ - session = self._get_session(sessionid, setpool=True, txid=txid) + """return a tuple `(type, physical source uri, extid, actual source + uri)` for the entity of the given `eid` + """ + session = self._get_session(sessionid, setcnxset=True, txid=txid) try: return self.type_and_source_from_eid(eid, session) finally: - session.reset_pool() + session.free_cnxset() def check_session(self, sessionid): """raise `BadConnectionId` if the connection is no more valid, else return its latest activity timestamp. 
""" - return self._get_session(sessionid, setpool=False).timestamp + return self._get_session(sessionid, setcnxset=False).timestamp def get_shared_data(self, sessionid, key, default=None, pop=False, txdata=False): """return value associated to key in the session's data dictionary or @@ -772,7 +774,7 @@ If key isn't defined in the dictionnary, value specified by the `default` argument will be returned. """ - session = self._get_session(sessionid, setpool=False) + session = self._get_session(sessionid, setcnxset=False) return session.get_shared_data(key, default, pop, txdata) def set_shared_data(self, sessionid, key, value, txdata=False): @@ -782,7 +784,7 @@ transaction's data which are cleared on commit/rollback of the current transaction. """ - session = self._get_session(sessionid, setpool=False) + session = self._get_session(sessionid, setcnxset=False) session.set_shared_data(key, value, txdata) def commit(self, sessionid, txid=None): @@ -811,10 +813,10 @@ def close(self, sessionid, txid=None, checkshuttingdown=True): """close the session with the given id""" - session = self._get_session(sessionid, setpool=True, txid=txid, + session = self._get_session(sessionid, setcnxset=True, txid=txid, checkshuttingdown=checkshuttingdown) # operation uncommited before close are rollbacked before hook is called - session.rollback(reset_pool=False) + session.rollback(free_cnxset=False) self.hm.call_hooks('session_close', session) # commit session at this point in case write operation has been done # during `session_close` hooks @@ -829,7 +831,7 @@ * update user information on each user's request (i.e. groups and custom properties) """ - session = self._get_session(sessionid, setpool=False) + session = self._get_session(sessionid, setcnxset=False) if props is not None: self.set_session_props(sessionid, props) user = session.user @@ -841,43 +843,43 @@ * update user information on each user's request (i.e. 
groups and custom properties) """ - session = self._get_session(sessionid, setpool=False) + session = self._get_session(sessionid, setcnxset=False) for prop, value in props.items(): session.change_property(prop, value) def undoable_transactions(self, sessionid, ueid=None, txid=None, **actionfilters): """See :class:`cubicweb.dbapi.Connection.undoable_transactions`""" - session = self._get_session(sessionid, setpool=True, txid=txid) + session = self._get_session(sessionid, setcnxset=True, txid=txid) try: return self.system_source.undoable_transactions(session, ueid, **actionfilters) finally: - session.reset_pool() + session.free_cnxset() def transaction_info(self, sessionid, txuuid, txid=None): """See :class:`cubicweb.dbapi.Connection.transaction_info`""" - session = self._get_session(sessionid, setpool=True, txid=txid) + session = self._get_session(sessionid, setcnxset=True, txid=txid) try: return self.system_source.tx_info(session, txuuid) finally: - session.reset_pool() + session.free_cnxset() def transaction_actions(self, sessionid, txuuid, public=True, txid=None): """See :class:`cubicweb.dbapi.Connection.transaction_actions`""" - session = self._get_session(sessionid, setpool=True, txid=txid) + session = self._get_session(sessionid, setcnxset=True, txid=txid) try: return self.system_source.tx_actions(session, txuuid, public) finally: - session.reset_pool() + session.free_cnxset() def undo_transaction(self, sessionid, txuuid, txid=None): """See :class:`cubicweb.dbapi.Connection.undo_transaction`""" - session = self._get_session(sessionid, setpool=True, txid=txid) + session = self._get_session(sessionid, setcnxset=True, txid=txid) try: return self.system_source.undo_transaction(session, txuuid) finally: - session.reset_pool() + session.free_cnxset() # public (inter-repository) interface ##################################### @@ -929,14 +931,14 @@ """return a dbapi like connection/cursor using internal user which have every rights on the repository. You'll *have to* commit/rollback or close (rollback implicitly) the session once the job's done, else - you'll leak connections pool up to the time where no more pool is + you'll leak connections sets up to the time where none is available, causing irremediable freeze... """ session = InternalSession(self, cnxprops) - session.set_pool() + session.set_cnxset() return session - def _get_session(self, sessionid, setpool=False, txid=None, + def _get_session(self, sessionid, setcnxset=False, txid=None, checkshuttingdown=True): """return the user associated to the given session identifier""" if checkshuttingdown and self.shutting_down: @@ -945,9 +947,9 @@ session = self._sessions[sessionid] except KeyError: raise BadConnectionId('No such session %s' % sessionid) - if setpool: - session.set_tx_data(txid) # must be done before set_pool - session.set_pool() + if setcnxset: + session.set_tx_data(txid) # must be done before set_cnxset + session.set_cnxset() return session # data sources handling ################################################### # * correspondance between eid and local id (i.e.
specific to a given source) def type_and_source_from_eid(self, eid, session=None): - """return a tuple (type, source, extid) for the entity with id <eid>""" + """return a tuple `(type, physical source uri, extid, actual source + uri)` for the entity of the given `eid` + """ try: eid = typed_eid(eid) except ValueError: raise UnknownEid(eid) try: return self._type_source_cache[eid] @@ -965,19 +969,19 @@ except KeyError: if session is None: session = self.internal_session() - reset_pool = True + free_cnxset = True else: - reset_pool = False + free_cnxset = False try: - etype, uri, extid = self.system_source.eid_type_source(session, - eid) + etype, uri, extid, auri = self.system_source.eid_type_source( + session, eid) finally: - if reset_pool: - session.reset_pool() - self._type_source_cache[eid] = (etype, uri, extid) - if uri != 'system': - self._extid_cache[(extid, uri)] = eid - return etype, uri, extid + if free_cnxset: + session.free_cnxset() + self._type_source_cache[eid] = (etype, uri, extid, auri) + if uri != 'system': + self._extid_cache[(extid, uri)] = eid + return etype, uri, extid, auri def clear_caches(self, eids): etcache = self._type_source_cache @@ -985,7 +989,7 @@ rqlcache = self.querier._rql_cache for eid in eids: try: - etype, uri, extid = etcache.pop(typed_eid(eid)) # may be a string in some cases + etype, uri, extid, auri = etcache.pop(typed_eid(eid)) # may be a string in some cases rqlcache.pop('%s X WHERE X eid %s' % (etype, eid), None) extidcache.pop((extid, uri), None) except KeyError: @@ -1019,7 +1023,7 @@ def eid2extid(self, source, eid, session=None): """get local id from an eid""" - etype, uri, extid = self.type_and_source_from_eid(eid, session) + etype, uri, extid, _ = self.type_and_source_from_eid(eid, session) if source.uri != uri: # eid not from the given source raise UnknownEid(eid) @@ -1027,23 +1031,44 @@ def extid2eid(self, source, extid, etype, session=None, insert=True, sourceparams=None): - """get eid from a local id. An eid is attributed if no record is found""" + """Return eid from a local id. If the eid is a negative integer, that + means the entity is known but has been copied back to the system source + and hence should be ignored. + + If no record is found, i.e. the entity is not known yet: + + 1. an eid is attributed + + 2. the source's :meth:`before_entity_insertion` method is called to + build the entity instance + + 3. unless source's :attr:`should_call_hooks` tells otherwise, + 'before_add_entity' hooks are called + + 4. record is added into the system source + + 5. the source's :meth:`after_entity_insertion` method is called to + complete building of the entity instance + + 6.
unless source's :attr:`should_call_hooks` tells otherwise, + 'after_add_entity' hooks are called + """ uri = 'system' if source.copy_based_source else source.uri cachekey = (extid, uri) try: return self._extid_cache[cachekey] except KeyError: pass - reset_pool = False + free_cnxset = False if session is None: session = self.internal_session() - reset_pool = True + free_cnxset = True eid = self.system_source.extid2eid(session, uri, extid) if eid is not None: self._extid_cache[cachekey] = eid - self._type_source_cache[eid] = (etype, uri, extid) - if reset_pool: - session.reset_pool() + self._type_source_cache[eid] = (etype, uri, extid, source.uri) + if free_cnxset: + session.free_cnxset() return eid if not insert: return @@ -1055,11 +1080,11 @@ # processing a commit, we have to use another one if not session.is_internal_session: session = self.internal_session() - reset_pool = True + free_cnxset = True try: eid = self.system_source.create_eid(session) self._extid_cache[cachekey] = eid - self._type_source_cache[eid] = (etype, uri, extid) + self._type_source_cache[eid] = (etype, uri, extid, source.uri) entity = source.before_entity_insertion( session, extid, etype, eid, sourceparams) if source.should_call_hooks: self.hm.call_hooks('before_add_entity', session, entity=entity) # XXX call add_info with complete=False ? self.add_info(session, entity, source, extid) source.after_entity_insertion(session, extid, entity, sourceparams) if source.should_call_hooks: self.hm.call_hooks('after_add_entity', session, entity=entity) - session.commit(reset_pool) + session.commit(free_cnxset) return eid except: - session.rollback(reset_pool) + session.rollback(free_cnxset) raise def add_info(self, session, entity, source, extid=None, complete=True): @@ -1195,7 +1220,8 @@ suri = 'system' extid = source.get_extid(entity) self._extid_cache[(str(extid), suri)] = entity.eid - self._type_source_cache[entity.eid] = (entity.__regid__, suri, extid) + self._type_source_cache[entity.eid] = (entity.__regid__, suri, extid, + source.uri) return extid def glob_add_entity(self, session, edited): @@ -1356,7 +1382,7 @@ # in setdefault, this should not be changed without profiling.
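# A usage sketch (not part of the changeset) for the extid2eid()
# protocol documented above; the source object and the external id are
# hypothetical, 'Card' stands in for any entity type:
eid = repo.extid2eid(source, 'http://example.org/card/1', 'Card',
                     session=session)   # unknown extid: steps 1-6 run
assert repo.extid2eid(source, 'http://example.org/card/1', 'Card',
                      session=session) == eid  # now a pure _extid_cache hit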
for eid in eids: - etype, sourceuri, extid = self.type_and_source_from_eid(eid, session) + etype, sourceuri, extid, _ = self.type_and_source_from_eid(eid, session) # XXX should cache entity's cw_metainformation entity = session.entity_from_eid(eid, etype) try: diff -r 6397a9051f65 -r 134613d3b353 server/rqlannotation.py --- a/server/rqlannotation.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/rqlannotation.py Wed Jul 20 18:22:41 2011 +0200 @@ -202,8 +202,8 @@ # since introduced duplicates will be removed if scope.stmt.distinct and diffscope_rels: return iter(_sort(diffscope_rels)).next() - # XXX could use a relation for a different scope if it can't generate - # duplicates, so we would have to check cardinality + # XXX could use a relation from a different scope if it can't generate + # duplicates, so we would have to check cardinality raise CantSelectPrincipal() def _select_main_var(relations): diff -r 6397a9051f65 -r 134613d3b353 server/schemaserial.py --- a/server/schemaserial.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/schemaserial.py Wed Jul 20 18:22:41 2011 +0200 @@ -88,7 +88,7 @@ repo = session.repo dbhelper = repo.system_source.dbhelper # XXX bw compat (3.6 migration) - sqlcu = session.pool['system'] + sqlcu = session.cnxset['system'] sqlcu.execute("SELECT * FROM cw_CWRType WHERE cw_name='symetric'") if sqlcu.fetchall(): sql = dbhelper.sql_rename_col('cw_CWRType', 'cw_symetric', 'cw_symmetric', @@ -138,8 +138,8 @@ except: pass tocleanup = [eid] - tocleanup += (eid for eid, (eidetype, uri, extid) in repo._type_source_cache.items() - if etype == eidetype) + tocleanup += (eid for eid, cached in repo._type_source_cache.iteritems() + if etype == cached[0]) repo.clear_caches(tocleanup) session.commit(False) if needcopy: diff -r 6397a9051f65 -r 134613d3b353 server/serverconfig.py --- a/server/serverconfig.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/serverconfig.py Wed Jul 20 18:22:41 2011 +0200 @@ -130,7 +130,7 @@ ('connections-pool-size', {'type' : 'int', 'default': 4, - 'help': 'size of the connections pools. Each source supporting multiple \ + 'help': 'size of the connections pool. Each source supporting multiple \ connections will have this number of opened connections.', 'group': 'main', 'level': 3, }), @@ -209,9 +209,9 @@ }), ) + CubicWebConfiguration.options) - # should we open connections pools (eg connect to sources). This is usually - # necessary... - open_connections_pools = True + # should we init the connections pool (e.g. connect to sources). This is + # usually necessary... + init_cnxset_pool = True # read the schema from the database read_instance_schema = True @@ -255,7 +255,7 @@ # configuration file (#16102) @cached def read_sources_file(self): - return read_config(self.sources_file()) + return read_config(self.sources_file(), raise_if_unreadable=True) def sources(self): """return a dictionnaries containing sources definitions indexed by diff -r 6397a9051f65 -r 134613d3b353 server/serverctl.py --- a/server/serverctl.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/serverctl.py Wed Jul 20 18:22:41 2011 +0200 @@ -24,6 +24,7 @@ # completion). So import locally in command helpers.
import sys import os +import logging from logilab.common import nullobject from logilab.common.configuration import Configuration @@ -122,11 +123,10 @@ interactive=interactive) # disable autocommit (isolation_level(1)) because DROP and # CREATE DATABASE can't be executed in a transaction - try: - cnx.set_isolation_level(0) - except AttributeError: + set_isolation_level = getattr(cnx, 'set_isolation_level', None) + if set_isolation_level is not None: # set_isolation_level() is psycopg specific - pass + set_isolation_level(0) return cnx def repo_cnx(config): @@ -370,14 +370,21 @@ interactive=not automatic) cursor = cnx.cursor() helper.init_fti_extensions(cursor) + cnx.commit() # postgres specific stuff if driver == 'postgres': - # install plpythonu/plpgsql language if not installed by the cube - langs = sys.platform == 'win32' and ('plpgsql',) or ('plpythonu', 'plpgsql') + # install plpythonu/plpgsql languages + langs = ('plpythonu', 'plpgsql') for extlang in langs: - helper.create_language(cursor, extlang) - cursor.close() - cnx.commit() + if automatic or ASK.confirm('Create language %s ?' % extlang): + try: + helper.create_language(cursor, extlang) + except Exception, exc: + print '-> ERROR:', exc + print '-> could not create language %s, some stored procedures might be unusable' % extlang + cnx.rollback() + else: + cnx.commit() print '-> database for instance %s created and necessary extensions installed.' % appid print if automatic: @@ -560,6 +567,7 @@ """ name = 'reset-admin-pwd' arguments = '<instance>' + min_args = max_args = 1 options = ( ('password', {'short': 'p', 'type' : 'string', 'metavar' : '<new-password>', @@ -649,8 +657,7 @@ appid = args[0] debug = self['debug'] if sys.platform == 'win32' and not debug: - from logging import getLogger - logger = getLogger('cubicweb.ctl') + logger = logging.getLogger('cubicweb.ctl') logger.info('Forcing debug mode on win32 platform') debug = True config = ServerConfiguration.config_for(appid, debugmode=debug) @@ -982,7 +989,7 @@ appid = args[0] config = ServerConfiguration.config_for(appid) repo, cnx = repo_cnx(config) - session = repo._get_session(cnx.sessionid, setpool=True) + session = repo._get_session(cnx.sessionid, setcnxset=True) reindex_entities(repo.schema, session) cnx.commit() @@ -1007,11 +1014,43 @@ mih.cmd_synchronize_schema() +class SynchronizeSourceCommand(Command): + """Force a source synchronization. + + <instance> + the identifier of the instance + <source> + the name of the source to synchronize.
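For example (an editorial usage note, assuming the standard cubicweb-ctl entry point; the argument names are placeholders):

    cubicweb-ctl source-sync <instance> <source>

This authenticates against the instance, pulls data from the named source in the foreground with INFO-level logging, and prints the non-empty pull statistics, as implemented by run() below.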
+ """ + name = 'source-sync' + arguments = ' ' + min_args = max_args = 2 + + def run(self, args): + config = ServerConfiguration.config_for(args[0]) + config.global_set_option('log-file', None) + config.log_format = '%(levelname)s %(name)s: %(message)s' + logger = logging.getLogger('cubicweb.sources') + logger.setLevel(logging.INFO) + # only retrieve cnx to trigger authentication, close it right away + repo, cnx = repo_cnx(config) + cnx.close() + try: + source = repo.sources_by_uri[args[1]] + except KeyError: + raise ExecutionError('no source named %r' % args[1]) + session = repo.internal_session() + stats = source.pull_data(session, force=True, raise_on_error=True) + for key, val in stats.iteritems(): + if val: + print key, ':', val + + for cmdclass in (CreateInstanceDBCommand, InitInstanceCommand, GrantUserOnInstanceCommand, ResetAdminPasswordCommand, StartRepositoryCommand, DBDumpCommand, DBRestoreCommand, DBCopyCommand, AddSourceCommand, CheckRepositoryCommand, RebuildFTICommand, - SynchronizeInstanceSchemaCommand, + SynchronizeInstanceSchemaCommand, SynchronizeSourceCommand ): CWCTL.register(cmdclass) diff -r 6397a9051f65 -r 134613d3b353 server/session.py --- a/server/session.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/session.py Wed Jul 20 18:22:41 2011 +0200 @@ -125,21 +125,13 @@ self.categories = categories def __enter__(self): - self.oldmode = self.session.set_hooks_mode(self.mode) - if self.mode is self.session.HOOKS_DENY_ALL: - self.changes = self.session.enable_hook_categories(*self.categories) - else: - self.changes = self.session.disable_hook_categories(*self.categories) + self.oldmode, self.changes = self.session.init_hooks_mode_categories( + self.mode, self.categories) def __exit__(self, exctype, exc, traceback): - if self.changes: - if self.mode is self.session.HOOKS_DENY_ALL: - self.session.disable_hook_categories(*self.changes) - else: - self.session.enable_hook_categories(*self.changes) - self.session.set_hooks_mode(self.oldmode) + self.session.reset_hooks_mode_categories(self.oldmode, self.mode, self.changes) -INDENT = '' + class security_enabled(object): """context manager to control security w/ session.execute, since by default security is disabled on queries executed on the repository @@ -151,33 +143,90 @@ self.write = write def __enter__(self): -# global INDENT - if self.read is not None: - self.oldread = self.session.set_read_security(self.read) -# print INDENT + 'read', self.read, self.oldread - if self.write is not None: - self.oldwrite = self.session.set_write_security(self.write) -# print INDENT + 'write', self.write, self.oldwrite -# INDENT += ' ' + self.oldread, self.oldwrite = self.session.init_security( + self.read, self.write) def __exit__(self, exctype, exc, traceback): -# global INDENT -# INDENT = INDENT[:-2] - if self.read is not None: - self.session.set_read_security(self.oldread) -# print INDENT + 'reset read to', self.oldread - if self.write is not None: - self.session.set_write_security(self.oldwrite) -# print INDENT + 'reset write to', self.oldwrite + self.session.reset_security(self.oldread, self.oldwrite) class TransactionData(object): def __init__(self, txid): self.transactionid = txid + self.ctx_count = 0 + class Session(RequestSessionBase): - """tie session id, user, connections pool and other session data all - together + """Repository usersession, tie a session id, user, connections set and + other session data all together. 
+ + About session storage / transactions + ------------------------------------ + + Here is a description of internal session attributes. Besides :attr:`data` + and :attr:`transaction_data`, you should not have to use the attributes + described here, but rather the higher level APIs. + + :attr:`data` is a dictionary containing shared data, used to communicate + extra information between the client and the repository + + :attr:`_tx_data` is a dictionary of :class:`TransactionData` instances, one + for each running transaction. The key is the transaction id. By default + the transaction id is the thread name but it can be otherwise (per dbapi + cursor for instance, or per thread name *from another process*). + + :attr:`__threaddata` is a thread local storage whose `txdata` attribute + refers to the proper instance of :class:`TransactionData` according to the + transaction. + + :attr:`_threads_in_transaction` is a set of (thread, connections set) + referencing threads that currently hold a connections set for the session. + + You should not have to use :attr:`_tx_data` nor :attr:`__threaddata` directly; + simply access transaction data transparently through the :attr:`_threaddata` + property. Also, you usually don't have to access it directly since current + transaction's data may be accessed/modified through properties / methods: + + :attr:`transaction_data`, similarly to :attr:`data`, is a dictionary + containing some shared data that should be cleared at the end of the + transaction. Hooks and operations may put arbitrary data in there, and + this may also be used as a communication channel between the client and + the repository. + + :attr:`cnxset`, the connections set to use to execute queries on sources. + During a transaction, the connections set may be freed so that it may be + used by another session as long as no writing is done. This means we can + have multiple sessions with a reasonably low connections set pool size. + + :attr:`mode`, string telling the connections set handling mode, may be one + of 'read' (connections set may be freed), 'write' (some write was done in + the connections set, it can't be freed before end of the transaction), + 'transaction' (we want to keep the connections set during the whole + transaction, with or without writing) + + :attr:`pending_operations`, ordered list of operations to be processed on + commit/rollback + + :attr:`commit_state`, describing the transaction commit state, may be one + of None (not yet committing), 'precommit' (calling precommit event on + operations), 'postcommit' (calling postcommit event on operations), + 'uncommitable' (some :exc:`ValidationError` or :exc:`Unauthorized` error + has been raised during the transaction and so it must be rolled back). + + :attr:`read_security` and :attr:`write_security`, boolean flags telling if + read/write security is currently activated. + + :attr:`hooks_mode`, may be either `HOOKS_ALLOW_ALL` or `HOOKS_DENY_ALL`. + + :attr:`enabled_hook_categories`, when :attr:`hooks_mode` is + `HOOKS_DENY_ALL`, this set contains hooks categories that are enabled. + + :attr:`disabled_hook_categories`, when :attr:`hooks_mode` is + `HOOKS_ALLOW_ALL`, this set contains hooks categories that are disabled.
+ + + :attr:`running_dbapi_query`, boolean flag telling if the executing query + is coming from a dbapi connection or is a query from within the repository """ is_internal_session = False @@ -246,7 +295,10 @@ """return a fake request/session using specified user""" session = Session(user, self.repo) threaddata = session._threaddata - threaddata.pool = self.pool + threaddata.cnxset = self.cnxset + # we attributed a connections set, need to update ctx_count else it will be freed + # while undesired + threaddata.ctx_count = 1 # share pending_operations, else operation added in the hi-jacked # session such as SendMailOp won't ever be processed threaddata.pending_operations = self.pending_operations @@ -388,14 +440,14 @@ """return a sql cursor on the system database""" if sql.split(None, 1)[0].upper() != 'SELECT': self.mode = 'write' - source = self.pool.source('system') + source = self.cnxset.source('system') try: return source.doexec(self, sql, args, rollback=rollback_on_failure) except (source.OperationalError, source.InterfaceError): if not rollback_on_failure: raise source.warning("trying to reconnect") - self.pool.reconnect(source) + self.cnxset.reconnect(source) return source.doexec(self, sql, args, rollback=rollback_on_failure) def set_language(self, language): @@ -446,6 +498,29 @@ def security_enabled(self, read=False, write=False): return security_enabled(self, read=read, write=write) + def init_security(self, read, write): + if read is None: + oldread = None + else: + oldread = self.set_read_security(read) + if write is None: + oldwrite = None + else: + oldwrite = self.set_write_security(write) + self._threaddata.ctx_count += 1 + return oldread, oldwrite + + def reset_security(self, read, write): + txstore = self._threaddata + txstore.ctx_count -= 1 + if txstore.ctx_count == 0: + self._clear_thread_storage(txstore) + else: + if read is not None: + self.set_read_security(read) + if write is not None: + self.set_write_security(write) + @property def read_security(self): """return a boolean telling if read security is activated or not""" @@ -546,6 +621,28 @@ self._threaddata.hooks_mode = mode return oldmode + def init_hooks_mode_categories(self, mode, categories): + oldmode = self.set_hooks_mode(mode) + if mode is self.HOOKS_DENY_ALL: + changes = self.enable_hook_categories(*categories) + else: + changes = self.disable_hook_categories(*categories) + self._threaddata.ctx_count += 1 + return oldmode, changes + + def reset_hooks_mode_categories(self, oldmode, mode, categories): + txstore = self._threaddata + txstore.ctx_count -= 1 + if txstore.ctx_count == 0: + self._clear_thread_storage(txstore) + else: + if categories: + if mode is self.HOOKS_DENY_ALL: + return self.disable_hook_categories(*categories) + else: + return self.enable_hook_categories(*categories) + self.set_hooks_mode(oldmode) + @property def disabled_hook_categories(self): try: @@ -569,17 +666,18 @@ - on HOOKS_ALLOW_ALL mode, ensure those categories are disabled """ changes = set() + self.pruned_hooks_cache.clear() if self.hooks_mode is self.HOOKS_DENY_ALL: - enablecats = self.enabled_hook_categories + enabledcats = self.enabled_hook_categories for category in categories: - if category in enablecats: - enablecats.remove(category) + if category in enabledcats: + enabledcats.remove(category) changes.add(category) else: - disablecats = self.disabled_hook_categories + disabledcats = self.disabled_hook_categories for category in categories: - if category not in disablecats: - disablecats.add(category) + if category not in 
disabledcats: + disabledcats.add(category) changes.add(category) return tuple(changes) @@ -590,17 +688,18 @@ - on HOOKS_ALLOW_ALL mode, ensure those categories are not disabled """ changes = set() + self.pruned_hooks_cache.clear() if self.hooks_mode is self.HOOKS_DENY_ALL: - enablecats = self.enabled_hook_categories + enabledcats = self.enabled_hook_categories for category in categories: - if category not in enablecats: - enablecats.add(category) + if category not in enabledcats: + enabledcats.add(category) changes.add(category) else: - disablecats = self.disabled_hook_categories + disabledcats = self.disabled_hook_categories for category in categories: - if category in self.disabled_hook_categories: - disablecats.remove(category) + if category in disabledcats: + disabledcats.remove(category) changes.add(category) return tuple(changes) @@ -620,19 +719,19 @@ # connection management ################################################### - def keep_pool_mode(self, mode): - """set pool_mode, e.g. how the session will keep its pool: + def keep_cnxset_mode(self, mode): + """set `mode`, e.g. how the session will keep its connections set: - * if mode == 'write', the pool is freed after each ready query, but kept - until the transaction's end (eg commit or rollback) when a write query - is detected (eg INSERT/SET/DELETE queries) + * if mode == 'write', the connections set is freed after each read + query, but kept until the transaction's end (eg commit or rollback) + when a write query is detected (eg INSERT/SET/DELETE queries) - * if mode == 'transaction', the pool is only freed after the + * if mode == 'transaction', the connections set is only freed after the transaction's end - notice that a repository has a limited set of pools, and a session has to - wait for a free pool to run any rql query (unless it already has a pool - set). + notice that a repository has a limited set of connections sets, and a + session has to wait for a free connections set to run any rql query + (unless it already has one set).
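For illustration (an editorial sketch, not part of the patch), a task running several read queries in a row but wanting to keep its connections set in between would do:

    session.keep_cnxset_mode('transaction')  # hold the connections set
    session.execute('Any X WHERE X is CWUser')
    session.execute('Any G WHERE G is CWGroup')
    session.commit()  # the connections set is only freed here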
""" assert mode in ('transaction', 'write') if mode == 'transaction': @@ -655,56 +754,58 @@ commit_state = property(get_commit_state, set_commit_state) @property - def pool(self): - """connections pool, set according to transaction mode for each query""" + def cnxset(self): + """connections set, set according to transaction mode for each query""" if self._closed: - self.reset_pool(True) - raise Exception('try to access pool on a closed session') - return getattr(self._threaddata, 'pool', None) + self.free_cnxset(True) + raise Exception('try to access connections set on a closed session %s' % self.id) + return getattr(self._threaddata, 'cnxset', None) - def set_pool(self): - """the session need a pool to execute some queries""" + def set_cnxset(self): + """the session need a connections set to execute some queries""" with self._closed_lock: if self._closed: - self.reset_pool(True) - raise Exception('try to set pool on a closed session') - if self.pool is None: - # get pool first to avoid race-condition - self._threaddata.pool = pool = self.repo._get_pool() + self.free_cnxset(True) + raise Exception('try to set connections set on a closed session %s' % self.id) + if self.cnxset is None: + # get connections set first to avoid race-condition + self._threaddata.cnxset = cnxset = self.repo._get_cnxset() + self._threaddata.ctx_count += 1 try: - pool.pool_set() + cnxset.cnxset_set() except: - self._threaddata.pool = None - self.repo._free_pool(pool) + self._threaddata.cnxset = None + self.repo._free_cnxset(cnxset) raise self._threads_in_transaction.add( - (threading.currentThread(), pool) ) - return self._threaddata.pool + (threading.currentThread(), cnxset) ) + return self._threaddata.cnxset - def _free_thread_pool(self, thread, pool, force_close=False): + def _free_thread_cnxset(self, thread, cnxset, force_close=False): try: - self._threads_in_transaction.remove( (thread, pool) ) + self._threads_in_transaction.remove( (thread, cnxset) ) except KeyError: - # race condition on pool freeing (freed by commit or rollback vs + # race condition on cnxset freeing (freed by commit or rollback vs # close) pass else: if force_close: - pool.reconnect() + cnxset.reconnect() else: - pool.pool_reset() - # free pool once everything is done to avoid race-condition - self.repo._free_pool(pool) + cnxset.cnxset_freed() + # free cnxset once everything is done to avoid race-condition + self.repo._free_cnxset(cnxset) - def reset_pool(self, ignoremode=False): - """the session is no longer using its pool, at least for some time""" - # pool may be none if no operation has been done since last commit + def free_cnxset(self, ignoremode=False): + """the session is no longer using its connections set, at least for some time""" + # cnxset may be none if no operation has been done since last commit # or rollback - pool = getattr(self._threaddata, 'pool', None) - if pool is not None and (ignoremode or self.mode == 'read'): + cnxset = getattr(self._threaddata, 'cnxset', None) + if cnxset is not None and (ignoremode or self.mode == 'read'): # even in read mode, we must release the current transaction - self._free_thread_pool(threading.currentThread(), pool) - del self._threaddata.pool + self._free_thread_cnxset(threading.currentThread(), cnxset) + del self._threaddata.cnxset + self._threaddata.ctx_count -= 1 def _touch(self): """update latest session usage timestamp and reset mode to read""" @@ -770,9 +871,13 @@ def source_defs(self): return self.repo.source_defs() - def describe(self, eid): + def describe(self, eid, 
asdict=False): """return a tuple (type, sourceuri, extid) for the entity with id <eid>""" - return self.repo.type_and_source_from_eid(eid, self) + metas = self.repo.type_and_source_from_eid(eid, self) + if asdict: + return dict(zip(('type', 'source', 'extid', 'asource'), metas)) + # XXX :-1 for cw compat, use asdict=True for full information + return metas[:-1] # db-api like interface ################################################### @@ -793,9 +898,9 @@ rset.req = self return rset - def _clear_thread_data(self, reset_pool=True): - """remove everything from the thread local storage, except pool - which is explicitly removed by reset_pool, and mode which is set anyway + def _clear_thread_data(self, free_cnxset=True): - """remove everything from the thread local storage, except connections set + which is explicitly removed by free_cnxset, and mode which is set anyway by _touch """ try: @@ -803,23 +908,38 @@ except AttributeError: pass else: - if reset_pool: - self._tx_data.pop(txstore.transactionid, None) - try: - del self.__threaddata.txdata - except AttributeError: - pass + if free_cnxset: + self.free_cnxset() + if txstore.ctx_count == 0: + self._clear_thread_storage(txstore) + else: + self._clear_tx_storage(txstore) + else: + self._clear_tx_storage(txstore) + + def _clear_thread_storage(self, txstore): + self._tx_data.pop(txstore.transactionid, None) + try: + del self.__threaddata.txdata + except AttributeError: + pass - def commit(self, reset_pool=True): + def _clear_tx_storage(self, txstore): + for name in ('commit_state', 'transaction_data', + 'pending_operations', '_rewriter', + 'pruned_hooks_cache'): + try: + delattr(txstore, name) + except AttributeError: + continue + + def commit(self, free_cnxset=True, reset_pool=None): """commit the current session's transaction""" - if self.pool is None: + if reset_pool is not None: + warn('[3.13] use free_cnxset argument instead of reset_pool', + DeprecationWarning, stacklevel=2) + free_cnxset = reset_pool + if self.cnxset is None: assert not self.pending_operations self._clear_thread_data() self._touch() @@ -868,9 +988,9 @@ # XXX use slice notation since self.pending_operations is a # read-only property.
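    # (editorial note, not part of the patch: with the renamed keyword,
    # callers migrating from commit(reset_pool=False) may either call
    #     session.commit(free_cnxset=False)   # keep the connections set
    # or
    #     session.commit()                    # release it...
    #     session.set_cnxset()                # ...then reacquire one,
    # the second form giving other sessions a chance to grab a free set,
    # as the datafeed parser below does between processed items)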
self.pending_operations[:] = processed + self.pending_operations - self.rollback(reset_pool) + self.rollback(free_cnxset) raise - self.pool.commit() + self.cnxset.commit() self.commit_state = 'postcommit' while self.pending_operations: operation = self.pending_operations.pop(0) @@ -884,15 +1004,19 @@ return self.transaction_uuid(set=False) finally: self._touch() - if reset_pool: - self.reset_pool(ignoremode=True) - self._clear_thread_data(reset_pool) + if free_cnxset: + self.free_cnxset(ignoremode=True) + self._clear_thread_data(free_cnxset) - def rollback(self, reset_pool=True): + def rollback(self, free_cnxset=True, reset_pool=None): """rollback the current session's transaction""" - # don't use self.pool, rollback may be called with _closed == True - pool = getattr(self._threaddata, 'pool', None) - if pool is None: + if reset_pool is not None: + warn('[3.13] use free_cnxset argument instead for reset_pool', + DeprecationWarning, stacklevel=2) + free_cnxset = reset_pool + # don't use self.cnxset, rollback may be called with _closed == True + cnxset = getattr(self._threaddata, 'cnxset', None) + if cnxset is None: self._clear_thread_data() self._touch() self.debug('rollback session %s done (no db activity)', self.id) @@ -907,20 +1031,20 @@ except: self.critical('rollback error', exc_info=sys.exc_info()) continue - pool.rollback() + cnxset.rollback() self.debug('rollback for session %s done', self.id) finally: self._touch() - if reset_pool: - self.reset_pool(ignoremode=True) - self._clear_thread_data(reset_pool) + if free_cnxset: + self.free_cnxset(ignoremode=True) + self._clear_thread_data(free_cnxset) def close(self): - """do not close pool on session close, since they are shared now""" + """do not close connections set on session close, since they are shared now""" with self._closed_lock: self._closed = True # copy since _threads_in_transaction maybe modified while waiting - for thread, pool in self._threads_in_transaction.copy(): + for thread, cnxset in self._threads_in_transaction.copy(): if thread is threading.currentThread(): continue self.info('waiting for thread %s', thread) @@ -930,12 +1054,12 @@ for i in xrange(10): thread.join(1) if not (thread.isAlive() and - (thread, pool) in self._threads_in_transaction): + (thread, cnxset) in self._threads_in_transaction): break else: self.error('thread %s still alive after 10 seconds, will close ' 'session anyway', thread) - self._free_thread_pool(thread, pool, force_close=True) + self._free_thread_cnxset(thread, cnxset, force_close=True) self.rollback() del self.__threaddata del self._tx_data @@ -962,9 +1086,16 @@ self._threaddata.pending_operations = [] return self._threaddata.pending_operations + @property + def pruned_hooks_cache(self): + try: + return self._threaddata.pruned_hooks_cache + except AttributeError: + self._threaddata.pruned_hooks_cache = {} + return self._threaddata.pruned_hooks_cache + def add_operation(self, operation, index=None): - """add an observer""" - assert self.commit_state != 'commit' + """add an operation""" if index is None: self.pending_operations.append(operation) else: @@ -1075,6 +1206,18 @@ def schema_rproperty(self, rtype, eidfrom, eidto, rprop): return getattr(self.rtype_eids_rdef(rtype, eidfrom, eidto), rprop) + @property + @deprecated("[3.13] use .cnxset attribute instead of .pool") + def pool(self): + return self.cnxset + + @deprecated("[3.13] use .set_cnxset() method instead of .set_pool()") + def set_pool(self): + return self.set_cnxset() + + @deprecated("[3.13] use .free_cnxset() method instead of 
.reset_pool()") + def reset_pool(self): + return self.free_cnxset() @deprecated("[3.7] execute is now unsafe by default in hooks/operation. You" " can also control security with the security_enabled context " @@ -1141,12 +1284,12 @@ self.disable_hook_categories('integrity') @property - def pool(self): - """connections pool, set according to transaction mode for each query""" + def cnxset(self): + """connections set, set according to transaction mode for each query""" if self.repo.shutting_down: - self.reset_pool(True) + self.free_cnxset(True) raise ShuttingDown('repository is shutting down') - return getattr(self._threaddata, 'pool', None) + return getattr(self._threaddata, 'cnxset', None) class InternalManager(object): diff -r 6397a9051f65 -r 134613d3b353 server/sources/__init__.py --- a/server/sources/__init__.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/sources/__init__.py Wed Jul 20 18:22:41 2011 +0200 @@ -110,6 +110,10 @@ # force deactivation (configuration error for instance) disabled = False + # boolean telling if cwuri of entities from this source is the url that + # should be used as entity's absolute url + use_cwuri_as_url = False + # source configuration options options = () @@ -119,6 +123,7 @@ self.support_relations['identity'] = False self.eid = eid self.public_config = source_config.copy() + self.public_config.setdefault('use-cwuri-as-url', self.use_cwuri_as_url) self.remove_sensitive_information(self.public_config) self.uri = source_config.pop('uri') set_log_methods(self, getLogger('cubicweb.sources.'+self.uri)) @@ -213,7 +218,7 @@ """ pass - PUBLIC_KEYS = ('type', 'uri') + PUBLIC_KEYS = ('type', 'uri', 'use-cwuri-as-url') def remove_sensitive_information(self, sourcedef): """remove sensitive information such as login / password from source definition @@ -230,23 +235,23 @@ def check_connection(self, cnx): """Check connection validity, return None if the connection is still - valid else a new connection (called when the pool using the given - connection is being attached to a session). Do nothing by default. + valid else a new connection (called when the connections set using the + given connection is being attached to a session). Do nothing by default. """ pass - def close_pool_connections(self): - for pool in self.repo.pools: - pool._cursors.pop(self.uri, None) - pool.source_cnxs[self.uri][1].close() + def close_source_connections(self): + for cnxset in self.repo.cnxsets: + cnxset._cursors.pop(self.uri, None) + cnxset.source_cnxs[self.uri][1].close() - def open_pool_connections(self): - for pool in self.repo.pools: - pool.source_cnxs[self.uri] = (self, self.get_connection()) + def open_source_connections(self): + for cnxset in self.repo.cnxsets: + cnxset.source_cnxs[self.uri] = (self, self.get_connection()) - def pool_reset(self, cnx): - """the pool using the given connection is being reseted from its current - attached session + def cnxset_freed(self, cnx): + """the connections set holding the given connection is being reseted + from its current attached session. do nothing by default """ @@ -404,7 +409,7 @@ .executemany(). 
""" res = self.syntax_tree_search(session, union, args, varmap=varmap) - session.pool.source('system').manual_insert(res, table, session) + session.cnxset.source('system').manual_insert(res, table, session) # write modification api ################################################### # read-only sources don't have to implement methods below diff -r 6397a9051f65 -r 134613d3b353 server/sources/datafeed.py --- a/server/sources/datafeed.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/sources/datafeed.py Wed Jul 20 18:22:41 2011 +0200 @@ -18,15 +18,24 @@ """datafeed sources: copy data from an external data stream into the system database """ +from __future__ import with_statement + +import urllib2 +import StringIO from datetime import datetime, timedelta from base64 import b64decode +from cookielib import CookieJar -from cubicweb import RegistryNotFound, ObjectNotFound, ValidationError +from lxml import etree + +from cubicweb import RegistryNotFound, ObjectNotFound, ValidationError, UnknownEid from cubicweb.server.sources import AbstractSource from cubicweb.appobject import AppObject + class DataFeedSource(AbstractSource): copy_based_source = True + use_cwuri_as_url = True options = ( ('synchronize', @@ -71,7 +80,7 @@ def _entity_update(self, source_entity): source_entity.complete() - self.parser = source_entity.parser + self.parser_id = source_entity.parser self.latest_retrieval = source_entity.latest_retrieval self.urls = [url.strip() for url in source_entity.url.splitlines() if url.strip()] @@ -88,12 +97,12 @@ def init(self, activated, source_entity): if activated: self._entity_update(source_entity) - self.parser = source_entity.parser + self.parser_id = source_entity.parser self.load_mapping(source_entity._cw) def _get_parser(self, session, **kwargs): return self.repo.vreg['parsers'].select( - self.parser, session, source=self, **kwargs) + self.parser_id, session, source=self, **kwargs) def load_mapping(self, session): self.mapping = {} @@ -121,27 +130,50 @@ return False return datetime.utcnow() < (self.latest_retrieval + self.synchro_interval) + def update_latest_retrieval(self, session): + self.latest_retrieval = datetime.utcnow() + session.execute('SET X latest_retrieval %(date)s WHERE X eid %(x)s', + {'x': self.eid, 'date': self.latest_retrieval}) + + def acquire_synchronization_lock(self, session): + # XXX race condition until WHERE of SET queries is executed using + # 'SELECT FOR UPDATE' + if not session.execute('SET X synchronizing TRUE WHERE X eid %(x)s, X synchronizing FALSE', + {'x': self.eid}): + self.error('concurrent synchronization detected, skip pull') + session.commit(free_cnxset=False) + return False + session.commit(free_cnxset=False) + return True + + def release_synchronization_lock(self, session): + session.execute('SET X synchronizing FALSE WHERE X eid %(x)s', + {'x': self.eid}) + session.commit() + def pull_data(self, session, force=False, raise_on_error=False): + """Launch synchronization of the source if needed. + + This method is responsible to handle commit/rollback on the given + session. 
+ """ if not force and self.fresh(): return {} + if not self.acquire_synchronization_lock(session): + return {} + try: + with session.transaction(free_cnxset=False): + return self._pull_data(session, force, raise_on_error) + finally: + self.release_synchronization_lock(session) + + def _pull_data(self, session, force=False, raise_on_error=False): if self.config['delete-entities']: myuris = self.source_cwuris(session) else: myuris = None parser = self._get_parser(session, sourceuris=myuris) - error = False - self.info('pulling data for source %s', self.uri) - for url in self.urls: - try: - if parser.process(url): - error = True - except IOError, exc: - if raise_on_error: - raise - self.error('could not pull data while processing %s: %s', - url, exc) - error = True - if error: + if self.process_urls(parser, self.urls, raise_on_error): self.warning("some error occured, don't attempt to delete entities") elif self.config['delete-entities'] and myuris: byetype = {} @@ -151,11 +183,30 @@ for etype, eids in byetype.iteritems(): session.execute('DELETE %s X WHERE X eid IN (%s)' % (etype, ','.join(eids))) - self.latest_retrieval = datetime.utcnow() - session.execute('SET X latest_retrieval %(date)s WHERE X eid %(x)s', - {'x': self.eid, 'date': self.latest_retrieval}) + self.update_latest_retrieval(session) return parser.stats + def process_urls(self, parser, urls, raise_on_error=False): + error = False + for url in urls: + self.info('pulling data from %s', url) + try: + if parser.process(url, raise_on_error): + error = True + except IOError, exc: + if raise_on_error: + raise + self.error('could not pull data while processing %s: %s', + url, exc) + error = True + except Exception, exc: + if raise_on_error: + raise + self.exception('error while processing %s: %s', + url, exc) + error = True + return error + def before_entity_insertion(self, session, lid, etype, eid, sourceparams): """called by the repository when an eid has been attributed for an entity stored here but the entity has not been inserted in the system @@ -195,8 +246,8 @@ class DataFeedParser(AppObject): __registry__ = 'parsers' - def __init__(self, session, source, sourceuris=None): - self._cw = session + def __init__(self, session, source, sourceuris=None, **kwargs): + super(DataFeedParser, self).__init__(session, **kwargs) self.source = source self.sourceuris = sourceuris self.stats = {'created': set(), @@ -213,14 +264,37 @@ raise ValidationError(schemacfg.eid, {None: msg}) def extid2entity(self, uri, etype, **sourceparams): + """return an entity for the given uri. 
May return None if it should be + skipped + """ + # if cwsource is specified and repository has a source with the same + # name, call extid2eid on that source so entity will be properly seen as + # coming from this source + source = self._cw.repo.sources_by_uri.get( + sourceparams.pop('cwsource', None), self.source) sourceparams['parser'] = self - eid = self.source.extid2eid(str(uri), etype, self._cw, - sourceparams=sourceparams) + try: + eid = source.extid2eid(str(uri), etype, self._cw, + sourceparams=sourceparams) + except ValidationError, ex: + self.source.error('error while creating %s: %s', etype, ex) + return None + if eid < 0: + # entity has been moved away from its original source + # + # Don't give etype to entity_from_eid so we get UnknownEid if the + # entity has been removed + try: + entity = self._cw.entity_from_eid(-eid) + except UnknownEid: + return None + self.notify_updated(entity) # avoid later update from the source's data + return entity if self.sourceuris is not None: self.sourceuris.pop(str(uri), None) return self._cw.entity_from_eid(eid, etype) - def process(self, url): + def process(self, url, partialcommit=True): """main callback: process the url""" raise NotImplementedError @@ -238,3 +312,66 @@ def notify_updated(self, entity): return self.stats['updated'].add(entity.eid) + + +class DataFeedXMLParser(DataFeedParser): + + def process(self, url, raise_on_error=False, partialcommit=True): + """IDataFeedParser main entry point""" + try: + parsed = self.parse(url) + except Exception, ex: + if raise_on_error: + raise + self.source.error(str(ex)) + return True + error = False + for args in parsed: + try: + self.process_item(*args) + if partialcommit: + # commit+set_cnxset instead of commit(free_cnxset=False) to let + # other a chance to get our connections set + self._cw.commit() + self._cw.set_cnxset() + except ValidationError, exc: + if raise_on_error: + raise + if partialcommit: + self.source.error('Skipping %s because of validation error %s' % (args, exc)) + self._cw.rollback() + self._cw.set_cnxset() + error = True + else: + raise + return error + + def parse(self, url): + if url.startswith('http'): + from cubicweb.sobjects.parsers import HOST_MAPPING + for mappedurl in HOST_MAPPING: + if url.startswith(mappedurl): + url = url.replace(mappedurl, HOST_MAPPING[mappedurl], 1) + break + self.source.info('GET %s', url) + stream = _OPENER.open(url) + elif url.startswith('file://'): + stream = open(url[7:]) + else: + stream = StringIO.StringIO(url) + return self.parse_etree(etree.parse(stream).getroot()) + + def parse_etree(self, document): + return [(document,)] + + def process_item(self, *args): + raise NotImplementedError + +# use a cookie enabled opener to use session cookie if any +_OPENER = urllib2.build_opener() +try: + from logilab.common import urllib2ext + _OPENER.add_handler(urllib2ext.HTTPGssapiAuthHandler()) +except ImportError: # python-kerberos not available + pass +_OPENER.add_handler(urllib2.HTTPCookieProcessor(CookieJar())) diff -r 6397a9051f65 -r 134613d3b353 server/sources/extlite.py --- a/server/sources/extlite.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/sources/extlite.py Wed Jul 20 18:22:41 2011 +0200 @@ -102,19 +102,19 @@ def backup(self, backupfile, confirm): """method called to create a backup of the source's data""" - self.close_pool_connections() + self.close_source_connections() try: self.sqladapter.backup_to_file(backupfile, confirm) finally: - self.open_pool_connections() + self.open_source_connections() def restore(self, backupfile, 
confirm, drop): """method called to restore a backup of source's data""" - self.close_pool_connections() + self.close_source_connections() try: self.sqladapter.restore_from_file(backupfile, confirm, drop) finally: - self.open_pool_connections() + self.open_source_connections() @property def _sqlcnx(self): @@ -174,15 +174,15 @@ def check_connection(self, cnx): """check connection validity, return None if the connection is still valid - else a new connection (called when the pool using the given connection is + else a new connection (called when the connections set holding the given connection is being attached to a session) always return the connection to reset eventually cached cursor """ return cnx - def pool_reset(self, cnx): - """the pool using the given connection is being reseted from its current + def cnxset_freed(self, cnx): + """the connections set holding the given connection is being freed from its current attached session: release the connection lock if the connection wrapper has a connection set """ @@ -286,7 +286,7 @@ """ if server.DEBUG: print 'exec', query, args - cursor = session.pool[self.uri] + cursor = session.cnxset[self.uri] try: # str(query) to avoid error if it's an unicode string cursor.execute(str(query), args) @@ -294,7 +294,7 @@ self.critical("sql: %r\n args: %s\ndbms message: %r", query, args, ex.args[0]) try: - session.pool.connection(self.uri).rollback() + session.cnxset.connection(self.uri).rollback() self.critical('transaction has been rollbacked') except: pass diff -r 6397a9051f65 -r 134613d3b353 server/sources/ldapuser.py --- a/server/sources/ldapuser.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/sources/ldapuser.py Wed Jul 20 18:22:41 2011 +0200 @@ -310,7 +310,11 @@ except Exception: self.error('while trying to authenticate %s', user, exc_info=True) raise AuthenticationError() - return self.extid2eid(user['dn'], 'CWUser', session) + eid = self.extid2eid(user['dn'], 'CWUser', session) + if eid < 0: + # user has been moved away from this source + raise AuthenticationError() + return eid def ldap_name(self, var): if var.stinfo['relations']: @@ -392,7 +396,7 @@ break assert mainvars, rqlst columns, globtransforms = self.prepare_columns(mainvars, rqlst) - eidfilters = [] + eidfilters = [lambda x: x > 0] allresults = [] generator = RQL2LDAPFilter(self, session, args, mainvars) for mainvar in mainvars: @@ -524,9 +528,9 @@ """make an ldap query""" self.debug('ldap search %s %s %s %s %s', self.uri, base, scope, searchstr, list(attrs)) - # XXX for now, we do not have connection pool support for LDAP, so + # XXX for now, we do not have connections set support for LDAP, so # this is always self._conn - cnx = session.pool.connection(self.uri).cnx + cnx = session.cnxset.connection(self.uri).cnx try: res = cnx.search_s(base, scope, searchstr, attrs) except ldap.PARTIAL_RESULTS: diff -r 6397a9051f65 -r 134613d3b353 server/sources/native.py --- a/server/sources/native.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/sources/native.py Wed Jul 20 18:22:41 2011 +0200 @@ -313,9 +313,9 @@ self.dbhelper.dbname = abspath(self.dbhelper.dbname) self.get_connection = lambda: ConnectionWrapper(self) self.check_connection = lambda cnx: cnx - def pool_reset(cnx): + def cnxset_freed(cnx): cnx.close() - self.pool_reset = pool_reset + self.cnxset_freed = cnxset_freed if self.dbdriver == 'sqlite': self._create_eid = None self.create_eid = self._create_eid_sqlite @@ -355,21 +355,21 @@ """execute the query and return its result""" return self.process_result(self.doexec(session, sql, args)) - 
def init_creating(self, pool=None): + def init_creating(self, cnxset=None): # check full text index availibility if self.do_fti: - if pool is None: - _pool = self.repo._get_pool() - _pool.pool_set() + if cnxset is None: + _cnxset = self.repo._get_cnxset() + _cnxset.cnxset_set() else: - _pool = pool - if not self.dbhelper.has_fti_table(_pool['system']): + _cnxset = cnxset + if not self.dbhelper.has_fti_table(_cnxset['system']): if not self.repo.config.creating: self.critical('no text index table') self.do_fti = False - if pool is None: - _pool.pool_reset() - self.repo._free_pool(_pool) + if cnxset is None: + _cnxset.cnxset_freed() + self.repo._free_cnxset(_cnxset) def backup(self, backupfile, confirm, format='native'): """method called to create a backup of the source's data""" @@ -377,25 +377,25 @@ self.repo.fill_schema() self.set_schema(self.repo.schema) helper = DatabaseIndependentBackupRestore(self) - self.close_pool_connections() + self.close_source_connections() try: helper.backup(backupfile) finally: - self.open_pool_connections() + self.open_source_connections() elif format == 'native': - self.close_pool_connections() + self.close_source_connections() try: self.backup_to_file(backupfile, confirm) finally: - self.open_pool_connections() + self.open_source_connections() else: raise ValueError('Unknown format %r' % format) def restore(self, backupfile, confirm, drop, format='native'): """method called to restore a backup of source's data""" - if self.repo.config.open_connections_pools: - self.close_pool_connections() + if self.repo.config.init_cnxset_pool: + self.close_source_connections() try: if format == 'portable': helper = DatabaseIndependentBackupRestore(self) @@ -405,12 +405,16 @@ else: raise ValueError('Unknown format %r' % format) finally: - if self.repo.config.open_connections_pools: - self.open_pool_connections() + if self.repo.config.init_cnxset_pool: + self.open_source_connections() def init(self, activated, source_entity): - self.init_creating(source_entity._cw.pool) + self.init_creating(source_entity._cw.cnxset) + try: + source_entity._cw.system_sql('SELECT COUNT(asource) FROM entities') + except Exception, ex: + self.eid_type_source = self.eid_type_source_pre_131 def shutdown(self): if self._eid_creation_cnx: @@ -532,13 +536,13 @@ raise # FIXME: better detection of deconnection pb self.warning("trying to reconnect") - session.pool.reconnect(self) + session.cnxset.reconnect(self) cursor = self.doexec(session, sql, args) except (self.DbapiError,), exc: # We get this one with pyodbc and SQL Server when connection was reset if exc.args[0] == '08S01' and session.mode != 'write': self.warning("trying to reconnect") - session.pool.reconnect(self) + session.cnxset.reconnect(self) cursor = self.doexec(session, sql, args) else: raise @@ -585,7 +589,7 @@ for table in temptables: try: self.doexec(session,'DROP TABLE %s' % table) - except: + except Exception: pass try: del self._temp_table_data[table] @@ -727,9 +731,9 @@ """Execute a query. 
it's a function just so that it shows up in profiling """ - cursor = session.pool[self.uri] + cursor = session.cnxset[self.uri] if server.DEBUG & server.DBG_SQL: - cnx = session.pool.connection(self.uri) + cnx = session.cnxset.connection(self.uri) # getattr to get the actual connection if cnx is a ConnectionWrapper # instance print 'exec', query, args, getattr(cnx, '_cnx', cnx) @@ -744,7 +748,7 @@ query, args, ex.args[0]) if rollback: try: - session.pool.connection(self.uri).rollback() + session.cnxset.connection(self.uri).rollback() if self.repo.config.mode != 'test': self.critical('transaction has been rollbacked') except Exception, ex: @@ -773,7 +777,7 @@ """ if server.DEBUG & server.DBG_SQL: print 'execmany', query, 'with', len(args), 'arguments' - cursor = session.pool[self.uri] + cursor = session.cnxset[self.uri] try: # str(query) to avoid error if it's an unicode string cursor.executemany(str(query), args) @@ -784,10 +788,10 @@ self.critical("sql many: %r\n args: %s\ndbms message: %r", query, args, ex.args[0]) try: - session.pool.connection(self.uri).rollback() + session.cnxset.connection(self.uri).rollback() if self.repo.config.mode != 'test': self.critical('transaction has been rollbacked') - except: + except Exception: pass raise @@ -802,7 +806,7 @@ self.error("backend can't alter %s.%s to %s%s", table, column, coltype, not allownull and 'NOT NULL' or '') return - self.dbhelper.change_col_type(LogCursor(session.pool[self.uri]), + self.dbhelper.change_col_type(LogCursor(session.cnxset[self.uri]), table, column, coltype, allownull) self.info('altered %s.%s: now %s%s', table, column, coltype, not allownull and 'NOT NULL' or '') @@ -817,7 +821,7 @@ return table, column = rdef_table_column(rdef) coltype, allownull = rdef_physical_info(self.dbhelper, rdef) - self.dbhelper.set_null_allowed(LogCursor(session.pool[self.uri]), + self.dbhelper.set_null_allowed(LogCursor(session.cnxset[self.uri]), table, column, coltype, allownull) def update_rdef_indexed(self, session, rdef): @@ -835,29 +839,49 @@ self.drop_index(session, table, column, unique=True) def create_index(self, session, table, column, unique=False): - cursor = LogCursor(session.pool[self.uri]) + cursor = LogCursor(session.cnxset[self.uri]) self.dbhelper.create_index(cursor, table, column, unique) def drop_index(self, session, table, column, unique=False): - cursor = LogCursor(session.pool[self.uri]) + cursor = LogCursor(session.cnxset[self.uri]) self.dbhelper.drop_index(cursor, table, column, unique) # system source interface ################################################# - def eid_type_source(self, session, eid): - """return a tuple (type, source, extid) for the entity with id """ - sql = 'SELECT type, source, extid FROM entities WHERE eid=%s' % eid + def _eid_type_source(self, session, eid, sql, _retry=True): try: res = self.doexec(session, sql).fetchone() - except: - assert session.pool, 'session has no pool set' - raise UnknownEid(eid) - if res is None: - raise UnknownEid(eid) - if res[-1] is not None: + if res is not None: + return res + except (self.OperationalError, self.InterfaceError): + if session.mode == 'read' and _retry: + self.warning("trying to reconnect (eid_type_source())") + session.cnxset.reconnect(self) + return self._eid_type_source(session, eid, sql, _retry=False) + except Exception: + assert session.cnxset, 'session has no connections set' + self.exception('failed to query entities table for eid %s', eid) + raise UnknownEid(eid) + + def eid_type_source(self, session, eid): + """return a tuple (type, 
source, extid) for the entity with id """ + sql = 'SELECT type, source, extid, asource FROM entities WHERE eid=%s' % eid + res = self._eid_type_source(session, eid, sql) + if res[-2] is not None: if not isinstance(res, list): res = list(res) + res[-2] = b64decode(res[-2]) + return res + + def eid_type_source_pre_131(self, session, eid): + """return a tuple (type, source, extid) for the entity with id """ + sql = 'SELECT type, source, extid FROM entities WHERE eid=%s' % eid + res = self._eid_type_source(session, eid, sql) + if not isinstance(res, list): + res = list(res) + if res[-1] is not None: res[-1] = b64decode(res[-1]) + res.append(res[1]) return res def extid2eid(self, session, source_uri, extid): @@ -874,7 +898,7 @@ result = cursor.fetchone() if result: return result[0] - except: + except Exception: pass return None @@ -929,7 +953,7 @@ return self._create_eid() else: raise - except: # WTF? + except Exception: # WTF? cnx.rollback() self._eid_creation_cnx = None self.exception('create eid failed in an unforeseen way on SQL statement %s', sql) @@ -946,7 +970,7 @@ extid = b64encode(extid) uri = 'system' if source.copy_based_source else source.uri attrs = {'type': entity.__regid__, 'eid': entity.eid, 'extid': extid, - 'source': uri, 'mtime': datetime.now()} + 'source': uri, 'asource': source.uri, 'mtime': datetime.now()} self.doexec(session, self.sqlgen.insert('entities', attrs), attrs) # insert core relations: is, is_instance_of and cw_source try: @@ -1127,7 +1151,7 @@ important note: while undoing of a transaction, only hooks in the 'integrity', 'activeintegrity' and 'undo' categories are called. """ - # set mode so pool isn't released subsquently until commit/rollback + # set mode so connections set isn't released subsquently until commit/rollback session.mode = 'write' errors = [] session.transaction_data['undoing_uuid'] = txuuid @@ -1372,7 +1396,7 @@ def fti_unindex_entities(self, session, entities): """remove text content for entities from the full text index """ - cursor = session.pool['system'] + cursor = session.cnxset['system'] cursor_unindex_object = self.dbhelper.cursor_unindex_object try: for entity in entities: @@ -1385,7 +1409,7 @@ """add text content of created/modified entities to the full text index """ cursor_index_object = self.dbhelper.cursor_index_object - cursor = session.pool['system'] + cursor = session.cnxset['system'] try: # use cursor_index_object, not cursor_reindex_object since # unindexing done in the FTIndexEntityOp @@ -1434,6 +1458,7 @@ eid INTEGER PRIMARY KEY NOT NULL, type VARCHAR(64) NOT NULL, source VARCHAR(64) NOT NULL, + asource VARCHAR(64) NOT NULL, mtime %s NOT NULL, extid VARCHAR(256) );; diff -r 6397a9051f65 -r 134613d3b353 server/sources/pyrorql.py --- a/server/sources/pyrorql.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/sources/pyrorql.py Wed Jul 20 18:22:41 2011 +0200 @@ -235,10 +235,12 @@ if dexturi == 'system' or not ( dexturi in self.repo.sources_by_uri or self._skip_externals): assert etype in self.support_entities, etype - return self.repo.extid2eid(self, str(extid), etype, session), True - if dexturi in self.repo.sources_by_uri: + eid = self.repo.extid2eid(self, str(extid), etype, session) + if eid > 0: + return eid, True + elif dexturi in self.repo.sources_by_uri: source = self.repo.sources_by_uri[dexturi] - cnx = session.pool.connection(source.uri) + cnx = session.cnxset.connection(source.uri) eid = source.local_eid(cnx, dextid, session)[0] return eid, False return None, None @@ -322,7 +324,7 @@ else a new connection """ # we have 
to transfer manually thread ownership. This can be done safely - # since the pool to which belong the connection is affected to one + # since the connections set holding the connection is affected to one # session/thread and can't be called simultaneously try: cnx._repo._transferThread(threading.currentThread()) @@ -359,7 +361,7 @@ if not args is None: args = args.copy() # get cached cursor anyway - cu = session.pool[self.uri] + cu = session.cnxset[self.uri] if cu is None: # this is a ConnectionWrapper instance msg = session._("can't connect to source %s, some data may be missing") @@ -390,7 +392,7 @@ or uidtype(union, i, etype, args)): needtranslation.append(i) if needtranslation: - cnx = session.pool.connection(self.uri) + cnx = session.cnxset.connection(self.uri) for rowindex in xrange(rset.rowcount - 1, -1, -1): row = rows[rowindex] localrow = False @@ -434,37 +436,45 @@ def update_entity(self, session, entity): """update an entity in the source""" relations, kwargs = self._entity_relations_and_kwargs(session, entity) - cu = session.pool[self.uri] + cu = session.cnxset[self.uri] cu.execute('SET %s WHERE X eid %%(x)s' % ','.join(relations), kwargs) self._query_cache.clear() - entity.clear_all_caches() + entity.cw_clear_all_caches() def delete_entity(self, session, entity): """delete an entity from the source""" - cu = session.pool[self.uri] + if session.deleted_in_transaction (self.eid): + # source is being deleted, don't propagate + self._query_cache.clear() + return + cu = session.cnxset[self.uri] cu.execute('DELETE %s X WHERE X eid %%(x)s' % entity.__regid__, {'x': self.eid2extid(entity.eid, session)}) self._query_cache.clear() def add_relation(self, session, subject, rtype, object): """add a relation to the source""" - cu = session.pool[self.uri] + cu = session.cnxset[self.uri] cu.execute('SET X %s Y WHERE X eid %%(x)s, Y eid %%(y)s' % rtype, {'x': self.eid2extid(subject, session), 'y': self.eid2extid(object, session)}) self._query_cache.clear() - session.entity_from_eid(subject).clear_all_caches() - session.entity_from_eid(object).clear_all_caches() + session.entity_from_eid(subject).cw_clear_all_caches() + session.entity_from_eid(object).cw_clear_all_caches() def delete_relation(self, session, subject, rtype, object): """delete a relation from the source""" - cu = session.pool[self.uri] + if session.deleted_in_transaction (self.eid): + # source is being deleted, don't propagate + self._query_cache.clear() + return + cu = session.cnxset[self.uri] cu.execute('DELETE X %s Y WHERE X eid %%(x)s, Y eid %%(y)s' % rtype, {'x': self.eid2extid(subject, session), 'y': self.eid2extid(object, session)}) self._query_cache.clear() - session.entity_from_eid(subject).clear_all_caches() - session.entity_from_eid(object).clear_all_caches() + session.entity_from_eid(subject).cw_clear_all_caches() + session.entity_from_eid(object).cw_clear_all_caches() class RQL2RQL(object): diff -r 6397a9051f65 -r 134613d3b353 server/sources/rql2sql.py --- a/server/sources/rql2sql.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/sources/rql2sql.py Wed Jul 20 18:22:41 2011 +0200 @@ -56,6 +56,7 @@ from logilab.database import FunctionDescr, SQL_FUNCTIONS_REGISTRY from rql import BadRQLQuery, CoercionError +from rql.utils import common_parent from rql.stmts import Union, Select from rql.nodes import (SortTerm, VariableRef, Constant, Function, Variable, Or, Not, Comparison, ColumnAlias, Relation, SubQuery, Exists) @@ -669,7 +670,7 @@ else: tocheck.append(compnode) # tocheck hold a set of comparison not implying an aggregat 
function - # put them in fakehaving if the don't share an Or node as ancestor + # put them in fakehaving if they don't share an Or node as ancestor # with another comparison containing an aggregate function for compnode in tocheck: parents = set() @@ -784,7 +785,20 @@ sorts = select.orderby groups = select.groupby having = select.having - morerestr = extract_fake_having_terms(having) + for restr in extract_fake_having_terms(having): + scope = None + for vref in restr.get_nodes(VariableRef): + vscope = vref.variable.scope + if vscope is select: + continue # ignore select scope, so restriction is added to + # the innermost scope possible + if scope is None: + scope = vscope + elif vscope is not scope: + scope = common_parent(scope, vscope).scope + if scope is None: + scope = select + scope.add_restriction(restr) # remember selection, it may be changed and will have to be restored origselection = select.selection[:] # check if the query will have union subquery, if it needs sort term @@ -829,7 +843,7 @@ self._in_wrapping_query = False self._state = state try: - sql = self._solutions_sql(select, morerestr, sols, distinct, + sql = self._solutions_sql(select, sols, distinct, needalias or needwrap) # generate groups / having before wrapping query selection to get # correct column aliases @@ -900,15 +914,13 @@ except KeyError: continue - def _solutions_sql(self, select, morerestr, solutions, distinct, needalias): + def _solutions_sql(self, select, solutions, distinct, needalias): sqls = [] for solution in solutions: self._state.reset(solution) # visit restriction subtree if select.where is not None: self._state.add_restriction(select.where.accept(self)) - for restriction in morerestr: - self._state.add_restriction(restriction.accept(self)) sql = [self._selection_sql(select.selection, distinct, needalias)] if self._state.restrictions: sql.append('WHERE %s' % ' AND '.join(self._state.restrictions)) @@ -1372,6 +1384,8 @@ operator = ' LIKE ' else: operator = ' %s ' % operator + elif operator == 'REGEXP': + return ' %s' % self.dbhelper.sql_regexp_match_expression(rhs.accept(self)) elif (operator == '=' and isinstance(rhs, Constant) and rhs.eval(self._args) is None): if lhs is None: @@ -1422,6 +1436,8 @@ if constant.type is None: return 'NULL' value = constant.value + if constant.type == 'etype': + return value if constant.type == 'Int' and isinstance(constant.parent, SortTerm): return value if constant.type in ('Date', 'Datetime'): @@ -1584,8 +1600,14 @@ scope = self._state.scopes[var.scope] self._state.add_table(sql.split('.', 1)[0], scope=scope) except KeyError: - sql = '%s.%s%s' % (self._var_table(var), SQL_PREFIX, rtype) - #self._state.done.add(var.name) + # rtype may be an attribute relation when called from + # _visit_var_attr_relation. take care about 'eid' rtype, since in + # some cases we may use the `entities` table, so in that case we have + # to properly use the variable's sql + if rtype == 'eid': + sql = var.accept(self) + else: + sql = '%s.%s%s' % (self._var_table(var), SQL_PREFIX, rtype) return sql def _linked_var_sql(self, variable): diff -r 6397a9051f65 -r 134613d3b353 server/sources/storages.py --- a/server/sources/storages.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/sources/storages.py Wed Jul 20 18:22:41 2011 +0200 @@ -204,7 +204,7 @@ """return the current fs_path of the attribute.
diff -r 6397a9051f65 -r 134613d3b353 server/sources/storages.py
--- a/server/sources/storages.py	Wed Jul 20 14:09:42 2011 +0200
+++ b/server/sources/storages.py	Wed Jul 20 18:22:41 2011 +0200
@@ -204,7 +204,7 @@
         """return the current fs_path of the attribute.
         Return None if the attr is not stored yet."""
-        sysource = entity._cw.pool.source('system')
+        sysource = entity._cw.cnxset.source('system')
         cu = sysource.doexec(entity._cw,
                              'SELECT cw_%s FROM cw_%s WHERE cw_eid=%s' % (
                                  attr, entity.__regid__, entity.eid))
diff -r 6397a9051f65 -r 134613d3b353 server/test/unittest_datafeed.py
--- a/server/test/unittest_datafeed.py	Wed Jul 20 14:09:42 2011 +0200
+++ b/server/test/unittest_datafeed.py	Wed Jul 20 18:22:41 2011 +0200
@@ -39,7 +39,7 @@
 class AParser(datafeed.DataFeedParser):
     __regid__ = 'testparser'

-    def process(self, url):
+    def process(self, url, raise_on_error=False):
         entity = self.extid2entity('http://www.cubicweb.org/', 'Card',
                                    item={'title': u'cubicweb.org',
                                          'content': u'the cw web site'})
@@ -64,12 +64,13 @@
         self.assertEqual(entity.cw_source[0].name, 'myfeed')
         self.assertEqual(entity.cw_metainformation(),
                          {'type': 'Card',
-                          'source': {'uri': 'system', 'type': 'native'},
+                          'source': {'uri': 'myfeed', 'type': 'datafeed', 'use-cwuri-as-url': True},
                           'extid': 'http://www.cubicweb.org/'}
                          )
+        self.assertEqual(entity.absolute_url(), 'http://www.cubicweb.org/')
         # test repo cache keys
         self.assertEqual(self.repo._type_source_cache[entity.eid],
-                         ('Card', 'system', 'http://www.cubicweb.org/'))
+                         ('Card', 'system', 'http://www.cubicweb.org/', 'myfeed'))
         self.assertEqual(self.repo._extid_cache[('http://www.cubicweb.org/', 'system')],
                          entity.eid)
         # test repull
@@ -83,7 +84,7 @@
         self.assertEqual(stats['created'], set())
         self.assertEqual(stats['updated'], set((entity.eid,)))
         self.assertEqual(self.repo._type_source_cache[entity.eid],
-                         ('Card', 'system', 'http://www.cubicweb.org/'))
+                         ('Card', 'system', 'http://www.cubicweb.org/', 'myfeed'))
         self.assertEqual(self.repo._extid_cache[('http://www.cubicweb.org/', 'system')],
                          entity.eid)
@@ -93,6 +94,14 @@
         self.assertTrue(dfsource.latest_retrieval)
         self.assertTrue(dfsource.fresh())

+        # test_delete_source
+        req = self.request()
+        with self.debugged('DBG_RQL'):
+            req.execute('DELETE CWSource S WHERE S name "myfeed"')
+        self.commit()
+        self.failIf(self.execute('Card X WHERE X title "cubicweb.org"'))
+        self.failIf(self.execute('Any X WHERE X has_text "cubicweb.org"'))
+
 if __name__ == '__main__':
     from logilab.common.testlib import unittest_main
     unittest_main()
diff -r 6397a9051f65 -r 134613d3b353 server/test/unittest_hook.py
--- a/server/test/unittest_hook.py	Wed Jul 20 14:09:42 2011 +0200
+++ b/server/test/unittest_hook.py	Wed Jul 20 18:22:41 2011 +0200
@@ -23,7 +23,7 @@
 from logilab.common.testlib import TestCase, unittest_main, mock_object

-from cubicweb.devtools import TestServerConfiguration
+from cubicweb.devtools import TestServerConfiguration, fake
 from cubicweb.devtools.testlib import CubicWebTC
 from cubicweb.server import hook
 from cubicweb.hooks import integrity, syncschema
@@ -124,10 +124,8 @@
     def test_call_hook(self):
         self.o.register(AddAnyHook)
         dis = set()
-        cw = mock_object(vreg=self.vreg,
-                         set_read_security=lambda *a,**k: None,
-                         set_write_security=lambda *a,**k: None,
-                         is_hook_activated=lambda x, cls: cls.category not in dis)
+        cw = fake.FakeSession()
+        cw.is_hook_activated = lambda cls: cls.category not in dis
         self.assertRaises(HookCalled,
                           self.o.call_hooks, 'before_add_entity', cw)
         dis.add('cat1')
@@ -203,10 +201,10 @@
#         self.assertEqual(self.called, [(1, 'concerne', 2), (3, 'concerne', 4)])

-#     def _before_relation_hook(self, pool, subject, r_type, object):
+#     def _before_relation_hook(self, cnxset, subject, r_type, object):
#         self.called.append((subject, r_type, object))

-#     def
_after_relation_hook(self, pool, subject, r_type, object): +# def _after_relation_hook(self, cnxset, subject, r_type, object): # self.called.append((subject, r_type, object)) diff -r 6397a9051f65 -r 134613d3b353 server/test/unittest_ldapuser.py --- a/server/test/unittest_ldapuser.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/test/unittest_ldapuser.py Wed Jul 20 18:22:41 2011 +0200 @@ -137,7 +137,7 @@ def test_authenticate(self): source = self.repo.sources_by_uri['ldapuser'] - self.session.set_pool() + self.session.set_cnxset() self.assertRaises(AuthenticationError, source.authenticate, self.session, 'toto', 'toto') @@ -239,7 +239,7 @@ iworkflowable.fire_transition('deactivate') try: cnx.commit() - adim.clear_all_caches() + adim.cw_clear_all_caches() self.assertEqual(adim.in_state[0].name, 'deactivated') trinfo = iworkflowable.latest_trinfo() self.assertEqual(trinfo.owned_by[0].login, SYT) @@ -265,7 +265,7 @@ self.failUnless(self.sexecute('Any X,Y WHERE X login %(syt)s, Y login "cochon"', {'syt': SYT})) def test_exists1(self): - self.session.set_pool() + self.session.set_cnxset() self.session.create_entity('CWGroup', name=u'bougloup1') self.session.create_entity('CWGroup', name=u'bougloup2') self.sexecute('SET U in_group G WHERE G name ~= "bougloup%", U login "admin"') @@ -378,6 +378,23 @@ rset = cu.execute('Any F WHERE X has_text "iaminguestsgrouponly", X firstname F') self.assertEqual(rset.rows, [[None]]) + def test_copy_to_system_source(self): + eid = self.sexecute('CWUser X WHERE X login %(login)s', {'login': SYT})[0][0] + self.sexecute('SET X cw_source S WHERE X eid %(x)s, S name "system"', {'x': eid}) + self.commit() + rset = self.sexecute('CWUser X WHERE X login %(login)s', {'login': SYT}) + self.assertEqual(len(rset), 1) + e = rset.get_entity(0, 0) + self.assertEqual(e.eid, eid) + self.assertEqual(e.cw_metainformation(), {'source': {'type': u'native', 'uri': u'system', 'use-cwuri-as-url': False}, + 'type': 'CWUser', + 'extid': None}) + self.assertEqual(e.cw_source[0].name, 'system') + source = self.repo.sources_by_uri['ldapuser'] + source.synchronize() + rset = self.sexecute('CWUser X WHERE X login %(login)s', {'login': SYT}) + self.assertEqual(len(rset), 1) + def test_nonregr1(self): self.sexecute('Any X,AA ORDERBY AA DESC WHERE E eid %(x)s, E owned_by X, ' 'X modification_date AA', @@ -465,8 +482,8 @@ self._schema = repo.schema super(RQL2LDAPFilterTC, self).setUp() ldapsource = repo.sources[-1] - self.pool = repo._get_pool() - session = mock_object(pool=self.pool) + self.cnxset = repo._get_cnxset() + session = mock_object(cnxset=self.cnxset) self.o = RQL2LDAPFilter(ldapsource, session) self.ldapclasses = ''.join(ldapsource.base_filters) diff -r 6397a9051f65 -r 134613d3b353 server/test/unittest_migractions.py --- a/server/test/unittest_migractions.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/test/unittest_migractions.py Wed Jul 20 18:22:41 2011 +0200 @@ -338,7 +338,7 @@ @tag('longrun') def test_sync_schema_props_perms(self): cursor = self.mh.session - cursor.set_pool() + cursor.set_cnxset() nbrqlexpr_start = cursor.execute('Any COUNT(X) WHERE X is RQLExpression')[0][0] migrschema['titre'].rdefs[('Personne', 'String')].order = 7 migrschema['adel'].rdefs[('Personne', 'String')].order = 6 diff -r 6397a9051f65 -r 134613d3b353 server/test/unittest_msplanner.py --- a/server/test/unittest_msplanner.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/test/unittest_msplanner.py Wed Jul 20 18:22:41 2011 +0200 @@ -296,7 +296,7 @@ True) def test_not_relation_no_split_external(self): - 
repo._type_source_cache[999999] = ('Note', 'cards', 999999) + repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') # similar to the above test but with an eid coming from the external source. # the same plan may be used, since we won't find any record in the system source # linking 9999999 to a state @@ -313,13 +313,15 @@ True) def test_simplified_var(self): - repo._type_source_cache[999999] = ('Note', 'cards', 999999) + repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') + # need access to source since X table has to be accessed because of the outer join self._test('Any U WHERE U in_group G, (G name IN ("managers", "logilab") OR (X require_permission P?, P name "bla", P require_group G)), X eid %(x)s, U eid %(u)s', {'x': 999999, 'u': self.session.user.eid}, - {self.system: {'P': s[0], 'G': s[0], 'X': s[0], + {self.system: {'P': s[0], 'G': s[0], 'require_permission': s[0], 'in_group': s[0], 'P': s[0], 'require_group': s[0], - 'u': s[0]}}, - False) + 'u': s[0]}, + self.cards: {'X': s[0]}}, + True) def test_delete_relation1(self): ueid = self.session.user.eid @@ -329,7 +331,7 @@ False) def test_crossed_relation_eid_1_needattr(self): - repo._type_source_cache[999999] = ('Note', 'system', 999999) + repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system') ueid = self.session.user.eid self._test('Any Y,T WHERE X eid %(x)s, X multisource_crossed_rel Y, Y type T', {'x': 999999,}, @@ -337,14 +339,14 @@ True) def test_crossed_relation_eid_1_invariant(self): - repo._type_source_cache[999999] = ('Note', 'system', 999999) + repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system') self._test('Any Y WHERE X eid %(x)s, X multisource_crossed_rel Y', {'x': 999999}, {self.system: {'Y': s[0], 'x': s[0]}}, False) def test_crossed_relation_eid_2_invariant(self): - repo._type_source_cache[999999] = ('Note', 'cards', 999999) + repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') self._test('Any Y WHERE X eid %(x)s, X multisource_crossed_rel Y', {'x': 999999,}, {self.cards: {'Y': s[0], 'multisource_crossed_rel': s[0], 'x': s[0]}, @@ -352,7 +354,7 @@ False) def test_version_crossed_depends_on_1(self): - repo._type_source_cache[999999] = ('Note', 'cards', 999999) + repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') self._test('Any X,AD,AE WHERE E eid %(x)s, E multisource_crossed_rel X, X in_state AD, AD name AE', {'x': 999999}, {self.cards: {'X': s[0], 'AD': s[0], 'multisource_crossed_rel': s[0], 'x': s[0]}, @@ -360,7 +362,7 @@ True) def test_version_crossed_depends_on_2(self): - repo._type_source_cache[999999] = ('Note', 'system', 999999) + repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system') self._test('Any X,AD,AE WHERE E eid %(x)s, E multisource_crossed_rel X, X in_state AD, AD name AE', {'x': 999999}, {self.cards: {'X': s[0], 'AD': s[0]}, @@ -368,8 +370,8 @@ True) def test_simplified_var_3(self): - repo._type_source_cache[999999] = ('Note', 'cards', 999999) - repo._type_source_cache[999998] = ('State', 'cards', 999998) + repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') + repo._type_source_cache[999998] = ('State', 'cards', 999998, 'cards') self._test('Any S,T WHERE S eid %(s)s, N eid %(n)s, N type T, N is Note, S is State', {'n': 999999, 's': 999998}, {self.cards: {'s': s[0], 'N': s[0]}}, False) @@ -1266,7 +1268,7 @@ {'x': ueid}) def test_not_relation_no_split_external(self): - repo._type_source_cache[999999] = ('Note', 'cards', 999999) + repo._type_source_cache[999999] = ('Note', 
'cards', 999999, 'cards') # similar to the above test but with an eid coming from the external source. # the same plan may be used, since we won't find any record in the system source # linking 9999999 to a state @@ -1297,7 +1299,7 @@ )]) def test_external_attributes_and_relation(self): - repo._type_source_cache[999999] = ('Note', 'cards', 999999) + repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') self._test('Any A,B,C,D WHERE A eid %(x)s,A creation_date B,A modification_date C, A todo_by D?', [('FetchStep', [('Any A,B,C WHERE A eid 999999, A creation_date B, A modification_date C, A is Note', [{'A': 'Note', 'C': 'Datetime', 'B': 'Datetime'}])], @@ -1312,12 +1314,31 @@ {'x': 999999}) - def test_simplified_var(self): + def test_simplified_var_1(self): ueid = self.session.user.eid - repo._type_source_cache[999999] = ('Note', 'cards', 999999) - self._test('Any U WHERE U in_group G, (G name IN ("managers", "logilab") OR (X require_permission P?, P name "bla", P require_group G)), X eid %(x)s, U eid %(u)s', - [('OneFetchStep', [('Any %s WHERE %s in_group G, (G name IN("managers", "logilab")) OR (X require_permission P?, P name "bla", P require_group G), X eid 999999' % (ueid, ueid), - [{'X': 'Note', 'G': 'CWGroup', 'P': 'CWPermission'}])], + repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') + # need access to cards source since X table has to be accessed because of the outer join + self._test('Any U WHERE U in_group G, (G name IN ("managers", "logilab") OR ' + '(X require_permission P?, P name "bla", P require_group G)), X eid %(x)s, U eid %(u)s', + [('FetchStep', + [('Any 999999', [{}])], [self.cards], + None, {u'%(x)s': 'table0.C0'}, []), + ('OneFetchStep', + [(u'Any 6 WHERE 6 in_group G, (G name IN("managers", "logilab")) OR ' + '(X require_permission P?, P name "bla", P require_group G), ' + 'G is CWGroup, P is CWPermission, X is Note', + [{'G': 'CWGroup', 'P': 'CWPermission', 'X': 'Note'}])], + None, None, [self.system], {u'%(x)s': 'table0.C0'}, [])], + {'x': 999999, 'u': ueid}) + + def test_simplified_var_2(self): + ueid = self.session.user.eid + repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') + # no need access to source since X is invariant + self._test('Any U WHERE U in_group G, (G name IN ("managers", "logilab") OR ' + '(X require_permission P, P name "bla", P require_group G)), X eid %(x)s, U eid %(u)s', + [('OneFetchStep', [('Any %s WHERE %s in_group G, (G name IN("managers", "logilab")) OR (999999 require_permission P, P name "bla", P require_group G)' % (ueid, ueid), + [{'G': 'CWGroup', 'P': 'CWPermission'}])], None, None, [self.system], {}, [])], {'x': 999999, 'u': ueid}) @@ -1529,7 +1550,7 @@ {'E': ueid}) def test_eid_dont_cross_relation_1(self): - repo._type_source_cache[999999] = ('Personne', 'system', 999999) + repo._type_source_cache[999999] = ('Personne', 'system', 999999, 'system') self._test('Any Y,YT WHERE X eid %(x)s, X fiche Y, Y title YT', [('OneFetchStep', [('Any Y,YT WHERE X eid 999999, X fiche Y, Y title YT', [{'X': 'Personne', 'Y': 'Card', 'YT': 'String'}])], @@ -1537,7 +1558,7 @@ {'x': 999999}) def test_eid_dont_cross_relation_2(self): - repo._type_source_cache[999999] = ('Note', 'cards', 999999) + repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') self.cards.dont_cross_relations.add('concerne') try: self._test('Any Y,S,YT,X WHERE Y concerne X, Y in_state S, X eid 999999, Y ref YT', @@ -1552,7 +1573,7 @@ # external source w/ .cross_relations == ['multisource_crossed_rel'] ###### def 
test_crossed_relation_eid_1_invariant(self): - repo._type_source_cache[999999] = ('Note', 'system', 999999) + repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system') self._test('Any Y WHERE X eid %(x)s, X multisource_crossed_rel Y', [('OneFetchStep', [('Any Y WHERE 999999 multisource_crossed_rel Y', [{u'Y': 'Note'}])], None, None, [self.system], {}, []) @@ -1560,7 +1581,7 @@ {'x': 999999,}) def test_crossed_relation_eid_1_needattr(self): - repo._type_source_cache[999999] = ('Note', 'system', 999999) + repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system') self._test('Any Y,T WHERE X eid %(x)s, X multisource_crossed_rel Y, Y type T', [('FetchStep', [('Any Y,T WHERE Y type T, Y is Note', [{'T': 'String', 'Y': 'Note'}])], [self.cards, self.system], None, @@ -1573,7 +1594,7 @@ {'x': 999999,}) def test_crossed_relation_eid_2_invariant(self): - repo._type_source_cache[999999] = ('Note', 'cards', 999999) + repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') self._test('Any Y WHERE X eid %(x)s, X multisource_crossed_rel Y', [('OneFetchStep', [('Any Y WHERE 999999 multisource_crossed_rel Y, Y is Note', [{'Y': 'Note'}])], None, None, [self.cards, self.system], {}, []) @@ -1581,7 +1602,7 @@ {'x': 999999,}) def test_crossed_relation_eid_2_needattr(self): - repo._type_source_cache[999999] = ('Note', 'cards', 999999) + repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') self._test('Any Y,T WHERE X eid %(x)s, X multisource_crossed_rel Y, Y type T', [('OneFetchStep', [('Any Y,T WHERE 999999 multisource_crossed_rel Y, Y type T, Y is Note', [{'T': 'String', 'Y': 'Note'}])], @@ -1591,7 +1612,7 @@ {'x': 999999,}) def test_crossed_relation_eid_not_1(self): - repo._type_source_cache[999999] = ('Note', 'system', 999999) + repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system') self._test('Any Y WHERE X eid %(x)s, NOT X multisource_crossed_rel Y', [('FetchStep', [('Any Y WHERE Y is Note', [{'Y': 'Note'}])], [self.cards, self.system], None, {'Y': 'table0.C0'}, []), @@ -1608,7 +1629,7 @@ # {'x': 999999,}) def test_crossed_relation_base_XXXFIXME(self): - repo._type_source_cache[999999] = ('Note', 'system', 999999) + repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system') self._test('Any X,Y,T WHERE X multisource_crossed_rel Y, Y type T, X type T', [('FetchStep', [('Any X,T WHERE X type T, X is Note', [{'T': 'String', 'X': 'Note'}])], [self.cards, self.system], None, @@ -1697,8 +1718,8 @@ # edition queries tests ################################################### def test_insert_simplified_var_1(self): - repo._type_source_cache[999999] = ('Note', 'cards', 999999) - repo._type_source_cache[999998] = ('State', 'system', None) + repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') + repo._type_source_cache[999998] = ('State', 'system', None, 'system') self._test('INSERT Note X: X in_state S, X type T WHERE S eid %(s)s, N eid %(n)s, N type T', [('InsertStep', [('InsertRelationsStep', @@ -1710,8 +1731,8 @@ {'n': 999999, 's': 999998}) def test_insert_simplified_var_2(self): - repo._type_source_cache[999999] = ('Note', 'cards', 999999) - repo._type_source_cache[999998] = ('State', 'system', None) + repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') + repo._type_source_cache[999998] = ('State', 'system', None, 'system') self._test('INSERT Note X: X in_state S, X type T, X migrated_from N WHERE S eid %(s)s, N eid %(n)s, N type T', [('InsertStep', [('InsertRelationsStep', @@ -1724,8 +1745,8 @@ {'n': 
999999, 's': 999998}) def test_insert_simplified_var_3(self): - repo._type_source_cache[999999] = ('Note', 'cards', 999999) - repo._type_source_cache[999998] = ('State', 'cards', 999998) + repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') + repo._type_source_cache[999998] = ('State', 'cards', 999998, 'cards') self._test('INSERT Note X: X in_state S, X type T WHERE S eid %(s)s, N eid %(n)s, N type T', [('InsertStep', [('InsertRelationsStep', @@ -1737,8 +1758,8 @@ {'n': 999999, 's': 999998}) def test_insert_simplified_var_4(self): - repo._type_source_cache[999999] = ('Note', 'cards', 999999) - repo._type_source_cache[999998] = ('State', 'system', None) + repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') + repo._type_source_cache[999998] = ('State', 'system', None, 'system') self._test('INSERT Note X: X in_state S, X type "bla", X migrated_from N WHERE S eid %(s)s, N eid %(n)s', [('InsertStep', [('InsertRelationsStep', [])] @@ -1746,8 +1767,8 @@ {'n': 999999, 's': 999998}) def test_insert_simplified_var_5(self): - repo._type_source_cache[999999] = ('Note', 'cards', 999999) - repo._type_source_cache[999998] = ('State', 'system', None) + repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') + repo._type_source_cache[999998] = ('State', 'system', None, 'system') self._test('INSERT Note X: X in_state S, X type "bla", X migrated_from N WHERE S eid %(s)s, N eid %(n)s, A concerne N', [('InsertStep', [('InsertRelationsStep', @@ -1784,7 +1805,7 @@ {'x': ueid, 'y': ueid}) def test_delete_relation3(self): - repo._type_source_cache[999999] = ('Note', 'cards', 999999) + repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') self._test('DELETE Y multisource_inlined_rel X WHERE X eid %(x)s, NOT (Y cw_source S, S name %(source)s)', [('DeleteRelationsStep', [('OneFetchStep', @@ -1796,7 +1817,7 @@ {'x': 999999, 'source': 'cards'}) def test_delete_entity1(self): - repo._type_source_cache[999999] = ('Note', 'system', 999999) + repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system') self._test('DELETE Note X WHERE X eid %(x)s, NOT Y multisource_rel X', [('DeleteEntitiesStep', [('OneFetchStep', [('Any 999999 WHERE NOT EXISTS(Y multisource_rel 999999), Y is IN(Card, Note)', @@ -1807,7 +1828,7 @@ {'x': 999999}) def test_delete_entity2(self): - repo._type_source_cache[999999] = ('Note', 'system', 999999) + repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system') self._test('DELETE Note X WHERE X eid %(x)s, NOT X multisource_inlined_rel Y', [('DeleteEntitiesStep', [('OneFetchStep', [('Any X WHERE X eid 999999, NOT X multisource_inlined_rel Y, X is Note, Y is IN(Affaire, Note)', @@ -1872,7 +1893,7 @@ # ]) def test_ldap_user_related_to_invariant_and_dont_cross_rel(self): - self.repo._type_source_cache[999999] = ('Note', 'cards', 999999) + self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') self.cards.dont_cross_relations.add('created_by') try: self._test('Any X,XL WHERE E eid %(x)s, E created_by X, X login XL', @@ -1893,7 +1914,7 @@ self.cards.dont_cross_relations.remove('created_by') def test_ambigous_cross_relation(self): - self.repo._type_source_cache[999999] = ('Note', 'cards', 999999) + self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') self.cards.support_relations['see_also'] = True self.cards.cross_relations.add('see_also') try: @@ -2044,7 +2065,7 @@ ]) def test_source_conflict_1(self): - self.repo._type_source_cache[999999] = ('Note', 'cards', 999999) + 
self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') with self.assertRaises(BadRQLQuery) as cm: self._test('Any X WHERE X cw_source S, S name "system", X eid %(x)s', [], {'x': 999999}) @@ -2067,7 +2088,7 @@ def test_ambigous_cross_relation_source_specified(self): - self.repo._type_source_cache[999999] = ('Note', 'cards', 999999) + self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') self.cards.support_relations['see_also'] = True self.cards.cross_relations.add('see_also') try: @@ -2198,7 +2219,7 @@ ]) def test_nonregr7(self): - repo._type_source_cache[999999] = ('Note', 'cards', 999999) + repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') self._test('Any S,SUM(DUR),SUM(I),(SUM(I) - SUM(DUR)),MIN(DI),MAX(DI) GROUPBY S ORDERBY S WHERE A is Affaire, A duration DUR, A invoiced I, A modification_date DI, A in_state S, S name SN, (EXISTS(A concerne WP, W multisource_rel WP)) OR (EXISTS(A concerne W)), W eid %(n)s', [('FetchStep', [('Any WP WHERE 999999 multisource_rel WP, WP is Note', [{'WP': 'Note'}])], [self.cards], None, {'WP': u'table0.C0'}, []), @@ -2208,7 +2229,7 @@ {'n': 999999}) def test_nonregr8(self): - repo._type_source_cache[999999] = ('Note', 'cards', 999999) + repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') self._test('Any X,Z WHERE X eid %(x)s, X multisource_rel Y, Z concerne X', [('FetchStep', [('Any 999999 WHERE 999999 multisource_rel Y, Y is Note', [{'Y': 'Note'}])], @@ -2223,8 +2244,8 @@ {'x': 999999}) def test_nonregr9(self): - repo._type_source_cache[999999] = ('Note', 'cards', 999999) - repo._type_source_cache[999998] = ('Note', 'cards', 999998) + repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') + repo._type_source_cache[999998] = ('Note', 'cards', 999998, 'cards') self._test('SET X migrated_from Y WHERE X eid %(x)s, Y multisource_rel Z, Z eid %(z)s, Y migrated_from Z', [('FetchStep', [('Any Y WHERE Y multisource_rel 999998, Y is Note', [{'Y': 'Note'}])], [self.cards], None, {'Y': u'table0.C0'}, []), @@ -2236,7 +2257,7 @@ {'x': 999999, 'z': 999998}) def test_nonregr10(self): - repo._type_source_cache[999999] = ('CWUser', 'ldap', 999999) + repo._type_source_cache[999999] = ('CWUser', 'ldap', 999999, 'ldap') self._test('Any X,AA,AB ORDERBY AA WHERE E eid %(x)s, E owned_by X, X login AA, X modification_date AB', [('FetchStep', [('Any X,AA,AB WHERE X login AA, X modification_date AB, X is CWUser', @@ -2254,7 +2275,7 @@ {'x': 999999}) def test_nonregr11(self): - repo._type_source_cache[999999] = ('Bookmark', 'system', 999999) + repo._type_source_cache[999999] = ('Bookmark', 'system', 999999, 'system') self._test('SET X bookmarked_by Y WHERE X eid %(x)s, Y login "hop"', [('UpdateStep', [('OneFetchStep', [('DISTINCT Any Y WHERE Y login "hop", Y is CWUser', [{'Y': 'CWUser'}])], @@ -2263,7 +2284,7 @@ {'x': 999999}) def test_nonregr12(self): - repo._type_source_cache[999999] = ('Note', 'cards', 999999) + repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') self._test('Any X ORDERBY Z DESC WHERE X modification_date Z, E eid %(x)s, E see_also X', [('FetchStep', [('Any X,Z WHERE X modification_date Z, X is Note', [{'X': 'Note', 'Z': 'Datetime'}])], @@ -2347,38 +2368,38 @@ {'x': self.session.user.eid}) def test_nonregr14_1(self): - repo._type_source_cache[999999] = ('CWUser', 'ldap', 999999) + repo._type_source_cache[999999] = ('CWUser', 'ldap', 999999, 'ldap') self._test('Any X WHERE X eid %(x)s, X owned_by U, U eid %(u)s', [('OneFetchStep', [('Any 999999 WHERE 999999 
owned_by 999999', [{}])], None, None, [self.system], {}, [])], {'x': 999999, 'u': 999999}) def test_nonregr14_2(self): - repo._type_source_cache[999999] = ('CWUser', 'ldap', 999999) - repo._type_source_cache[999998] = ('Note', 'system', 999998) + repo._type_source_cache[999999] = ('CWUser', 'ldap', 999999, 'ldap') + repo._type_source_cache[999998] = ('Note', 'system', 999998, 'system') self._test('Any X WHERE X eid %(x)s, X owned_by U, U eid %(u)s', [('OneFetchStep', [('Any 999998 WHERE 999998 owned_by 999999', [{}])], None, None, [self.system], {}, [])], {'x': 999998, 'u': 999999}) def test_nonregr14_3(self): - repo._type_source_cache[999999] = ('CWUser', 'system', 999999) - repo._type_source_cache[999998] = ('CWUser', 'ldap', 999998) + repo._type_source_cache[999999] = ('CWUser', 'system', 999999, 'system') + repo._type_source_cache[999998] = ('CWUser', 'ldap', 999998, 'ldap') self._test('Any X WHERE X eid %(x)s, X owned_by U, U eid %(u)s', [('OneFetchStep', [('Any 999998 WHERE 999998 owned_by 999999', [{}])], None, None, [self.system], {}, [])], {'x': 999998, 'u': 999999}) def test_nonregr_identity_no_source_access_1(self): - repo._type_source_cache[999999] = ('CWUser', 'ldap', 999998) + repo._type_source_cache[999999] = ('CWUser', 'ldap', 999998, 'ldap') self._test('Any S WHERE S identity U, S eid %(s)s, U eid %(u)s', [('OneFetchStep', [('Any 999999 WHERE 999999 identity 999999', [{}])], None, None, [self.system], {}, [])], {'s': 999999, 'u': 999999}) def test_nonregr_identity_no_source_access_2(self): - repo._type_source_cache[999999] = ('EmailAddress', 'system', 999999) - repo._type_source_cache[999998] = ('CWUser', 'ldap', 999998) + repo._type_source_cache[999999] = ('EmailAddress', 'system', 999999, 'system') + repo._type_source_cache[999998] = ('CWUser', 'ldap', 999998, 'ldap') self._test('Any X WHERE O use_email X, ((EXISTS(O identity U)) OR (EXISTS(O in_group G, G name IN("managers", "staff")))) OR (EXISTS(O in_group G2, U in_group G2, NOT G2 name "users")), X eid %(x)s, U eid %(u)s', [('OneFetchStep', [('Any 999999 WHERE O use_email 999999, ((EXISTS(O identity 999998)) OR (EXISTS(O in_group G, G name IN("managers", "staff")))) OR (EXISTS(O in_group G2, 999998 in_group G2, NOT G2 name "users"))', [{'G': 'CWGroup', 'G2': 'CWGroup', 'O': 'CWUser'}])], @@ -2386,7 +2407,7 @@ {'x': 999999, 'u': 999998}) def test_nonregr_similar_subquery(self): - repo._type_source_cache[999999] = ('Personne', 'system', 999999) + repo._type_source_cache[999999] = ('Personne', 'system', 999999, 'system') self._test('Any T,TD,U,T,UL WITH T,TD,U,UL BEING (' '(Any T,TD,U,UL WHERE X eid %(x)s, T comments X, T content TD, T created_by U?, U login UL)' ' UNION ' @@ -2456,7 +2477,7 @@ def test_linked_external_entities(self): - repo._type_source_cache[999999] = ('Tag', 'system', 999999) + repo._type_source_cache[999999] = ('Tag', 'system', 999999, 'system') self._test('Any X,XT WHERE X is Card, X title XT, T tags X, T eid %(t)s', [('FetchStep', [('Any X,XT WHERE X title XT, X is Card', [{'X': 'Card', 'XT': 'String'}])], @@ -2472,7 +2493,7 @@ {'t': 999999}) def test_version_depends_on(self): - self.repo._type_source_cache[999999] = ('Note', 'cards', 999999) + self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') self._test('Any X,AD,AE WHERE E eid %(x)s, E migrated_from X, X in_state AD, AD name AE', [('FetchStep', [('Any X,AD,AE WHERE X in_state AD, AD name AE, AD is State, X is Note', [{'AD': 'State', 'AE': 'String', 'X': 'Note'}])], @@ -2488,7 +2509,7 @@ {'x': 999999}) def 
test_version_crossed_depends_on_1(self): - self.repo._type_source_cache[999999] = ('Note', 'cards', 999999) + self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') self._test('Any X,AD,AE WHERE E eid %(x)s, E multisource_crossed_rel X, X in_state AD, AD name AE', [('FetchStep', [('Any X,AD,AE WHERE X in_state AD, AD name AE, AD is State, X is Note', [{'AD': 'State', 'AE': 'String', 'X': 'Note'}])], @@ -2511,7 +2532,7 @@ {'x': 999999}) def test_version_crossed_depends_on_2(self): - self.repo._type_source_cache[999999] = ('Note', 'system', 999999) + self.repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system') self._test('Any X,AD,AE WHERE E eid %(x)s, E multisource_crossed_rel X, X in_state AD, AD name AE', [('FetchStep', [('Any X,AD,AE WHERE X in_state AD, AD name AE, AD is State, X is Note', [{'AD': 'State', 'AE': 'String', 'X': 'Note'}])], @@ -2587,7 +2608,7 @@ ) def test_nonregr_dont_cross_rel_source_filtering_1(self): - self.repo._type_source_cache[999999] = ('Note', 'cards', 999999) + self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') self._test('Any S WHERE E eid %(x)s, E in_state S, NOT S name "moved"', [('OneFetchStep', [('Any S WHERE 999999 in_state S, NOT S name "moved", S is State', [{'S': 'State'}])], @@ -2596,7 +2617,7 @@ {'x': 999999}) def test_nonregr_dont_cross_rel_source_filtering_2(self): - self.repo._type_source_cache[999999] = ('Note', 'cards', 999999) + self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') self._test('Any X,AA,AB WHERE E eid %(x)s, E in_state X, X name AA, X modification_date AB', [('OneFetchStep', [('Any X,AA,AB WHERE 999999 in_state X, X name AA, X modification_date AB, X is State', [{'AA': 'String', 'AB': 'Datetime', 'X': 'State'}])], @@ -2605,7 +2626,7 @@ {'x': 999999}) def test_nonregr_eid_query(self): - self.repo._type_source_cache[999999] = ('Note', 'cards', 999999) + self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') self._test('Any X WHERE X eid 999999', [('OneFetchStep', [('Any 999999', [{}])], None, None, [self.system], {}, [] @@ -2671,6 +2692,29 @@ ]) ]) + def test_remove_from_deleted_source_1(self): + self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') + self._test('Note X WHERE X eid 999999, NOT X cw_source Y', + [('OneFetchStep', + [('Any 999999 WHERE NOT EXISTS(999999 cw_source Y)', + [{'Y': 'CWSource'}])], + None, None, [self.system], {}, []) + ]) + + def test_remove_from_deleted_source_2(self): + self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards') + self.repo._type_source_cache[999998] = ('Note', 'cards', 999998, 'cards') + self._test('Note X WHERE X eid IN (999998, 999999), NOT X cw_source Y', + [('FetchStep', + [('Any X WHERE X eid IN(999998, 999999), X is Note', + [{'X': 'Note'}])], + [self.cards], None, {'X': 'table0.C0'}, []), + ('OneFetchStep', + [('Any X WHERE NOT EXISTS(X cw_source Y, Y is CWSource), X is Note', + [{'X': 'Note', 'Y': 'CWSource'}])], + None, None, [self.system],{'X': 'table0.C0'}, []) + ]) + class FakeVCSSource(AbstractSource): uri = 'ccc' @@ -2707,17 +2751,17 @@ ]) def test_fully_simplified_extsource(self): - self.repo._type_source_cache[999998] = ('Note', 'vcs', 999998) - self.repo._type_source_cache[999999] = ('Note', 'vcs', 999999) + self.repo._type_source_cache[999998] = ('Note', 'vcs', 999998, 'vcs') + self.repo._type_source_cache[999999] = ('Note', 'vcs', 999999, 'vcs') self._test('Any X, Y WHERE NOT X multisource_rel Y, X eid 999998, Y eid 999999', 
[('OneFetchStep', [('Any 999998,999999 WHERE NOT EXISTS(999998 multisource_rel 999999)', [{}])], None, None, [self.vcs], {}, []) ]) def test_nonregr_fully_simplified_extsource(self): - self.repo._type_source_cache[999998] = ('Note', 'vcs', 999998) - self.repo._type_source_cache[999999] = ('Note', 'vcs', 999999) - self.repo._type_source_cache[1000000] = ('Note', 'system', 1000000) + self.repo._type_source_cache[999998] = ('Note', 'vcs', 999998, 'vcs') + self.repo._type_source_cache[999999] = ('Note', 'vcs', 999999, 'vcs') + self.repo._type_source_cache[1000000] = ('Note', 'system', 1000000, 'system') self._test('DISTINCT Any T,FALSE,L,M WHERE L eid 1000000, M eid 999999, T eid 999998', [('OneFetchStep', [('DISTINCT Any 999998,FALSE,1000000,999999', [{}])], None, None, [self.system], {}, []) diff -r 6397a9051f65 -r 134613d3b353 server/test/unittest_multisources.py --- a/server/test/unittest_multisources.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/test/unittest_multisources.py Wed Jul 20 18:22:41 2011 +0200 @@ -160,11 +160,11 @@ # since they are orderd by eid, we know the 3 first one is coming from the system source # and the others from external source self.assertEqual(rset.get_entity(0, 0).cw_metainformation(), - {'source': {'type': 'native', 'uri': 'system'}, + {'source': {'type': 'native', 'uri': 'system', 'use-cwuri-as-url': False}, 'type': u'Card', 'extid': None}) externent = rset.get_entity(3, 0) metainf = externent.cw_metainformation() - self.assertEqual(metainf['source'], {'type': 'pyrorql', 'base-url': 'http://extern.org/', 'uri': 'extern'}) + self.assertEqual(metainf['source'], {'type': 'pyrorql', 'base-url': 'http://extern.org/', 'uri': 'extern', 'use-cwuri-as-url': False}) self.assertEqual(metainf['type'], 'Card') self.assert_(metainf['extid']) etype = self.sexecute('Any ETN WHERE X is ET, ET name ETN, X eid %(x)s', @@ -381,6 +381,13 @@ def test_nonregr3(self): self.sexecute('DELETE Card X WHERE X eid %(x)s, NOT X multisource_inlined_rel Y', {'x': self.ic1}) + def test_delete_source(self): + req = self.request() + req.execute('DELETE CWSource S WHERE S name "extern"') + self.commit() + cu = self.session.system_sql("SELECT * FROM entities WHERE source='extern'") + self.failIf(cu.fetchall()) + if __name__ == '__main__': from logilab.common.testlib import unittest_main unittest_main() diff -r 6397a9051f65 -r 134613d3b353 server/test/unittest_querier.py --- a/server/test/unittest_querier.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/test/unittest_querier.py Wed Jul 20 18:22:41 2011 +0200 @@ -311,6 +311,14 @@ seid = self.execute('State X WHERE X name "deactivated"')[0][0] rset = self.execute('Any U,L,S GROUPBY U,L,S WHERE X in_state S, U login L, S eid %s' % seid) + def test_select_groupby_funccall(self): + rset = self.execute('Any YEAR(CD), COUNT(X) GROUPBY YEAR(CD) WHERE X is CWUser, X creation_date CD') + self.assertListEqual(rset.rows, [[date.today().year, 2]]) + + def test_select_groupby_colnumber(self): + rset = self.execute('Any YEAR(CD), COUNT(X) GROUPBY 1 WHERE X is CWUser, X creation_date CD') + self.assertListEqual(rset.rows, [[date.today().year, 2]]) + def test_select_complex_orderby(self): rset1 = self.execute('Any N ORDERBY N WHERE X name N') self.assertEqual(sorted(rset1.rows), rset1.rows) @@ -443,6 +451,15 @@ self.assertEqual(rset.rows[0][0], result) self.assertEqual(rset.description, [('Int',)]) + def test_regexp_based_pattern_matching(self): + peid1 = self.execute("INSERT Personne X: X nom 'bidule'")[0][0] + peid2 = self.execute("INSERT Personne X: X nom 
'cidule'")[0][0] + rset = self.execute('Any X WHERE X is Personne, X nom REGEXP "^b"') + self.assertEqual(len(rset.rows), 1, rset.rows) + self.assertEqual(rset.rows[0][0], peid1) + rset = self.execute('Any X WHERE X is Personne, X nom REGEXP "idu"') + self.assertEqual(len(rset.rows), 2, rset.rows) + def test_select_aggregat_count(self): rset = self.execute('Any COUNT(X)') self.assertEqual(len(rset.rows), 1) @@ -768,7 +785,7 @@ def test_select_boolean(self): rset = self.execute('Any N WHERE X is CWEType, X name N, X final %(val)s', {'val': True}) - self.assertEqual(sorted(r[0] for r in rset.rows), ['Boolean', 'Bytes', + self.assertEqual(sorted(r[0] for r in rset.rows), ['BigInt', 'Boolean', 'Bytes', 'Date', 'Datetime', 'Decimal', 'Float', 'Int', 'Interval', @@ -776,7 +793,7 @@ 'TZDatetime', 'TZTime', 'Time']) rset = self.execute('Any N WHERE X is CWEType, X name N, X final TRUE') - self.assertEqual(sorted(r[0] for r in rset.rows), ['Boolean', 'Bytes', + self.assertEqual(sorted(r[0] for r in rset.rows), ['BigInt', 'Boolean', 'Bytes', 'Date', 'Datetime', 'Decimal', 'Float', 'Int', 'Interval', @@ -1099,7 +1116,7 @@ #'INSERT Email X: X messageid "<1234>", X subject "test", X sender Y, X recipients Y' eeid, = self.o.execute(s, 'INSERT Email X: X messageid "<1234>", X subject "test", X sender Y, X recipients Y WHERE Y is EmailAddress')[0] self.o.execute(s, "DELETE Email X") - sqlc = s.pool['system'] + sqlc = s.cnxset['system'] sqlc.execute('SELECT * FROM recipients_relation') self.assertEqual(len(sqlc.fetchall()), 0) sqlc.execute('SELECT * FROM owned_by_relation WHERE eid_from=%s'%eeid) @@ -1212,7 +1229,7 @@ self.assertEqual(rset.description, [('CWUser',)]) self.assertRaises(Unauthorized, self.execute, "Any P WHERE X is CWUser, X login 'bob', X upassword P") - cursor = self.pool['system'] + cursor = self.cnxset['system'] cursor.execute("SELECT %supassword from %sCWUser WHERE %slogin='bob'" % (SQL_PREFIX, SQL_PREFIX, SQL_PREFIX)) passwd = str(cursor.fetchone()[0]) @@ -1227,7 +1244,7 @@ self.assertEqual(rset.description[0][0], 'CWUser') rset = self.execute("SET X upassword %(pwd)s WHERE X is CWUser, X login 'bob'", {'pwd': 'tutu'}) - cursor = self.pool['system'] + cursor = self.cnxset['system'] cursor.execute("SELECT %supassword from %sCWUser WHERE %slogin='bob'" % (SQL_PREFIX, SQL_PREFIX, SQL_PREFIX)) passwd = str(cursor.fetchone()[0]) diff -r 6397a9051f65 -r 134613d3b353 server/test/unittest_repository.py --- a/server/test/unittest_repository.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/test/unittest_repository.py Wed Jul 20 18:22:41 2011 +0200 @@ -24,6 +24,7 @@ import sys import threading import time +import logging from copy import deepcopy from datetime import datetime @@ -62,13 +63,13 @@ table = SQL_PREFIX + 'CWEType' namecol = SQL_PREFIX + 'name' finalcol = SQL_PREFIX + 'final' - self.session.set_pool() + self.session.set_cnxset() cu = self.session.system_sql('SELECT %s FROM %s WHERE %s is NULL' % ( namecol, table, finalcol)) self.assertEqual(cu.fetchall(), []) cu = self.session.system_sql('SELECT %s FROM %s WHERE %s=%%(final)s ORDER BY %s' % (namecol, table, finalcol, namecol), {'final': 'TRUE'}) - self.assertEqual(cu.fetchall(), [(u'Boolean',), (u'Bytes',), + self.assertEqual(cu.fetchall(), [(u'BigInt',), (u'Boolean',), (u'Bytes',), (u'Date',), (u'Datetime',), (u'Decimal',),(u'Float',), (u'Int',), @@ -259,7 +260,7 @@ cnxid = repo.connect(self.admlogin, password=self.admpassword) # rollback state change which trigger TrInfo insertion session = repo._get_session(cnxid) - session.set_pool() 
+ session.set_cnxset() user = session.user user.cw_adapt_to('IWorkflowable').fire_transition('deactivate') rset = repo.execute(cnxid, 'TrInfo T WHERE T wf_info_for X, X eid %(x)s', {'x': user.eid}) @@ -292,7 +293,7 @@ try: with self.assertRaises(Exception) as cm: run_transaction() - self.assertEqual(str(cm.exception), 'try to access pool on a closed session') + self.assertEqual(str(cm.exception), 'try to access connections set on a closed session %s' % cnxid) finally: t.join() @@ -382,9 +383,9 @@ def test_internal_api(self): repo = self.repo cnxid = repo.connect(self.admlogin, password=self.admpassword) - session = repo._get_session(cnxid, setpool=True) + session = repo._get_session(cnxid, setcnxset=True) self.assertEqual(repo.type_and_source_from_eid(2, session), - ('CWGroup', 'system', None)) + ('CWGroup', 'system', None, 'system')) self.assertEqual(repo.type_from_eid(2, session), 'CWGroup') self.assertEqual(repo.source_from_eid(2, session).uri, 'system') self.assertEqual(repo.eid2extid(repo.system_source, 2, session), None) @@ -394,7 +395,10 @@ def test_public_api(self): self.assertEqual(self.repo.get_schema(), self.repo.schema) - self.assertEqual(self.repo.source_defs(), {'system': {'type': 'native', 'uri': 'system'}}) + self.assertEqual(self.repo.source_defs(), {'system': {'type': 'native', + 'uri': 'system', + 'use-cwuri-as-url': False} + }) # .properties() return a result set self.assertEqual(self.repo.properties().rql, 'Any K,V WHERE P is CWProperty,P pkey K, P value V, NOT P for_user U') @@ -402,7 +406,7 @@ repo = self.repo cnxid = repo.connect(self.admlogin, password=self.admpassword) self.assertEqual(repo.user_info(cnxid), (6, 'admin', set([u'managers']), {})) - self.assertEqual(repo.describe(cnxid, 2), (u'CWGroup', u'system', None)) + self.assertEqual(repo.describe(cnxid, 2), (u'CWGroup', u'system', None, 'system')) repo.close(cnxid) self.assertRaises(BadConnectionId, repo.user_info, cnxid) self.assertRaises(BadConnectionId, repo.describe, cnxid, 1) @@ -519,38 +523,39 @@ class DataHelpersTC(CubicWebTC): def test_create_eid(self): - self.session.set_pool() + self.session.set_cnxset() self.assert_(self.repo.system_source.create_eid(self.session)) def test_source_from_eid(self): - self.session.set_pool() + self.session.set_cnxset() self.assertEqual(self.repo.source_from_eid(1, self.session), self.repo.sources_by_uri['system']) def test_source_from_eid_raise(self): - self.session.set_pool() + self.session.set_cnxset() self.assertRaises(UnknownEid, self.repo.source_from_eid, -2, self.session) def test_type_from_eid(self): - self.session.set_pool() + self.session.set_cnxset() self.assertEqual(self.repo.type_from_eid(2, self.session), 'CWGroup') def test_type_from_eid_raise(self): - self.session.set_pool() + self.session.set_cnxset() self.assertRaises(UnknownEid, self.repo.type_from_eid, -2, self.session) def test_add_delete_info(self): entity = self.repo.vreg['etypes'].etype_class('Personne')(self.session) entity.eid = -1 entity.complete = lambda x: None - self.session.set_pool() + self.session.set_cnxset() self.repo.add_info(self.session, entity, self.repo.system_source) cu = self.session.system_sql('SELECT * FROM entities WHERE eid = -1') data = cu.fetchall() - self.assertIsInstance(data[0][3], datetime) + self.assertIsInstance(data[0][4], datetime) data[0] = list(data[0]) - data[0][3] = None - self.assertEqual(tuplify(data), [(-1, 'Personne', 'system', None, None)]) + data[0][4] = None + self.assertEqual(tuplify(data), [(-1, 'Personne', 'system', 'system', + None, None)]) 
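The cache and entities-table updates above all follow from one change: eid bookkeeping now records the actual source next to the logical one, as a fourth element. A sketch of the new shapes (the name `asource` is a guess based on the extra column the tests exercise):

# sketch: eid bookkeeping entries after this change
_type_source_cache = {}
_type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
etype, uri, extid, asource = _type_source_cache[999999]
assert asource == 'cards'
# the entities table grows a matching column, so a row now looks like
# (eid, type, source, asource, mtime, extid) as in the assertion above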
self.repo.delete_info(self.session, entity, 'system', None) #self.repo.commit() cu = self.session.system_sql('SELECT * FROM entities WHERE eid = -1') @@ -566,7 +571,7 @@ self.commit() ts = datetime.now() self.assertEqual(len(self.execute('Personne X WHERE X has_text "tutu"')), 1) - self.session.set_pool() + self.session.set_cnxset() cu = self.session.system_sql('SELECT mtime, eid FROM entities WHERE eid = %s' % eidp) omtime = cu.fetchone()[0] # our sqlite datetime adapter is ignore seconds fraction, so we have to @@ -575,7 +580,7 @@ self.execute('SET X nom "tata" WHERE X eid %(x)s', {'x': eidp}) self.commit() self.assertEqual(len(self.execute('Personne X WHERE X has_text "tutu"')), 1) - self.session.set_pool() + self.session.set_cnxset() cu = self.session.system_sql('SELECT mtime FROM entities WHERE eid = %s' % eidp) mtime = cu.fetchone()[0] self.failUnless(omtime < mtime) @@ -646,7 +651,7 @@ CubicWebTC.setUp(self) CALLED[:] = () - def _after_relation_hook(self, pool, fromeid, rtype, toeid): + def _after_relation_hook(self, cnxset, fromeid, rtype, toeid): self.called.append((fromeid, rtype, toeid)) def test_inline_relation(self): @@ -704,13 +709,18 @@ class PerformanceTest(CubicWebTC): - def setup_database(self): - import logging + def setUp(self): + super(PerformanceTest, self).setUp() logger = logging.getLogger('cubicweb.session') #logger.handlers = [logging.StreamHandler(sys.stdout)] logger.setLevel(logging.INFO) self.info = logger.info + def tearDown(self): + super(PerformanceTest, self).tearDown() + logger = logging.getLogger('cubicweb.session') + logger.setLevel(logging.CRITICAL) + def test_composite_deletion(self): req = self.request() personnes = [] @@ -807,6 +817,7 @@ req.cnx.commit() t1 = time.time() self.info('add relations: %.2gs', t1-t0) + def test_session_add_relation_inlined(self): """ to be compared with test_session_add_relations""" req = self.request() @@ -847,7 +858,7 @@ p2 = req.create_entity('Personne', nom=u'Florent') w = req.create_entity('Affaire', ref=u'wc') w.set_relations(todo_by=[p1,p2]) - w.clear_all_caches() + w.cw_clear_all_caches() self.commit() self.assertEqual(len(w.todo_by), 1) self.assertEqual(w.todo_by[0].eid, p2.eid) @@ -860,7 +871,7 @@ w.set_relations(todo_by=p1) self.commit() w.set_relations(todo_by=p2) - w.clear_all_caches() + w.cw_clear_all_caches() self.commit() self.assertEqual(len(w.todo_by), 1) self.assertEqual(w.todo_by[0].eid, p2.eid) diff -r 6397a9051f65 -r 134613d3b353 server/test/unittest_rql2sql.py --- a/server/test/unittest_rql2sql.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/test/unittest_rql2sql.py Wed Jul 20 18:22:41 2011 +0200 @@ -550,6 +550,15 @@ GROUP BY rel_todo_by0.eid_to ORDER BY 2 DESC'''), + ('Any R2 WHERE R2 concerne R, R eid RE, R2 eid > RE', + '''SELECT _R2.eid +FROM concerne_relation AS rel_concerne0, entities AS _R2 +WHERE _R2.eid=rel_concerne0.eid_from AND _R2.eid>rel_concerne0.eid_to'''), + + ('Note X WHERE X eid IN (999998, 999999), NOT X cw_source Y', + '''SELECT _X.cw_eid +FROM cw_Note AS _X +WHERE _X.cw_eid IN(999998, 999999) AND NOT (EXISTS(SELECT 1 FROM cw_source_relation AS rel_cw_source0 WHERE rel_cw_source0.eid_from=_X.cw_eid))'''), ] ADVANCED_WITH_GROUP_CONCAT = [ @@ -1360,6 +1369,18 @@ '''SELECT SUBSTR(_P.cw_nom, 1, 1) FROM cw_Personne AS _P''') + def test_cast(self): + self._check("Any CAST(String, P) WHERE P is Personne", + '''SELECT CAST(_P.cw_eid AS text) +FROM cw_Personne AS _P''') + + def test_regexp(self): + self._check("Any X WHERE X login REGEXP '[0-9].*'", + '''SELECT _X.cw_eid +FROM cw_CWUser AS 
_X +WHERE _X.cw_login ~ [0-9].* +''') + def test_parser_parse(self): for t in self._parse(PARSER): yield t @@ -1622,12 +1643,26 @@ '''SELECT (A || _X.cw_ref) FROM cw_Affaire AS _X''') - def test_or_having_fake_terms(self): + def test_or_having_fake_terms_base(self): self._check('Any X WHERE X is CWUser, X creation_date D HAVING YEAR(D) = "2010" OR D = NULL', '''SELECT _X.cw_eid FROM cw_CWUser AS _X WHERE ((CAST(EXTRACT(YEAR from _X.cw_creation_date) AS INTEGER)=2010) OR (_X.cw_creation_date IS NULL))''') + def test_or_having_fake_terms_exists(self): + # crash with rql <= 0.29.0 + self._check('Any X WHERE X is CWUser, EXISTS(B bookmarked_by X, B creation_date D) HAVING D=2010 OR D=NULL, D=1 OR D=NULL', + '''SELECT _X.cw_eid +FROM cw_CWUser AS _X +WHERE EXISTS(SELECT 1 FROM bookmarked_by_relation AS rel_bookmarked_by0, cw_Bookmark AS _B WHERE rel_bookmarked_by0.eid_from=_B.cw_eid AND rel_bookmarked_by0.eid_to=_X.cw_eid AND ((_B.cw_creation_date=1) OR (_B.cw_creation_date IS NULL)) AND ((_B.cw_creation_date=2010) OR (_B.cw_creation_date IS NULL)))''') + + def test_or_having_fake_terms_nocrash(self): + # crash with rql <= 0.29.0 + self._check('Any X WHERE X is CWUser, X creation_date D HAVING D=2010 OR D=NULL, D=1 OR D=NULL', + '''SELECT _X.cw_eid +FROM cw_CWUser AS _X +WHERE ((_X.cw_creation_date=1) OR (_X.cw_creation_date IS NULL)) AND ((_X.cw_creation_date=2010) OR (_X.cw_creation_date IS NULL))''') + def test_not_no_where(self): # XXX will check if some in_group relation exists, that's it. # We can't actually know if we want to check if there are some @@ -1675,7 +1710,10 @@ for t in self._parse(HAS_TEXT_LG_INDEXER): yield t - def test_or_having_fake_terms(self): + def test_regexp(self): + self.skipTest('regexp-based pattern matching not implemented in sqlserver') + + def test_or_having_fake_terms_base(self): self._check('Any X WHERE X is CWUser, X creation_date D HAVING YEAR(D) = "2010" OR D = NULL', '''SELECT _X.cw_eid FROM cw_CWUser AS _X @@ -1813,6 +1851,11 @@ for t in self._parse(WITH_LIMIT):# + ADVANCED_WITH_LIMIT_OR_ORDERBY): yield t + def test_cast(self): + self._check("Any CAST(String, P) WHERE P is Personne", + '''SELECT CAST(_P.cw_eid AS nvarchar(max)) +FROM cw_Personne AS _P''') + def test_groupby_orderby_insertion_dont_modify_intention(self): self._check('Any YEAR(XECT)*100+MONTH(XECT), COUNT(X),SUM(XCE),AVG(XSCT-XECT) ' 'GROUPBY YEAR(XECT),MONTH(XECT) ORDERBY 1 ' @@ -1835,6 +1878,14 @@ '''SELECT MONTH(_P.cw_creation_date) FROM cw_Personne AS _P''') + def test_regexp(self): + self._check("Any X WHERE X login REGEXP '[0-9].*'", + '''SELECT _X.cw_eid +FROM cw_CWUser AS _X +WHERE _X.cw_login REGEXP [0-9].* +''') + + def test_union(self): for t in self._parse(( ('(Any N ORDERBY 1 WHERE X name N, X is State)' @@ -1947,7 +1998,7 @@ yield t - def test_or_having_fake_terms(self): + def test_or_having_fake_terms_base(self): self._check('Any X WHERE X is CWUser, X creation_date D HAVING YEAR(D) = "2010" OR D = NULL', '''SELECT _X.cw_eid FROM cw_CWUser AS _X @@ -1985,6 +2036,18 @@ '''SELECT EXTRACT(MONTH from _P.cw_creation_date) FROM cw_Personne AS _P''') + def test_cast(self): + self._check("Any CAST(String, P) WHERE P is Personne", + '''SELECT CAST(_P.cw_eid AS mediumtext) +FROM cw_Personne AS _P''') + + def test_regexp(self): + self._check("Any X WHERE X login REGEXP '[0-9].*'", + '''SELECT _X.cw_eid +FROM cw_CWUser AS _X +WHERE _X.cw_login REGEXP [0-9].* +''') + def test_from_clause_needed(self): queries = [("Any 1 WHERE EXISTS(T is CWGroup, T name 'managers')", '''SELECT 1 @@ 
-2046,7 +2109,7 @@ FROM cw_Personne AS _P''') - def test_or_having_fake_terms(self): + def test_or_having_fake_terms_base(self): self._check('Any X WHERE X is CWUser, X creation_date D HAVING YEAR(D) = "2010" OR D = NULL', '''SELECT _X.cw_eid FROM cw_CWUser AS _X diff -r 6397a9051f65 -r 134613d3b353 server/test/unittest_rqlannotation.py --- a/server/test/unittest_rqlannotation.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/test/unittest_rqlannotation.py Wed Jul 20 18:22:41 2011 +0200 @@ -340,6 +340,16 @@ self.assertEqual(rqlst.defined_vars['X']._q_invariant, False) self.assertEqual(rqlst.defined_vars['S']._q_invariant, False) + def test_remove_from_deleted_source_1(self): + rqlst = self._prepare('Note X WHERE X eid 999998, NOT X cw_source Y') + self.failIf('X' in rqlst.defined_vars) # simplified + self.assertEqual(rqlst.defined_vars['Y']._q_invariant, True) + + def test_remove_from_deleted_source_2(self): + rqlst = self._prepare('Note X WHERE X eid IN (999998, 999999), NOT X cw_source Y') + self.assertEqual(rqlst.defined_vars['X']._q_invariant, False) + self.assertEqual(rqlst.defined_vars['Y']._q_invariant, True) + if __name__ == '__main__': from logilab.common.testlib import unittest_main unittest_main() diff -r 6397a9051f65 -r 134613d3b353 server/test/unittest_security.py --- a/server/test/unittest_security.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/test/unittest_security.py Wed Jul 20 18:22:41 2011 +0200 @@ -221,7 +221,7 @@ rset = cu.execute('Personne P') self.assertEqual(len(rset), 1) ent = rset.get_entity(0, 0) - session.set_pool() # necessary + session.set_cnxset() # necessary self.assertRaises(Unauthorized, ent.cw_check_perm, 'update') self.assertRaises(Unauthorized, cu.execute, "SET P travaille S WHERE P is Personne, S is Societe") @@ -579,7 +579,7 @@ cnx = self.login('iaminusersgrouponly') session = self.session # needed to avoid check_perm error - session.set_pool() + session.set_cnxset() # needed to remove rql expr granting update perm to the user affaire_perms = self.schema['Affaire'].permissions.copy() self.schema['Affaire'].set_action_permissions('update', self.schema['Affaire'].get_groups('update')) diff -r 6397a9051f65 -r 134613d3b353 server/test/unittest_session.py --- a/server/test/unittest_session.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/test/unittest_session.py Wed Jul 20 18:22:41 2011 +0200 @@ -1,4 +1,4 @@ -# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved. +# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved. # contact http://www.logilab.fr/ -- mailto:contact@logilab.fr # # This file is part of CubicWeb. @@ -15,13 +15,12 @@ # # You should have received a copy of the GNU Lesser General Public License along # with CubicWeb. If not, see . 
-""" +from __future__ import with_statement -""" from logilab.common.testlib import TestCase, unittest_main, mock_object from cubicweb.devtools.testlib import CubicWebTC -from cubicweb.server.session import _make_description +from cubicweb.server.session import _make_description, hooks_control class Variable: def __init__(self, name): @@ -46,11 +45,38 @@ self.assertEqual(_make_description((Function('max', 'A'), Variable('B')), {}, solution), ['Int','CWUser']) + class InternalSessionTC(CubicWebTC): def test_dbapi_query(self): session = self.repo.internal_session() self.assertFalse(session.running_dbapi_query) session.close() + +class SessionTC(CubicWebTC): + + def test_hooks_control(self): + session = self.session + self.assertEqual(session.hooks_mode, session.HOOKS_ALLOW_ALL) + self.assertEqual(session.disabled_hook_categories, set()) + self.assertEqual(session.enabled_hook_categories, set()) + self.assertEqual(len(session._tx_data), 1) + with hooks_control(session, session.HOOKS_DENY_ALL, 'metadata'): + self.assertEqual(session.hooks_mode, session.HOOKS_DENY_ALL) + self.assertEqual(session.disabled_hook_categories, set()) + self.assertEqual(session.enabled_hook_categories, set(('metadata',))) + session.commit() + self.assertEqual(session.hooks_mode, session.HOOKS_DENY_ALL) + self.assertEqual(session.disabled_hook_categories, set()) + self.assertEqual(session.enabled_hook_categories, set(('metadata',))) + session.rollback() + self.assertEqual(session.hooks_mode, session.HOOKS_DENY_ALL) + self.assertEqual(session.disabled_hook_categories, set()) + self.assertEqual(session.enabled_hook_categories, set(('metadata',))) + # leaving context manager with no transaction running should reset the + # transaction local storage (and associated cnxset) + self.assertEqual(session._tx_data, {}) + self.assertEqual(session.cnxset, None) + if __name__ == '__main__': unittest_main() diff -r 6397a9051f65 -r 134613d3b353 server/test/unittest_undo.py --- a/server/test/unittest_undo.py Wed Jul 20 14:09:42 2011 +0200 +++ b/server/test/unittest_undo.py Wed Jul 20 18:22:41 2011 +0200 @@ -153,8 +153,8 @@ txuuid = self.commit() actions = self.cnx.transaction_info(txuuid).actions_list() self.assertEqual(len(actions), 1) - toto.clear_all_caches() - e.clear_all_caches() + toto.cw_clear_all_caches() + e.cw_clear_all_caches() errors = self.cnx.undo_transaction(txuuid) undotxuuid = self.commit() self.assertEqual(undotxuuid, None) # undo not undoable @@ -195,7 +195,7 @@ self.commit() errors = self.cnx.undo_transaction(txuuid) self.commit() - p.clear_all_caches() + p.cw_clear_all_caches() self.assertEqual(p.fiche[0].eid, c2.eid) self.assertEqual(len(errors), 1) self.assertEqual(errors[0], @@ -235,7 +235,7 @@ self.failIf(self.execute('Any X WHERE X eid %(x)s', {'x': c.eid})) self.failIf(self.execute('Any X WHERE X eid %(x)s', {'x': p.eid})) self.failIf(self.execute('Any X,Y WHERE X fiche Y')) - self.session.set_pool() + self.session.set_cnxset() for eid in (p.eid, c.eid): self.failIf(session.system_sql( 'SELECT * FROM entities WHERE eid=%s' % eid).fetchall()) diff -r 6397a9051f65 -r 134613d3b353 sobjects/parsers.py --- a/sobjects/parsers.py Wed Jul 20 14:09:42 2011 +0200 +++ b/sobjects/parsers.py Wed Jul 20 18:22:41 2011 +0200 @@ -31,26 +31,22 @@ """ -import urllib2 -import StringIO import os.path as osp -from cookielib import CookieJar from datetime import datetime, timedelta - -from lxml import etree +from urllib import urlencode +from cgi import parse_qs # in urlparse with python >= 2.6 from logilab.common.date import 
diff -r 6397a9051f65 -r 134613d3b353 sobjects/parsers.py
--- a/sobjects/parsers.py	Wed Jul 20 14:09:42 2011 +0200
+++ b/sobjects/parsers.py	Wed Jul 20 18:22:41 2011 +0200
@@ -31,26 +31,22 @@
 """
-import urllib2
-import StringIO
 import os.path as osp
-from cookielib import CookieJar
 from datetime import datetime, timedelta
-
-from lxml import etree
+from urllib import urlencode
+from cgi import parse_qs # in urlparse with python >= 2.6

 from logilab.common.date import todate, totime
 from logilab.common.textutils import splitstrip, text_to_dict
+from logilab.common.decorators import classproperty

 from yams.constraints import BASE_CONVERTERS
 from yams.schema import role_name as rn

-from cubicweb import ValidationError, typed_eid
+from cubicweb import ValidationError, RegistryException, typed_eid
+from cubicweb.view import Component
 from cubicweb.server.sources import datafeed
-
-def ensure_str_keys(dic):
-    for key in dic:
-        dic[str(key)] = dic.pop(key)
+from cubicweb.server.hook import match_rtype

 # XXX see cubicweb.cwvreg.YAMS_TO_PY
 # XXX see cubicweb.web.views.xmlrss.SERIALIZERS
@@ -72,15 +68,6 @@
     return time(seconds=int(ustr))
 DEFAULT_CONVERTERS['Interval'] = convert_interval

-# use a cookie enabled opener to use session cookie if any
-_OPENER = urllib2.build_opener()
-try:
-    from logilab.common import urllib2ext
-    _OPENER.add_handler(urllib2ext.HTTPGssapiAuthHandler())
-except ImportError: # python-kerberos not available
-    pass
-_OPENER.add_handler(urllib2.HTTPCookieProcessor(CookieJar()))
-
 def extract_typed_attrs(eschema, stringdict, converters=DEFAULT_CONVERTERS):
     typeddict = {}
     for rschema in eschema.subject_relations():
@@ -91,35 +78,6 @@
             typeddict[rschema.type] = converters[attrtype](stringdict[rschema])
     return typeddict

-def _parse_entity_etree(parent):
-    for node in list(parent):
-        try:
-            item = {'cwtype': unicode(node.tag),
-                    'cwuri': node.attrib['cwuri'],
-                    'eid': typed_eid(node.attrib['eid']),
-                    }
-        except KeyError:
-            # cw < 3.11 compat mode XXX
-            item = {'cwtype': unicode(node.tag),
-                    'cwuri': node.find('cwuri').text,
-                    'eid': typed_eid(node.find('eid').text),
-                    }
-        rels = {}
-        for child in node:
-            role = child.get('role')
-            if role:
-                # relation
-                related = rels.setdefault(role, {}).setdefault(child.tag, [])
-                related += [ritem for ritem, _ in _parse_entity_etree(child)]
-            else:
-                # attribute
-                item[child.tag] = unicode(child.text)
-        yield item, rels
-
-def build_search_rql(etype, attrs):
-    restrictions = ['X %(attr)s %%(%(attr)s)s'%{'attr': attr} for attr in attrs]
-    return 'Any X WHERE X is %s, %s' % (etype, ', '.join(restrictions))
-
 def rtype_role_rql(rtype, role):
     if role == 'object':
         return 'Y %s X WHERE X eid %%(x)s' % rtype
@@ -127,34 +85,40 @@
         return 'X %s Y WHERE X eid %%(x)s' % rtype

-def _check_no_option(action, options, eid, _):
-    if options:
-        msg = _("'%s' action doesn't take any options") % action
-        raise ValidationError(eid, {rn('options', 'subject'): msg})
+class CWEntityXMLParser(datafeed.DataFeedXMLParser):
+    """datafeed parser for the 'xml' entity view

-def _check_linkattr_option(action, options, eid, _):
-    if not 'linkattr' in options:
-        msg = _("'%s' action requires 'linkattr' option") % action
-        raise ValidationError(eid, {rn('options', 'subject'): msg})
+    Most of the logic is delegated to the following components:
+
+    * an "item builder" component, turning an etree xml node into a specific
+      python dictionary representing an entity
-
-class CWEntityXMLParser(datafeed.DataFeedParser):
-    """datafeed parser for the 'xml' entity view"""
-    __regid__ = 'cw.entityxml'
+    * "action" components, selected given an entity, a relation and its role
+      in the relation, and responsible for linking the entity to the given
+      related items (eg dictionaries)

-    action_options = {
-        'copy': _check_no_option,
-        'link-or-create': _check_linkattr_option,
-        'link': _check_linkattr_option,
-        }
+    So the parser itself only provides the glue and the connection to the
+    source.
+ """ + __regid__ = 'cw.entityxml' def __init__(self, *args, **kwargs): super(CWEntityXMLParser, self).__init__(*args, **kwargs) - self.action_methods = { - 'copy': self.related_copy, - 'link-or-create': self.related_link_or_create, - 'link': self.related_link, - } + self._parsed_urls = {} + self._processed_entities = set() + + def select_linker(self, action, rtype, role, entity=None): + try: + return self._cw.vreg['components'].select( + 'cw.entityxml.action.%s' % action, self._cw, entity=entity, + rtype=rtype, role=role, parser=self) + except RegistryException: + raise RegistryException('Unknown action %s' % action) + + def list_actions(self): + reg = self._cw.vreg['components'] + return sorted(clss[0].action for rid, clss in reg.iteritems() + if rid.startswith('cw.entityxml.action.')) # mapping handling ######################################################### @@ -180,11 +144,15 @@ raise ValidationError(schemacfg.eid, {rn('options', 'subject'): msg}) try: action = options.pop('action') - self.action_options[action](action, options, schemacfg.eid, _) + linker = self.select_linker(action, rtype, role) + linker.check_options(options, schemacfg.eid) except KeyError: msg = _('"action" must be specified in options; allowed values are ' '%s') % ', '.join(self.list_actions()) raise ValidationError(schemacfg.eid, {rn('options', 'subject'): msg}) + except RegistryException: + msg = _('allowed values for "action" are %s') % ', '.join(self.list_actions()) + raise ValidationError(schemacfg.eid, {rn('options', 'subject'): msg}) if not checkonly: if role == 'subject': etype = schemacfg.schema.stype.name @@ -208,46 +176,25 @@ # import handling ########################################################## - def process(self, url, partialcommit=True): + def process(self, url, raise_on_error=False, partialcommit=True): """IDataFeedParser main entry point""" - # XXX suppression support according to source configuration.
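A minimal sketch of the registry plumbing above, assuming a parser instance and a schemacfg entity as in the surrounding code (option values are illustrative, borrowed from the tests further down):

    options = text_to_dict(u'role=subject\naction=link\nlinkattr=name')
    action = options.pop('action')                  # -> u'link'
    # resolves to the component registered as 'cw.entityxml.action.link'
    linker = parser.select_linker(action, 'in_group', 'subject')
    linker.check_options(options, schemacfg.eid)    # enforces 'linkattr'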
If set, get - # all cwuri of entities from this source, and compare with newly - # imported ones - error = False - for item, rels in self.parse(url): - cwuri = item['cwuri'] - try: - self.process_item(item, rels) - if partialcommit: - # commit+set_pool instead of commit(reset_pool=False) to let - # other a chance to get our pool - self._cw.commit() - self._cw.set_pool() - except ValidationError, exc: - if partialcommit: - self.source.error('Skipping %s because of validation error %s' % (cwuri, exc)) - self._cw.rollback() - self._cw.set_pool() - error = True - else: - raise - return error - - def parse(self, url): - if not url.startswith('http'): - stream = StringIO.StringIO(url) - else: - for mappedurl in HOST_MAPPING: - if url.startswith(mappedurl): - url = url.replace(mappedurl, HOST_MAPPING[mappedurl], 1) - break - self.source.info('GET %s', url) - stream = _OPENER.open(url) - return _parse_entity_etree(etree.parse(stream).getroot()) + super(CWEntityXMLParser, self).process(self.complete_url(url), + raise_on_error, partialcommit) + def parse_etree(self, parent): + for node in list(parent): + builder = self._cw.vreg['components'].select( + 'cw.entityxml.item-builder', self._cw, node=node, + parser=self) + yield builder.build_item() def process_item(self, item, rels): entity = self.extid2entity(str(item.pop('cwuri')), item.pop('cwtype'), - item=item) + cwsource=item.pop('cwsource'), item=item) + if entity is None: + return None + if entity.eid in self._processed_entities: + return entity + self._processed_entities.add(entity.eid) if not (self.created_during_pull(entity) or self.updated_during_pull(entity)): self.notify_updated(entity) item.pop('eid') @@ -262,10 +209,11 @@ rtype, role, entity.__regid__) continue try: - actionmethod = self.action_methods[action] - except KeyError: - raise Exception('Unknown action %s' % action) - actionmethod(entity, rtype, role, related_items, rules) + linker = self.select_linker(action, rtype, role, entity) + except RegistryException: + self.source.error('no linker for action %s', action) + else: + linker.link_items(related_items, rules) return entity def before_entity_copy(self, entity, sourceparams): @@ -273,112 +221,232 @@ attrs = extract_typed_attrs(entity.e_schema, sourceparams['item']) entity.cw_edited.update(attrs) - def related_copy(self, entity, rtype, role, others, rules): - """implementation of 'copy' action + def complete_url(self, url, etype=None, add_relations=True): + """append to the url's query string information about relation that should + be included in the resulting xml, according to source mapping. - Takes no option. + If etype is not specified, try to guess it using the last path part of + the url. 
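Concretely, with the 'myfeed' mapping used in unittest_parsers.py below, the test suite pins this down to:

    parser.complete_url('http://www.cubicweb.org/cwuser')
    # -> 'http://www.cubicweb.org/cwuser?relation=tags-object'
    #    '&relation=in_group-subject&relation=in_state-subject'
    #    '&relation=use_email-subject&vid=xml'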
""" - assert not any(x[1] for x in rules), "'copy' action takes no option" - ttypes = set([x[0] for x in rules]) - others = [item for item in others if item['cwtype'] in ttypes] - eids = [] # local eids - if not others: - self._clear_relation(entity, rtype, role, ttypes) - return - for item in others: - item, _rels = self._complete_item(item) - other_entity = self.process_item(item, []) - eids.append(other_entity.eid) - self._set_relation(entity, rtype, role, eids) + try: + url, qs = url.split('?', 1) + except ValueError: + qs = '' + if etype is None: + try: + etype = url.rsplit('/', 1)[1] + except ValueError: + return url + try: + etype = self._cw.vreg.case_insensitive_etypes[etype] + except KeyError: + return url + params = parse_qs(qs) + if not 'vid' in params: + params['vid'] = ['xml'] + if add_relations: + relations = params.setdefault('relation', []) + for rtype, role, _ in self.source.mapping.get(etype, ()): + reldef = '%s-%s' % (rtype, role) + if not reldef in relations: + relations.append(reldef) + return url + '?' + self._cw.build_url_params(**params) + + def complete_item(self, item, add_relations=True): + try: + return self._parsed_urls[(item['cwuri'], add_relations)] + except KeyError: + itemurl = self.complete_url(item['cwuri'], item['cwtype'], + add_relations) + item_rels = list(self.parse(itemurl)) + assert len(item_rels) == 1, 'url %s expected to bring back one '\ + 'and only one entity, got %s' % (itemurl, len(item_rels)) + self._parsed_urls[(item['cwuri'], add_relations)] = item_rels[0] + return item_rels[0] - def related_link(self, entity, rtype, role, others, rules): - """implementation of 'link' action + +class CWEntityXMLItemBuilder(Component): + __regid__ = 'cw.entityxml.item-builder' + + def __init__(self, _cw, parser, node, **kwargs): + super(CWEntityXMLItemBuilder, self).__init__(_cw, **kwargs) + self.parser = parser + self.node = node + + def build_item(self): + node = self.node + item = dict(node.attrib.items()) + item['cwtype'] = unicode(node.tag) + item.setdefault('cwsource', None) + try: + item['eid'] = typed_eid(item['eid']) + except KeyError: + # cw < 3.11 compat mode XXX + item['eid'] = typed_eid(node.find('eid').text) + item['cwuri'] = node.find('cwuri').text + rels = {} + for child in node: + role = child.get('role') + if role: + # relation + related = rels.setdefault(role, {}).setdefault(child.tag, []) + related += [ritem for ritem, _ in self.parser.parse_etree(child)] + else: + # attribute + item[child.tag] = unicode(child.text) + return item, rels + + +class CWEntityXMLActionCopy(Component): + """implementation of cubicweb entity xml parser's'copy' action - requires an options to control search of the linked entity. - """ - for ttype, options in rules: - assert 'linkattr' in options, ( - "'link' action requires a list of attributes used to " - "search if the entity already exists") - self._related_link(entity, rtype, role, ttype, others, [options['linkattr']], - create_when_not_found=False) + Takes no option. 
+ """ + __regid__ = 'cw.entityxml.action.copy' + + def __init__(self, _cw, parser, rtype, role, entity=None, **kwargs): + super(CWEntityXMLActionCopy, self).__init__(_cw, **kwargs) + self.parser = parser + self.rtype = rtype + self.role = role + self.entity = entity + + @classproperty + def action(cls): + return cls.__regid__.rsplit('.', 1)[-1] + + def check_options(self, options, eid): + self._check_no_options(options, eid) - def related_link_or_create(self, entity, rtype, role, others, rules): - """implementation of 'link-or-create' action + def _check_no_options(self, options, eid, msg=None): + if options: + if msg is None: + msg = self._cw._("'%s' action doesn't take any options") % self.action + raise ValidationError(eid, {rn('options', 'subject'): msg}) + + def link_items(self, others, rules): + assert not any(x[1] for x in rules), "'copy' action takes no option" + ttypes = frozenset([x[0] for x in rules]) + eids = [] # local eids + for item in others: + if item['cwtype'] in ttypes: + item = self.parser.complete_item(item)[0] + other_entity = self.parser.process_item(item, []) + if other_entity is not None: + eids.append(other_entity.eid) + if eids: + self._set_relation(eids) + else: + self._clear_relation(ttypes) - requires an options to control search of the linked entity. - """ + def _clear_relation(self, ttypes): + if not self.parser.created_during_pull(self.entity): + if len(ttypes) > 1: + typerestr = ', Y is IN(%s)' % ','.join(ttypes) + else: + typerestr = ', Y is %s' % ','.join(ttypes) + self._cw.execute('DELETE ' + rtype_role_rql(self.rtype, self.role) + typerestr, + {'x': self.entity.eid}) + + def _set_relation(self, eids): + assert eids + rtype = self.rtype + rqlbase = rtype_role_rql(rtype, self.role) + eidstr = ','.join(str(eid) for eid in eids) + self._cw.execute('DELETE %s, NOT Y eid IN (%s)' % (rqlbase, eidstr), + {'x': self.entity.eid}) + if self.role == 'object': + rql = 'SET %s, Y eid IN (%s), NOT Y %s X' % (rqlbase, eidstr, rtype) + else: + rql = 'SET %s, Y eid IN (%s), NOT X %s Y' % (rqlbase, eidstr, rtype) + self._cw.execute(rql, {'x': self.entity.eid}) + + +class CWEntityXMLActionLink(CWEntityXMLActionCopy): + """implementation of cubicweb entity xml parser's 'link' action + + requires a 'linkattr' option to control search of the linked entity.
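As the tests further down show, the 'linkattr' option reaches the linker through the source mapping; excerpted from unittest_parsers.py:

    source.init_mapping([(('CWUser', 'in_group', '*'),
                          u'role=subject\naction=link\nlinkattr=name'),
                         (('*', 'tags', 'CWUser'),
                          u'role=object\naction=link-or-create\nlinkattr=name'),
                         ])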
+ """ + __regid__ = 'cw.entityxml.action.link' + + def check_options(self, options, eid): + if not 'linkattr' in options: + msg = self._cw._("'%s' action requires 'linkattr' option") % self.action + raise ValidationError(eid, {rn('options', 'subject'): msg}) + + create_when_not_found = False + + def link_items(self, others, rules): for ttype, options in rules: - assert 'linkattr' in options, ( - "'link-or-create' action requires a list of attributes used to " - "search if the entity already exists") - self._related_link(entity, rtype, role, ttype, others, [options['linkattr']], - create_when_not_found=True) + searchattrs = splitstrip(options.get('linkattr', '')) + self._related_link(ttype, others, searchattrs) - def _related_link(self, entity, rtype, role, ttype, others, searchattrs, - create_when_not_found): + def _related_link(self, ttype, others, searchattrs): def issubset(x,y): return all(z in y for z in x) eids = [] # local eids + source = self.parser.source for item in others: if item['cwtype'] != ttype: continue if not issubset(searchattrs, item): - item, _rels = self._complete_item(item, False) + item = self.parser.complete_item(item, False)[0] if not issubset(searchattrs, item): - self.source.error('missing attribute, got %s expected keys %s' - % item, searchattrs) + source.error('missing attribute, got %s expected keys %s', + item, searchattrs) continue - kwargs = dict((attr, item[attr]) for attr in searchattrs) - rql = build_search_rql(item['cwtype'], kwargs) - rset = self._cw.execute(rql, kwargs) - if len(rset) > 1: - self.source.error('ambiguous link: found %s entity %s with attributes %s', - len(rset), item['cwtype'], kwargs) - elif len(rset) == 1: - eids.append(rset[0][0]) - elif create_when_not_found: - ensure_str_keys(kwargs) # XXX necessary with python < 2.6 + # XXX str() needed with python < 2.6 + kwargs = dict((str(attr), item[attr]) for attr in searchattrs) + targets = self._find_entities(item, kwargs) + if len(targets) > 1: + source.error('ambiguous link: found %s entity %s with attributes %s', + len(targets), item['cwtype'], kwargs) + elif len(targets) == 1: + eids.append(targets[0].eid) + elif self.create_when_not_found: + eids.append(self._cw.create_entity(item['cwtype'], **kwargs).eid) else: - self.source.error('can not find %s entity with attributes %s', - item['cwtype'], kwargs) - if not eids: - self._clear_relation(entity, rtype, role, (ttype,)) + source.error('can not find %s entity with attributes %s', + item['cwtype'], kwargs) + if eids: + self._set_relation(eids) else: - self._set_relation(entity, rtype, role, eids) + self._clear_relation((ttype,)) - def _complete_item(self, item, add_relations=True): - itemurl = item['cwuri'] + '?vid=xml' - if add_relations: - for rtype, role, _ in self.source.mapping.get(item['cwtype'], ()): - itemurl += '&relation=%s-%s' % (rtype, role) - item_rels = list(self.parse(itemurl)) - assert len(item_rels) == 1 - return item_rels[0] + def _find_entities(self, item, kwargs): + return tuple(self._cw.find_entities(item['cwtype'], **kwargs)) + + +class CWEntityXMLActionLinkInState(CWEntityXMLActionLink): + """custom implementation of cubicweb entity xml parser's 'link' action for + the in_state relation + """ + __select__ = match_rtype('in_state') - def _clear_relation(self, entity, rtype, role, ttypes): - if entity.eid not in self.stats['created']: - if len(ttypes) > 1: - typerestr = ', Y is IN(%s)' % ','.join(ttypes) - else: - typerestr = ', Y is %s' % ','.join(ttypes) - self._cw.execute('DELETE ' + rtype_role_rql(rtype, role) +
typerestr, - {'x': entity.eid}) + def check_options(self, options, eid): + super(CWEntityXMLActionLinkInState, self).check_options(options, eid) + if not 'name' in options['linkattr']: + msg = self._cw._("'%s' action for in_state relation should at least have 'linkattr=name' option") % self.action + raise ValidationError(eid, {rn('options', 'subject'): msg}) - def _set_relation(self, entity, rtype, role, eids): - rqlbase = rtype_role_rql(rtype, role) - rql = 'DELETE %s' % rqlbase - if eids: - eidstr = ','.join(str(eid) for eid in eids) - rql += ', NOT Y eid IN (%s)' % eidstr - self._cw.execute(rql, {'x': entity.eid}) - if eids: - if role == 'object': - rql = 'SET %s, Y eid IN (%s), NOT Y %s X' % (rqlbase, eidstr, rtype) - else: - rql = 'SET %s, Y eid IN (%s), NOT X %s Y' % (rqlbase, eidstr, rtype) - self._cw.execute(rql, {'x': entity.eid}) + def _find_entities(self, item, kwargs): + assert 'name' in item # XXX else, complete_item + state_name = item['name'] + wf = self.entity.cw_adapt_to('IWorkflowable').current_workflow + state = wf.state_by_name(state_name) + if state is None: + return () + return (state,) + + +class CWEntityXMLActionLinkOrCreate(CWEntityXMLActionLink): + """implementation of cubicweb entity xml parser's'link-or-create' action + + requires a 'linkattr' option to control search of the linked entity. + """ + __regid__ = 'cw.entityxml.action.link-or-create' + create_when_not_found = True + def registration_callback(vreg): vreg.register_all(globals().values(), __name__) diff -r 6397a9051f65 -r 134613d3b353 sobjects/test/unittest_parsers.py --- a/sobjects/test/unittest_parsers.py Wed Jul 20 14:09:42 2011 +0200 +++ b/sobjects/test/unittest_parsers.py Wed Jul 20 18:22:41 2011 +0200 @@ -57,11 +57,14 @@ + + + '''.splitlines()) -RELATEDXML ={ +RELATEDXML = { 'http://pouet.org/6': u''' @@ -101,20 +104,47 @@ ''', } + +OTHERXML = ''.join(u''' + + + sthenault + toto + 2011-01-25 14:14:06 + 2010-01-22 10:27:59 + 2011-01-25 14:14:06 + + +'''.splitlines() +) class CWEntityXMLParserTC(CubicWebTC): - def setup_database(self): - req = self.request() - source = req.create_entity('CWSource', name=u'myfeed', type=u'datafeed', + test_db_id = 'xmlparser' + @classmethod + def pre_setup_database(cls, session, config): + source = session.create_entity('CWSource', name=u'myfeed', type=u'datafeed', parser=u'cw.entityxml', url=BASEXML) - self.commit() + session.create_entity('CWSource', name=u'myotherfeed', type=u'datafeed', + parser=u'cw.entityxml', url=OTHERXML) + session.commit() source.init_mapping([(('CWUser', 'use_email', '*'), u'role=subject\naction=copy'), (('CWUser', 'in_group', '*'), u'role=subject\naction=link\nlinkattr=name'), + (('CWUser', 'in_state', '*'), + u'role=subject\naction=link\nlinkattr=name'), (('*', 'tags', 'CWUser'), u'role=object\naction=link-or-create\nlinkattr=name'), ]) - req.create_entity('Tag', name=u'hop') + session.create_entity('Tag', name=u'hop') + + def test_complete_url(self): + dfsource = self.repo.sources_by_uri['myfeed'] + parser = dfsource._get_parser(self.session) + self.assertEqual(parser.complete_url('http://www.cubicweb.org/cwuser'), + 'http://www.cubicweb.org/cwuser?relation=tags-object&relation=in_group-subject&relation=in_state-subject&relation=use_email-subject&vid=xml') + self.assertEqual(parser.complete_url('http://www.cubicweb.org/cwuser?vid=rdf&relation=hop'), + 'http://www.cubicweb.org/cwuser?relation=hop&relation=tags-object&relation=in_group-subject&relation=in_state-subject&relation=use_email-subject&vid=rdf') + def test_actions(self): dfsource 
= self.repo.sources_by_uri['myfeed'] @@ -122,6 +152,8 @@ {u'CWUser': { (u'in_group', u'subject', u'link'): [ (u'CWGroup', {u'linkattr': u'name'})], + (u'in_state', u'subject', u'link'): [ + (u'State', {u'linkattr': u'name'})], (u'tags', u'object', u'link-or-create'): [ (u'Tag', {u'linkattr': u'name'})], (u'use_email', u'subject', u'copy'): [ @@ -139,11 +171,13 @@ self.assertEqual(user.modification_date, datetime(2011, 01, 25, 14, 14, 06)) self.assertEqual(user.cwuri, 'http://pouet.org/5') self.assertEqual(user.cw_source[0].name, 'myfeed') + self.assertEqual(user.absolute_url(), 'http://pouet.org/5') self.assertEqual(len(user.use_email), 1) # copy action email = user.use_email[0] self.assertEqual(email.address, 'syt@logilab.fr') self.assertEqual(email.cwuri, 'http://pouet.org/6') + self.assertEqual(email.absolute_url(), 'http://pouet.org/6') self.assertEqual(email.cw_source[0].name, 'myfeed') # link action self.assertFalse(self.execute('CWGroup X WHERE X name "unknown"')) @@ -156,14 +190,67 @@ self.assertEqual(tag.cwuri, 'http://testing.fr/cubicweb/%s' % tag.eid) self.assertEqual(tag.cw_source[0].name, 'system') + session.set_cnxset() stats = dfsource.pull_data(session, force=True, raise_on_error=True) self.assertEqual(stats['created'], set()) self.assertEqual(len(stats['updated']), 2) self.repo._type_source_cache.clear() self.repo._extid_cache.clear() + session.set_cnxset() stats = dfsource.pull_data(session, force=True, raise_on_error=True) self.assertEqual(stats['created'], set()) self.assertEqual(len(stats['updated']), 2) + session.commit() + + # test move to system source + self.sexecute('SET X cw_source S WHERE X eid %(x)s, S name "system"', {'x': email.eid}) + self.commit() + rset = self.sexecute('EmailAddress X WHERE X address "syt@logilab.fr"') + self.assertEqual(len(rset), 1) + e = rset.get_entity(0, 0) + self.assertEqual(e.eid, email.eid) + self.assertEqual(e.cw_metainformation(), {'source': {'type': u'native', 'uri': u'system', + 'use-cwuri-as-url': False}, + 'type': 'EmailAddress', + 'extid': None}) + self.assertEqual(e.cw_source[0].name, 'system') + self.assertEqual(e.reverse_use_email[0].login, 'sthenault') + self.commit() + # test everything is still fine after source synchronization + session.set_cnxset() + stats = dfsource.pull_data(session, force=True, raise_on_error=True) + rset = self.sexecute('EmailAddress X WHERE X address "syt@logilab.fr"') + self.assertEqual(len(rset), 1) + e = rset.get_entity(0, 0) + self.assertEqual(e.eid, email.eid) + self.assertEqual(e.cw_metainformation(), {'source': {'type': u'native', 'uri': u'system', + 'use-cwuri-as-url': False}, + 'type': 'EmailAddress', + 'extid': None}) + self.assertEqual(e.cw_source[0].name, 'system') + self.assertEqual(e.reverse_use_email[0].login, 'sthenault') + session.commit() + + # test delete entity + e.cw_delete() + self.commit() + # test everything is still fine after source synchronization + session.set_cnxset() + stats = dfsource.pull_data(session, force=True, raise_on_error=True) + rset = self.sexecute('EmailAddress X WHERE X address "syt@logilab.fr"') + self.assertEqual(len(rset), 0) + rset = self.sexecute('Any X WHERE X use_email E, X login "sthenault"') + self.assertEqual(len(rset), 0) + + def test_external_entity(self): + dfsource = self.repo.sources_by_uri['myotherfeed'] + session = self.repo.internal_session() + stats = dfsource.pull_data(session, force=True, raise_on_error=True) + user = self.execute('CWUser X WHERE X login "sthenault"').get_entity(0, 0) + self.assertEqual(user.creation_date, 
datetime(2010, 01, 22, 10, 27, 59)) + self.assertEqual(user.modification_date, datetime(2011, 01, 25, 14, 14, 06)) + self.assertEqual(user.cwuri, 'http://pouet.org/5') + self.assertEqual(user.cw_source[0].name, 'myfeed') if __name__ == '__main__': from logilab.common.testlib import unittest_main diff -r 6397a9051f65 -r 134613d3b353 test/unittest_dbapi.py --- a/test/unittest_dbapi.py Wed Jul 20 14:09:42 2011 +0200 +++ b/test/unittest_dbapi.py Wed Jul 20 18:22:41 2011 +0200 @@ -32,7 +32,8 @@ def test_public_repo_api(self): cnx = self.login('anon') self.assertEqual(cnx.get_schema(), self.repo.schema) - self.assertEqual(cnx.source_defs(), {'system': {'type': 'native', 'uri': 'system'}}) + self.assertEqual(cnx.source_defs(), {'system': {'type': 'native', 'uri': 'system', + 'use-cwuri-as-url': False}}) self.restore_connection() # proper way to close cnx self.assertRaises(ProgrammingError, cnx.get_schema) self.assertRaises(ProgrammingError, cnx.source_defs) diff -r 6397a9051f65 -r 134613d3b353 test/unittest_entity.py --- a/test/unittest_entity.py Wed Jul 20 14:09:42 2011 +0200 +++ b/test/unittest_entity.py Wed Jul 20 18:22:41 2011 +0200 @@ -572,7 +572,7 @@ self.assertEqual(person.rest_path(), 'personne/doe') # ambiguity test person2 = req.create_entity('Personne', prenom=u'remi', nom=u'doe') - person.clear_all_caches() + person.cw_clear_all_caches() self.assertEqual(person.rest_path(), 'personne/eid/%s' % person.eid) self.assertEqual(person2.rest_path(), 'personne/eid/%s' % person2.eid) # unique attr with None value (wikiid in this case) @@ -610,7 +610,9 @@ req = self.request() note = req.create_entity('Note', type=u'z') metainf = note.cw_metainformation() - self.assertEqual(metainf, {'source': {'type': 'native', 'uri': 'system'}, 'type': u'Note', 'extid': None}) + self.assertEqual(metainf, {'source': {'type': 'native', 'uri': 'system', + 'use-cwuri-as-url': False}, + 'type': u'Note', 'extid': None}) self.assertEqual(note.absolute_url(), 'http://testing.fr/cubicweb/note/%s' % note.eid) metainf['source'] = metainf['source'].copy() metainf['source']['base-url'] = 'http://cubicweb2.com/' diff -r 6397a9051f65 -r 134613d3b353 test/unittest_schema.py --- a/test/unittest_schema.py Wed Jul 20 14:09:42 2011 +0200 +++ b/test/unittest_schema.py Wed Jul 20 18:22:41 2011 +0200 @@ -29,7 +29,7 @@ from yams import ValidationError, BadSchemaDefinition from yams.constraints import SizeConstraint, StaticVocabularyConstraint from yams.buildobjs import RelationDefinition, EntityType, RelationType -from yams.reader import PyFileReader +from yams.reader import fill_schema from cubicweb.schema import ( CubicWebSchema, CubicWebEntitySchema, CubicWebSchemaLoader, @@ -159,7 +159,7 @@ self.assert_(isinstance(schema, CubicWebSchema)) self.assertEqual(schema.name, 'data') entities = sorted([str(e) for e in schema.entities()]) - expected_entities = ['BaseTransition', 'Bookmark', 'Boolean', 'Bytes', 'Card', + expected_entities = ['BaseTransition', 'BigInt', 'Bookmark', 'Boolean', 'Bytes', 'Card', 'Date', 'Datetime', 'Decimal', 'CWCache', 'CWConstraint', 'CWConstraintType', 'CWEType', 'CWAttribute', 'CWGroup', 'EmailAddress', 'CWRelation', @@ -209,7 +209,7 @@ 'read_permission', 'relation_type', 'relations', 'require_group', - 'specializes', 'state_of', 'subworkflow', 'subworkflow_exit', 'subworkflow_state', 'surname', 'symmetric', 'synopsis', + 'specializes', 'state_of', 'subworkflow', 'subworkflow_exit', 'subworkflow_state', 'surname', 'symmetric', 'synchronizing', 'synopsis', 'tags', 'timestamp', 'title', 'to_entity', 
'to_state', 'transition_of', 'travaille', 'type', @@ -260,18 +260,23 @@ self.assertEqual([x.expression for x in aschema.get_rqlexprs('update')], ['U has_update_permission X']) + def test_nonregr_allowed_type_names(self): + schema = CubicWebSchema('Test Schema') + schema.add_entity_type(EntityType('NaN')) + + class BadSchemaTC(TestCase): def setUp(self): self.loader = CubicWebSchemaLoader() self.loader.defined = {} self.loader.loaded_files = [] self.loader.post_build_callbacks = [] - self.loader._pyreader = PyFileReader(self.loader) def _test(self, schemafile, msg): self.loader.handle_file(join(DATADIR, schemafile)) + sch = self.loader.schemacls('toto') with self.assertRaises(BadSchemaDefinition) as cm: - self.loader._build_schema('toto', False) + fill_schema(sch, self.loader.defined, False) self.assertEqual(str(cm.exception), msg) def test_lowered_etype(self): diff -r 6397a9051f65 -r 134613d3b353 test/unittest_selectors.py --- a/test/unittest_selectors.py Wed Jul 20 14:09:42 2011 +0200 +++ b/test/unittest_selectors.py Wed Jul 20 18:22:41 2011 +0200 @@ -26,7 +26,7 @@ from cubicweb.appobject import Selector, AndSelector, OrSelector from cubicweb.selectors import (is_instance, adaptable, match_user_groups, multi_lines_rset, score_entity, is_in_state, - on_transition, rql_condition) + on_transition, rql_condition, relation_possible) from cubicweb.web import action @@ -102,6 +102,10 @@ self.assertIs(csel.search_selector(is_instance), sel) csel = AndSelector(Selector(), sel) self.assertIs(csel.search_selector(is_instance), sel) + self.assertIs(csel.search_selector((AndSelector, OrSelector)), csel) + self.assertIs(csel.search_selector((OrSelector, AndSelector)), csel) + self.assertIs(csel.search_selector((is_instance, score_entity)), sel) + self.assertIs(csel.search_selector((score_entity, is_instance)), sel) def test_inplace_and(self): selector = _1_() @@ -140,35 +144,6 @@ self.assertEqual(selector(None), 0) -class IsInStateSelectorTC(CubicWebTC): - def setup_database(self): - wf = self.shell().add_workflow("testwf", 'StateFull', default=True) - initial = wf.add_state(u'initial', initial=True) - final = wf.add_state(u'final') - wf.add_transition(u'forward', (initial,), final) - - def test_initial_state(self): - req = self.request() - entity = req.create_entity('StateFull') - selector = is_in_state(u'initial') - self.commit() - score = selector(entity.__class__, None, entity=entity) - self.assertEqual(score, 1) - - def test_final_state(self): - req = self.request() - entity = req.create_entity('StateFull') - selector = is_in_state(u'initial') - self.commit() - entity.cw_adapt_to('IWorkflowable').fire_transition(u'forward') - self.commit() - score = selector(entity.__class__, None, entity=entity) - self.assertEqual(score, 0) - selector = is_in_state(u'final') - score = selector(entity.__class__, None, entity=entity) - self.assertEqual(score, 1) - - class ImplementsSelectorTC(CubicWebTC): def test_etype_priority(self): req = self.request() @@ -193,7 +168,7 @@ class WorkflowSelectorTC(CubicWebTC): def _commit(self): self.commit() - self.wf_entity.clear_all_caches() + self.wf_entity.cw_clear_all_caches() def setup_database(self): wf = self.shell().add_workflow("wf_test", 'StateFull', default=True) @@ -315,6 +290,27 @@ self.assertEqual(selector(None, self.req, rset=self.rset), 0) +class RelationPossibleTC(CubicWebTC): + + def test_rqlst_1(self): + req = self.request() + selector = relation_possible('in_group') + select = self.vreg.parse(req, 'Any X WHERE X is CWUser').children[0] + score = 
selector(None, req, rset=1, + select=select, filtered_variable=select.defined_vars['X']) + self.assertEqual(score, 1) + + def test_rqlst_2(self): + req = self.request() + selector = relation_possible('in_group') + select = self.vreg.parse(req, 'Any 1, COUNT(X) WHERE X is CWUser, X creation_date XD, ' + 'Y creation_date YD, Y is CWGroup ' + 'HAVING DAY(XD)=DAY(YD)').children[0] + score = selector(None, req, rset=1, + select=select, filtered_variable=select.defined_vars['X']) + self.assertEqual(score, 1) + + class MatchUserGroupsTC(CubicWebTC): def test_owners_group(self): """tests usage of 'owners' group with match_user_group""" diff -r 6397a9051f65 -r 134613d3b353 test/unittest_utils.py --- a/test/unittest_utils.py Wed Jul 20 14:09:42 2011 +0200 +++ b/test/unittest_utils.py Wed Jul 20 18:22:41 2011 +0200 @@ -21,9 +21,12 @@ import decimal import datetime + from logilab.common.testlib import TestCase, DocTest, unittest_main -from cubicweb.utils import make_uid, UStringIO, SizeConstrainedList, RepeatList +from cubicweb.devtools.testlib import CubicWebTC +from cubicweb.utils import (make_uid, UStringIO, SizeConstrainedList, + RepeatList, HTMLHead) from cubicweb.entity import Entity try: @@ -155,6 +158,102 @@ def test_encoding_unknown_stuff(self): self.assertEqual(self.encode(TestCase), 'null') +class HTMLHeadTC(CubicWebTC): + def test_concat_urls(self): + base_url = u'http://test.fr/data/' + head = HTMLHead(base_url) + urls = [base_url + u'bob1.js', + base_url + u'bob2.js', + base_url + u'bob3.js'] + result = head.concat_urls(urls) + expected = u'http://test.fr/data/??bob1.js,bob2.js,bob3.js' + self.assertEqual(result, expected) + + def test_group_urls(self): + base_url = u'http://test.fr/data/' + head = HTMLHead(base_url) + urls_spec = [(base_url + u'bob0.js', None), + (base_url + u'bob1.js', None), + (u'http://ext.com/bob2.js', None), + (u'http://ext.com/bob3.js', None), + (base_url + u'bob4.css', 'all'), + (base_url + u'bob5.css', 'all'), + (base_url + u'bob6.css', 'print'), + (base_url + u'bob7.css', 'print'), + (base_url + u'bob8.css', ('all', u'[if IE 8]')), + (base_url + u'bob9.css', ('print', u'[if IE 8]')) + ] + result = head.group_urls(urls_spec) + expected = [(base_url + u'??bob0.js,bob1.js', None), + (u'http://ext.com/bob2.js', None), + (u'http://ext.com/bob3.js', None), + (base_url + u'??bob4.css,bob5.css', 'all'), + (base_url + u'??bob6.css,bob7.css', 'print'), + (base_url + u'bob8.css', ('all', u'[if IE 8]')), + (base_url + u'bob9.css', ('print', u'[if IE 8]')) + ] + self.assertEqual(list(result), expected) + + def test_getvalue_with_concat(self): + base_url = u'http://test.fr/data/' + head = HTMLHead(base_url) + head.add_js(base_url + u'bob0.js') + head.add_js(base_url + u'bob1.js') + head.add_js(u'http://ext.com/bob2.js') + head.add_js(u'http://ext.com/bob3.js') + head.add_css(base_url + u'bob4.css') + head.add_css(base_url + u'bob5.css') + head.add_css(base_url + u'bob6.css', 'print') + head.add_css(base_url + u'bob7.css', 'print') + head.add_ie_css(base_url + u'bob8.css') + head.add_ie_css(base_url + u'bob9.css', 'print', u'[if lt IE 7]') + result = head.getvalue() + expected = u""" + + + + + + + +""" + self.assertEqual(result, expected) + + def test_getvalue_without_concat(self): + base_url = u'http://test.fr/data/' + head = HTMLHead() + head.add_js(base_url + u'bob0.js') + head.add_js(base_url + u'bob1.js') + head.add_js(u'http://ext.com/bob2.js') + head.add_js(u'http://ext.com/bob3.js') + head.add_css(base_url + u'bob4.css') + head.add_css(base_url + u'bob5.css') + 
head.add_css(base_url + u'bob6.css', 'print') + head.add_css(base_url + u'bob7.css', 'print') + head.add_ie_css(base_url + u'bob8.css') + head.add_ie_css(base_url + u'bob9.css', 'print', u'[if lt IE 7]') + result = head.getvalue() + expected = u""" + + + + + + + + + + +""" + self.assertEqual(result, expected) class DocTest(DocTest): from cubicweb import utils as module diff -r 6397a9051f65 -r 134613d3b353 toolsutils.py --- a/toolsutils.py Wed Jul 20 14:09:42 2011 +0200 +++ b/toolsutils.py Wed Jul 20 18:22:41 2011 +0200 @@ -159,15 +159,11 @@ print '-> set permissions to 0600 for %s' % filepath chmod(filepath, 0600) -def read_config(config_file): - """read the instance configuration from a file and return it as a - dictionnary - - :type config_file: str - :param config_file: path to the configuration file - - :rtype: dict - :return: a dictionary with specified values associated to option names +def read_config(config_file, raise_if_unreadable=False): + """read some simple configuration from `config_file` and return it as a + dictionary. If `raise_if_unreadable` is false (the default), an empty + dictionary will be returned if the file is inexistant or unreadable, else + :exc:`ExecutionError` will be raised. """ from logilab.common.fileutils import lines config = current = {} @@ -190,8 +186,12 @@ value = value.strip() current[option] = value or None except IOError, ex: - warning('missing or non readable configuration file %s (%s)', - config_file, ex) + if raise_if_unreadable: + raise ExecutionError('%s. Are you logged with the correct user ' + 'to use this instance?' % ex) + else: + warning('missing or non readable configuration file %s (%s)', + config_file, ex) return config diff -r 6397a9051f65 -r 134613d3b353 uilib.py --- a/uilib.py Wed Jul 20 14:09:42 2011 +0200 +++ b/uilib.py Wed Jul 20 18:22:41 2011 +0200 @@ -31,7 +31,7 @@ from logilab.mtconverter import xml_escape, html_unescape from logilab.common.date import ustrftime -from cubicweb.utils import json_dumps +from cubicweb.utils import JSString, json_dumps def rql_for_eid(eid): @@ -51,31 +51,65 @@ assert eid is not None return '%s:%s' % (name, eid) +def print_bytes(value, req, props, displaytime=True): + return u'' + +def print_string(value, req, props, displaytime=True): + # don't translate empty value if you don't want strange results + if props is not None and value and props.get('internationalizable'): + return req._(value) + return value + +def print_date(value, req, props, displaytime=True): + return ustrftime(value, req.property_value('ui.date-format')) + +def print_time(value, req, props, displaytime=True): + return ustrftime(value, req.property_value('ui.time-format')) + +def print_tztime(value, req, props, displaytime=True): + return ustrftime(value, req.property_value('ui.time-format')) + u' UTC' + +def print_datetime(value, req, props, displaytime=True): + if displaytime: + return ustrftime(value, req.property_value('ui.datetime-format')) + return ustrftime(value, req.property_value('ui.date-format')) + +def print_tzdatetime(value, req, props, displaytime=True): + if displaytime: + return ustrftime(value, req.property_value('ui.datetime-format')) + u' UTC' + return ustrftime(value, req.property_value('ui.date-format')) + +def print_boolean(value, req, props, displaytime=True): + if value: + return req._('yes') + return req._('no') + +def print_float(value, req, props, displaytime=True): + return unicode(req.property_value('ui.float-format') % value) + +PRINTERS = { + 'Bytes': print_bytes, + 'String': print_string, + 'Date': 
print_date, + 'Time': print_time, + 'TZTime': print_tztime, + 'Datetime': print_datetime, + 'TZDatetime': print_tzdatetime, + 'Boolean': print_boolean, + 'Float': print_float, + 'Decimal': print_float, + # XXX Interval + } + def printable_value(req, attrtype, value, props=None, displaytime=True): """return a displayable value (i.e. unicode string)""" - if value is None or attrtype == 'Bytes': + if value is None: return u'' - if attrtype == 'String': - # don't translate empty value if you don't want strange results - if props is not None and value and props.get('internationalizable'): - return req._(value) - return value - if attrtype == 'Date': - return ustrftime(value, req.property_value('ui.date-format')) - if attrtype in ('Time', 'TZTime'): - return ustrftime(value, req.property_value('ui.time-format')) - if attrtype in ('Datetime', 'TZDatetime'): - if displaytime: - return ustrftime(value, req.property_value('ui.datetime-format')) - return ustrftime(value, req.property_value('ui.date-format')) - if attrtype == 'Boolean': - if value: - return req._('yes') - return req._('no') - if attrtype in ('Float', 'Decimal'): - value = req.property_value('ui.float-format') % value - # XXX Interval - return unicode(value) + try: + printer = PRINTERS[attrtype] + except KeyError: + return unicode(value) + return printer(value, req, props, displaytime) # text publishing ############################################################# @@ -275,16 +309,23 @@ self.args = args self.parent = parent def __unicode__(self): - args = u','.join(json_dumps(arg) for arg in self.args) + args = [] + for arg in self.args: + if isinstance(arg, JSString): + args.append(arg) + else: + args.append(json_dumps(arg)) if self.parent: - return u'%s(%s)' % (self.parent, args) - return args + return u'%s(%s)' % (self.parent, ','.join(args)) + return ','.join(args) class _JS(object): def __getattr__(self, attr): return _JSId(attr) -"""magic object to return strings suitable to call some javascript function with +js = _JS() +js.__doc__ = """\ +magic object to return strings suitable to call some javascript function with the given arguments (which should be correctly typed). >>> str(js.pouet(1, "2")) @@ -292,9 +333,10 @@ >>> str(js.cw.pouet(1, "2")) 'cw.pouet(1,"2")' >>> str(js.cw.pouet(1, "2").pouet(None)) -'cw.pouet(1,"2").pouet(null)') +'cw.pouet(1,"2").pouet(null)' +>>> str(js.cw.pouet(1, JSString("$")).pouet(None)) +'cw.pouet(1,$).pouet(null)' """ -js = _JS() def domid(string): """return a valid DOM id from a string (should also be usable in jQuery diff -r 6397a9051f65 -r 134613d3b353 utils.py --- a/utils.py Wed Jul 20 14:09:42 2011 +0200 +++ b/utils.py Wed Jul 20 18:22:41 2011 +0200 @@ -51,20 +51,6 @@ return str(key) + uuid4().hex -def dump_class(cls, clsname): - """create copy of a class by creating an empty class inheriting - from the given cls. 
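The HTMLHead changes in the utils.py hunk below add mod_concat-style url grouping; a minimal sketch of the intended behaviour, mirroring test_group_urls above (the base url is illustrative):

    head = HTMLHead(u'http://test.fr/data/')
    urls_spec = [(u'http://test.fr/data/bob0.js', None),
                 (u'http://test.fr/data/bob1.js', None),
                 (u'http://ext.com/bob2.js', None)]
    list(head.group_urls(urls_spec))
    # -> [(u'http://test.fr/data/??bob0.js,bob1.js', None),
    #     (u'http://ext.com/bob2.js', None)]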
- - Those class will be used as place holder for attribute and relation - description - """ - # type doesn't accept unicode name - # return type.__new__(type, str(clsname), (cls,), {}) - # __autogenerated__ attribute is just a marker - return type(str(clsname), (cls,), {'__autogenerated__': True, - '__doc__': cls.__doc__, - '__module__': cls.__module__}) - def support_args(callable, *argnames): """return true if the callable support given argument names""" if isinstance(callable, type): @@ -241,7 +227,7 @@ xhtml_safe_script_opening = u'' - def __init__(self): + def __init__(self, datadir_url=None): super(HTMLHead, self).__init__() self.jsvars = [] self.jsfiles = [] @@ -249,6 +235,7 @@ self.ie_cssfiles = [] self.post_inlined_scripts = [] self.pagedata_unload = False + self.datadir_url = datadir_url def add_raw(self, rawheader): @@ -285,7 +272,7 @@ if jsfile not in self.jsfiles: self.jsfiles.append(jsfile) - def add_css(self, cssfile, media): + def add_css(self, cssfile, media='all'): """adds `cssfile` to the list of javascripts used in the webpage This function checks if the file has already been added @@ -305,6 +292,45 @@ self.post_inlined_scripts.append(self.js_unload_code) self.pagedata_unload = True + def concat_urls(self, urls): + """concatenates urls into one url usable by Apache mod_concat + + This method returns the url without modifying it if there is only + one element in the list + :param urls: list of local urls/filenames to concatenate + """ + if len(urls) == 1: + return urls[0] + len_prefix = len(self.datadir_url) + concated = u','.join(url[len_prefix:] for url in urls) + return (u'%s??%s' % (self.datadir_url, concated)) + + def group_urls(self, urls_spec): + """parses urls_spec in order to generate concatenated urls + for js and css includes + + This method checks if the file is local and if it shares options + with direct neighbors + :param urls_spec: entire list of urls/filenames to inspect + """ + concatable = [] + prev_islocal = False + prev_key = None + for url, key in urls_spec: + islocal = url.startswith(self.datadir_url) + if concatable and (islocal != prev_islocal or key != prev_key): + yield (self.concat_urls(concatable), prev_key) + del concatable[:] + if not islocal: + yield (url, key) + else: + concatable.append(url) + prev_islocal = islocal + prev_key = key + if concatable: + yield (self.concat_urls(concatable), prev_key) + + def getvalue(self, skiphead=False): """reimplement getvalue to provide a consistent (and somewhat browser optimzed cf. http://stevesouders.com/cuzillion) order in external @@ -322,25 +348,47 @@ w(vardecl + u'\n') w(self.xhtml_safe_script_closing) # 2/ css files - for cssfile, media in self.cssfiles: + for cssfile, media in (self.group_urls(self.cssfiles) if self.datadir_url else self.cssfiles): w(u'\n' % (media, xml_escape(cssfile))) # 3/ ie css if necessary if self.ie_cssfiles: - for cssfile, media, iespec in self.ie_cssfiles: + ie_cssfiles = ((x, (y, z)) for x, y, z in self.ie_cssfiles) + for cssfile, (media, iespec) in (self.group_urls(ie_cssfiles) if self.datadir_url else ie_cssfiles): w(u' \n') # 4/ js files - for jsfile in self.jsfiles: - w(u'\n' % - xml_escape(jsfile)) + jsfiles = ((x, None) for x in self.jsfiles) + for jsfile, media in self.group_urls(jsfiles) if self.datadir_url else jsfiles: + if skiphead: + # Don't insert \n' % + xml_escape(jsfile)) # 5/ post inlined scripts (i.e. 
scripts depending on other JS files) if self.post_inlined_scripts: - w(self.xhtml_safe_script_opening) - w(u'\n\n'.join(self.post_inlined_scripts)) - w(self.xhtml_safe_script_closing) + if skiphead: + for script in self.post_inlined_scripts: + w(u'
<pre class="script">')
+                    w(xml_escape(script))
+                    w(u'</pre>
') + else: + w(self.xhtml_safe_script_opening) + w(u'\n\n'.join(self.post_inlined_scripts)) + w(self.xhtml_safe_script_closing) header = super(HTMLHead, self).getvalue() if skiphead: return header @@ -416,7 +464,7 @@ else: import json except ImportError: - json_dumps = None + json_dumps = JSString = None else: from logilab.common.date import ustrftime @@ -450,6 +498,40 @@ return json.dumps(value, cls=CubicWebJsonEncoder) + class JSString(str): + """use this string sub class in values given to :func:`js_dumps` to + insert raw javascript chain in some JSON string + """ + + def _dict2js(d, predictable=False): + res = [key + ': ' + js_dumps(val, predictable) + for key, val in d.iteritems()] + return '{%s}' % ', '.join(res) + + def _list2js(l, predictable=False): + return '[%s]' % ', '.join([js_dumps(val, predictable) for val in l]) + + def js_dumps(something, predictable=False): + """similar as :func:`json_dumps`, except values which are instances of + :class:`JSString` are expected to be valid javascript and will be output + as is + + >>> js_dumps({'hop': JSString('$.hop'), 'bar': None}, predictable=True) + '{bar: null, hop: $.hop}' + >>> js_dumps({'hop': '$.hop'}) + '{hop: "$.hop"}' + >>> js_dumps({'hip': {'hop': JSString('momo')}}) + '{hip: {hop: momo}}' + """ + if isinstance(something, dict): + return _dict2js(something, predictable) + if isinstance(something, list): + return _list2js(something, predictable) + if isinstance(something, JSString): + return something + return json_dumps(something) + + @deprecated('[3.7] merge_dicts is deprecated') def merge_dicts(dict1, dict2): """update a copy of `dict1` with `dict2`""" diff -r 6397a9051f65 -r 134613d3b353 vregistry.py --- a/vregistry.py Wed Jul 20 14:09:42 2011 +0200 +++ b/vregistry.py Wed Jul 20 18:22:41 2011 +0200 @@ -184,7 +184,10 @@ raise :exc:`NoSelectableObject` if not object apply """ - return self._select_best(self[__oid], *args, **kwargs) + obj = self._select_best(self[__oid], *args, **kwargs) + if obj is None: + raise NoSelectableObject(args, kwargs, self[__oid] ) + return obj def select_or_none(self, __oid, *args, **kwargs): """return the most specific object among those with the given oid @@ -202,16 +205,18 @@ context """ for appobjects in self.itervalues(): - try: - yield self._select_best(appobjects, *args, **kwargs) - except NoSelectableObject: + obj = self._select_best(appobjects, *args, **kwargs) + if obj is None: continue + yield obj def _select_best(self, appobjects, *args, **kwargs): """return an instance of the most specific object according to parameters - raise `NoSelectableObject` if not object apply + return None if not object apply (don't raise `NoSelectableObject` since + it's costly when searching appobjects using `possible_objects` + (e.g. searching for hooks). """ if len(args) > 1: warn('[3.5] only the request param can not be named when calling select*', @@ -224,7 +229,7 @@ elif appobjectscore > 0 and appobjectscore == score: winners.append(appobject) if winners is None: - raise NoSelectableObject(args, kwargs, appobjects) + return None if len(winners) > 1: # log in production environement / test, error while debugging msg = 'select ambiguity: %s\n(args: %s, kwargs: %s)' diff -r 6397a9051f65 -r 134613d3b353 web/component.py --- a/web/component.py Wed Jul 20 14:09:42 2011 +0200 +++ b/web/component.py Wed Jul 20 18:22:41 2011 +0200 @@ -1,4 +1,4 @@ -# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved. +# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved. 
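To summarize the vregistry hunk above (a sketch; the registry name and arguments are illustrative):

    # select() keeps its raising contract
    view = vreg['views'].select('primary', req, rset=rset)   # may raise NoSelectableObject
    # select_or_none() and possible_objects() rely on _select_best()
    # returning None instead of paying for an exception on every miss
    view = vreg['views'].select_or_none('primary', req, rset=rset)  # None if nothing applies
    for obj in vreg['views'].possible_objects(req, rset=rset):
        pass  # unselectable appobjects are silently skipped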
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr # # This file is part of CubicWeb. @@ -57,8 +57,6 @@ page_link_templ = u'%s' selected_page_link_templ = u'%s' previous_page_link_templ = next_page_link_templ = page_link_templ - no_previous_page_link = u'<<' - no_next_page_link = u'>>' def __init__(self, req, rset, **kwargs): super(NavigationComponent, self).__init__(req, rset=rset, **kwargs) @@ -131,7 +129,37 @@ return self.selected_page_link_templ % (url, content, content) return self.page_link_templ % (url, content, content) - def previous_link(self, path, params, content='<<', title=_('previous_results')): + @property + def prev_icon_url(self): + return xml_escape(self._cw.data_url('go_prev.png')) + + @property + def next_icon_url(self): + return xml_escape(self._cw.data_url('go_next.png')) + + @property + def no_previous_page_link(self): + return (u'%s' % + (self.prev_icon_url, self._cw._('there is no previous page'))) + + @property + def no_next_page_link(self): + return (u'%s' % + (self.next_icon_url, self._cw._('there is no next page'))) + + @property + def no_content_prev_link(self): + return (u'%s' % ( + (self.prev_icon_url, self._cw._('no content prev link')))) + + @property + def no_content_next_link(self): + return (u'%s' % + (self.next_icon_url, self._cw._('no content next link'))) + + def previous_link(self, path, params, content=None, title=_('previous_results')): + if not content: + content = self.no_content_prev_link start = self.starting_from if not start : return self.no_previous_page_link @@ -140,7 +168,9 @@ url = xml_escape(self.page_url(path, params, start, stop)) return self.previous_page_link_templ % (url, title, content) - def next_link(self, path, params, content='>>', title=_('next_results')): + def next_link(self, path, params, content=None, title=_('next_results')): + if not content: + content = self.no_content_next_link start = self.starting_from + self.page_size if start >= self.total: return self.no_next_page_link diff -r 6397a9051f65 -r 134613d3b353 web/controller.py --- a/web/controller.py Wed Jul 20 14:09:42 2011 +0200 +++ b/web/controller.py Wed Jul 20 18:22:41 2011 +0200 @@ -1,4 +1,4 @@ -# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved. +# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved. # contact http://www.logilab.fr/ -- mailto:contact@logilab.fr # # This file is part of CubicWeb. 
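A hedged sketch of the new NavigationComponent fallbacks above (comp and params are illustrative names):

    # on the first page, previous_link() returns no_previous_page_link,
    # an image built from self._cw.data_url('go_prev.png'); past the last
    # page, next_link() symmetrically returns no_next_page_link
    html = comp.previous_link('view', params) + comp.next_link('view', params)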
@@ -114,7 +114,7 @@ [recipient], body, subject) if not self._cw.vreg.config.sendmails([(msg, [recipient])]): msg = self._cw._('could not connect to the SMTP server') - url = self._cw.build_url(__message=msg) + url = self._cw.build_url(__message=msgid) raise Redirect(url) def reset(self): @@ -123,8 +123,10 @@ """ newparams = {} # sets message if needed - if self._cw.message: - newparams['_cwmsgid'] = self._cw.set_redirect_message(self._cw.message) + # XXX - don't call .message twice since it pops the id + msg = self._cw.message + if msg: + newparams['_cwmsgid'] = self._cw.set_redirect_message(msg) if self._cw.form.has_key('__action_apply'): self._return_to_edition_view(newparams) if self._cw.form.has_key('__action_cancel'): @@ -165,7 +167,7 @@ elif self._edited_entity: # clear caches in case some attribute participating to the rest path # has been modified - self._edited_entity.clear_all_caches() + self._edited_entity.cw_clear_all_caches() path = self._edited_entity.rest_path() else: path = 'view' diff -r 6397a9051f65 -r 134613d3b353 web/data/accessories-text-editor.png Binary file web/data/accessories-text-editor.png has changed diff -r 6397a9051f65 -r 134613d3b353 web/data/add_button.png Binary file web/data/add_button.png has changed diff -r 6397a9051f65 -r 134613d3b353 web/data/banner.png Binary file web/data/banner.png has changed diff -r 6397a9051f65 -r 134613d3b353 web/data/bg_trame_grise.png Binary file web/data/bg_trame_grise.png has changed diff -r 6397a9051f65 -r 134613d3b353 web/data/black-check.png Binary file web/data/black-check.png has changed diff -r 6397a9051f65 -r 134613d3b353 web/data/bullet.png Binary file web/data/bullet.png has changed diff -r 6397a9051f65 -r 134613d3b353 web/data/bullet_orange.png Binary file web/data/bullet_orange.png has changed diff -r 6397a9051f65 -r 134613d3b353 web/data/critical.png Binary file web/data/critical.png has changed diff -r 6397a9051f65 -r 134613d3b353 web/data/cubicweb.ajax.js --- a/web/data/cubicweb.ajax.js Wed Jul 20 14:09:42 2011 +0200 +++ b/web/data/cubicweb.ajax.js Wed Jul 20 18:22:41 2011 +0200 @@ -22,6 +22,9 @@ * * dummy ultra minimalist implementation of deferred for jQuery */ + +cw.ajax = new Namespace('cw.ajax'); + function Deferred() { this.__init__(this); } @@ -86,40 +89,133 @@ var JSON_BASE_URL = baseuri() + 'json?'; -//============= utility function handling remote calls responses. ==============// -function _loadAjaxHtmlHead($node, $head, tag, srcattr) { - var jqtagfilter = tag + '[' + srcattr + ']'; - if (cw['loaded_'+srcattr] === undefined) { - cw['loaded_'+srcattr] = []; - var loaded = cw['loaded_'+srcattr]; - jQuery('head ' + jqtagfilter).each(function(i) { - loaded.push(this.getAttribute(srcattr)); - }); - } else { - var loaded = cw['loaded_'+srcattr]; + +jQuery.extend(cw.ajax, { + /* variant of jquery evalScript with cache: true in ajax call */ + _evalscript: function ( i, elem ) { + var src = elem.getAttribute('src'); + if (src) { + jQuery.ajax({ + url: src, + async: false, + cache: true, + dataType: "script" + }); + } else { + jQuery.globalEval( elem.text || elem.textContent || elem.innerHTML || "" ); + } + if ( elem.parentNode ) { + elem.parentNode.removeChild( elem ); + } + }, + + evalscripts: function ( scripts ) { + if ( scripts.length ) { + jQuery.each(scripts, cw.ajax._evalscript); + } + }, + + /** + * returns true if `url` is a mod_concat-like url + * (e.g. 
http://..../data??resource1.js,resource2.js) + */ + _modconcatLikeUrl: function(url) { + var base = baseuri(); + if (!base.endswith('/')) { base += '/'; } + var modconcat_rgx = new RegExp('(' + base + 'data/([a-z0-9]+/)?)\\?\\?(.+)'); + return modconcat_rgx.exec(url); + }, + + /** + * decomposes a mod_concat-like url into its corresponding list of + * resources' urls + * >>> _listResources('http://foo.com/data/??a.js,b.js,c.js') + * ['http://foo.com/data/a.js', 'http://foo.com/data/b.js', 'http://foo.com/data/c.js'] + */ + _listResources: function(src) { + var resources = []; + var groups = cw.ajax._modconcatLikeUrl(src); + if (groups == null) { + resources.push(src); + } else { + var dataurl = groups[1]; + $.each(cw.utils.lastOf(groups).split(','), + function() { + resources.push(dataurl + this); + } + ); + } + return resources; + }, + + _buildMissingResourcesUrl: function(url, loadedResources) { + var resources = cw.ajax._listResources(url); + var missingResources = $.grep(resources, function(resource) { + return $.inArray(resource, loadedResources) == -1; + }); + cw.utils.extend(loadedResources, missingResources); + var missingResourceUrl = null; + if (missingResources.length == 1) { + // only one resource missing: build a node with a single resource url + // (maybe the browser has it in cache already) + missingResourceUrl = missingResources[0]; + } else if (missingResources.length > 1) { + // several resources missing: build a node with a concatenated + // resources url + var dataurl = cw.ajax._modconcatLikeUrl(url)[1]; + var missing_path = $.map(missingResources, function(resource) { + return resource.substring(dataurl.length); + }); + missingResourceUrl = dataurl + '??' + missing_path.join(','); + } + return missingResourceUrl; + }, + + _loadAjaxStylesheets: function($responseHead, $head) { + $responseHead.find('link[href]').each(function(i) { + var $srcnode = $(this); + var url = $srcnode.attr('href'); + if (url) { + var missingStylesheetsUrl = cw.ajax._buildMissingResourcesUrl(url, cw.loaded_links); + // compute concat-like url for missing resources and append + // element to $head + if (missingStylesheetsUrl) { + $srcnode.attr('href', missingStylesheetsUrl); + $srcnode.appendTo($head); + } + } + }); + $responseHead.find('link[href]').remove(); + }, + + _loadAjaxScripts: function($responseHead, $head) { + $responseHead.find('pre.script').each(function(i) { + var $srcnode = $(this); + var url = $srcnode.attr('src'); + if (url) { + var missingScriptsUrl = cw.ajax._buildMissingResourcesUrl(url, cw.loaded_scripts); + if (missingScriptsUrl) { + $srcnode.attr('src', missingScriptsUrl); + /* special handling of