# HG changeset patch
# User Sylvain Thénault
# Date 1323428907 -3600
# Node ID a4e667270dd43377e04283e4272e0e8aca50e97e
# Parent d8bb8f631d41cc2ca526fe560cac6bb7c5fa64c6
# Parent 7b2c7f3d370303bdcd6ac264da75bc7ef0ce3eda
oldstable is 3.13.X
diff -r d8bb8f631d41 -r a4e667270dd4 .hgignore
--- a/.hgignore Mon Sep 26 18:37:23 2011 +0200
+++ b/.hgignore Fri Dec 09 12:08:27 2011 +0100
@@ -14,4 +14,6 @@
.*/data/database/.*\.sqlite
.*/data/database/.*\.config
.*/data/database/tmpdb.*
-
+^doc/html/
+^doc/doctrees/
+^doc/book/en/devweb/js_api/
diff -r d8bb8f631d41 -r a4e667270dd4 .hgtags
--- a/.hgtags Mon Sep 26 18:37:23 2011 +0200
+++ b/.hgtags Fri Dec 09 12:08:27 2011 +0100
@@ -204,11 +204,34 @@
6dfe78a0797ccc34962510f8c2a57f63d65ce41e cubicweb-debian-version-3.12.5-1
a18dac758150fe9c1f9e4958d898717c32a8f679 cubicweb-version-3.12.6
105767487c7075dbcce36474f1af0485985cbf2c cubicweb-debian-version-3.12.6-1
+b661ef475260ca7d9ea5c36ba2cc86e95e5b17d3 cubicweb-version-3.13.0
+a96137858f571711678954477da6f7f435870cea cubicweb-debian-version-3.13.0-1
628fe57ce746c1dac87fb1b078b2026057df894e cubicweb-version-3.12.7
a07517985136bbbfa6610c428a1b42cd04cd530b cubicweb-debian-version-3.12.7-1
50122a47ce4fb2ecbf3cf20ed2777f4276c93609 cubicweb-version-3.12.8
cf49ed55685a810d8d73585330ad1a57cc76260d cubicweb-debian-version-3.12.8-1
cb2990aaa63cbfe593bcf3afdbb9071e4c76815a cubicweb-version-3.12.9
92464e39134c70e4ddbe6cd78a6e3338a3b88b05 cubicweb-debian-version-3.12.9-1
+7d84317ef185a10c5eb78e6086f2297d2f4bd1e3 cubicweb-version-3.13.1
+cc0578049cbe8b1d40009728e36c17e45da1fc6b cubicweb-debian-version-3.13.1-1
+f9227b9d61835f03163b8133a96da35db37a0c8d cubicweb-version-3.13.2
+9ad5411199e00b2611366439b82f35d7d3285423 cubicweb-debian-version-3.13.2-1
+0e82e7e5a34f57d7239c7a42e48ba4d5e53abab2 cubicweb-version-3.13.3
+fb48c55cb80234bc0164c9bcc0e2cfc428836e5f cubicweb-debian-version-3.13.3-1
+223ecf0620b6c87d997f8011aca0d9f0ee4750af cubicweb-version-3.13.4
+52f26475d764129c5559b2d80fd57e6ea1bdd6ba cubicweb-debian-version-3.13.4-1
+a62f24e1497e953fbaed5894f6064a64f7ac0be3 cubicweb-version-3.10.x
+20d9c550c57eb6f9adcb0cfab1c11b6b8793afb6 cubicweb-version-3.13.5
+2e9dd7d945557c210d3b79153c65f6885e755315 cubicweb-debian-version-3.13.5-1
074c848a3712a77737d9a1bfbb618c75f5c0cbfa cubicweb-version-3.12.10
9dfd21fa0a8b9f121a08866ad3e2ebd1dd06790d cubicweb-debian-version-3.12.10-1
+17c007ad845abbac82e12146abab32a634657574 cubicweb-version-3.13.6
+8a8949ca5351d48c5cf795ccdff06c1d4aab2ce0 cubicweb-debian-version-3.13.6-1
+68e8c81fa96d6bcd21cc17bc9832d388ce05a9eb cubicweb-version-3.13.7
+2f93ce32febe2f82565994fbd454f331f76ca883 cubicweb-debian-version-3.13.7-1
+249bd41693392d4716686f05c6b84628cd14dfcd cubicweb-version-3.13.8
+43f83f5d0a4d57a06e9a4990bc957fcfa691eec3 cubicweb-debian-version-3.13.8-1
+07afe32945aa275052747f78ef1f55858aaf6fa9 cubicweb-version-3.13.9
+0a3cb5e60d57a7a9851371b4ae487094ec2bf614 cubicweb-debian-version-3.13.9-1
+2ad4e5173c73a43804c265207bcabb8940bd42f4 cubicweb-version-3.13.10
+2eab9a5a6bf8e3b0cf706bee8cdf697759c0a33a cubicweb-debian-version-3.13.10-1
diff -r d8bb8f631d41 -r a4e667270dd4 MANIFEST.in
--- a/MANIFEST.in Mon Sep 26 18:37:23 2011 +0200
+++ b/MANIFEST.in Fri Dec 09 12:08:27 2011 +0100
@@ -5,7 +5,7 @@
include bin/cubicweb-*
include man/cubicweb-ctl.1
-recursive-include doc README makefile *.conf *.css *.py *.rst *.txt *.html *.png *.svg *.zargo *.dia
+recursive-include doc README makefile *.conf *.js *.css *.py *.rst *.txt *.html *.png *.svg *.zargo *.dia
recursive-include misc *.py *.png *.display
@@ -32,5 +32,6 @@
prune doc/book/en/.static
prune doc/book/fr/.static
+prune doc/html/_sources/
prune misc/cwfs
prune goa
diff -r d8bb8f631d41 -r a4e667270dd4 README
--- a/README Mon Sep 26 18:37:23 2011 +0200
+++ b/README Fri Dec 09 12:08:27 2011 +0100
@@ -5,20 +5,21 @@
developped at Logilab.
This package contains:
-* a repository server
-* a RQL command line client to the repository
-* an adaptative modpython interface to the server
-* a bunch of other management tools
+
+- a repository server
+- a RQL command line client to the repository
+- an adaptive modpython interface to the server
+- a bunch of other management tools
Install
-------
-More details at http://www.cubicweb.org/doc/en/admin/setup
+More details at http://docs.cubicweb.org/admin/setup
Getting started
---------------
-Execute:
+Execute::
apt-get install cubicweb cubicweb-dev cubicweb-blog
cubicweb-ctl create blog myblog
diff -r d8bb8f631d41 -r a4e667270dd4 __init__.py
--- a/__init__.py Mon Sep 26 18:37:23 2011 +0200
+++ b/__init__.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
diff -r d8bb8f631d41 -r a4e667270dd4 __pkginfo__.py
--- a/__pkginfo__.py Mon Sep 26 18:37:23 2011 +0200
+++ b/__pkginfo__.py Fri Dec 09 12:08:27 2011 +0100
@@ -22,7 +22,7 @@
modname = distname = "cubicweb"
-numversion = (3, 12, 10)
+numversion = (3, 13, 10)
version = '.'.join(str(num) for num in numversion)
description = "a repository of entities / relations for knowledge management"
@@ -40,10 +40,10 @@
]
__depends__ = {
- 'logilab-common': '>= 0.55.2',
+ 'logilab-common': '>= 0.56.2',
'logilab-mtconverter': '>= 0.8.0',
'rql': '>= 0.28.0',
- 'yams': '>= 0.32.0',
+ 'yams': '>= 0.33.0',
'docutils': '>= 0.6',
#gettext # for xgettext, msgcat, etc...
# web dependancies
diff -r d8bb8f631d41 -r a4e667270dd4 appobject.py
--- a/appobject.py Mon Sep 26 18:37:23 2011 +0200
+++ b/appobject.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -180,12 +180,13 @@
return self.__class__.__name__
def search_selector(self, selector):
- """search for the given selector or selector instance in the selectors
- tree. Return it of None if not found
+ """search for the given selector, selector instance or tuple of
+ selectors in the selectors tree. Return None if not found.
"""
if self is selector:
return self
- if isinstance(selector, type) and isinstance(self, selector):
+ if (isinstance(selector, type) or isinstance(selector, tuple)) and \
+ isinstance(self, selector):
return self
return None
@@ -240,7 +241,7 @@
for selector in selectors:
try:
selector = _instantiate_selector(selector)
- except:
+ except Exception:
pass
#assert isinstance(selector, Selector), selector
if isinstance(selector, cls):
@@ -250,8 +251,8 @@
return merged_selectors
def search_selector(self, selector):
- """search for the given selector or selector instance in the selectors
- tree. Return it of None if not found
+ """search for the given selector or selector instance (or tuple of
+ selectors) in the selectors tree. Return None if not found
"""
for childselector in self.selectors:
if childselector is selector:
@@ -259,7 +260,8 @@
found = childselector.search_selector(selector)
if found is not None:
return found
- return None
+ # if not found in children, maybe we are looking for self?
+ return super(MultiSelector, self).search_selector(selector)
class AndSelector(MultiSelector):
@@ -322,7 +324,7 @@
selected according to a context (usually at least a request and a result
set).
- The following attributes should be set on concret appobject classes:
+ The following attributes should be set on concrete appobject classes:
:attr:`__registry__`
name of the registry for this object (string like 'views',
@@ -413,7 +415,7 @@
appobject is returned without any transformation.
"""
try: # XXX < 3.6 bw compat
- pdefs = cls.property_defs
+ pdefs = cls.property_defs # pylint: disable=E1101
except AttributeError:
pdefs = getattr(cls, 'cw_property_defs', {})
else:
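The change above teaches ``search_selector`` to accept a tuple of selector
classes, mirroring the semantics of ``isinstance``. A minimal standalone
sketch (class names are illustrative, not the real cubicweb hierarchy)::

    class Selector(object):
        def search_selector(self, selector):
            if self is selector:
                return self
            if (isinstance(selector, type) or isinstance(selector, tuple)) and \
               isinstance(self, selector):
                return self
            return None

    class AndSelector(Selector):
        pass

    class YesSelector(Selector):
        pass

    sel = YesSelector()
    assert sel.search_selector(YesSelector) is sel
    # a tuple now works too, just like the second argument of isinstance():
    assert sel.search_selector((AndSelector, YesSelector)) is sel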
diff -r d8bb8f631d41 -r a4e667270dd4 crypto.py
--- a/crypto.py Mon Sep 26 18:37:23 2011 +0200
+++ b/crypto.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -15,9 +15,7 @@
#
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""Simple cryptographic routines, based on python-crypto.
-
-"""
+"""Simple cryptographic routines, based on python-crypto."""
__docformat__ = "restructuredtext en"
from pickle import dumps, loads
diff -r d8bb8f631d41 -r a4e667270dd4 cwconfig.py
--- a/cwconfig.py Mon Sep 26 18:37:23 2011 +0200
+++ b/cwconfig.py Fri Dec 09 12:08:27 2011 +0100
@@ -22,6 +22,9 @@
Resource mode
-------------
+Standard resource mode
+```````````````````````````
+
A resource *mode* is a predefined set of settings for various resources
directories, such as cubes, instances, etc. to ease development with the
framework. There are two running modes with *CubicWeb*:
@@ -30,7 +33,7 @@
usually requiring root access):
- instances are stored in :file:`/etc/cubicweb.d`
- - temporary files (such as pid file) in :file:`/var/run/cubicweb`
+ - temporary files (such as pid file) in :file:`/var/run/cubicweb`
where `<INSTALL_PREFIX>` is the detected installation prefix ('/usr/local' for
instance).
@@ -42,6 +45,25 @@
+
+.. _CubicwebWithinVirtualEnv:
+
+Within virtual environment
+```````````````````````````
+
+If you are not administrator of your machine, or if you need to play with a
+specific version of |cubicweb|, you can use `virtualenv`_, a tool to create
+isolated Python environments. Since version 3.9, |cubicweb| is **`virtualenv`
+friendly** and won't write any file outside the virtualenv directory.
+
+- instances are stored in :file:`/etc/cubicweb.d`
+- temporary files (such as pid file) in :file:`/var/run/cubicweb`
+
+.. _`virtualenv`: http://pypi.python.org/pypi/virtualenv
+
+Custom resource location
+````````````````````````````````
+
Notice that each resource path may be explicitly set using an environment
variable if the default doesn't suit your needs. Here are the default resource
directories that are affected according to mode:
@@ -49,8 +71,8 @@
* **system**: ::
CW_INSTANCES_DIR = /etc/cubicweb.d/
- CW_INSTANCES_DATA_DIR = /var/lib/cubicweb/instances/
- CW_RUNTIME_DIR = /var/run/cubicweb/
+ CW_INSTANCES_DATA_DIR = /var/lib/cubicweb/instances/
+ CW_RUNTIME_DIR = /var/run/cubicweb/
* **user**: ::
@@ -60,10 +82,13 @@
Cubes search path is also affected, see the :ref:`Cube` section.
-By default, the mode automatically set to `user` if a :file:`.hg` directory is found
-in the cubicweb package, else it's set to `system`. You can force this by setting
-the :envvar:`CW_MODE` environment variable to either `user` or `system` so you can
-easily:
+Setting Cubicweb Mode
+`````````````````````
+
+By default, the mode is set to 'system' for a standard installation. The mode is
+set to 'user' if `cubicweb is used from a mercurial repository`_. You can force
+this by setting the :envvar:`CW_MODE` environment variable to either 'user' or
+'system' so you can easily:
* use system wide installation but user specific instances and all, without root
privileges on the system (`export CW_MODE=user`)
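The same switch can be exercised from Python; here is a sketch assuming, as
the module-level code in this file does, that :envvar:`CW_MODE` is read at
import time and exposed as the configuration class's ``mode`` attribute::

    import os
    os.environ['CW_MODE'] = 'user'    # must be set before cwconfig is imported

    from cubicweb import cwconfig
    print cwconfig.CubicWebNoAppConfiguration.mode   # -> 'user'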
@@ -74,7 +99,15 @@
If you've a doubt about the mode you're currently running, check the first line
outputed by the :command:`cubicweb-ctl list` command.
-Also, if cubicweb is a mercurial checkout located in `<CW_SOFTWARE_ROOT>`:
+.. _`cubicweb is used from a mercurial repository`: CubicwebDevelopmentMod_
+
+.. _CubicwebDevelopmentMod:
+
+Development Mode
+`````````````````````
+If a :file:`.hg` directory is found in the cubicweb package, specific resource rules apply.
+
+`<CW_SOFTWARE_ROOT>` is the mercurial checkout of cubicweb:
* main cubes directory is `<CW_SOFTWARE_ROOT>/../cubes`. You can specify
another one with :envvar:`CW_INSTANCES_DIR` environment variable or simply
@@ -144,7 +177,8 @@
from threading import Lock
from os.path import (exists, join, expanduser, abspath, normpath,
basename, isdir, dirname, splitext)
-from warnings import warn
+from warnings import warn, filterwarnings
+
from logilab.common.decorators import cached, classproperty
from logilab.common.deprecation import deprecated
from logilab.common.logging_ext import set_log_methods, init_log
@@ -618,7 +652,7 @@
try:
__import__('cubes.%s.ccplugin' % cube)
cls.info('loaded cubicweb-ctl plugin from %s', cube)
- except:
+ except Exception:
cls.exception('while loading plugin %s', pluginfile)
elif exists(oldpluginfile):
warn('[3.6] %s: ecplugin module should be renamed to ccplugin' % cube,
@@ -626,12 +660,12 @@
try:
__import__('cubes.%s.ecplugin' % cube)
cls.info('loaded cubicweb-ctl plugin from %s', cube)
- except:
+ except Exception:
cls.exception('while loading plugin %s', oldpluginfile)
elif exists(initfile):
try:
__import__('cubes.%s' % cube)
- except:
+ except Exception:
cls.exception('while loading cube %s', cube)
else:
cls.warning('no __init__ file in cube %s', cube)
@@ -696,6 +730,9 @@
return vregpath
def __init__(self, debugmode=False):
+ if debugmode:
+ # in python 2.7, DeprecationWarning are not shown anymore by default
+ filterwarnings('default', category=DeprecationWarning)
register_stored_procedures()
self._cubes = None
super(CubicWebNoAppConfiguration, self).__init__()
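Context for the ``filterwarnings`` call added to ``__init__``: since Python
2.7, ``DeprecationWarning`` is silenced by default, so a debug session would
miss exactly the warnings it is meant to surface. Re-enabling them is a
stdlib one-liner::

    import warnings

    # what the constructor now does in debug mode
    warnings.filterwarnings('default', category=DeprecationWarning)

    def old_api():
        warnings.warn('old_api() is deprecated', DeprecationWarning)

    old_api()   # now prints the DeprecationWarning instead of staying silent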
@@ -826,6 +863,13 @@
"""
return [self.cube_dir(p) for p in self.cubes()]
+ # these are overridden by set_log_methods below
+ # only defining here to prevent pylint from complaining
+ @classmethod
+ def debug(cls, msg, *a, **kw):
+ pass
+ info = warning = error = critical = exception = debug
+
class CubicWebConfiguration(CubicWebNoAppConfiguration):
"""base class for cubicweb server and web configurations"""
@@ -849,6 +893,9 @@
# wouldn't be possible otherwise
repairing = False
+ # set by upgrade command
+ verbosity = 0
+
options = CubicWebNoAppConfiguration.options + (
('log-file',
{'type' : 'string',
@@ -1068,13 +1115,13 @@
@cached
def instance_md5_version(self):
- import hashlib
+ from hashlib import md5 # pylint: disable=E0611
infos = []
for pkg in sorted(self.cubes()):
version = self.cube_version(pkg)
infos.append('%s-%s' % (pkg, version))
infos.append('cubicweb-%s' % str(self.cubicweb_version()))
- return hashlib.md5(';'.join(infos)).hexdigest()
+ return md5(';'.join(infos)).hexdigest()
def load_configuration(self):
"""load instance's configuration files"""
@@ -1118,7 +1165,7 @@
def _gettext_init(self):
"""set language for gettext"""
- from gettext import translation
+ from cubicweb.gettext import translation
path = join(self.apphome, 'i18n')
for language in self.available_languages():
self.info("loading language %s", language)
@@ -1184,13 +1231,6 @@
SMTP_LOCK.release()
return True
- # these are overridden by set_log_methods below
- # only defining here to prevent pylint from complaining
- @classmethod
- def debug(cls, msg, *a, **kw):
- pass
- info = warning = error = critical = exception = debug
-
set_log_methods(CubicWebNoAppConfiguration,
logging.getLogger('cubicweb.configuration'))
@@ -1235,6 +1275,7 @@
class LIMIT_SIZE(FunctionDescr):
supported_backends = ('postgres', 'sqlite',)
+ minargs = maxargs = 3
rtype = 'String'
def st_description(self, funcnode, mainindex, tr):
@@ -1245,6 +1286,7 @@
class TEXT_LIMIT_SIZE(LIMIT_SIZE):
supported_backends = ('mysql', 'postgres', 'sqlite',)
+ minargs = maxargs = 2
register_function(TEXT_LIMIT_SIZE)
@@ -1297,7 +1339,7 @@
try:
return Binary(fpath)
except OSError, ex:
- self.critical("can't open %s: %s", fpath, ex)
+ source.critical("can't open %s: %s", fpath, ex)
return None
register_function(FSPATH)
diff -r d8bb8f631d41 -r a4e667270dd4 cwctl.py
--- a/cwctl.py Mon Sep 26 18:37:23 2011 +0200
+++ b/cwctl.py Fri Dec 09 12:08:27 2011 +0100
@@ -554,7 +554,7 @@
pid = int(open(pidf).read().strip())
try:
kill(pid, signal.SIGTERM)
- except:
+ except Exception:
print >> sys.stderr, "process %s seems already dead." % pid
else:
try:
@@ -564,7 +564,7 @@
print >> sys.stderr, 'trying SIGKILL'
try:
kill(pid, signal.SIGKILL)
- except:
+ except Exception:
# probably dead now
pass
wait_process_end(pid)
@@ -728,11 +728,9 @@
config = cwcfg.config_for(appid)
config.repairing = True # notice we're not starting the server
config.verbosity = self.config.verbosity
- try:
- config.set_sources_mode(self.config.ext_sources or ('migration',))
- except AttributeError:
- # not a server config
- pass
+ set_sources_mode = getattr(config, 'set_sources_mode', None)
+ if set_sources_mode is not None:
+ set_sources_mode(self.config.ext_sources or ('migration',))
# get instance and installed versions for the server and the componants
mih = config.migration_handler()
repo = mih.repo_connect()
@@ -802,6 +800,28 @@
return False
return True
+
+class ListVersionsInstanceCommand(InstanceCommand):
+ """List versions used by an instance.
+
+ <instance>...
+ identifiers of the instances to list versions for.
+ """
+ name = 'versions'
+
+ def versions_instance(self, appid):
+ from logilab.common.changelog import Version
+ config = cwcfg.config_for(appid)
+ # should not raise error if db versions don't match fs versions
+ config.repairing = True
+ if hasattr(config, 'set_sources_mode'):
+ config.set_sources_mode(('migration',))
+ repo = config.migration_handler().repo_connect()
+ vcconf = repo.get_versions()
+ for key in sorted(vcconf):
+ print key+': %s.%s.%s' % vcconf[key]
+
+
class ShellCommand(Command):
"""Run an interactive migration shell on an instance. This is a python shell
with enhanced migration commands predefined in the namespace. An additional
@@ -964,6 +984,7 @@
StartInstanceCommand, StopInstanceCommand, RestartInstanceCommand,
ReloadConfigurationCommand, StatusCommand,
UpgradeInstanceCommand,
+ ListVersionsInstanceCommand,
ShellCommand,
RecompileInstanceCatalogsCommand,
ListInstancesCommand, ListCubesCommand,
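The new ``versions`` command simply formats what ``repo.get_versions()``
returns, assumed here to be a mapping from component name to a
``(major, minor, patch)`` tuple, as the loop above relies on. With
illustrative data::

    vcconf = {'cubicweb': (3, 13, 10), 'blog': (1, 8, 0)}   # made-up values
    for key in sorted(vcconf):
        print key + ': %s.%s.%s' % vcconf[key]
    # blog: 1.8.0
    # cubicweb: 3.13.10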
diff -r d8bb8f631d41 -r a4e667270dd4 cwvreg.py
--- a/cwvreg.py Mon Sep 26 18:37:23 2011 +0200
+++ b/cwvreg.py Fri Dec 09 12:08:27 2011 +0100
@@ -194,17 +194,18 @@
_ = unicode
from warnings import warn
+from datetime import datetime, date, time, timedelta
from logilab.common.decorators import cached, clear_cache
from logilab.common.deprecation import deprecated, class_deprecated
from logilab.common.modutils import cleanup_sys_modules
from rql import RQLHelper
+from yams.constraints import BASE_CONVERTERS
from cubicweb import (ETYPE_NAME_MAP, Binary, UnknownProperty, UnknownEid,
ObjectNotFound, NoSelectableObject, RegistryNotFound,
CW_EVENT_MANAGER)
-from cubicweb.utils import dump_class
from cubicweb.vregistry import VRegistry, Registry, class_regid, classid
from cubicweb.rtags import RTAGS
@@ -368,7 +369,10 @@
# make a copy event if cls.__regid__ == etype, else we may have pb for
# client application using multiple connections to different
# repositories (eg shingouz)
- cls = dump_class(cls, etype)
+ # __autogenerated__ attribute is just a marker
+ cls = type(str(etype), (cls,), {'__autogenerated__': True,
+ '__doc__': cls.__doc__,
+ '__module__': cls.__module__})
cls.__regid__ = etype
cls.__initialize__(self.schema)
return cls
@@ -412,10 +416,8 @@
if not isinstance(view, class_deprecated)]
try:
view = self._select_best(views, req, rset=rset, **kwargs)
- if view.linkable():
+ if view is not None and view.linkable():
yield view
- except NoSelectableObject:
- continue
except Exception:
self.exception('error while trying to select %s view for %s',
vid, rset)
@@ -849,24 +851,15 @@
return self['views'].select(__vid, req, rset=rset, **kwargs)
-import decimal
-from datetime import datetime, date, time, timedelta
-
-YAMS_TO_PY = { # XXX unify with yams.constraints.BASE_CONVERTERS?
- 'String' : unicode,
- 'Bytes': Binary,
- 'Password': str,
-
- 'Boolean': bool,
- 'Int': int,
- 'Float': float,
- 'Decimal': decimal.Decimal,
-
+# XXX unify with yams.constraints.BASE_CONVERTERS?
+YAMS_TO_PY = BASE_CONVERTERS.copy()
+YAMS_TO_PY.update({
+ 'Bytes': Binary,
'Date': date,
'Datetime': datetime,
'TZDatetime': datetime,
'Time': time,
'TZTime': time,
'Interval': timedelta,
- }
+ })
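The ``dump_class`` helper removed above is replaced by a direct ``type()``
call: it builds a throwaway subclass whose ``__regid__`` can be set per
entity type without mutating the shared base class. In isolation (names are
illustrative)::

    class BaseEntity(object):
        """some shared entity class"""

    etype = 'Person'
    cls = type(str(etype), (BaseEntity,), {'__autogenerated__': True,
                                           '__doc__': BaseEntity.__doc__,
                                           '__module__': BaseEntity.__module__})
    cls.__regid__ = etype
    assert cls.__name__ == 'Person'
    assert issubclass(cls, BaseEntity) and cls is not BaseEntity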
diff -r d8bb8f631d41 -r a4e667270dd4 dataimport.py
--- a/dataimport.py Mon Sep 26 18:37:23 2011 +0200
+++ b/dataimport.py Fri Dec 09 12:08:27 2011 +0100
@@ -445,14 +445,14 @@
ObjectStore.__init__(self)
if session is None:
sys.exit('please provide a session or run this script with cubicweb-ctl shell and pass cnx as session')
- if not hasattr(session, 'set_pool'):
+ if not hasattr(session, 'set_cnxset'):
# connection
cnx = session
session = session.request()
- session.set_pool = lambda : None
+ session.set_cnxset = lambda : None
commit = commit or cnx.commit
else:
- session.set_pool()
+ session.set_cnxset()
self.session = session
self._commit = commit or session.commit
@@ -462,7 +462,7 @@
def commit(self):
txuuid = self._commit()
- self.session.set_pool()
+ self.session.set_cnxset()
return txuuid
def rql(self, *args):
@@ -554,7 +554,7 @@
self.tell("Run import function '%s'..." % func_name)
try:
func(self)
- except:
+ except Exception:
if self.catcherrors:
self.record_error(func_name, 'While calling %s' % func.__name__)
else:
@@ -642,7 +642,9 @@
for k, v in kwargs.iteritems():
kwargs[k] = getattr(v, 'eid', v)
entity, rels = self.metagen.base_etype_dicts(etype)
+ # make a copy to keep cached entity pristine
entity = copy(entity)
+ entity.cw_edited = copy(entity.cw_edited)
entity.cw_clear_relation_cache()
self.metagen.init_entity(entity)
entity.cw_edited.update(kwargs, skipsec=False)
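The extra ``copy(entity.cw_edited)`` matters because ``copy()`` is shallow:
the cached prototype entity and its copy would otherwise share one
``cw_edited`` mapping, and updating the copy would silently pollute the
cache. The same pitfall demonstrated with plain dicts::

    from copy import copy

    prototype = {'cw_edited': {}}
    entity = copy(prototype)               # shallow: inner dict is shared
    entity['cw_edited']['login'] = u'bob'
    assert prototype['cw_edited'] == {'login': u'bob'}   # cache polluted

    prototype = {'cw_edited': {}}
    entity = copy(prototype)
    entity['cw_edited'] = copy(entity['cw_edited'])      # what the fix does
    entity['cw_edited']['login'] = u'bob'
    assert prototype['cw_edited'] == {}                  # prototype pristine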
diff -r d8bb8f631d41 -r a4e667270dd4 dbapi.py
--- a/dbapi.py Mon Sep 26 18:37:23 2011 +0200
+++ b/dbapi.py Fri Dec 09 12:08:27 2011 +0100
@@ -254,6 +254,8 @@
def anonymous_session(self):
return not self.cnx or self.cnx.anonymous_connection
+ def __repr__(self):
+ return '<DBAPISession %r>' % self.sessionid
class DBAPIRequest(RequestSessionBase):
@@ -292,7 +294,7 @@
self.user = user
self.set_entity_cache(user)
- def execute(self, *args, **kwargs):
+ def execute(self, *args, **kwargs): # pylint: disable=E0202
"""overriden when session is set. By default raise authentication error
so authentication is requested.
"""
@@ -301,7 +303,7 @@
def set_default_language(self, vreg):
try:
self.lang = vreg.property_value('ui.language')
- except: # property may not be registered
+ except Exception: # property may not be registered
self.lang = 'en'
# use req.__ to translate a message without registering it to the catalog
try:
@@ -311,7 +313,7 @@
except KeyError:
# this occurs usually during test execution
self._ = self.__ = unicode
- self.pgettext = lambda x, y: y
+ self.pgettext = lambda x, y: unicode(y)
self.debug('request default language: %s', self.lang)
# entities cache management ###############################################
@@ -347,9 +349,9 @@
# server session compat layer #############################################
- def describe(self, eid):
+ def describe(self, eid, asdict=False):
"""return a tuple (type, sourceuri, extid) for the entity with id """
- return self.cnx.describe(eid)
+ return self.cnx.describe(eid, asdict)
def source_defs(self):
"""return the definition of sources used by the repository."""
@@ -483,7 +485,7 @@
def check_not_closed(func):
def decorator(self, *args, **kwargs):
if self._closed is not None:
- raise ProgrammingError('Closed connection')
+ raise ProgrammingError('Closed connection %s' % self.sessionid)
return func(self, *args, **kwargs)
return decorator
@@ -532,7 +534,7 @@
if self._closed is None and self._close_on_del:
try:
self.close()
- except:
+ except Exception:
pass
# connection initialization methods ########################################
@@ -621,7 +623,8 @@
"""
return self._repo.check_session(self.sessionid)
- def _txid(self, cursor=None): # XXX could now handle various isolation level!
+ def _txid(self, cursor=None): # pylint: disable=E0202
+ # XXX could now handle various isolation level!
# return a dict as bw compat trick
return {'txid': currentThread().getName()}
@@ -675,8 +678,15 @@
return self._repo.get_option_value(option, foreid)
@check_not_closed
- def describe(self, eid):
- return self._repo.describe(self.sessionid, eid, **self._txid())
+ def describe(self, eid, asdict=False):
+ metas = self._repo.describe(self.sessionid, eid, **self._txid())
+ if len(metas) == 3: # backward compat
+ metas = list(metas)
+ metas.append(metas[1])
+ if asdict:
+ return dict(zip(('type', 'source', 'extid', 'asource'), metas))
+ # XXX :-1 for cw compat, use asdict=True for full information
+ return metas[:-1]
# db-api like interface ####################################################
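The reworked ``describe`` keeps old callers working: repositories that
predate the ``asource`` field return a 3-tuple, which gets padded before
being narrowed back down or exposed as a dict. The shim's logic as a
standalone sketch::

    def _describe(metas, asdict=False):
        if len(metas) == 3:           # backward compat: no asource field
            metas = list(metas)
            metas.append(metas[1])    # asource defaults to source
        if asdict:
            return dict(zip(('type', 'source', 'extid', 'asource'), metas))
        return metas[:-1]             # first three values only, as before

    print _describe(('CWUser', 'system', None))
    # ['CWUser', 'system', None]
    print _describe(('CWUser', 'system', None), asdict=True)
    # {'type': 'CWUser', 'source': 'system', 'extid': None, 'asource': 'system'}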
diff -r d8bb8f631d41 -r a4e667270dd4 debian/changelog
--- a/debian/changelog Mon Sep 26 18:37:23 2011 +0200
+++ b/debian/changelog Fri Dec 09 12:08:27 2011 +0100
@@ -1,3 +1,63 @@
+cubicweb (3.13.10-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault Thu, 08 Dec 2011 13:22:05 +0100
+
+cubicweb (3.13.9-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault Fri, 21 Oct 2011 11:03:45 +0200
+
+cubicweb (3.13.8-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault Fri, 07 Oct 2011 16:20:35 +0200
+
+cubicweb (3.13.7-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault Thu, 29 Sep 2011 14:08:07 +0200
+
+cubicweb (3.13.6-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault Mon, 26 Sep 2011 18:36:00 +0200
+
+cubicweb (3.13.5-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault Thu, 08 Sep 2011 16:53:13 +0200
+
+cubicweb (3.13.4-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault Fri, 05 Aug 2011 12:22:11 +0200
+
+cubicweb (3.13.3-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault Wed, 27 Jul 2011 19:06:16 +0200
+
+cubicweb (3.13.2-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault Wed, 20 Jul 2011 17:15:22 +0200
+
+cubicweb (3.13.1-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault Tue, 12 Jul 2011 12:23:54 +0200
+
cubicweb (3.12.10-1) unstable; urgency=low
* new upstream release
@@ -10,6 +70,12 @@
-- Sylvain Thénault Tue, 12 Jul 2011 11:30:10 +0200
+cubicweb (3.13.0-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Sylvain Thénault Thu, 09 Jun 2011 20:18:41 +0200
+
cubicweb (3.12.8-1) unstable; urgency=low
* new upstream release
diff -r d8bb8f631d41 -r a4e667270dd4 debian/control
--- a/debian/control Mon Sep 26 18:37:23 2011 +0200
+++ b/debian/control Fri Dec 09 12:08:27 2011 +0100
@@ -7,9 +7,9 @@
Adrien Di Mascio ,
Aurélien Campéas ,
Nicolas Chauvat
-Build-Depends: debhelper (>= 7), python (>= 2.5), python-central (>= 0.5)
+Build-Depends: debhelper (>= 7), python (>= 2.5), python-central (>= 0.5), python-sphinx
# for the documentation:
-# python-sphinx, python-logilab-common, python-unittest2,
+# python-sphinx, python-logilab-common, python-unittest2, logilab-doctools, logilab-xml
Standards-Version: 3.9.1
Homepage: http://www.cubicweb.org
XS-Python-Version: >= 2.5, << 2.7
@@ -35,7 +35,7 @@
Conflicts: cubicweb-multisources
Replaces: cubicweb-multisources
Provides: cubicweb-multisources
-Depends: ${misc:Depends}, ${python:Depends}, cubicweb-common (= ${source:Version}), cubicweb-ctl (= ${source:Version}), python-logilab-database (>= 1.5.0), cubicweb-postgresql-support | cubicweb-mysql-support | python-pysqlite2
+Depends: ${misc:Depends}, ${python:Depends}, cubicweb-common (= ${source:Version}), cubicweb-ctl (= ${source:Version}), python-logilab-database (>= 1.5.0), cubicweb-postgresql-support | cubicweb-mysql-support | python-pysqlite2, python-logilab-common (>= 0.56.2)
Recommends: pyro (<< 4.0.0), cubicweb-documentation (= ${source:Version})
Description: server part of the CubicWeb framework
CubicWeb is a semantic web application framework.
@@ -70,7 +70,7 @@
Architecture: all
XB-Python-Version: ${python:Versions}
Provides: cubicweb-web-frontend
-Depends: ${misc:Depends}, ${python:Depends}, cubicweb-web (= ${source:Version}), cubicweb-ctl (= ${source:Version}), python-twisted-web
+Depends: ${misc:Depends}, ${python:Depends}, cubicweb-web (= ${source:Version}), cubicweb-ctl (= ${source:Version}), python-twisted-web, python-logilab-common (>= 0.56.2)
Recommends: pyro (<< 4.0.0), cubicweb-documentation (= ${source:Version})
Description: twisted-based web interface for the CubicWeb framework
CubicWeb is a semantic web application framework.
@@ -99,7 +99,7 @@
Package: cubicweb-common
Architecture: all
XB-Python-Version: ${python:Versions}
-Depends: ${misc:Depends}, ${python:Depends}, graphviz, gettext, python-logilab-mtconverter (>= 0.8.0), python-logilab-common (>= 0.55.2), python-yams (>= 0.32.0), python-rql (>= 0.28.0), python-lxml
+Depends: ${misc:Depends}, ${python:Depends}, graphviz, gettext, python-logilab-mtconverter (>= 0.8.0), python-logilab-common (>= 0.55.2), python-yams (>= 0.33.0), python-rql (>= 0.28.0), python-lxml
Recommends: python-simpletal (>= 4.0), python-crypto
Conflicts: cubicweb-core
Replaces: cubicweb-core
diff -r d8bb8f631d41 -r a4e667270dd4 debian/cubicweb-ctl.cubicweb.init
--- a/debian/cubicweb-ctl.cubicweb.init Mon Sep 26 18:37:23 2011 +0200
+++ b/debian/cubicweb-ctl.cubicweb.init Fri Dec 09 12:08:27 2011 +0100
@@ -4,8 +4,8 @@
# Provides: cubicweb
# Required-Start: $remote_fs $syslog $local_fs $network
# Required-Stop: $remote_fs $syslog $local_fs $network
-# Should-Start: $postgresql $pyro-nsd
-# Should-Stop: $postgresql $pyro-nsd
+# Should-Start: postgresql pyro-nsd
+# Should-Stop: postgresql pyro-nsd
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Start cubicweb application at boot time
diff -r d8bb8f631d41 -r a4e667270dd4 debian/rules
--- a/debian/rules Mon Sep 26 18:37:23 2011 +0200
+++ b/debian/rules Fri Dec 09 12:08:27 2011 +0100
@@ -10,14 +10,11 @@
build: build-stamp
build-stamp:
dh_testdir
- # XXX doesn't work if logilab-doctools, logilab-xml are not in build depends
- # and I can't get pbuilder find them in its chroot :(
- # cd doc && make
- # FIXME cleanup and use sphinx-build as build-depends ?
NO_SETUPTOOLS=1 python setup.py build
- # XXX uncomment this and associated build-depends in control
- #when necessary sphinx version is in all built distribution
- #PYTHONPATH=$(CURDIR)/.. $(MAKE) -C doc/book/en all
+ # documentation build is now made optional since it can break for old
+ # distributions and we don't want to block a new release of Cubicweb
+ # because of documentation issues.
+ -PYTHONPATH=$(CURDIR)/.. $(MAKE) -C doc/book/en all
touch build-stamp
clean:
@@ -73,7 +70,7 @@
dh_installman -i
dh_installchangelogs -i
dh_link -i
- dh_compress -i -X.py -X.ini -X.xml
+ dh_compress -i -X.py -X.ini -X.xml -X.js -X.rst
dh_fixperms -i
dh_installdeb -i
dh_gencontrol -i
diff -r d8bb8f631d41 -r a4e667270dd4 devtools/__init__.py
--- a/devtools/__init__.py Mon Sep 26 18:37:23 2011 +0200
+++ b/devtools/__init__.py Fri Dec 09 12:08:27 2011 +0100
@@ -28,15 +28,17 @@
import pickle
import glob
import warnings
+from hashlib import sha1 # pylint: disable=E0611
from datetime import timedelta
from os.path import (abspath, join, exists, basename, dirname, normpath, split,
isfile, isabs, splitext, isdir, expanduser)
from functools import partial
-import hashlib
from logilab.common.date import strptime
from logilab.common.decorators import cached, clear_cache
-from cubicweb import CW_SOFTWARE_ROOT, ConfigurationError, schema, cwconfig, BadConnectionId
+
+from cubicweb import ConfigurationError, ExecutionError, BadConnectionId
+from cubicweb import CW_SOFTWARE_ROOT, schema, cwconfig
from cubicweb.server.serverconfig import ServerConfiguration
from cubicweb.etwist.twconfig import TwistedConfiguration
@@ -91,7 +93,7 @@
""" Idea: this is less costly than a full re-creation of the repo object.
off:
* session are closed,
- * pools are closed
+ * cnxsets are closed
* system source is shutdown
"""
if not repo._needs_refresh:
@@ -102,8 +104,8 @@
repo.close(sessionid)
except BadConnectionId: #this is strange ? thread issue ?
print 'XXX unknown session', sessionid
- for pool in repo.pools:
- pool.close(True)
+ for cnxset in repo.cnxsets:
+ cnxset.close(True)
repo.system_source.shutdown()
repo._needs_refresh = True
repo._has_started = False
@@ -111,12 +113,12 @@
def turn_repo_on(repo):
"""Idea: this is less costly than a full re-creation of the repo object.
on:
- * pools are connected
+ * cnxsets are connected
* cache are cleared
"""
if repo._needs_refresh:
- for pool in repo.pools:
- pool.reconnect()
+ for cnxset in repo.cnxsets:
+ cnxset.reconnect()
repo._type_source_cache = {}
repo._extid_cache = {}
repo.querier._rql_cache = {}
@@ -197,7 +199,10 @@
directory from wich tests are launched or by specifying an alternative
sources file using self.sourcefile.
"""
- sources = super(TestServerConfiguration, self).sources()
+ try:
+ sources = super(TestServerConfiguration, self).sources()
+ except ExecutionError:
+ sources = {}
if not sources:
sources = DEFAULT_SOURCES
if 'admin' not in sources:
@@ -207,9 +212,6 @@
# web config methods needed here for cases when we use this config as a web
# config
- def instance_md5_version(self):
- return ''
-
def default_base_url(self):
return BASE_URL
@@ -258,8 +260,9 @@
Example usage::
class MyTests(CubicWebTC):
- _config = RealDatabseConfiguration('myapp',
- sourcefile='/path/to/sources')
+ _config = RealDatabaseConfiguration('myapp',
+ sourcefile='/path/to/sources')
+
def test_something(self):
rset = self.execute('Any X WHERE X is CWUser')
self.view('foaf', rset)
@@ -475,12 +478,11 @@
repo = self.get_repo(startup=True)
cnx = self.get_cnx()
session = repo._sessions[cnx.sessionid]
- session.set_pool()
+ session.set_cnxset()
_commit = session.commit
- def always_pooled_commit():
- _commit()
- session.set_pool()
- session.commit = always_pooled_commit
+ def keep_cnxset_commit():
+ _commit(free_cnxset=False)
+ session.commit = keep_cnxset_commit
pre_setup_func(session, self.config)
session.commit()
cnx.close()
@@ -576,7 +578,7 @@
templcursor.close()
cnx.close()
init_repository(self.config, interactive=False)
- except:
+ except BaseException:
if self.dbcnx is not None:
self.dbcnx.rollback()
print >> sys.stderr, 'building', self.dbname, 'failed'
@@ -596,7 +598,7 @@
@property
def _config_id(self):
- return hashlib.sha1(self.config.apphome).hexdigest()[:10]
+ return sha1(self.config.apphome).hexdigest()[:10]
def _backup_name(self, db_id): # merge me with parent
backup_name = '_'.join(('cache', self._config_id, self.dbname, db_id))
@@ -656,6 +658,25 @@
class SQLiteTestDataBaseHandler(TestDataBaseHandler):
DRIVER = 'sqlite'
+ __TMPDB = set()
+
+ @classmethod
+ def _cleanup_all_tmpdb(cls):
+ for dbpath in cls.__TMPDB:
+ cls._cleanup_database(dbpath)
+
+
+
+ def __init__(self, *args, **kwargs):
+ super(SQLiteTestDataBaseHandler, self).__init__(*args, **kwargs)
+ # use a dedicated base for each process.
+ if 'global-db-name' not in self.system_source:
+ self.system_source['global-db-name'] = self.system_source['db-name']
+ process_db = self.system_source['db-name'] + str(os.getpid())
+ self.system_source['db-name'] = process_db
+ process_db = self.absolute_dbfile() # update db-name to absolute path
+ self.__TMPDB.add(process_db)
+
@staticmethod
def _cleanup_database(dbfile):
try:
@@ -664,6 +685,10 @@
except OSError:
pass
+ @property
+ def dbname(self):
+ return self.system_source['global-db-name']
+
def absolute_dbfile(self):
"""absolute path of current database file"""
dbfile = join(self._ensure_test_backup_db_dir(),
@@ -671,7 +696,6 @@
self.config.sources()['system']['db-name'] = dbfile
return dbfile
-
def process_cache_entry(self, directory, dbname, db_id, entry):
return entry.get('sqlite')
@@ -706,6 +730,9 @@
self._cleanup_database(self.absolute_dbfile())
init_repository(self.config, interactive=False)
+import atexit
+atexit.register(SQLiteTestDataBaseHandler._cleanup_all_tmpdb)
+
def install_sqlite_patch(querier):
"""This patch hotfixes the following sqlite bug :
@@ -726,13 +753,13 @@
value = value.rsplit('.', 1)[0]
try:
row[cellindex] = strptime(value, '%Y-%m-%d %H:%M:%S')
- except:
+ except Exception:
row[cellindex] = strptime(value, '%Y-%m-%d')
if vtype == 'Time' and type(value) is unicode:
found_date = True
try:
row[cellindex] = strptime(value, '%H:%M:%S')
- except:
+ except Exception:
# DateTime used as Time?
row[cellindex] = strptime(value, '%Y-%m-%d %H:%M:%S')
if vtype == 'Interval' and type(value) is int:
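The sqlite handler now derives a per-process database file name (suffix =
pid) so parallel test runs cannot clobber each other, and cleans the files
up through ``atexit``. The scheme in isolation (helper names are
hypothetical)::

    import atexit, os

    _TMPDB = set()

    def process_dbname(basename):
        dbpath = basename + str(os.getpid())   # e.g. 'tmpdb12345'
        _TMPDB.add(dbpath)
        return dbpath

    def _cleanup_all_tmpdb():
        for dbpath in _TMPDB:
            try:
                os.remove(dbpath)
            except OSError:
                pass                           # already gone

    atexit.register(_cleanup_all_tmpdb)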
diff -r d8bb8f631d41 -r a4e667270dd4 devtools/devctl.py
--- a/devtools/devctl.py Mon Sep 26 18:37:23 2011 +0200
+++ b/devtools/devctl.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -88,6 +88,9 @@
continue
if not hasattr(mod, '__file__'):
continue
+ if mod.__file__ is None:
+ # odd/rare but real
+ continue
for path in config.vregistry_path():
if mod.__file__.startswith(path):
del sys.modules[name]
@@ -155,6 +158,9 @@
add_msg(w, cstrtype)
done = set()
for eschema in sorted(schema.entities()):
+ if eschema.type in libschema:
+ done.add(eschema.description)
+ for eschema in sorted(schema.entities()):
etype = eschema.type
if etype not in libschema:
add_msg(w, etype)
@@ -203,15 +209,19 @@
w('# (no object form for final or symmetric relation types)\n')
w('\n')
for rschema in sorted(schema.relations()):
+ if rschema.type in libschema:
+ done.add(rschema.type)
+ done.add(rschema.description)
+ for rschema in sorted(schema.relations()):
rtype = rschema.type
if rtype not in libschema:
# bw compat, necessary until all translation of relation are done
# properly...
add_msg(w, rtype)
+ done.add(rtype)
if rschema.description and rschema.description not in done:
- done.add(rschema.description)
add_msg(w, rschema.description)
- done.add(rtype)
+ done.add(rschema.description)
librschema = None
else:
librschema = libschema.rschema(rtype)
@@ -221,7 +231,7 @@
for subjschema in rschema.subjects():
if not subjschema in libsubjects:
add_msg(w, rtype, subjschema.type)
- if not (schema.rschema(rtype).final or rschema.symmetric):
+ if not (rschema.final or rschema.symmetric):
if rschema not in NO_I18NCONTEXT:
libobjects = librschema and librschema.objects() or ()
for objschema in rschema.objects():
@@ -231,6 +241,14 @@
# bw compat, necessary until all translation of relation are
# done properly...
add_msg(w, '%s_object' % rtype)
+ for rdef in rschema.rdefs.itervalues():
+ if not rdef.description or rdef.description in done:
+ continue
+ if (librschema is None or
+ (rdef.subject, rdef.object) not in librschema.rdefs or
+ librschema.rdefs[(rdef.subject, rdef.object)].description != rdef.description):
+ add_msg(w, rdef.description)
+ done.add(rdef.description)
for objid in _iter_vreg_objids(vreg, vregdone):
add_msg(w, '%s_description' % objid)
add_msg(w, objid)
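The two-pass pattern introduced above first records every description the
library schema already provides, then emits a msgid only for strings not yet
seen, so the generated catalog contains no duplicates. With plain dicts
standing in for schemas::

    libschema = {'CWUser': u'a user account'}                     # illustrative
    schema = {'CWUser': u'a user account', 'Blog': u'a weblog'}

    done = set()
    for etype in sorted(schema):              # first pass: known descriptions
        if etype in libschema:
            done.add(schema[etype])
    for etype in sorted(schema):              # second pass: emit new ones only
        description = schema[etype]
        if description and description not in done:
            print 'msgid "%s"' % description  # only u'a weblog' is emitted
            done.add(description)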
diff -r d8bb8f631d41 -r a4e667270dd4 devtools/fake.py
--- a/devtools/fake.py Mon Sep 26 18:37:23 2011 +0200
+++ b/devtools/fake.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -63,8 +63,8 @@
self._session_data = {}
self._headers_in = Headers()
- def set_cookie(self, cookie, key, maxage=300, expires=None):
- super(FakeRequest, self).set_cookie(cookie, key, maxage=300, expires=None)
+ def set_cookie(self, name, value, maxage=300, expires=None, secure=False):
+ super(FakeRequest, self).set_cookie(name, value, maxage, expires, secure)
cookie = self.get_response_header('Set-Cookie')
self._headers_in.setHeader('Cookie', cookie)
@@ -138,13 +138,15 @@
class FakeSession(RequestSessionBase):
- read_security = write_security = True
- set_read_security = set_write_security = lambda *args, **kwargs: None
- def __init__(self, repo=None, user=None):
+ def __init__(self, repo=None, user=None, vreg=None):
self.repo = repo
- self.vreg = getattr(self.repo, 'vreg', CubicWebVRegistry(FakeConfig(), initlog=False))
- self.pool = FakePool()
+ if vreg is None:
+ vreg = getattr(self.repo, 'vreg', None)
+ if vreg is None:
+ vreg = CubicWebVRegistry(FakeConfig(), initlog=False)
+ self.vreg = vreg
+ self.cnxset = FakeConnectionsSet()
self.user = user or FakeUser()
self.is_internal_session = False
self.transaction_data = {}
@@ -162,6 +164,13 @@
def set_entity_cache(self, entity):
pass
+ # for use with enabled_security context manager
+ read_security = write_security = True
+ def init_security(self, *args):
+ return None, None
+ def reset_security(self, *args):
+ return
+
class FakeRepo(object):
querier = None
def __init__(self, schema, vreg=None, config=None):
@@ -201,6 +210,6 @@
self.uri = uri
-class FakePool(object):
+class FakeConnectionsSet(object):
def source(self, uri):
return FakeSource(uri)
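The ``set_cookie`` fix above is worth spelling out: the old override
re-passed its own hardcoded defaults (``maxage=300, expires=None``) to the
parent class, silently discarding whatever the caller supplied. The bug
pattern in miniature::

    class Base(object):
        def set_cookie(self, name, value, maxage=300, expires=None,
                       secure=False):
            print name, value, maxage, expires, secure

    class Broken(Base):
        def set_cookie(self, cookie, key, maxage=300, expires=None):
            # drops the caller's arguments on the floor
            super(Broken, self).set_cookie(cookie, key, maxage=300, expires=None)

    class Fixed(Base):
        def set_cookie(self, name, value, maxage=300, expires=None,
                       secure=False):
            super(Fixed, self).set_cookie(name, value, maxage, expires, secure)

    Broken().set_cookie('sid', 'x', maxage=600)   # prints maxage 300: lost
    Fixed().set_cookie('sid', 'x', maxage=600)    # prints maxage 600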
diff -r d8bb8f631d41 -r a4e667270dd4 devtools/fill.py
--- a/devtools/fill.py Mon Sep 26 18:37:23 2011 +0200
+++ b/devtools/fill.py Fri Dec 09 12:08:27 2011 +0100
@@ -275,9 +275,6 @@
:param choice_func: a function that takes an entity type, an attrname and
returns acceptable values for this attribute
"""
- # XXX HACK, remove or fix asap
- if etype in set(('String', 'Int', 'Float', 'Boolean', 'Date', 'CWGroup', 'CWUser')):
- return []
queries = []
for index in xrange(entity_num):
restrictions = []
@@ -355,7 +352,7 @@
if objtype:
rql += ', %s is %s' % (selectvar, objtype)
rset = cursor.execute(rql)
- except:
+ except Exception:
print "could restrict eid_list with given constraints (%r)" % constraints
return []
return set(eid for eid, in rset.rows)
diff -r d8bb8f631d41 -r a4e667270dd4 devtools/livetest.py
--- a/devtools/livetest.py Mon Sep 26 18:37:23 2011 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,194 +0,0 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
-# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
-#
-# This file is part of CubicWeb.
-#
-# CubicWeb is free software: you can redistribute it and/or modify it under the
-# terms of the GNU Lesser General Public License as published by the Free
-# Software Foundation, either version 2.1 of the License, or (at your option)
-# any later version.
-#
-# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
-# details.
-#
-# You should have received a copy of the GNU Lesser General Public License along
-# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""provide utilies for web (live) unit testing
-
-"""
-
-import os
-import socket
-import logging
-from os.path import join, dirname, normpath, abspath
-from StringIO import StringIO
-
-#from twisted.application import service, strports
-# from twisted.internet import reactor, task
-from twisted.web2 import channel
-from twisted.web2 import server
-from twisted.web2 import static
-from twisted.internet import reactor
-from twisted.internet.error import CannotListenError
-
-from logilab.common.testlib import TestCase
-
-from cubicweb.dbapi import in_memory_repo_cnx
-from cubicweb.etwist.server import CubicWebRootResource
-from cubicweb.devtools import BaseApptestConfiguration, init_test_database
-
-
-
-def get_starturl(port=7777, login=None, passwd=None):
- if login:
- return 'http://%s:%s/view?login=%s&password=%s' % (socket.gethostname(), port, login, passwd)
- else:
- return 'http://%s:%s/' % (socket.gethostname(), port)
-
-
-class LivetestResource(CubicWebRootResource):
- """redefines main resource to search for data files in several directories"""
-
- def locateChild(self, request, segments):
- """Indicate which resource to use to process down the URL's path"""
- if len(segments) and segments[0] == 'data':
- # Anything in data/ is treated as static files
- datadir = self.config.locate_resource(segments[1])[0]
- if datadir:
- return static.File(str(datadir), segments[1:])
- # Otherwise we use this single resource
- return self, ()
-
-
-
-class LivetestConfiguration(BaseApptestConfiguration):
- init_repository = False
-
- def __init__(self, cube=None, sourcefile=None, pyro_name=None,
- log_threshold=logging.CRITICAL):
- BaseApptestConfiguration.__init__(self, cube, log_threshold=log_threshold)
- self.appid = pyro_name or cube
- # don't change this, else some symlink problems may arise in some
- # environment (e.g. mine (syt) ;o)
- # XXX I'm afraid this test will prevent to run test from a production
- # environment
- self._sources = None
- # instance cube test
- if cube is not None:
- self.apphome = self.cube_dir(cube)
- elif 'web' in os.getcwd().split(os.sep):
- # web test
- self.apphome = join(normpath(join(dirname(__file__), '..')), 'web')
- else:
- # cube test
- self.apphome = abspath('..')
- self.sourcefile = sourcefile
- self.global_set_option('realm', '')
- self.use_pyro = pyro_name is not None
-
- def pyro_enabled(self):
- if self.use_pyro:
- return True
- else:
- return False
-
-
-
-def make_site(cube, options=None):
- from cubicweb.etwist import twconfig # trigger configuration registration
- config = LivetestConfiguration(cube, options.sourcefile,
- pyro_name=options.pyro_name,
- log_threshold=logging.DEBUG)
- init_test_database(config=config)
- # if '-n' in sys.argv: # debug mode
- cubicweb = LivetestResource(config, debug=True)
- toplevel = cubicweb
- website = server.Site(toplevel)
- cube_dir = config.cube_dir(cube)
- source = config.sources()['system']
- for port in xrange(7777, 7798):
- try:
- reactor.listenTCP(port, channel.HTTPFactory(website))
- saveconf(cube_dir, port, source['db-user'], source['db-password'])
- break
- except CannotListenError:
- print "port %s already in use, I will try another one" % port
- else:
- raise
- cubicweb.base_url = get_starturl(port=port)
- print "you can go here : %s" % cubicweb.base_url
-
-def runserver():
- reactor.run()
-
-def saveconf(templhome, port, user, passwd):
- import pickle
- conffile = file(join(templhome, 'test', 'livetest.conf'), 'w')
-
- pickle.dump((port, user, passwd, get_starturl(port, user, passwd)),
- conffile)
- conffile.close()
-
-
-def loadconf(filename='livetest.conf'):
- import pickle
- return pickle.load(file(filename))
-
-
-def execute_scenario(filename, **kwargs):
- """based on twill.parse.execute_file, but inserts cubicweb extensions"""
- from twill.parse import _execute_script
- stream = StringIO('extend_with cubicweb.devtools.cubicwebtwill\n' + file(filename).read())
- kwargs['source'] = filename
- _execute_script(stream, **kwargs)
-
-
-def hijack_twill_output(new_output):
- from twill import commands as twc
- from twill import browser as twb
- twc.OUT = new_output
- twb.OUT = new_output
-
-
-class LiveTestCase(TestCase):
-
- sourcefile = None
- cube = ''
- def setUp(self):
- assert self.cube, "You must specify a cube in your testcase"
- # twill can be quite verbose ...
- self.twill_output = StringIO()
- hijack_twill_output(self.twill_output)
- # build a config, and get a connection
- self.config = LivetestConfiguration(self.cube, self.sourcefile)
- _, user, passwd, _ = loadconf()
- self.repo, self.cnx = in_memory_repo_cnx(self.config, user, password=passwd)
- self.setup_db(self.cnx)
-
- def tearDown(self):
- self.teardown_db(self.cnx)
-
-
- def setup_db(self, cnx):
- """override setup_db() to setup your environment"""
-
- def teardown_db(self, cnx):
- """override teardown_db() to clean up your environment"""
-
- def get_loggedurl(self):
- port, user, passwd, logged_url = loadconf()
- return logged_url
-
- def get_anonurl(self):
- port, _, _, _ = loadconf()
- return 'http://%s:%s/view?login=anon&password=anon' % (
- socket.gethostname(), port)
-
- # convenience
- execute_scenario = staticmethod(execute_scenario)
-
-
-if __name__ == '__main__':
- runserver()
diff -r d8bb8f631d41 -r a4e667270dd4 devtools/qunit.py
--- a/devtools/qunit.py Mon Sep 26 18:37:23 2011 +0200
+++ b/devtools/qunit.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,3 +1,21 @@
+# copyright 2010-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+
import os, os.path as osp
import signal
from tempfile import mkdtemp, NamedTemporaryFile, TemporaryFile
diff -r d8bb8f631d41 -r a4e667270dd4 devtools/repotest.py
--- a/devtools/repotest.py Mon Sep 26 18:37:23 2011 +0200
+++ b/devtools/repotest.py Fri Dec 09 12:08:27 2011 +0100
@@ -148,8 +148,7 @@
from cubicweb.server.sources.rql2sql import SQLGenerator, remove_unused_solutions
class RQLGeneratorTC(TestCase):
- schema = backend = None # set this in concret test
-
+ schema = backend = None # set this in concrete class
@classmethod
def setUpClass(cls):
@@ -197,7 +196,7 @@
class BaseQuerierTC(TestCase):
- repo = None # set this in concret test
+ repo = None # set this in concrete class
def setUp(self):
self.o = self.repo.querier
@@ -205,7 +204,7 @@
self.ueid = self.session.user.eid
assert self.ueid != -1
self.repo._type_source_cache = {} # clear cache
- self.pool = self.session.set_pool()
+ self.cnxset = self.session.set_cnxset()
self.maxeid = self.get_max_eid()
do_monkey_patch()
self._dumb_sessions = []
@@ -213,7 +212,7 @@
def get_max_eid(self):
return self.session.execute('Any MAX(X)')[0][0]
def cleanup(self):
- self.session.set_pool()
+ self.session.set_cnxset()
self.session.execute('DELETE Any X WHERE X eid > %s' % self.maxeid)
def tearDown(self):
@@ -225,7 +224,7 @@
for session in self._dumb_sessions:
session.rollback()
session.close()
- self.repo._free_pool(self.pool)
+ self.repo._free_cnxset(self.cnxset)
assert self.session.user.eid != -1
def set_debug(self, debug):
@@ -263,7 +262,8 @@
u = self.repo._build_user(self.session, self.session.user.eid)
u._groups = set(groups)
s = Session(u, self.repo)
- s._threaddata.pool = self.pool
+ s._threaddata.cnxset = self.cnxset
+ s._threaddata.ctx_count = 1
# register session to ensure it gets closed
self._dumb_sessions.append(s)
return s
@@ -273,7 +273,7 @@
def commit(self):
self.session.commit()
- self.session.set_pool()
+ self.session.set_cnxset()
class BasePlannerTC(BaseQuerierTC):
@@ -287,7 +287,7 @@
# XXX source_defs
self.o = self.repo.querier
self.session = self.repo._sessions.values()[0]
- self.pool = self.session.set_pool()
+ self.cnxset = self.session.set_cnxset()
self.schema = self.o.schema
self.sources = self.o._repo.sources
self.system = self.sources[-1]
@@ -311,7 +311,7 @@
del self.repo.sources_by_uri[source.uri]
undo_monkey_patch()
for session in self._dumb_sessions:
- session._threaddata.pool = None
+ session._threaddata.cnxset = None
session.close()
def _prepare_plan(self, rql, kwargs=None):
@@ -328,9 +328,10 @@
# monkey patch some methods to get predicatable results #######################
-from cubicweb.rqlrewrite import RQLRewriter
-_orig_insert_snippets = RQLRewriter.insert_snippets
-_orig_build_variantes = RQLRewriter.build_variantes
+from cubicweb import rqlrewrite
+_orig_iter_relations = rqlrewrite.iter_relations
+_orig_insert_snippets = rqlrewrite.RQLRewriter.insert_snippets
+_orig_build_variantes = rqlrewrite.RQLRewriter.build_variantes
def _insert_snippets(self, snippets, varexistsmap=None):
_orig_insert_snippets(self, sorted(snippets, snippet_cmp), varexistsmap)
@@ -414,9 +415,13 @@
def _syntax_tree_search(*args, **kwargs):
return deepcopy(_orig_syntax_tree_search(*args, **kwargs))
+def _ordered_iter_relations(stinfo):
+ return sorted(_orig_iter_relations(stinfo), key=lambda x:x.r_type)
+
def do_monkey_patch():
- RQLRewriter.insert_snippets = _insert_snippets
- RQLRewriter.build_variantes = _build_variantes
+ rqlrewrite.iter_relations = _ordered_iter_relations
+ rqlrewrite.RQLRewriter.insert_snippets = _insert_snippets
+ rqlrewrite.RQLRewriter.build_variantes = _build_variantes
ExecutionPlan._check_permissions = _check_permissions
ExecutionPlan.tablesinorder = None
ExecutionPlan.init_temp_table = _init_temp_table
@@ -425,8 +430,9 @@
PyroRQLSource.syntax_tree_search = _syntax_tree_search
def undo_monkey_patch():
- RQLRewriter.insert_snippets = _orig_insert_snippets
- RQLRewriter.build_variantes = _orig_build_variantes
+ rqlrewrite.iter_relations = _orig_iter_relations
+ rqlrewrite.RQLRewriter.insert_snippets = _orig_insert_snippets
+ rqlrewrite.RQLRewriter.build_variantes = _orig_build_variantes
ExecutionPlan._check_permissions = _orig_check_permissions
ExecutionPlan.init_temp_table = _orig_init_temp_table
PartPlanInformation.merge_input_maps = _orig_merge_input_maps
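Wrapping ``iter_relations`` with a sorting shim is what makes the rewriter's
traversal order deterministic, letting tests compare generated queries
verbatim. The wrapper's effect, with a minimal stand-in for the real
function::

    class Rel(object):
        def __init__(self, r_type):
            self.r_type = r_type

    def iter_relations(stinfo):        # stand-in for rqlrewrite.iter_relations
        return iter(stinfo)

    def _ordered_iter_relations(stinfo):
        return sorted(iter_relations(stinfo), key=lambda x: x.r_type)

    rels = [Rel('owned_by'), Rel('created_by'), Rel('in_group')]
    print [r.r_type for r in _ordered_iter_relations(rels)]
    # ['created_by', 'in_group', 'owned_by'], whatever the input order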
diff -r d8bb8f631d41 -r a4e667270dd4 devtools/stresstester.py
--- a/devtools/stresstester.py Mon Sep 26 18:37:23 2011 +0200
+++ b/devtools/stresstester.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -38,7 +38,7 @@
-o <filename> / --report-output <filename>
Write profiler report into <filename> rather than on stdout
-Copyright (c) 2003-2010 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
+Copyright (c) 2003-2011 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
http://www.logilab.fr/ -- mailto:contact@logilab.fr
"""
@@ -73,9 +73,7 @@
start = clock()
try:
cursor.execute(query)
- except KeyboardInterrupt:
- raise
- except:
+ except Exception:
TB_LOCK.acquire()
traceback.print_exc()
TB_LOCK.release()
diff -r d8bb8f631d41 -r a4e667270dd4 devtools/testlib.py
--- a/devtools/testlib.py Mon Sep 26 18:37:23 2011 +0200
+++ b/devtools/testlib.py Fri Dec 09 12:08:27 2011 +0100
@@ -233,7 +233,7 @@
# web resources
try:
config.global_set_option('embed-allowed', re.compile('.*'))
- except: # not in server only configuration
+ except Exception: # not in server only configuration
pass
#XXX this doesn't need to a be classmethod anymore
@@ -274,7 +274,7 @@
def session(self):
"""return current server side session (using default manager account)"""
session = self.repo._sessions[self.cnx.sessionid]
- session.set_pool()
+ session.set_cnxset()
return session
@property
@@ -458,7 +458,7 @@
try:
return self.cnx.commit()
finally:
- self.session.set_pool() # ensure pool still set after commit
+ self.session.set_cnxset() # ensure cnxset still set after commit
@nocoverage
def rollback(self):
@@ -467,7 +467,7 @@
except dbapi.ProgrammingError:
pass # connection closed
finally:
- self.session.set_pool() # ensure pool still set after commit
+ self.session.set_cnxset() # ensure cnxset still set after commit
# # server side db api #######################################################
@@ -475,7 +475,7 @@
if eid_key is not None:
warn('[3.8] eid_key is deprecated, you can safely remove this argument',
DeprecationWarning, stacklevel=2)
- self.session.set_pool()
+ self.session.set_cnxset()
return self.session.execute(rql, args)
# other utilities #########################################################
@@ -493,13 +493,17 @@
def assertModificationDateGreater(self, entity, olddate):
entity.cw_attr_cache.pop('modification_date', None)
- self.failUnless(entity.modification_date > olddate)
+ self.assertTrue(entity.modification_date > olddate)
def assertItemsEqual(self, it1, it2, *args, **kwargs):
it1 = set(getattr(x, 'eid', x) for x in it1)
it2 = set(getattr(x, 'eid', x) for x in it2)
super(CubicWebTC, self).assertItemsEqual(it1, it2, *args, **kwargs)
+ def assertMessageEqual(self, req, params, expected_msg):
+ msg = req.session.data[params['_cwmsgid']]
+ self.assertEqual(expected_msg, msg)
+
# workflow utilities #######################################################
def assertPossibleTransitions(self, entity, expected):
@@ -568,6 +572,8 @@
if views:
try:
view = viewsvreg._select_best(views, req, rset=rset)
+ if view is None:
+ raise NoSelectableObject((req,), {'rset':rset}, views)
if view.linkable():
yield view
else:
@@ -722,7 +728,7 @@
self.assertEqual(session.login, origsession.login)
self.assertEqual(session.anonymous_session, False)
self.assertEqual(path, 'view')
- self.assertEqual(params, {'__message': 'welcome %s !' % req.user.login})
+ self.assertMessageEqual(req, params, 'welcome %s !' % req.user.login)
def assertAuthFailure(self, req, nbsessions=0):
self.app.connect(req)
@@ -806,15 +812,13 @@
"""
try:
output = viewfunc(**kwargs)
- except (SystemExit, KeyboardInterrupt):
- raise
- except:
+ except Exception:
# hijack exception: generative tests stop when the exception
# is not an AssertionError
klass, exc, tcbk = sys.exc_info()
try:
msg = '[%s in %s] %s' % (klass, view.__regid__, exc)
- except:
+ except Exception:
msg = '[%s in %s] undisplayable exception' % (klass, view.__regid__)
raise AssertionError, msg, tcbk
return self._check_html(output, view, template)
@@ -856,9 +860,7 @@
def assertWellFormed(self, validator, content, context=None):
try:
return validator.parse_string(content)
- except (SystemExit, KeyboardInterrupt):
- raise
- except:
+ except Exception:
# hijack exception: generative tests stop when the exception
# is not an AssertionError
klass, exc, tcbk = sys.exc_info()
@@ -870,7 +872,7 @@
try:
str_exc = str(exc)
- except:
+ except Exception:
str_exc = 'undisplayable exception'
msg += str_exc
if content is not None:
@@ -1165,34 +1167,34 @@
pass
-def vreg_instrumentize(testclass):
- # XXX broken
- from cubicweb.devtools.apptest import TestEnvironment
- env = testclass._env = TestEnvironment('data', configcls=testclass.configcls)
- for reg in env.vreg.values():
- reg._selected = {}
- try:
- orig_select_best = reg.__class__.__orig_select_best
- except:
- orig_select_best = reg.__class__._select_best
- def instr_select_best(self, *args, **kwargs):
- selected = orig_select_best(self, *args, **kwargs)
- try:
- self._selected[selected.__class__] += 1
- except KeyError:
- self._selected[selected.__class__] = 1
- except AttributeError:
- pass # occurs on reg used to restore database
- return selected
- reg.__class__._select_best = instr_select_best
- reg.__class__.__orig_select_best = orig_select_best
+# def vreg_instrumentize(testclass):
+# # XXX broken
+# from cubicweb.devtools.apptest import TestEnvironment
+# env = testclass._env = TestEnvironment('data', configcls=testclass.configcls)
+# for reg in env.vreg.values():
+# reg._selected = {}
+# try:
+# orig_select_best = reg.__class__.__orig_select_best
+# except Exception:
+# orig_select_best = reg.__class__._select_best
+# def instr_select_best(self, *args, **kwargs):
+# selected = orig_select_best(self, *args, **kwargs)
+# try:
+# self._selected[selected.__class__] += 1
+# except KeyError:
+# self._selected[selected.__class__] = 1
+# except AttributeError:
+# pass # occurs on reg used to restore database
+# return selected
+# reg.__class__._select_best = instr_select_best
+# reg.__class__.__orig_select_best = orig_select_best
-def print_untested_objects(testclass, skipregs=('hooks', 'etypes')):
- for regname, reg in testclass._env.vreg.iteritems():
- if regname in skipregs:
- continue
- for appobjects in reg.itervalues():
- for appobject in appobjects:
- if not reg._selected.get(appobject):
- print 'not tested', regname, appobject
+# def print_untested_objects(testclass, skipregs=('hooks', 'etypes')):
+# for regname, reg in testclass._env.vreg.iteritems():
+# if regname in skipregs:
+# continue
+# for appobjects in reg.itervalues():
+# for appobject in appobjects:
+# if not reg._selected.get(appobject):
+# print 'not tested', regname, appobject
diff -r d8bb8f631d41 -r a4e667270dd4 doc/book/en/admin/index.rst
--- a/doc/book/en/admin/index.rst Mon Sep 26 18:37:23 2011 +0200
+++ b/doc/book/en/admin/index.rst Fri Dec 09 12:08:27 2011 +0100
@@ -23,7 +23,6 @@
multisources
ldap
pyro
- gae
migration
additional-tips
rql-logs
diff -r d8bb8f631d41 -r a4e667270dd4 doc/book/en/admin/instance-config.rst
--- a/doc/book/en/admin/instance-config.rst Mon Sep 26 18:37:23 2011 +0200
+++ b/doc/book/en/admin/instance-config.rst Fri Dec 09 12:08:27 2011 +0100
@@ -43,18 +43,33 @@
use apache (for example) for redirection and the variable `main.https-url`
of configuration file.
+For this to work, you have to activate the following Apache modules:
+
+* rewrite
+* proxy
+* proxy_http
+
+The command for that on Debian-based systems is ::
+
+  a2enmod rewrite proxy proxy_http
+  /etc/init.d/apache2 restart
+
:Example:
For an apache redirection of a site accessible via `http://localhost/demo`
and `https://localhost/demo` and actually running on port 8080, it
takes to the http:::
+ ProxyPreserveHost On
+ RewriteEngine On
RewriteCond %{REQUEST_URI} ^/demo
RewriteRule ^/demo$ /demo/
RewriteRule ^/demo/(.*) http://127.0.0.1:8080/$1 [L,P]
and for the https:::
+ ProxyPreserveHost On
+ RewriteEngine On
    RewriteCond %{REQUEST_URI} ^/demo
    RewriteRule ^/demo$ /demo/
RewriteRule ^/demo/(.*) http://127.0.0.1:8080/https/$1 [L,P]
@@ -65,6 +80,11 @@
base-url = http://localhost/demo
https-url = https://localhost/demo
+Notice that if you want a site accessible through https only, not *both* http
+and https, simply set `base-url` to the https URL and use only the first rewrite
+section in your apache configuration (as you would for a plain http
+configuration with an apache front-end).
+
Setting up the web client
-------------------------
:`web.embed-allowed`:
diff -r d8bb8f631d41 -r a4e667270dd4 doc/book/en/admin/ldap.rst
--- a/doc/book/en/admin/ldap.rst Mon Sep 26 18:37:23 2011 +0200
+++ b/doc/book/en/admin/ldap.rst Fri Dec 09 12:08:27 2011 +0100
@@ -27,7 +27,7 @@
Credential checks are _always_ done against the LDAP server.
The base functionality for this is in
-cubicweb/server/sources/ldapuser.py.
+:file:`cubicweb/server/sources/ldapuser.py`.
Configurations options
----------------------
@@ -39,14 +39,14 @@
LDAP server connection options:
-* host: may contain port information using : notation.
-* protocol (choices are ldap, ldaps, ldapi)
-* auth-mode (choices are simple, cram_md5, digest_md5, gssapi, support
+* `host`, may contain port information using : notation.
+* `protocol`, choices are ldap, ldaps, ldapi
+* `auth-mode`, choices are simple, cram_md5, digest_md5, gssapi (support
   for the latter being partial as of now)
-* auth-realm, realm to use when using gssapi/kerberos authentication
-* data-cnx-dn, user dn to use to open data connection to the ldap (eg
+* `auth-realm`, realm to use when using gssapi/kerberos authentication
+* `data-cnx-dn`, user dn to use to open data connection to the ldap (eg
used to respond to rql queries)
-* data-cnx-password, password to use to open data connection to the
+* `data-cnx-password`, password to use to open data connection to the
ldap (eg used to respond to rql queries)
If the LDAP server accepts anonymous binds, then it is possible to
@@ -55,16 +55,30 @@
LDAP schema mapping:
-* user-base-dn, base DN to lookup for users
-* user-scope, user search scope
-* user-classes, classes of user
-* user-attrs-map, map from ldap user attributes to cubicweb attributes
-* user-login-attr, attribute used as login on authentication
+* `user-base-dn`, base DN to lookup for users
+* `user-scope`, user search scope
+* `user-classes`, classes of user
+* `user-attrs-map`, map from ldap user attributes to cubicweb attributes
+* `user-login-attr`, attribute used as login on authentication
LDAP source internal configuration:
-* user-default-group, name of a group in which ldap users will be by
+* `user-default-group`, name of a group in which ldap users will be by
default. You can set multiple groups by separating them by a comma
-* synchronization-interval, interval between synchronization with the
+* `synchronization-interval`, interval between synchronization with the
ldap directory in seconds (default to once a day)
-* life time of query cache in minutes (default to two hours).
+* `cache-life-time`, lifetime of the query cache in minutes (defaults to two
+  hours).
+
+Other notes
+-----------
+
+* Yes, cubicweb is able to start if the ldap server cannot be reached, even on
+  `cubicweb-ctl start`, though that will slow down the instance, since it will
+  indefinitely attempt to reconnect to the ldap server on each query on users.
+
+* Changing the name of the ldap server in your script is fine; changing the base
+  DN isn't, since it's used to distinguish already known users from others.
+
+* You can use :class:`CWSourceHostConfig` to define variants of a source
+  configuration according to the host the instance is running on. To do so, go to
+  the source's view from the sources management view.
diff -r d8bb8f631d41 -r a4e667270dd4 doc/book/en/admin/pyro.rst
--- a/doc/book/en/admin/pyro.rst Mon Sep 26 18:37:23 2011 +0200
+++ b/doc/book/en/admin/pyro.rst Fri Dec 09 12:08:27 2011 +0100
@@ -1,14 +1,19 @@
Working with a distributed client (using Pyro)
==============================================
+.. _UsingPyro:
+
In some circumstances, it is practical to split the repository and
-web-client parts of the application, for load-balancing reasons. Or
+web-client parts of the application for load-balancing reasons. Or
one wants to access the repository from independant scripts to consult
or update the database.
+Prerequisites
+-------------
+
For this to work, several steps have to be taken in order.
-You must first ensure that the apropriate software is installed and
+You must first ensure that the appropriate software is installed and
running (see ref:`setup`)::
pyro-nsd -x -p 6969
@@ -21,19 +26,40 @@
pyro-instance-id=myinstancename
-Finally, the client (for instance in the case of a script) must
-connect specifically, as in the following example code:
+Connect to the CubicWeb repository from a python script
+-------------------------------------------------------
+
+Assuming pyro-nsd is running and your instance is configured with ``pyro-server=yes``,
+you will be able to use the :mod:`cubicweb.dbapi` API to initiate the connection.
+
+.. note::
+   Regardless of whether your instance is pyro-activated, you can still achieve
+   this in a simpler way by using `cubicweb-ctl shell` scripts, as by default the
+   shell creates an 'in-memory' repository instead of connecting through pyro.
+   That also means you have to be on the host where the instance is running.
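+
+   For instance, a minimal sketch (`myinstance` and `mydatascript.py` are
+   placeholder names): ::
+
+     cubicweb-ctl shell myinstance mydatascript.py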
+
+Finally, the client (for instance a python script) must connect specifically
+as in the following example code:
.. sourcecode:: python
from cubicweb import dbapi
- def pyro_connect(instname, login, password, pyro_ns_host):
- cnx = dbapi.connect(instname, login, password, pyro_ns_host)
- cnx.load_appobjects()
- return cnx
+ cnx = dbapi.connect(database='instance-id', user='admin', password='admin')
+ cnx.load_appobjects()
+ cur = cnx.cursor()
+ for name in (u'Personal', u'Professional', u'Computers'):
+ cur.execute('INSERT Tag T: T name %(n)s', {'n': name})
+ cnx.commit()
-The 'cnx.load_appobjects()' line is optional. Without it you will get
-data through the connection roughly as you would from a DBAPI
-connection. With it, provided the cubicweb-client part is installed
-and accessible, you get the ORM goodies.
+Calling :meth:`cubicweb.dbapi.load_appobjects` will populate the `cubicweb
+registries`_ with the application objects installed on the host where the script
+runs. You'll then be able to use the ORM goodies and custom entity methods and
+views. Of course this is optional; without it you can still get the repository
+data through the connection, but in a rougher way: only RQL cursors will be
+available, e.g. you can't even build entity objects from the result set.
+
+.. _cubicweb registries: VRegistryIntro_
+
diff -r d8bb8f631d41 -r a4e667270dd4 doc/book/en/admin/setup-windows.rst
--- a/doc/book/en/admin/setup-windows.rst Mon Sep 26 18:37:23 2011 +0200
+++ b/doc/book/en/admin/setup-windows.rst Fri Dec 09 12:08:27 2011 +0100
@@ -8,13 +8,12 @@
Setting up a Windows development environment is not too complicated
but it requires a series of small steps.
-We proposed an example of a typical |cubicweb| installation on Windows
+We propose an example of a typical |cubicweb| installation on Windows
from sources. We assume everything goes into ``C:\\`` and for any
package, without version specification, "the latest is
the greatest".
-Take into the mind that adjusting the installation drive should be
-straightforward.
+Mind that adjusting the installation drive should be straightforward.
@@ -24,9 +23,9 @@
|cubicweb| requires some base elements that must be installed to run
correctly. So, first of all, you must install them :
-* python >= 2.5 and < 3
- (`Download Python `_).
- You can also consider the Python(x,y) distribution
+* python >= 2.5 and < 3
+ (`Download Python `_).
+ You can also consider the Python(x,y) distribution
(`Download Python(x,y) `_)
as it makes things easier for Windows user by wrapping in a single installer
python 2.5 plus numerous useful third-party modules and
@@ -34,24 +33,24 @@
IDE for Python under Windows).
* `Twisted `_ is an event-driven
- networking engine
+ networking engine
(`Download Twisted `_)
* `lxml `_ library
- (version >=2.2.1) allows working with XML and HTML
+ (version >=2.2.1) allows working with XML and HTML
(`Download lxml `_)
-* `Postgresql 8.4 `_,
- an object-relational database system
- (`Download Postgresql `_)
- and its python drivers
+* `Postgresql 8.4 `_,
+ an object-relational database system
+ (`Download Postgresql `_)
+ and its python drivers
(`Download psycopg `_)
-* A recent version of `gettext`
+* A recent version of `gettext`
(`Download gettext `_).
-* `rql `_,
- the recent version of the Relationship Query Language parser
+* `rql `_,
+ the recent version of the Relationship Query Language parser
(`Download rql `_).
Install optional elements
@@ -60,22 +59,22 @@
We recommend you to install the following elements. They are not
mandatory but they activate very interesting features in |cubicweb|:
-* `Simplejson `_
- must be installed if you have python <= 2.5
+* `Simplejson `_
+ must be installed if you have python <= 2.5
(`Download simplejson `_).
It is included in the Standard library from Python >= 2.6.
-* `Pyro `_
+* `Pyro `_
enables remote access to cubicweb repository instances.
It also allows the client and the server not running on the same machine
(`Download Pyro `_).
-* `python-ldap `_
+* `python-ldap `_
provides access to LDAP/Active directory directories
(`Download python-ldap `_).
-* `graphviz `_
- which allow schema drawings.
+* `graphviz `_
+ which allow schema drawings.
(`Download graphviz `_).
It is quite recommended (albeit not mandatory).
@@ -88,28 +87,27 @@
Some additional tools could be useful to develop :ref:`cubes `
with the framework.
-* `mercurial `_ and its standard
- windows GUI (`TortoiseHG `_)
- allow you to get the source code of |cubicweb| from control version
- repositories. So you will be able to get the latest development
- version in an easy way
+* `mercurial `_ and its standard windows GUI
+ (`TortoiseHG `_) allow you to get the source
+ code of |cubicweb| from control version repositories. So you will be able to
+ get the latest development version and pre-release bugfixes in an easy way
(`Download mercurial `_).
* You can also consider the ssh client `Putty` in order to peruse
mercurial over ssh (`Download `_).
* If you are an Eclipse user, mercurial can be integrated using the
- `MercurialEclipse` plugin
+ `MercurialEclipse` plugin
(`Home page `_).
Getting the sources
-------------------
-There are tow ways to get the sources of |cubicweb| and its
+There are two ways to get the sources of |cubicweb| and its
:ref:`cubes `:
-* download the latest release (:ref:`SourceInstallation`)
-* get the development version using Mercurial
+* download the latest release (:ref:`SourceInstallation`)
+* get the development version using Mercurial
(:ref:`MercurialInstallation`)
Environment variables
@@ -123,8 +121,8 @@
it. That opens a small window allowing edition of user-related and system-wide
variables.
-We will consider only user variables. First, the ``PATH`` variable. Assuming
-you are logged as user *Jane*, add the following paths, separated by
+We will consider only user variables. First, the ``PATH`` variable. Assuming
+you are logged as user *Jane*, add the following paths, separated by
semi-colons::
C:\Documents and Settings\Jane\My Documents\Python\cubicweb\cubicweb\bin
@@ -154,3 +152,6 @@
Then start the service with::
net start cubicweb-my_instance
+
+In case this does not work, you should be able to see error reports in
+the application log, using the windows event log viewer.
diff -r d8bb8f631d41 -r a4e667270dd4 doc/book/en/admin/setup.rst
--- a/doc/book/en/admin/setup.rst Mon Sep 26 18:37:23 2011 +0200
+++ b/doc/book/en/admin/setup.rst Fri Dec 09 12:08:27 2011 +0100
@@ -91,10 +91,10 @@
:ref:`cubicweb with other database ` using the following
virtual packages :
- * `cubicweb-postgresql-support` contains the necessary dependency for
+ * `cubicweb-postgresql-support` contains the necessary dependencies for
using :ref:`cubicweb with postgresql datatabase `
- * `cubicweb-mysql-support` contains the necessary dependency for using
+ * `cubicweb-mysql-support` contains the necessary dependencies for using
:ref:`cubicweb with mysql database `.
.. _`list of sources`: http://wiki.debian.org/SourcesList
@@ -144,9 +144,9 @@
.. _`virtualenv`: http://virtualenv.openplans.org/
A working compilation chain is needed to build the modules that include C
-extensions. If you definitively wont, installing `Lxml
-`_, `Twisted `_ and
-`libgecode `_ will help.
+extensions. If you definitely won't have one, installing `Lxml `_,
+`Twisted Web `_ and `libgecode
+`_ beforehand will help.
To install |cubicweb| and its dependencies, just run::
diff -r d8bb8f631d41 -r a4e667270dd4 doc/book/en/annexes/faq.rst
--- a/doc/book/en/annexes/faq.rst Mon Sep 26 18:37:23 2011 +0200
+++ b/doc/book/en/annexes/faq.rst Fri Dec 09 12:08:27 2011 +0100
@@ -148,25 +148,12 @@
to anonymous user, which will automatically execute what is
decribed above.
-How to load data from a script ?
---------------------------------
-
-The following script aims at loading data within a script assuming pyro-nsd is
-running and your instance is configured with ``pyro-server=yes``, otherwise
-you would not be able to use dbapi.
-
-.. sourcecode:: python
+How to load data from a python script ?
+---------------------------------------
+
+Please refer to the `Pyro chapter`_.
- from cubicweb import dbapi
+.. _`Pyro chapter`: UsingPyro_
- cnx = dbapi.connect(database='instance-id', user='admin', password='admin')
- cur = cnx.cursor()
- for name in (u'Personal', u'Professional', u'Computers'):
- cur.execute('INSERT Tag T: T name %(n)s', {'n': name})
- cnx.commit()
-
-Wether your instance as pyro activated or not, you can still acheive this by
-using cubicweb-ctl shell scripts.
How to format an entity date attribute ?
----------------------------------------
diff -r d8bb8f631d41 -r a4e667270dd4 doc/book/en/annexes/rql/implementation.rst
--- a/doc/book/en/annexes/rql/implementation.rst Mon Sep 26 18:37:23 2011 +0200
+++ b/doc/book/en/annexes/rql/implementation.rst Fri Dec 09 12:08:27 2011 +0100
@@ -11,7 +11,7 @@
expression.
::
- statement:: = (select | delete | insert | update) ';'
+ statement ::= (select | delete | insert | update) ';'
# select specific rules
@@ -130,18 +130,7 @@
with an OR. I do not think that the negation is supported on this type of
relation (XXX to be confirmed).
-- Relations defining the variables must be left to those using them. For
- example::
-
- Point P where P abs X, P ord Y, P value X+Y
-
- is valid, but::
-
- Point P where P abs X, P value X+Y, P ord Y
-
- is not.
-
-- missing proper explicit type conversion, COALESCE and certainly other things...
+- missing COALESCE and certainly other things...
- writing an rql query requires knowledge of the used schema (with real relation
names and entities, not those viewed in the user interface). On the other
diff -r d8bb8f631d41 -r a4e667270dd4 doc/book/en/annexes/rql/language.rst
--- a/doc/book/en/annexes/rql/language.rst Mon Sep 26 18:37:23 2011 +0200
+++ b/doc/book/en/annexes/rql/language.rst Fri Dec 09 12:08:27 2011 +0100
@@ -5,288 +5,584 @@
RQL syntax
----------
+.. _RQLKeywords:
+
Reserved keywords
~~~~~~~~~~~~~~~~~
-The keywords are not case sensitive.
::
AND, ASC, BEING, DELETE, DESC, DISTINCT, EXISTS, FALSE, GROUPBY,
- HAVING, ILIKE, IN, INSERT, LIKE, LIMIT, NOT, NOW, NULL, OFFSET,
+ HAVING, ILIKE, INSERT, LIKE, LIMIT, NOT, NOW, NULL, OFFSET,
OR, ORDERBY, SET, TODAY, TRUE, UNION, WHERE, WITH
+The keywords are not case sensitive. You should not use them when defining your
+schema, or as RQL variable names.
-Variables and Typing
+
+.. _RQLCase:
+
+Case
+~~~~
+
+* Variables should be all upper-cased.
+
+* Relations should be all lower-cased and exactly match the names of relations
+  defined in the schema.
+
+* Entity types should start with an upper-cased letter followed by at least one
+  lower-cased letter.
+
+
+.. _RQLVariables:
+
+Variables and typing
~~~~~~~~~~~~~~~~~~~~
-With RQL, we do not distinguish between entities and attributes. The
-value of an attribute is considered an entity of a particular type (see
-below), linked to one (real) entity by a relation called the name of
-the attribute.
+Entities and values to browse and/or select are represented in the query by
+*variables* that must be written in capital letters.
+
+With RQL, we do not distinguish between entities and attributes. The value of an
+attribute is considered as an entity of a particular type (see below), linked to
+one (real) entity by a relation called the name of the attribute, where the
+entity is the subject and the attribute the object.
+
+The possible type(s) for each variable are derived from the schema according to
+the constraints expressed above and to the relations between the variables.
-Entities and values to browse and/or select are represented in
-the query by *variables* that must be written in capital letters.
+We can restrict the possible types for a variable using the special relation
+**is** in the restrictions.
+
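+For instance, a small sketch (reusing the `Person` type and `firstname`
+attribute that appear in other examples of this chapter): ::
+
+  # restrict X to be a person
+  Any X WHERE X is Person, X firstname N
+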
-There is a special type **Any**, referring to a non specific type.
+Virtual relations
+~~~~~~~~~~~~~~~~~
-We can restrict the possible types for a variable using the
-special relation **is** in the constraints.
+Those relations may only be used in RQL queries and are not actual attributes of
+your entities.
+
+* `has_text`: relation to use to query the full text index (only for entities
+ having fulltextindexed attributes).
-The possible type(s) for each variable is derived from the schema
-according to the constraints expressed above and thanks to the relations
-between each variable.
+* `identity`: relation to use to tell that an RQL variable is the same as another
+  when you have to use two different variables for querying purposes. Conversely,
+  it's also useful together with the :keyword:`NOT` operator to tell that two
+  variables should not identify the same entity (see the examples below)
+
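+For instance, two small sketches (reusing the `name` attribute from other
+examples of this chapter): ::
+
+  # entities whose indexed text contains the word 'cubicweb'
+  Any X WHERE X has_text 'cubicweb'
+  # pairs of distinct entities sharing the same name
+  Any X, Y WHERE X name N, Y name N, NOT X identity Y
+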
-Built-in types
-``````````````
+.. _RQLLiterals:
+
+Literal expressions
+~~~~~~~~~~~~~~~~~~~
+
+Base types supported by RQL are those supported by yams schemas. Literal values
+are expressed as explained below:
-The base types supported are string (between double or single quotes),
-integers or floats (the separator is '.'), dates and
-boolean. We expect to receive a schema in which types String,
-Int, Float, Date and Boolean are defined.
+* strings should be enclosed in double or single quotes. If the value contains a
+  quote, it should be escaped with a backslash '\'
+
+* the float separator is the dot '.'
+
+* boolean values are :keyword:`TRUE` and :keyword:`FALSE` keywords
-* `String` (literal: between double or single quotes).
-* `Int`, `Float` (separator being'.').
-* `Date`, `Datetime`, `Time` (literal: string YYYY/MM/DD [hh:mm] or keywords
- `TODAY` and `NOW`).
-* `Boolean` (keywords `TRUE` and `FALSE`).
-* `Keyword` NULL.
+* dates and times should be expressed as strings with ISO notation: YYYY/MM/DD
+  [hh:mm], or using the keywords :keyword:`TODAY` and :keyword:`NOW`
+You may also use the :keyword:`NULL` keyword, meaning 'unspecified'.
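+
+For instance, a sketch combining several literal forms (`visible` is an
+illustrative boolean attribute): ::
+
+  Any X WHERE X name 'chauvat', X creation_date > "2010/01/01", X visible TRUE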
+
+
+.. _RQLOperators:
Operators
~~~~~~~~~
-Logical Operators
+.. _RQLLogicalOperators:
+
+Logical operators
`````````````````
::
AND, OR, NOT, ','
- ',' is equivalent to 'AND' but with the smallest among the priority
- of logical operators (see :ref:`PriorityOperators`).
+',' is equivalent to 'AND' but with the lowest priority among the logical
+operators (see :ref:`RQLOperatorsPriority`).
+
+.. _RQLMathematicalOperators:
+
+Mathematical operators
+``````````````````````
-Mathematical Operators
-``````````````````````
-::
++----------+---------------------+-----------+--------+
+| Operator | Description | Example | Result |
++==========+=====================+===========+========+
+| `+` | addition | 2 + 3 | 5 |
++----------+---------------------+-----------+--------+
+| `-` | subtraction | 2 - 3 | -1 |
++----------+---------------------+-----------+--------+
+| `*` | multiplication | 2 * 3 | 6 |
++----------+---------------------+-----------+--------+
+| / | division | 4 / 2 | 2 |
++----------+---------------------+-----------+--------+
+| % | modulo (remainder) | 5 % 4 | 1 |
++----------+---------------------+-----------+--------+
+| ^ | exponentiation | 2.0 ^ 3.0 | 8 |
++----------+---------------------+-----------+--------+
+| & | bitwise AND | 91 & 15 | 11 |
++----------+---------------------+-----------+--------+
+| | | bitwise OR | 32 | 3 | 35 |
++----------+---------------------+-----------+--------+
+| # | bitwise XOR | 17 # 5 | 20 |
++----------+---------------------+-----------+--------+
+| ~ | bitwise NOT | ~1 | -2 |
++----------+---------------------+-----------+--------+
+| << | bitwise shift left | 1 << 4 | 16 |
++----------+---------------------+-----------+--------+
+| >> | bitwise shift right | 8 >> 2 | 2 |
++----------+---------------------+-----------+--------+
- +, -, *, /
+
+Notice that whether integer division truncates the result depends on the backend
+behaviour. For instance, postgresql truncates.
+
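+For instance, arithmetic may appear directly in the selection (a sketch, reusing
+the `num` attribute from the EXISTS example below): ::
+
+  # each entity together with twice its number
+  Any X, N * 2 WHERE X num N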
+
+.. _RQLComparisonOperators:
Comparison operators
````````````````````
-::
+ ::
- =, <, <=, >=, >, ~=, IN, LIKE, ILIKE
-
-* Syntax to use comparison operator:
+ =, !=, <, <=, >=, >, IN
- `VARIABLE relation operator VALUE`
-* The operator `=` is the default operator and can be omitted.
+The syntax to use comparison operators is:
-* `relation` name is always attended
+ `VARIABLE attribute VALUE`
-* The operator `LIKE` equivalent to `~=` can be used with the
- special character `%` in a string to indicate that the chain
- must start or finish by a prefix/suffix:
- ::
+The `=` operator is the default operator and can be omitted, i.e.:
+
+ `VARIABLE attribute = VALUE`
- Any X WHERE X name ~= 'Th%'
- Any X WHERE X name LIKE '%lt'
+is equivalent to
-* The operator `ILIKE` is the case insensitive version of `LIKE`.
+ `VARIABLE attribute VALUE`
-* The operator `IN` provides a list of possible values:
- ::
+
+The operator `IN` provides a list of possible values: ::
Any X WHERE X name IN ('chauvat', 'fayolle', 'di mascio', 'thenault')
-.. XXX nico: "A trick <> 'bar'" wouldn't it be more convenient than "NOT A trick 'bar'" ?
+.. _RQLStringOperators:
+
+String operators
+````````````````
+::
+
+ LIKE, ILIKE, ~=, REGEXP
+
+The :keyword:`LIKE` string operator can be used with the special character `%` in
+a string as a wild-card: ::
+
+  # match every entity whose name starts with 'Th'
+  Any X WHERE X name ~= 'Th%'
+  # match every entity whose name ends with 'lt'
+  Any X WHERE X name LIKE '%lt'
+  # match every entity whose name contains an 'l' followed by a 't'
+  Any X WHERE X name LIKE '%l%t%'
-.. _PriorityOperators:
+:keyword:`ILIKE` is the case-insensitive version of :keyword:`LIKE`. It's not
+available on all backends (e.g. sqlite doesn't support it). If not available for
+your backend, :keyword:`ILIKE` will behave like :keyword:`LIKE`.
+
+`~=` is a shortcut version of :keyword:`ILIKE`, or of :keyword:`LIKE` when the
+former is not available on the back-end.
+
+
+The :keyword:`REGEXP` operator is an alternative to :keyword:`LIKE` supporting POSIX
+regular expressions::
+
+ # match entities whose title starts with a digit
+ Any X WHERE X title REGEXP "^[0-9].*"
+
+
+The underlying SQL operator used is back-end-dependent:
+
+- the ``~`` operator is used for postgresql,
+- the ``REGEXP`` operator for mysql and sqlite.
+
+Other back-ends are not supported yet.
+
+
+.. _RQLOperatorsPriority:
Operators priority
``````````````````
-#. "(", ")"
-#. '*', '/'
-#. '+', '-'
-#. 'NOT'
-#. 'AND'
-#. 'OR'
-#. ','
+#. `(`, `)`
+#. `^`, `<<`, `>>`
+#. `*`, `/`, `%`, `&`
+#. `+`, `-`, `|`, `#`
+#. `NOT`
+#. `AND`
+#. `OR`
+#. `,`
+.. _RQLSearchQuery:
+
Search Query
~~~~~~~~~~~~
- [ `DISTINCT`] V1 (, V2) \ *
- [ `GROUPBY` V1 (V2) \*] [ `ORDERBY` ]
+Simplified grammar of a search query: ::
+
+ [ `DISTINCT`] `Any` V1 (, V2) \*
+ [ `GROUPBY` V1 (, V2) \*] [ `ORDERBY` ]
[ `LIMIT` ] [ `OFFSET` ]
- [ `WHERE` ]
- [ `WITH` V1 (, V2) \ * BEING ()]
- [ `HAVING` ]
+ [ `WHERE` ]
+ [ `WITH` V1 (, V2)\* BEING ()]
+ [ `HAVING` ]
[ `UNION` ]
-:entity type:
- Type of selected variables.
- The special type `Any` is equivalent to not specify a type.
-:restriction:
- list of conditions to test successively
- `V1 relation V2 | `
-:orderterms:
- Definition of the selection order: variable or column number followed by
- sorting method ( `ASC`, `DESC`), ASC is the default.
-:note for grouped queries:
- For grouped queries (e.g., a clause `GROUPBY`), all
- selected variables must be aggregated or grouped.
+Selection
+`````````
+
+The first clause is the selection of terms that should be in the result set.
+Terms may be variables, literals, function calls, arithmetic expressions, etc.,
+separated by commas.
+
+There will be as many columns in the result set as terms in this clause, in the
+same order.
+
+The syntax for function calls is fairly intuitive, for instance: ::
+
+ Any UPPER(N) WHERE P firstname N
+
+
+Grouping and aggregating
+````````````````````````
+
+The :keyword:`GROUPBY` keyword is followed by a list of terms on which results
+should be grouped. They are usually used with aggregate functions, which are
+responsible for aggregating the values of each group (see :ref:`RQLAggregateFunctions`).
+
+For grouped queries, all selected variables must be either aggregated (i.e. used
+by an aggregate function) or grouped (i.e. listed in the :keyword:`GROUPBY`
+clause).
+
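+For instance, a sketch counting tickets per project (`ticket_of` is the relation
+used in the sub-queries section below): ::
+
+  Any P, COUNT(T) GROUPBY P WHERE T ticket_of P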
+
+Sorting
+```````
+
+The :keyword:`ORDERBY` keyword is followed by the definition of the selection
+order: a variable or column number followed by the sorting method (:keyword:`ASC`,
+:keyword:`DESC`). If the sorting method is not specified, then the sorting is
+ascending (:keyword:`ASC` is the default).
+
+
+Pagination
+``````````
+
+The :keyword:`LIMIT` and :keyword:`OFFSET` keywords may be respectively used to
+limit the number of results and to tell from which result line to start (for
+instance, use `LIMIT 20` to get the first 20 results, then `LIMIT 20 OFFSET 20`
+to get the next 20).
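+
+For instance, to skip the first 10 persons and fetch the 5 following ones,
+sorted by firstname: ::
+
+  Any P ORDERBY N LIMIT 5 OFFSET 10 WHERE P is Person, P firstname N
+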
-Sorting and groups
-``````````````````
+Restrictions
+````````````
+
+The :keyword:`WHERE` keyword introduces the "main" part of the query, where you
+"define" variables and add restrictions telling what you're interested in.
+
+It's a list of triplets "subject relation object", e.g. `V1 relation
+(V2 | )`. Triplets are separated using :ref:`RQLLogicalOperators`.
-- For grouped queries (e.g. with a GROUPBY clause), all
- selected variables should be grouped at the right of the keyword.
+.. note::
+
+ About the negation operator (:keyword:`NOT`):
+
+ * "NOT X relation Y" is equivalent to "NOT EXISTS(X relation Y)"
+
+ * `Any X WHERE NOT X owned_by U` means "entities that have no relation
+ `owned_by`".
+
+  * `Any X WHERE NOT X owned_by U, U login "syt"` means "entities that have no
+    relation `owned_by` with the user syt". They may have a relation `owned_by`
+    with another user.
-- If the sorting method (SORT_METHOD) is not specified, then the sorting is
- ascendant (`ASC`).
+In this clause, you can also use :keyword:`EXISTS` when you want to know if some
+expression is true and do not need the complete set of elements that make it
+true. Testing for existence is much faster than fetching the complete set of
+results, especially when you consider using `OR` against several expressions.
+For instance, if you want to retrieve versions which are in state "ready" or
+tagged by "priority", you should write:
+
+::
+
+ Any X ORDERBY PN,N
+ WHERE X num N, X version_of P, P name PN,
+ EXISTS(X in_state S, S name "ready")
+ OR EXISTS(T tags X, T name "priority")
+
+not
-- Aggregate Functions: COUNT, MIN, MAX, AVG, SUM, GROUP_CONCAT
+::
+
+ Any X ORDERBY PN,N
+ WHERE X num N, X version_of P, P name PN,
+ (X in_state S, S name "ready")
+ OR (T tags X, T name "priority")
-.. note::
- Aggregate functions will return None if there is no result row.
+The two queries are not at all equivalent:
+
+* the former will retrieve all versions, then check for each one whether it is
+  in the matching state or tagged by the expected tag,
+
+* the latter will retrieve all versions, states and tags (cartesian product!),
+  compute the join and then keep each row which is in the matching state or
+  tagged by the expected tag. This implies that you won't get any result for a
+  version lacking either the in_state or the tags relation, since such a row
+  would be excluded by the join.
-Having
-```````
+You can also use the question mark (`?`) to mark optional relations, which allow
+you to select entities related **or not** to another. It is a concept similar to
+the `Left outer join`_:
+
+ the result of a left outer join (or simply left join) for table A and B
+ always contains all records of the "left" table (A), even if the
+ join-condition does not find any matching record in the "right" table (B).
+
+You must use the `?` behind a variable to specify that the relation toward it
+is optional. For instance:
+
+- Bugs of a project attached or not to a version ::
+
+ Any X, V WHERE X concerns P, P eid 42, X corrected_in V?
+
+ You will get a result set containing all the project's tickets, with either the
+ version in which it's corrected or None for tickets not related to a version.
+
+
+- All cards and the project they document if any ::
+
+ Any C, P WHERE C is Card, P? documented_by C
+
+Notice that you may also use outer joins:
-The HAVING clause, as in SQL, has been originally introduced to restrict a query
+- on the RHS of an attribute relation, e.g. ::
+
+ Any X WHERE X ref XR, Y name XR?
+
+ so that Y is outer joined on X by ref/name attributes comparison
+
+
+- on any side of an `HAVING` expression, e.g. ::
+
+ Any X WHERE X creation_date XC, Y creation_date YC
+ HAVING YEAR(XC)=YEAR(YC)?
+
+ so that Y is outer joined on X by comparison of the year extracted from their
+ creation date. ::
+
+ Any X WHERE X creation_date XC, Y creation_date YC
+ HAVING YEAR(XC)?=YEAR(YC)
+
+ would outer join X on Y instead.
+
+
+Having restrictions
+```````````````````
+
+The :keyword:`HAVING` clause, as in SQL, may be used to restrict a query
according to value returned by an aggregate function, e.g.::
Any X GROUPBY X WHERE X relation Y HAVING COUNT(Y) > 10
-It may however be used for something else...
-
-In the WHERE clause, we are limited to 3-expression, such thing can't be
-expressed directly as in the SQL's way. But this can be expressed using HAVING
-comparison expression.
-
-For instance, let's say you want to get people whose uppercased first name equals
-to another person uppercased first name::
+It may however be used for something else: in the :keyword:`WHERE` clause, we are
+limited to triplet expressions, so some things may not be expressed there. Let's
+take an example: if you want to get people whose upper-cased first name equals
+another person's upper-cased first name, there is no proper way to express this
+using triplets, so you should use something like: ::
- Person X WHERE X firstname XFN, Y firstname YFN HAVING X > Y, UPPER(XFN) = UPPER(YFN)
-
-This open some new possibilities. Another example::
+ Any X WHERE X firstname XFN, Y firstname YFN, NOT X identity Y HAVING UPPER(XFN) = UPPER(YFN)
- Person X WHERE X birthday XB HAVING YEAR(XB) = 2000
+Another example: imagine you want people born in 2000: ::
-That lets you use transformation functions not only in selection but for
-restriction as well and to by-pass limitation of the WHERE clause, which was the
-major flaw in the RQL language.
+ Any X WHERE X birthday XB HAVING YEAR(XB) = 2000
Notice that while we would like this to work without the HAVING clause, this
 can't currently be done because it introduces an ambiguity in RQL's grammar
that can't be handled by Yapps_, the parser's generator we're using.
-Negation
-````````
+
+Sub-queries
+```````````
+
+The :keyword:`WITH` keyword introduces the sub-queries clause. Each sub-query has
+the form:
+
+ V1(,V2) BEING (rql query)
-* A query such as `Document X WHERE NOT X owned_by U` means "the documents have
- no relation `owned_by`".
+Variables at the left of the :keyword:`BEING` keyword define the outer-query
+variables onto which results from the sub-query will be mapped. Sub-queries are
+separated from each other by a comma.
-* But the query `Document X WHERE NOT X owned_by U, U login "syt"` means "the
- documents have no relation `owned_by` with the user syt". They may have a
- relation "owned_by" with another user.
+Let's say we want to retrieve for each project its number of versions and its
+number of tickets. Due to the nature of the relational algebra behind the scenes,
+this can't be achieved using a single query. You have to write something along
+the lines of: ::
+
+ Any X, VC, TC WHERE X identity XX
+ WITH X, VC BEING (Any X, COUNT(V) GROUPBY X WHERE V version_of X),
+ XX, TC BEING (Any X, COUNT(T) GROUPBY X WHERE T ticket_of X)
-Identity
-````````
+Notice that we can't reuse the same variable name as an alias for two different
+sub-queries, hence the usage of 'X' and 'XX' in this example, which are then
+unified using the special `identity` relation (see :ref:`XXX`).
+
+.. warning::
+
+  Sub-queries define a new variable scope, so even if a variable has the same name
+  in the outer query and in the sub-query, they technically **aren't** the same
+  variable. So ::
-You can use the special relation `identity` in a query to
-add an identity constraint between two variables. This is equivalent
-to ``is`` in python::
+ Any W, REF WITH W, REF BEING
+ (Any W, REF WHERE W is Workcase, W ref REF,
+ W concerned_by D, D name "Logilab")
+ could be written:
- Any A WHERE A comments B, A identity B
+ Any W, REF WITH W, REF BEING
+ (Any W1, REF1 WHERE W1 is Workcase, W1 ref REF1,
+ W1 concerned_by D, D name "Logilab")
-return all objects that comment themselves. The relation `identity` is
-especially useful when defining the rules for securities with `RQLExpressions`.
+  Also, when a variable comes from a sub-query, you currently can't reference its
+  attributes or inlined relations in the outer query; you have to fetch them in
+  the sub-query. For instance, if we want to sort by project name in our first
+  example, we would have to write ::
-Limit / offset
-``````````````
-::
+ Any X, VC, TC ORDERBY XN WHERE X identity XX
+ WITH X, XN, VC BEING (Any X, COUNT(V) GROUPBY X,XN WHERE V version_of X, X name XN),
+ XX, TC BEING (Any X, COUNT(T) GROUPBY X WHERE T ticket_of X)
+
+ instead of ::
- Any P ORDERBY N LIMIT 5 OFFSET 10 WHERE P is Person, P firstname N
+ Any X, VC, TC ORDERBY XN WHERE X identity XX, X name XN,
+ WITH X, XN, VC BEING (Any X, COUNT(V) GROUPBY X WHERE V version_of X),
+ XX, TC BEING (Any X, COUNT(T) GROUPBY X WHERE T ticket_of X)
+
+ which would result in a SQL execution error.
-Exists
-```````
+Union
+`````
-You can use `EXISTS` when you want to know if some expression is true and do not
-need the complete set of elements that make it true. Testing for existence is
-much faster than fetching the complete set of results.
+You may get a result set containing the concatenation of several queries using
+the :keyword:`UNION` keyword. The selections of the combined queries must have
+the same number of columns.
::
- Any X ORDERBY PN,N
- WHERE X num N, X version_of P, P name PN,
- EXISTS(X in_state S, S name IN ("dev", "ready"))
- OR EXISTS(T tags X, T name "priority")
-
-
-Optional relations
-``````````````````
-
-It is a similar concept that the `Left outer join`_:
-
- the result of a left outer join (or simply left join) for table A and B
- always contains all records of the "left" table (A), even if the
- join-condition does not find any matching record in the "right" table (B).
-
-* They allow you to select entities related or not to another.
-
-* You must use the `?` behind the variable to specify that the relation
- toward it is optional:
-
- - Anomalies of a project attached or not to a version ::
-
- Any X, V WHERE X concerns P, P eid 42, X corrected_in V?
-
- - All cards and the project they document if necessary ::
-
- Any C, P WHERE C is Card, P? documented_by C
-
- Any T,P,V WHERE T is Ticket, T concerns P, T done_in V?
+ (Any X, XN WHERE X is Person, X surname XN) UNION (Any X,XN WHERE X is Company, X name XN)
-Subqueries
-``````````
-::
+.. _RQLFunctions:
+
+Available functions
+~~~~~~~~~~~~~~~~~~~
+
+Below is the list of aggregate and transformation functions that are supported
+natively by the framework. Notice that cubes may define additional functions.
+
+.. _RQLAggregateFunctions:
+
+Aggregate functions
+```````````````````
++--------------------+----------------------------------------------------------+
+| :func:`COUNT` | return the number of rows |
++--------------------+----------------------------------------------------------+
+| :func:`MIN` | return the minimum value |
++--------------------+----------------------------------------------------------+
+| :func:`MAX` | return the maximum value |
++--------------------+----------------------------------------------------------+
+| :func:`AVG` | return the average value |
++--------------------+----------------------------------------------------------+
+| :func:`SUM` | return the sum of values |
++--------------------+----------------------------------------------------------+
+| :func:`COMMA_JOIN` | return each value separated by a comma (for string only) |
++--------------------+----------------------------------------------------------+
+
+All aggregate functions above take a single argument. Take care: some aggregate
+functions (e.g. :keyword:`MAX`, :keyword:`MIN`) may return `None` if there is no
+result row.
+
+.. _RQLStringFunctions:
+
+String transformation functions
+```````````````````````````````
- (Any X WHERE X is Person) UNION (Any X WHERE X is Company)
++-------------------------+-----------------------------------------------------------------+
+| :func:`UPPER(String)` | upper case the string |
++-------------------------+-----------------------------------------------------------------+
+| :func:`LOWER(String)` | lower case the string |
++-------------------------+-----------------------------------------------------------------+
+| :func:`LENGTH(String)` | return the length of the string |
++-------------------------+-----------------------------------------------------------------+
+| :func:`SUBSTRING( | extract from the string a string starting at given index and of |
+| String, start, length)`| given length |
++-------------------------+-----------------------------------------------------------------+
+| :func:`LIMIT_SIZE( | if the length of the string is greater than given max size, |
+| String, max size)` | strip it and add ellipsis ("..."). The resulting string will |
+| | hence have max size + 3 characters |
++-------------------------+-----------------------------------------------------------------+
+| :func:`TEXT_LIMIT_SIZE( | similar to the above, but allow to specify the MIME type of the |
+| String, format, | text contained by the string. Supported formats are text/html, |
+| max size)` | text/xhtml and text/xml. All others will be considered as plain |
+| | text. For non plain text format, sgml tags will be first removed|
+| | before limiting the string. |
++-------------------------+-----------------------------------------------------------------+
+
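+For instance, a sketch using two of these functions (attribute names follow the
+other examples of this chapter): ::
+
+  # firstname initials
+  Any SUBSTRING(N, 1, 1) WHERE P firstname N
+  # names truncated to at most 10 characters plus an ellipsis
+  Any LIMIT_SIZE(N, 10) WHERE X name N
+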
+.. _RQLDateFunctions:
+
+Date extraction functions
+`````````````````````````
+
++--------------------------+----------------------------------------+
+| :func:`YEAR(Date)`       | return the year of a date or datetime  |
++--------------------------+----------------------------------------+
+| :func:`MONTH(Date)`      | return the month of a date or datetime |
++--------------------------+----------------------------------------+
+| :func:`DAY(Date)`        | return the day of a date or datetime   |
++--------------------------+----------------------------------------+
+| :func:`HOUR(Datetime)`   | return the hour of a datetime          |
++--------------------------+----------------------------------------+
+| :func:`MINUTE(Datetime)` | return the minute of a datetime        |
++--------------------------+----------------------------------------+
+| :func:`SECOND(Datetime)` | return the second of a datetime        |
++--------------------------+----------------------------------------+
+
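+For instance, reusing the birthday example from the HAVING section above: ::
+
+  # people born in December of any year
+  Any X WHERE X birthday XB HAVING MONTH(XB) = 12
+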
+.. _RQLOtherFunctions:
+
+Other functions
+```````````````
++-----------------------+--------------------------------------------------------------------+
+| :func:`ABS(num)` | return the absolute value of a number |
++-----------------------+--------------------------------------------------------------------+
+| :func:`RANDOM()` | return a pseudo-random value from 0.0 to 1.0 |
++-----------------------+--------------------------------------------------------------------+
+| :func:`FSPATH(X)` | expect X to be an attribute whose value is stored in a |
+| | :class:`BFSStorage` and return its path on the file system |
++-----------------------+--------------------------------------------------------------------+
+| :func:`FTIRANK(X)`    | expect X to be an entity used in a has_text relation, and return a |
+| | number corresponding to the rank order of each resulting entity |
++-----------------------+--------------------------------------------------------------------+
+| :func:`CAST(Type, X)` | expect X to be an attribute and return it casted into the given |
+| | final type |
++-----------------------+--------------------------------------------------------------------+
- DISTINCT Any W, REF
- WITH W, REF BEING
- (
- (Any W, REF WHERE W is Workcase, W ref REF,
- W concerned_by D, D name "Logilab")
- UNION
- (Any W, REF WHERE W is Workcase, W ref REF, '
- W split_into WP, WP name "WP1")
- )
-
-Function calls
-``````````````
-::
-
- Any UPPER(N) WHERE P firstname N
- Any LOWER(N) WHERE P firstname N
-
-Functions available on string: `UPPER`, `LOWER`
-
-.. XXX retrieve available function automatically
-
-For a performance issue, you can enrich the RQL dialect by RDMS (Relational database management system) functions.
-
+.. _RQLExamples:
Examples
~~~~~~~~
@@ -349,6 +645,8 @@
Any X where X is in (FirstType, SecondType)
+.. _RQLInsertQuery:
+
Insertion query
~~~~~~~~~~~~~~~
@@ -380,6 +678,8 @@
INSERT Person X: X name 'foo', X friend Y WHERE name 'nice'
+.. _RQLSetQuery:
+
Update and relation creation queries
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -401,6 +701,8 @@
SET X know Y WHERE X friend Y
+.. _RQLDeleteQuery:
+
Deletion query
~~~~~~~~~~~~~~
@@ -421,22 +723,6 @@
DELETE X friend Y WHERE X is Person, X name 'foo'
-Virtual RQL relations
-~~~~~~~~~~~~~~~~~~~~~
-
-Those relations may only be used in RQL query and are not actual
-attributes of your entities.
-
-* `has_text`: relation to use to query the full text index (only for
- entities having fulltextindexed attributes).
-
-* `identity`: `Identity`_ relation to use to tell that a RQL variable should be
- the same as another (but you've to use two different rql variables
- for querying purpose)
-
-* `is`: relation to enforce possible types for a variable
-
-
-
.. _Yapps: http://theory.stanford.edu/~amitp/yapps/
.. _Left outer join: http://en.wikipedia.org/wiki/Join_(SQL)#Left_outer_join
+
diff -r d8bb8f631d41 -r a4e667270dd4 doc/book/en/conf.py
--- a/doc/book/en/conf.py Mon Sep 26 18:37:23 2011 +0200
+++ b/doc/book/en/conf.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -66,7 +66,7 @@
# General substitutions.
project = 'CubicWeb'
-copyright = '2001-2010, Logilab'
+copyright = '2001-2011, Logilab'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
diff -r d8bb8f631d41 -r a4e667270dd4 doc/book/en/devrepo/datamodel/baseschema.rst
diff -r d8bb8f631d41 -r a4e667270dd4 doc/book/en/devrepo/datamodel/define-workflows.rst
--- a/doc/book/en/devrepo/datamodel/define-workflows.rst Mon Sep 26 18:37:23 2011 +0200
+++ b/doc/book/en/devrepo/datamodel/define-workflows.rst Fri Dec 09 12:08:27 2011 +0100
@@ -13,7 +13,7 @@
possible transitions from one state to another state.
We will define a simple workflow for a blog, with only the following two states:
-`submitted` and `published`. You may want to take a look at :ref:`_TutosBase` if
+`submitted` and `published`. You may want to take a look at :ref:`TutosBase` if
you want to quickly setup an instance running a blog.
Setting up a workflow
diff -r d8bb8f631d41 -r a4e667270dd4 doc/book/en/devrepo/datamodel/definition.rst
diff -r d8bb8f631d41 -r a4e667270dd4 doc/book/en/devrepo/repo/hooks.rst
--- a/doc/book/en/devrepo/repo/hooks.rst Mon Sep 26 18:37:23 2011 +0200
+++ b/doc/book/en/devrepo/repo/hooks.rst Fri Dec 09 12:08:27 2011 +0100
@@ -110,7 +110,7 @@
parents.add(parent.eid)
- class CheckSubsidiaryCycleOp(DataOperationMixIn, Operation):
+ class CheckSubsidiaryCycleOp(Operation):
def precommit_event(self):
check_cycle(self.session, self.eidto, 'subsidiary_of')
@@ -145,7 +145,7 @@
def __call__(self):
CheckSubsidiaryCycleOp.get_instance(self._cw).add_data(self.eidto)
- class CheckSubsidiaryCycleOp(Operation):
+ class CheckSubsidiaryCycleOp(DataOperationMixIn, Operation):
def precommit_event(self):
for eid in self.get_data():
diff -r d8bb8f631d41 -r a4e667270dd4 doc/book/en/devrepo/repo/sessions.rst
--- a/doc/book/en/devrepo/repo/sessions.rst Mon Sep 26 18:37:23 2011 +0200
+++ b/doc/book/en/devrepo/repo/sessions.rst Fri Dec 09 12:08:27 2011 +0100
@@ -59,11 +59,11 @@
other credentials elements (calling `authentication_information`),
giving the request object each time
- * the default retriever (bizarrely named
- `LoginPaswordRetreiver`) will in turn defer login and password
- fetching to the request object (which, depending on the
- authentication mode (`cookie` or `http`), will do the
- appropriate things and return a login and a password)
+ * the default retriever (oddly named `LoginPasswordRetreiver`)
+ will in turn defer login and password fetching to the request
+ object (which, depending on the authentication mode (`cookie`
+ or `http`), will do the appropriate things and return a login
+ and a password)
* the authentication manager, on success, asks the `Repository`
object to connect with the found credentials (using `connect`)
@@ -74,10 +74,10 @@
from which a regular `Session` object is made; it returns the
session id
- * the source in turn will defer work to an authentifier class
- that define the ultimate `authenticate` method (for instance
- the native source will query the database against the
- provided credentials)
+ * the source in turn will delegate work to an authentifier
+ class that defines the ultimate `authenticate` method (for
+ instance the native source will query the database against
+ the provided credentials)
* the authentication manager, on success, will call back _all_
retrievers with `authenticated` and return its authentication
@@ -99,9 +99,9 @@
each side: some middleware will do pre-authentication and under the
right circumstances add a new HTTP `x-foo-user` header to the query
before it reaches the CubicWeb instance. For a concrete example of
-this, see the `apachekerberos`_ cube.
+this, see the `trustedauth`_ cube.
-.. _`apachekerberos`: http://www.cubicweb.org/project/cubicweb-apachekerberos
+.. _`trustedauth`: http://www.cubicweb.org/project/cubicweb-trustedauth
Repository authentication plugins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff -r d8bb8f631d41 -r a4e667270dd4 doc/book/en/devrepo/testing.rst
--- a/doc/book/en/devrepo/testing.rst Mon Sep 26 18:37:23 2011 +0200
+++ b/doc/book/en/devrepo/testing.rst Fri Dec 09 12:08:27 2011 +0100
@@ -337,13 +337,12 @@
sourcefile='/path/to/realdb_sources')
def test_blog_rss(self):
- req = self.request()
- rset = req.execute('Any B ORDERBY D DESC WHERE B is BlogEntry, '
- 'B created_by U, U login "logilab", B creation_date D')
+ req = self.request()
+ rset = req.execute('Any B ORDERBY D DESC WHERE B is BlogEntry, '
+ 'B created_by U, U login "logilab", B creation_date D')
self.view('rss', rset)
-
Testing with other cubes
------------------------
diff -r d8bb8f631d41 -r a4e667270dd4 doc/book/en/devweb/edition/dissection.rst
--- a/doc/book/en/devweb/edition/dissection.rst Mon Sep 26 18:37:23 2011 +0200
+++ b/doc/book/en/devweb/edition/dissection.rst Fri Dec 09 12:08:27 2011 +0100
@@ -1,8 +1,8 @@
.. _form_dissection:
-Dissection of a form
---------------------
+Dissection of an entity form
+----------------------------
This is done (again) with a vanilla instance of the `tracker`_
cube. We will populate the database with a bunch of entities and see
@@ -10,44 +10,6 @@
.. _`tracker`: http://www.cubicweb.org/project/cubicweb-tracker
-Patching the session object
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-In order to play interactively with web side application objects, we
-have to cheat a bit: we will decorate the session object with some
-missing artifacts that should belong to a web request object. With
-that we can instantiate and render forms interactively.
-
-The function below does the minimum to allow going through this
-exercice. Some attributes or methods may be missing for other
-purposes. It is nevertheless not complicated to enhance it if need
-arises.
-
-.. sourcecode:: python
-
- def monkey_patch_session(session):
- """ useful to use the cw shell session object
- with web appobjects, which expect more than a plain
- data repository session
- """
- # for autoform selection
- session.json_request = False
- session.url = lambda: u'http://perdu.com'
- session.session = session
- session.form = {}
- session.list_form_param = lambda *args: []
- # for render
- session.use_fckeditor = lambda: False
- session._ressources = []
- session.add_js = session.add_css = lambda *args: session._ressources.append(args)
- session.external_resource = lambda x:{}
- session._tabcount = 0
- def next_tabindex():
- session._tabcount += 1
- return session._tabcount
- session.next_tabindex = next_tabindex
- return session
-
Populating the database
~~~~~~~~~~~~~~~~~~~~~~~
@@ -71,10 +33,17 @@
.. sourcecode:: python
- >>> monkey_patch_session(session)
- >>> form = session.vreg['forms'].select('edition', session, rset=rql('Ticket T'))
+ >>> cnx.use_web_compatible_requests('http://fakeurl.com')
+ >>> req = cnx.request()
+ >>> form = req.vreg['forms'].select('edition', req, rset=rql('Ticket T'))
>>> html = form.render()
+.. note::
+
+  In order to play interactively with web side application objects, we have to
+  cheat a bit to get a request object that looks like an HTTP request object, by
+  calling :meth:`use_web_compatible_requests()` on the connection.
+
This creates an automatic entity form. The ``.render()`` call yields
an html (unicode) string. The html output is shown below (with
internal fieldset omitted).
diff -r d8bb8f631d41 -r a4e667270dd4 doc/book/en/devweb/edition/form.rst
--- a/doc/book/en/devweb/edition/form.rst Mon Sep 26 18:37:23 2011 +0200
+++ b/doc/book/en/devweb/edition/form.rst Fri Dec 09 12:08:27 2011 +0100
@@ -48,9 +48,10 @@
'sparql': []}
-The two most important form families here (for all pracitcal purposes)
-are `base` and `edition`. Most of the time one wants alterations of
-the AutomaticEntityForm (from the `edition` category).
+The two most important form families here (for all practical purposes) are `base`
+and `edition`. Most of the time one wants alterations of the
+:class:`AutomaticEntityForm` to generate custom forms to handle edition of an
+entity.
The Automatic Entity Form
~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -214,6 +215,158 @@
unpublished versions defined in the project (sorted by number) for
which the current user is allowed to establish the relation.
+
+Building self-posted form with custom fields/widgets
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Sometimes you want a form that is not related to entity edition. For those,
+you'll have to handle form posting by yourself. Here is a complete example on how
+to achieve this (and more).
+
+Imagine you want a form that selects a month period. There is no proper
+field/widget to handle this in CubicWeb, so let's start by defining our own:
+
+.. sourcecode:: python
+
+ # let's have the whole import list at the beginning, even those necessary for
+ # subsequent snippets
+ from logilab.common import date
+ from logilab.mtconverter import xml_escape
+ from cubicweb.view import View
+ from cubicweb.selectors import match_kwargs
+ from cubicweb.web import RequestError, ProcessFormError
+ from cubicweb.web import formfields as fields, formwidgets as wdgs
+ from cubicweb.web.views import forms, calendar
+
+ class MonthSelect(wdgs.Select):
+        """Custom widget to display month and year. Expects the value to be
+        given as a date instance.
+ """
+
+ def format_value(self, form, field, value):
+ return u'%s/%s' % (value.year, value.month)
+
+ def process_field_data(self, form, field):
+ val = super(MonthSelect, self).process_field_data(form, field)
+ try:
+ year, month = val.split('/')
+ year = int(year)
+ month = int(month)
+ return date.date(year, month, 1)
+ except ValueError:
+ raise ProcessFormError(
+                form._cw._('badly formatted date string %s') % val)
+
+
+ class MonthPeriodField(fields.CompoundField):
+ """custom field composed of two subfields, 'begin_month' and 'end_month'.
+
+    It expects to be used on a form that has 'mindate' and 'maxdate' in its
+    extra arguments, telling the range of months to display.
+ """
+
+ def __init__(self, *args, **kwargs):
+ kwargs.setdefault('widget', wdgs.IntervalWidget())
+ super(MonthPeriodField, self).__init__(
+ [fields.StringField(name='begin_month',
+ choices=self.get_range, sort=False,
+ value=self.get_mindate,
+ widget=MonthSelect()),
+ fields.StringField(name='end_month',
+ choices=self.get_range, sort=False,
+ value=self.get_maxdate,
+ widget=MonthSelect())], *args, **kwargs)
+
+ @staticmethod
+ def get_range(form, field):
+ mindate = date.todate(form.cw_extra_kwargs['mindate'])
+ maxdate = date.todate(form.cw_extra_kwargs['maxdate'])
+ assert mindate <= maxdate
+ _ = form._cw._
+ months = []
+ while mindate <= maxdate:
+ label = '%s %s' % (_(calendar.MONTHNAMES[mindate.month - 1]),
+ mindate.year)
+ value = field.widget.format_value(form, field, mindate)
+ months.append( (label, value) )
+ mindate = date.next_month(mindate)
+ return months
+
+ @staticmethod
+ def get_mindate(form, field):
+ return form.cw_extra_kwargs['mindate']
+
+ @staticmethod
+ def get_maxdate(form, field):
+ return form.cw_extra_kwargs['maxdate']
+
+ def process_posted(self, form):
+ for field, value in super(MonthPeriodField, self).process_posted(form):
+ if field.name == 'end_month':
+ value = date.last_day(value)
+ yield field, value
+
+
+Here we first define a widget that will be used to select the beginning and the
+end of the period, displaying months like '<month name> YYYY' but using
+'YYYY/mm' as the actual value.
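+
+For instance, with the definitions above, the value round-trip would look like
+this (a sketch, assuming `form` and `field` objects are at hand, e.g. in an
+interactive session):
+
+.. sourcecode:: python
+
+    >>> widget = MonthSelect()
+    >>> widget.format_value(form, field, date.date(2011, 5, 1))
+    u'2011/5'
+    >>> # posting u'2011/5' back goes through process_field_data(),
+    >>> # which returns date.date(2011, 5, 1)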
+
+We then define a field that will actually hold two fields, one for the beginning
+and another for the end of the period. Each subfield uses the widget we defined
+earlier, and the outer field itself uses the standard
+:class:`IntervalWidget`. The field adds some logic:
+
+* a vocabulary generation function `get_range`, used to populate each sub-field
+
+* two 'value' functions `get_mindate` and `get_maxdate`, used to tell the
+  subfields which value they should consider on form initialization
+
+* overriding of `process_posted`, called when the form is being posted, so that
+ the end of the period is properly set to the last day of the month.
+
+Now, we can define a very simple form:
+
+.. sourcecode:: python
+
+ class MonthPeriodSelectorForm(forms.FieldsForm):
+ __regid__ = 'myform'
+ __select__ = match_kwargs('mindate', 'maxdate')
+
+ form_buttons = [wdgs.SubmitButton()]
+ form_renderer_id = 'onerowtable'
+ period = MonthPeriodField()
+
+
+where we simply add our field, set a submit button and use a very simple
+renderer (try others!). We also specify a selector that ensures the form will
+get the arguments necessary for our field.
+
+Now, we need a view that will wrap the form and handle the post when it
+occurs, simply displaying the posted values in the page:
+
+.. sourcecode:: python
+
+ class SelfPostingForm(View):
+ __regid__ = 'myformview'
+
+ def call(self):
+ mindate, maxdate = date.date(2010, 1, 1), date.date(2012, 1, 1)
+ form = self._cw.vreg['forms'].select(
+ 'myform', self._cw, mindate=mindate, maxdate=maxdate, action='')
+ try:
+ posted = form.process_posted()
+            self.w(u'<p>posted values %s</p>' % xml_escape(repr(posted)))
+ except RequestError: # no specified period asked
+ pass
+ form.render(w=self.w, formvalues=self._cw.form)
+
+
+Notice the usage of the :meth:`process_posted` method, which returns a
+dictionary of typed values (because they have been processed by the
+field). In our case, when the form is posted you should see a dictionary with
+'begin_month' and 'end_month' as keys and the selected dates as values (python
+`date` objects).
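+
+For instance, selecting january to march 2010 in the form would produce a
+dictionary like this (a sketch, the exact dates depending on the selection):
+
+.. sourcecode:: python
+
+    {'begin_month': datetime.date(2010, 1, 1),
+     'end_month': datetime.date(2010, 3, 31)}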
+
+
APIs
~~~~
diff -r d8bb8f631d41 -r a4e667270dd4 doc/book/en/devweb/resource.rst
--- a/doc/book/en/devweb/resource.rst Mon Sep 26 18:37:23 2011 +0200
+++ b/doc/book/en/devweb/resource.rst Fri Dec 09 12:08:27 2011 +0100
@@ -8,7 +8,7 @@
Static files handling
---------------------
-.. automethod:: cubicweb.web.webconfig.WebConfiguration.static_directory
+.. autoattribute:: cubicweb.web.webconfig.WebConfiguration.static_directory
.. automethod:: cubicweb.web.webconfig.WebConfiguration.static_file_exists
.. automethod:: cubicweb.web.webconfig.WebConfiguration.static_file_open
.. automethod:: cubicweb.web.webconfig.WebConfiguration.static_file_add
diff -r d8bb8f631d41 -r a4e667270dd4 doc/book/en/devweb/views/baseviews.rst
--- a/doc/book/en/devweb/views/baseviews.rst Mon Sep 26 18:37:23 2011 +0200
+++ b/doc/book/en/devweb/views/baseviews.rst Fri Dec 09 12:08:27 2011 +0100
@@ -1,137 +1,17 @@
-.. -*- coding: utf-8 -*-
-
Base views
----------
-*CubicWeb* provides a lot of standard views, that can be found in
+|cubicweb| provides a lot of standard views, that can be found in
:mod:`cubicweb.web.views` sub-modules.
A certain number of views are used to build the web interface, which apply to one
-or more entities. As other appobject, Their identifier is what distinguish them
+or more entities. As other appobjects, their identifier is what distinguishes them
from each other. The most generic ones, found in
:mod:`cubicweb.web.views.baseviews`, are described below.
-HTML views
-~~~~~~~~~~
-
-Special views
-`````````````
-
-*noresult*
- This view is the default view used when no result has been found
- (e.g. empty result set).
-
-*final*
-  Display the value of a cell without transformation (in case of a non final
- entity, we see the eid). Applicable on any result set.
-
-.. note::
-
- `final` entities are merely attributes.
-
-*null*
- This view is the default view used when nothing needs to be rendered.
- It is always applicable.
-
-
-Entity views
-````````````
-
-*incontext, outofcontext*
-
- Those are used to display a link to an entity, whose label depends on the
- entity having to be displayed in or out of context (of another entity): some
- entities make sense in the context of another entity. For instance, the
-  `Version` of a `Project` in forge. So one may expect that 'incontext' will
-  be called when displaying a version from within the context of a project,
-  while 'outofcontext' will be called in other cases. In our example, the
-  'incontext' view of the version would be something like '0.1.2', while the
-  'outofcontext' view would include the project name, e.g. 'baz 0.1.2' (since
-  a version number alone doesn't make sense if you don't know yet that you're
-  talking about the famous 'baz' project). |cubicweb| tries to guess and call
-  'incontext'/'outofcontext' appropriately. When it can't know, the 'oneline'
-  view should be used.
-
- By default it respectively produces the result of `textincontext` and
- `textoutofcontext` wrapped in a link leading to the primary view of the
- entity.
-
-
-*oneline*
-
- This view is used when we can't tell if the entity should be considered as
- displayed in or out of context. By default it produces the result of `text`
- in a link leading to the primary view of the entity.
+You'll probably want to customize one or more of the described views, which
+are default, generic implementations.
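+
+For instance, here is a minimal customization sketch (the `Version` entity
+type is an assumption made up for the example):
+
+.. sourcecode:: python
+
+    from logilab.mtconverter import xml_escape
+    from cubicweb.selectors import is_instance
+    from cubicweb.web.views import baseviews
+
+    class VersionInContextView(baseviews.InContextView):
+        """display a version using its long title instead of the default label"""
+        __select__ = baseviews.InContextView.__select__ & is_instance('Version')
+
+        def cell_call(self, row, col):
+            entity = self.cw_rset.get_entity(row, col)
+            self.w(xml_escape(entity.dc_long_title()))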
-List
-`````
-
-*list*
-
-  This view displays a list of entities by creating a HTML list (`<ul>`) and
- call the view `listitem` for each entity of the result set. The 'list' view
- will generate html like:
-
-  .. sourcecode:: html
-
-    <ul>
-      <li>"result of 'subvid' view for a row</li>
-      ...
-    </ul>
-
-*simplelist*
-
-  This view is not 'ul' based, and relies on div behaviour to separate
-  items. The html will look like
-
-  .. sourcecode:: html
-
-    <div>"result of 'subvid' view for a row</div>
-    ...
-
-  It relies on the base :class:`~cubicweb.view.View` class implementation of
-  the :meth:`call` method to insert those <div>.
-
-
-*sameetypelist*
+.. automodule:: cubicweb.web.views.baseviews
-  This view displays a list of entities of the same type, in an HTML section
-  (`<div>`) and calls the view `sameetypelistitem` for each entity of the result
- set. It's designed to get a more adapted global list when displayed entities
- are all of the same type.
-
-
-*csv*
-
-  This view displays each entity in a comma separated list. It is NOT related to
- the well-known text file format.
-
-
-Those list views can be given a 'subvid' argument, telling the view to use for
-each item in the list. When not specified, the value of the 'redirect_vid'
-attribute of :class:`ListItemView` (for 'list') or of :class:`SimpleListView`
-will be used. This defaults to 'outofcontext' for 'list' and 'incontext' for
-'simplelist'.
-
-
-Text entity views
-~~~~~~~~~~~~~~~~~
-
-Basic html views have some variants to be used when generating raw text, not html
-(for notifications for instance).
-
-*text*
-
- This is the simplest text view for an entity. By default it returns the
- result of the `.dc_title` method, which is cut to fit the
- `navigation.short-line-size` property if necessary.
-
-*textincontext, textoutofcontext*
-
- Similar to the `text` view, but called when an entity is considered out or in
- context (see description of incontext/outofcontext html views for more
- information on this). By default it returns respectively the result of the
- methods `.dc_title()` and `.dc_long_title()` of the entity.
diff -r d8bb8f631d41 -r a4e667270dd4 doc/book/en/devweb/views/primary.rst
--- a/doc/book/en/devweb/views/primary.rst Mon Sep 26 18:37:23 2011 +0200
+++ b/doc/book/en/devweb/views/primary.rst Fri Dec 09 12:08:27 2011 +0100
@@ -10,11 +10,11 @@
It is automatically selected on a one line result set containing an
entity.
-This view is supposed to render a maximum of informations about the
+It lives in the :mod:`cubicweb.web.views.primary` module.
+
+The *primary* view is supposed to render a maximum of information about the
entity.
-It lives in the :mod:`cubicweb.web.views.primary` module.
-
.. _primary_view_layout:
Layout
@@ -139,8 +139,6 @@
that can't be done using rql for instance.
-
-
.. sourcecode:: python
pv_section = uicfg.primaryview_section
@@ -163,62 +161,8 @@
``tag_subject_of``. To avoid warnings during execution, they should be set to
``'*'``.
-Rendering methods and attributes
-````````````````````````````````
-The basic layout of a primary view is as in the
-:ref:`primary_view_layout` section. This layout is actually drawn by
-the `render_entity` method.
-
-The methods you may want to modify while customizing a ``PrimaryView``
-are:
-
-*render_entity_title(self, entity)*
- Renders the entity title, by default using entity's :meth:`dc_title()` method.
-
-*render_entity_attributes(self, entity)*
- Renders all attributes and relations in the 'attributes' section . The
- :attr:`skip_none` attribute controls the display of `None` valued attributes.
-
-*render_entity_relations(self, entity)*
- Renders all relations in the 'relations' section.
-
-*render_side_boxes(self, entity, boxes)*
- Renders side boxes on the right side of the content. This will generate a box
- for each relation in the 'sidebox' section, as well as explicit box
- appobjects selectable in this context.
-
-The placement of relations in the relations section or in side boxes
-can be controlled through the :ref:`primary_view_configuration` mechanism.
-
-*content_navigation_components(self, context)*
- This method is applicable only for entity type implementing the interface
- `IPrevNext`. This interface is for entities which can be linked to a previous
- and/or next entity. This method will render the navigation links between
- entities of this type, either at the top or at the bottom of the page
- given the context (navcontent{top|bottom}).
-
-Also, please note that by setting the following attributes in your
-subclass, you can already customize some of the rendering:
-
-*show_attr_label*
- Renders the attribute label next to the attribute value if set to `True`.
- Otherwise, does only display the attribute value.
-
-*show_rel_label*
- Renders the relation label next to the relation value if set to `True`.
- Otherwise, does only display the relation value.
-
-*skip_none*
- Does not render an attribute value that is None if set to `True`.
-
-*main_related_section*
- Renders the relations of the entity if set to `True`.
-
-A good practice is for you to identify the content of your entity type for which
-the default rendering does not answer your need so that you can focus on the specific
-method (from the list above) that needs to be modified. We do not advise you to
-overwrite ``render_entity`` unless you want a completely different layout.
+.. automodule:: cubicweb.web.views.primary
Example of customization and creation
@@ -329,3 +273,4 @@
.. image:: ../../images/lax-book_10-blog-with-two-entries_en.png
:alt: a blog and all its entries
+
diff -r d8bb8f631d41 -r a4e667270dd4 doc/book/en/devweb/views/reledit.rst
--- a/doc/book/en/devweb/views/reledit.rst Mon Sep 26 18:37:23 2011 +0200
+++ b/doc/book/en/devweb/views/reledit.rst Fri Dec 09 12:08:27 2011 +0100
@@ -116,7 +116,34 @@
which always work.
+Disable `reledit`
+*****************
+
+By default, `reledit` is available on attributes and relations displayed in
+the 'attribute' section of the default primary view. If you want to disable
+it for some attribute or relation, you have to use `uicfg`:
+
+.. sourcecode:: python
+
+    from cubicweb.web import uicfg
+    _pvdc = uicfg.primaryview_display_ctrl
+ _pvdc.tag_attribute(('Company', 'name'), {'vid': 'incontext'})
+
+To deactivate it everywhere it's used automatically, you may use the code snippet
+below somewhere in your cube's views:
+
+.. sourcecode:: python
+
+ from cubicweb.web.views import reledit
+
+ class DeactivatedAutoClickAndEditFormView(reledit.AutoClickAndEditFormView):
+ def _should_edit_attribute(self, rschema):
+ return False
+
+        def _should_edit_relation(self, rschema, role):
+ return False
+
+ def registration_callback(vreg):
+ vreg.register_and_replace(DeactivatedAutoClickAndEditFormView,
+ reledit.AutoClickAndEditFormView)
-
-
diff -r d8bb8f631d41 -r a4e667270dd4 doc/book/en/devweb/views/startup.rst
--- a/doc/book/en/devweb/views/startup.rst Mon Sep 26 18:37:23 2011 +0200
+++ b/doc/book/en/devweb/views/startup.rst Fri Dec 09 12:08:27 2011 +0100
@@ -1,15 +1,18 @@
Startup views
-------------
-(:mod:`cubicweb.web.views.startup`)
+Startup views are views requiring no context, from which you usually start
+browsing (for instance the index page). The usual selectors are
+:class:`~cubicweb.selectors.none_rset` or :class:`~cubicweb.selectors.yes`.
-The usual selectors are no_rset or yes. These views don't apply to a
-result set.
+You'll find here a description of startup views provided by the framework.
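+
+As a minimal sketch, a custom startup view may look like this (the `myindex`
+identifier and the message are made up for the example):
+
+.. sourcecode:: python
+
+    from cubicweb.selectors import none_rset
+    from cubicweb.view import StartupView
+
+    class MyIndexView(StartupView):
+        """selectable without a result set, e.g. via ?vid=myindex"""
+        __regid__ = 'myindex'
+        __select__ = none_rset()
+
+        def call(self):
+            # w() writes unicode chunks to the output stream
+            self.w(u'<h1>welcome</h1>')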
-*index*
- This view defines the home page of your application. It does not require
- a result set to apply to.
+.. automodule:: cubicweb.web.views.startup
+
+
+Other startup views:
*schema*
A view dedicated to the display of the schema of the instance
+.. XXX to be continued
\ No newline at end of file
diff -r d8bb8f631d41 -r a4e667270dd4 doc/book/en/intro/concepts.rst
--- a/doc/book/en/intro/concepts.rst Mon Sep 26 18:37:23 2011 +0200
+++ b/doc/book/en/intro/concepts.rst Fri Dec 09 12:08:27 2011 +0100
@@ -1,4 +1,3 @@
-
.. -*- coding: utf-8 -*-
.. _Concepts:
@@ -31,7 +30,7 @@
.. note::
The command :command:`cubicweb-ctl list` displays the list of cubes
-installed on your system.
+ installed on your system.
.. _`CubicWeb.org Forge`: http://www.cubicweb.org/project/
.. _`cubicweb-blog`: http://www.cubicweb.org/project/cubicweb-blog
diff -r d8bb8f631d41 -r a4e667270dd4 doc/tools/pyjsrest.py
--- a/doc/tools/pyjsrest.py Mon Sep 26 18:37:23 2011 +0200
+++ b/doc/tools/pyjsrest.py Fri Dec 09 12:08:27 2011 +0100
@@ -102,7 +102,7 @@
for fileid in INDEX_IN_ORDER:
try:
index.remove(fileid)
- except:
+ except Exception:
raise Exception(
'Bad file id %s referenced in INDEX_IN_ORDER in %s, '
'fix this please' % (fileid, __file__))
diff -r d8bb8f631d41 -r a4e667270dd4 entities/adapters.py
--- a/entities/adapters.py Mon Sep 26 18:37:23 2011 +0200
+++ b/entities/adapters.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2010-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -137,7 +137,7 @@
value = entity.printable_value(rschema, format='text/plain')
except TransformError:
continue
- except:
+ except Exception:
self.exception("can't add value of %s to text index for entity %s",
rschema, entity.eid)
continue
@@ -366,8 +366,8 @@
class IProgressAdapter(EntityAdapter):
"""something that has a cost, a state and a progression.
- You should at least override progress_info an in_progress methods on concret
- implementations.
+    You should at least override the progress_info and in_progress methods on
+    concrete implementations.
"""
__needs_bw_compat__ = True
__regid__ = 'IProgress'
diff -r d8bb8f631d41 -r a4e667270dd4 entities/authobjs.py
--- a/entities/authobjs.py Mon Sep 26 18:37:23 2011 +0200
+++ b/entities/authobjs.py Fri Dec 09 12:08:27 2011 +0100
@@ -98,7 +98,7 @@
prop = self._cw.execute(
'CWProperty X WHERE X pkey %(k)s, X for_user U, U eid %(u)s',
{'k': pkey, 'u': self.eid}).get_entity(0, 0)
- except:
+ except Exception:
kwargs = dict(pkey=unicode(pkey), value=value)
if self.is_in_group('managers'):
kwargs['for_user'] = self
diff -r d8bb8f631d41 -r a4e667270dd4 entities/lib.py
--- a/entities/lib.py Mon Sep 26 18:37:23 2011 +0200
+++ b/entities/lib.py Fri Dec 09 12:08:27 2011 +0100
@@ -40,6 +40,7 @@
class EmailAddress(AnyEntity):
__regid__ = 'EmailAddress'
fetch_attrs, fetch_order = fetch_config(['address', 'alias'])
+ rest_attr = 'eid'
def dc_title(self):
if self.alias:
diff -r d8bb8f631d41 -r a4e667270dd4 entities/schemaobjs.py
diff -r d8bb8f631d41 -r a4e667270dd4 entities/test/unittest_wfobjs.py
--- a/entities/test/unittest_wfobjs.py Mon Sep 26 18:37:23 2011 +0200
+++ b/entities/test/unittest_wfobjs.py Fri Dec 09 12:08:27 2011 +0100
@@ -165,7 +165,7 @@
user = self.user()
iworkflowable = user.cw_adapt_to('IWorkflowable')
iworkflowable.fire_transition('deactivate', comment=u'deactivate user')
- user.clear_all_caches()
+ user.cw_clear_all_caches()
self.assertEqual(iworkflowable.state, 'deactivated')
self._test_manager_deactivate(user)
trinfo = self._test_manager_deactivate(user)
@@ -192,7 +192,7 @@
self.commit()
iworkflowable.fire_transition('wake up')
self.commit()
- user.clear_all_caches()
+ user.cw_clear_all_caches()
self.assertEqual(iworkflowable.state, 'deactivated')
# XXX test managers can change state without matching transition
@@ -274,14 +274,14 @@
self.assertEqual(iworkflowable.subworkflow_input_transition(), None)
iworkflowable.fire_transition('swftr1', u'go')
self.commit()
- group.clear_all_caches()
+ group.cw_clear_all_caches()
self.assertEqual(iworkflowable.current_state.eid, swfstate1.eid)
self.assertEqual(iworkflowable.current_workflow.eid, swf.eid)
self.assertEqual(iworkflowable.main_workflow.eid, mwf.eid)
self.assertEqual(iworkflowable.subworkflow_input_transition().eid, swftr1.eid)
iworkflowable.fire_transition('tr1', u'go')
self.commit()
- group.clear_all_caches()
+ group.cw_clear_all_caches()
self.assertEqual(iworkflowable.current_state.eid, state2.eid)
self.assertEqual(iworkflowable.current_workflow.eid, mwf.eid)
self.assertEqual(iworkflowable.main_workflow.eid, mwf.eid)
@@ -295,10 +295,10 @@
# force back to state1
iworkflowable.change_state('state1', u'gadget')
iworkflowable.fire_transition('swftr1', u'au')
- group.clear_all_caches()
+ group.cw_clear_all_caches()
iworkflowable.fire_transition('tr2', u'chapeau')
self.commit()
- group.clear_all_caches()
+ group.cw_clear_all_caches()
self.assertEqual(iworkflowable.current_state.eid, state3.eid)
self.assertEqual(iworkflowable.current_workflow.eid, mwf.eid)
self.assertEqual(iworkflowable.main_workflow.eid, mwf.eid)
@@ -390,7 +390,7 @@
):
iworkflowable.fire_transition(trans)
self.commit()
- group.clear_all_caches()
+ group.cw_clear_all_caches()
self.assertEqual(iworkflowable.state, nextstate)
@@ -408,11 +408,11 @@
wf.add_state('asleep', initial=True)
self.execute('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
{'wf': wf.eid, 'x': self.member.eid})
- self.member.clear_all_caches()
+ self.member.cw_clear_all_caches()
iworkflowable = self.member.cw_adapt_to('IWorkflowable')
self.assertEqual(iworkflowable.state, 'activated')# no change before commit
self.commit()
- self.member.clear_all_caches()
+ self.member.cw_clear_all_caches()
self.assertEqual(iworkflowable.current_workflow.eid, wf.eid)
self.assertEqual(iworkflowable.state, 'asleep')
self.assertEqual(iworkflowable.workflow_history, ())
@@ -429,7 +429,7 @@
self.execute('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
{'wf': wf.eid, 'x': self.member.eid})
self.commit()
- self.member.clear_all_caches()
+ self.member.cw_clear_all_caches()
self.assertEqual(iworkflowable.current_workflow.eid, wf.eid)
self.assertEqual(iworkflowable.state, 'asleep')
self.assertEqual(parse_hist(iworkflowable.workflow_history),
@@ -472,10 +472,10 @@
self.commit()
self.execute('DELETE X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
{'wf': wf.eid, 'x': self.member.eid})
- self.member.clear_all_caches()
+ self.member.cw_clear_all_caches()
self.assertEqual(iworkflowable.state, 'asleep')# no change before commit
self.commit()
- self.member.clear_all_caches()
+ self.member.cw_clear_all_caches()
self.assertEqual(iworkflowable.current_workflow.name, "default user workflow")
self.assertEqual(iworkflowable.state, 'activated')
self.assertEqual(parse_hist(iworkflowable.workflow_history),
@@ -504,13 +504,13 @@
self.execute('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
{'wf': wf.eid, 'x': user.eid})
self.commit()
- user.clear_all_caches()
+ user.cw_clear_all_caches()
self.assertEqual(iworkflowable.state, 'asleep')
self.assertEqual([t.name for t in iworkflowable.possible_transitions()],
['rest'])
iworkflowable.fire_transition('rest')
self.commit()
- user.clear_all_caches()
+ user.cw_clear_all_caches()
self.assertEqual(iworkflowable.state, 'asleep')
self.assertEqual([t.name for t in iworkflowable.possible_transitions()],
['rest'])
@@ -520,7 +520,7 @@
self.commit()
iworkflowable.fire_transition('rest')
self.commit()
- user.clear_all_caches()
+ user.cw_clear_all_caches()
self.assertEqual(iworkflowable.state, 'dead')
self.assertEqual(parse_hist(iworkflowable.workflow_history),
[('asleep', 'asleep', 'rest', None),
@@ -557,7 +557,7 @@
def setUp(self):
CubicWebTC.setUp(self)
self.wf = self.session.user.cw_adapt_to('IWorkflowable').current_workflow
- self.session.set_pool()
+ self.session.set_cnxset()
self.s_activated = self.wf.state_by_name('activated').eid
self.s_deactivated = self.wf.state_by_name('deactivated').eid
self.s_dummy = self.wf.add_state(u'dummy').eid
@@ -629,13 +629,13 @@
iworkflowable = user.cw_adapt_to('IWorkflowable')
iworkflowable.fire_transition('deactivate')
cnx.commit()
- session.set_pool()
+ session.set_cnxset()
with self.assertRaises(ValidationError) as cm:
iworkflowable.fire_transition('deactivate')
self.assertEqual(self._cleanup_msg(cm.exception.errors['by_transition-subject']),
u"transition isn't allowed from")
cnx.rollback()
- session.set_pool()
+ session.set_cnxset()
# get back now
iworkflowable.fire_transition('activate')
cnx.commit()
diff -r d8bb8f631d41 -r a4e667270dd4 entities/wfobjs.py
--- a/entities/wfobjs.py Mon Sep 26 18:37:23 2011 +0200
+++ b/entities/wfobjs.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -326,8 +326,8 @@
result[ep.subwf_state.eid] = ep.destination and ep.destination.eid
return result
- def clear_all_caches(self):
- super(WorkflowTransition, self).clear_all_caches()
+ def cw_clear_all_caches(self):
+ super(WorkflowTransition, self).cw_clear_all_caches()
clear_cache(self, 'exit_points')
diff -r d8bb8f631d41 -r a4e667270dd4 entity.py
--- a/entity.py Mon Sep 26 18:37:23 2011 +0200
+++ b/entity.py Fri Dec 09 12:08:27 2011 +0100
@@ -395,8 +395,10 @@
@cached
def cw_metainformation(self):
- res = dict(zip(('type', 'source', 'extid'), self._cw.describe(self.eid)))
- res['source'] = self._cw.source_defs()[res['source']]
+ res = self._cw.describe(self.eid, asdict=True)
+ # use 'asource' and not 'source' since this is the actual source,
+ # while 'source' is the physical source (where it's stored)
+ res['source'] = self._cw.source_defs()[res.pop('asource')]
return res
def cw_check_perm(self, action):
@@ -431,9 +433,11 @@
use_ext_id = False
if 'base_url' not in kwargs and \
getattr(self._cw, 'search_state', ('normal',))[0] == 'normal':
- baseurl = self.cw_metainformation()['source'].get('base-url')
- if baseurl:
- kwargs['base_url'] = baseurl
+ sourcemeta = self.cw_metainformation()['source']
+ if sourcemeta.get('use-cwuri-as-url'):
+ return self.cwuri # XXX consider kwargs?
+ if sourcemeta.get('base-url'):
+ kwargs['base_url'] = sourcemeta['base-url']
use_ext_id = True
if method in (None, 'view'):
try:
@@ -718,12 +722,21 @@
self.cw_attr_cache[name] = value = None
return value
- def related(self, rtype, role='subject', limit=None, entities=False): # XXX .cw_related
+ def related(self, rtype, role='subject', limit=None, entities=False, # XXX .cw_related
+ safe=False):
"""returns a resultset of related entities
- :param role: is the role played by 'self' in the relation ('subject' or 'object')
- :param limit: resultset's maximum size
- :param entities: if True, the entites are returned; if False, a result set is returned
+ :param rtype:
+ the name of the relation, aka relation type
+ :param role:
+ the role played by 'self' in the relation ('subject' or 'object')
+ :param limit:
+ resultset's maximum size
+ :param entities:
+        if True, the entities are returned; if False, a result set is returned
+ :param safe:
+ if True, an empty rset/list of entities will be returned in case of
+ :exc:`Unauthorized`, else (the default), the exception is propagated
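+
+        For instance (usage sketch)::
+
+          # get an empty rset instead of an Unauthorized exception if the
+          # user may not read the relation
+          rset = entity.related('created_by', safe=True)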
"""
try:
return self._cw_relation_cache(rtype, role, entities, limit)
@@ -734,7 +747,12 @@
return []
return self._cw.empty_rset()
rql = self.cw_related_rql(rtype, role)
- rset = self._cw.execute(rql, {'x': self.eid})
+ try:
+ rset = self._cw.execute(rql, {'x': self.eid})
+ except Unauthorized:
+ if not safe:
+ raise
+ rset = self._cw.empty_rset()
self.cw_set_relation_cache(rtype, role, rset)
return self.related(rtype, role, limit, entities)
@@ -773,7 +791,7 @@
rql.split(' WHERE ', 1)[1])
elif not ' ORDERBY ' in rql:
args = rql.split(' WHERE ', 1)
- # if modification_date already retreived, we should use it instead
+ # if modification_date already retrieved, we should use it instead
            # of adding another variable for sort. This should not be problematic
            # but it actually is with sqlserver, see ticket #694445
if 'X modification_date ' in args[1]:
@@ -942,7 +960,7 @@
assert role
self._cw_related_cache.pop('%s_%s' % (rtype, role), None)
- def clear_all_caches(self): # XXX cw_clear_all_caches
+ def cw_clear_all_caches(self):
"""flush all caches on this entity. Further attributes/relations access
        will trigger new database queries to get back values.
@@ -1024,6 +1042,10 @@
# deprecated stuff #########################################################
+ @deprecated('[3.13] use entity.cw_clear_all_caches()')
+ def clear_all_caches(self):
+ return self.cw_clear_all_caches()
+
@deprecated('[3.9] use entity.cw_attr_value(attr)')
def get_value(self, name):
return self.cw_attr_value(name)
diff -r d8bb8f631d41 -r a4e667270dd4 etwist/http.py
--- a/etwist/http.py Mon Sep 26 18:37:23 2011 +0200
+++ b/etwist/http.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,7 +1,7 @@
"""twisted server for CubicWeb web instances
:organization: Logilab
-:copyright: 2001-2010 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
+:copyright: 2001-2011 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
"""
@@ -25,25 +25,14 @@
def _init_headers(self):
if self._headers_out is None:
return
-
- # initialize cookies
- cookies = self._headers_out.getHeader('set-cookie') or []
- for cookie in cookies:
- self._twreq.addCookie(cookie.name, cookie.value, cookie.expires,
- cookie.domain, cookie.path, #TODO max-age
- comment = cookie.comment, secure=cookie.secure)
- self._headers_out.removeHeader('set-cookie')
-
- # initialize other headers
- for k, v in self._headers_out.getAllRawHeaders():
- self._twreq.setHeader(k, v[0])
-
+ # initialize headers
+ for k, values in self._headers_out.getAllRawHeaders():
+ self._twreq.responseHeaders.setRawHeaders(k, values)
# add content-length if not present
if (self._headers_out.getHeader('content-length') is None
and self._stream is not None):
self._twreq.setHeader('content-length', len(self._stream))
-
def _finalize(self):
# we must set code before writing anything, else it's too late
if self._code is not None:
diff -r d8bb8f631d41 -r a4e667270dd4 etwist/request.py
--- a/etwist/request.py Mon Sep 26 18:37:23 2011 +0200
+++ b/etwist/request.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
diff -r d8bb8f631d41 -r a4e667270dd4 etwist/server.py
--- a/etwist/server.py Mon Sep 26 18:37:23 2011 +0200
+++ b/etwist/server.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -17,14 +17,18 @@
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""twisted server for CubicWeb web instances"""
+from __future__ import with_statement
+
__docformat__ = "restructuredtext en"
import sys
import os
+import os.path as osp
import select
-import errno
import traceback
import threading
+import re
+from hashlib import md5 # pylint: disable=E0611
from os.path import join
from time import mktime
from datetime import date, timedelta
@@ -41,7 +45,8 @@
from logilab.common.decorators import monkeypatch
-from cubicweb import AuthenticationError, ConfigurationError, CW_EVENT_MANAGER
+from cubicweb import (AuthenticationError, ConfigurationError,
+ CW_EVENT_MANAGER, CubicWebException)
from cubicweb.utils import json_dumps
from cubicweb.web import Redirect, DirectResponse, StatusResponse, LogOut
from cubicweb.web.application import CubicWebPublisher
@@ -70,13 +75,85 @@
code=http.FORBIDDEN,
stream='Access forbidden')
-class File(static.File):
- """Prevent from listing directories"""
+
+class NoListingFile(static.File):
+ def __init__(self, config, path=None):
+ if path is None:
+ path = config.static_directory
+ static.File.__init__(self, path)
+ self.config = config
+
+ def set_expires(self, request):
+ if not self.config.debugmode:
+ # XXX: Don't provide additional resource information to error responses
+ #
+            # the HTTP RFC recommends not going further than 1 year ahead
+ expires = date.today() + timedelta(days=6*30)
+ request.setHeader('Expires', generateDateTime(mktime(expires.timetuple())))
+
def directoryListing(self):
return ForbiddenDirectoryLister()
-class LongTimeExpiringFile(File):
+class DataLookupDirectory(NoListingFile):
+ def __init__(self, config, path):
+ self.md5_version = config.instance_md5_version()
+ NoListingFile.__init__(self, config, path)
+ self.here = path
+ self._defineChildResources()
+ if self.config.debugmode:
+ self.data_modconcat_basepath = '/data/??'
+ else:
+ self.data_modconcat_basepath = '/data/%s/??' % self.md5_version
+
+ def _defineChildResources(self):
+ self.putChild(self.md5_version, self)
+
+ def getChild(self, path, request):
+ if not path:
+ uri = request.uri
+ if uri.startswith('/https/'):
+ uri = uri[6:]
+ if uri.startswith(self.data_modconcat_basepath):
+ resource_relpath = uri[len(self.data_modconcat_basepath):]
+ if resource_relpath:
+ paths = resource_relpath.split(',')
+ try:
+ self.set_expires(request)
+ return ConcatFiles(self.config, paths)
+ except ConcatFileNotFoundError:
+ return self.childNotFound
+ return self.directoryListing()
+ childpath = join(self.here, path)
+ dirpath, rid = self.config.locate_resource(childpath)
+ if dirpath is None:
+ # resource not found
+ return self.childNotFound
+ filepath = os.path.join(dirpath, rid)
+ if os.path.isdir(filepath):
+ resource = DataLookupDirectory(self.config, childpath)
+ # cache resource for this segment path to avoid recomputing
+ # directory lookup
+ self.putChild(path, resource)
+ return resource
+ else:
+ self.set_expires(request)
+ return NoListingFile(self.config, filepath)
+
+
+class FCKEditorResource(NoListingFile):
+
+ def getChild(self, path, request):
+ pre_path = request.path.split('/')[1:]
+ if pre_path[0] == 'https':
+ pre_path.pop(0)
+ uiprops = self.config.https_uiprops
+ else:
+ uiprops = self.config.uiprops
+ return static.File(osp.join(uiprops['FCKEDITOR_PATH'], path))
+
+
+class LongTimeExpiringFile(DataLookupDirectory):
"""overrides static.File and sets a far future ``Expires`` date
    on the resource.
@@ -88,28 +165,84 @@
etc.
"""
- def render(self, request):
- # XXX: Don't provide additional resource information to error responses
- #
- # the HTTP RFC recommands not going further than 1 year ahead
- expires = date.today() + timedelta(days=6*30)
- request.setHeader('Expires', generateDateTime(mktime(expires.timetuple())))
- return File.render(self, request)
+ def _defineChildResources(self):
+ pass
+
+
+class ConcatFileNotFoundError(CubicWebException):
+ pass
+
+
+class ConcatFiles(LongTimeExpiringFile):
+ def __init__(self, config, paths):
+ _, ext = osp.splitext(paths[0])
+ self._resources = {}
+ # create a unique / predictable filename. We don't consider cubes
+ # version since uicache is cleared at server startup, and file's dates
+ # are checked in debug mode
+ fname = 'cache_concat_' + md5(';'.join(paths)).hexdigest() + ext
+ filepath = osp.join(config.appdatahome, 'uicache', fname)
+ LongTimeExpiringFile.__init__(self, config, filepath)
+ self._concat_cached_filepath(filepath, paths)
+
+ def _resource(self, path):
+ try:
+ return self._resources[path]
+ except KeyError:
+ self._resources[path] = self.config.locate_resource(path)
+ return self._resources[path]
+
+ def _concat_cached_filepath(self, filepath, paths):
+ if not self._up_to_date(filepath, paths):
+ with open(filepath, 'wb') as f:
+ for path in paths:
+ dirpath, rid = self._resource(path)
+ if rid is None:
+ # In production mode log an error, do not return a 404
+ # XXX the erroneous content is cached anyway
+ LOGGER.error('concatenated data url error: %r file '
+ 'does not exist', path)
+ if self.config.debugmode:
+ raise ConcatFileNotFoundError(path)
+ else:
+ for line in open(osp.join(dirpath, rid)):
+ f.write(line)
+ f.write('\n')
+
+ def _up_to_date(self, filepath, paths):
+ """
+ The concat-file is considered up-to-date if it exists.
+        In debug mode, an additional check is performed to make sure that the
+        concat-file is more recent than all concatenated files.
+ """
+ if not osp.isfile(filepath):
+ return False
+ if self.config.debugmode:
+ concat_lastmod = os.stat(filepath).st_mtime
+ for path in paths:
+ dirpath, rid = self._resource(path)
+ if rid is None:
+ raise ConcatFileNotFoundError(path)
+ path = osp.join(dirpath, rid)
+ if os.stat(path).st_mtime > concat_lastmod:
+ return False
+ return True
class CubicWebRootResource(resource.Resource):
def __init__(self, config, vreg=None):
+ resource.Resource.__init__(self)
self.config = config
# instantiate publisher here and not in init_publisher to get some
# checks done before daemonization (eg versions consistency)
self.appli = CubicWebPublisher(config, vreg=vreg)
self.base_url = config['base-url']
self.https_url = config['https-url']
- self.children = {}
- self.static_directories = set(('data%s' % config.instance_md5_version(),
- 'data', 'static', 'fckeditor'))
global MAX_POST_LENGTH
MAX_POST_LENGTH = config['max-post-length']
+ self.putChild('static', NoListingFile(config))
+ self.putChild('fckeditor', FCKEditorResource(self.config, ''))
+ self.putChild('data', DataLookupDirectory(self.config, ''))
def init_publisher(self):
config = self.config
@@ -152,38 +285,6 @@
def getChild(self, path, request):
"""Indicate which resource to use to process down the URL's path"""
- pre_path = request.path.split('/')[1:]
- if pre_path[0] == 'https':
- pre_path.pop(0)
- uiprops = self.config.https_uiprops
- else:
- uiprops = self.config.uiprops
- directory = pre_path[0]
- # Anything in data/, static/, fckeditor/ and the generated versioned
- # data directory is treated as static files
- if directory in self.static_directories:
- # take care fckeditor may appears as root directory or as a data
- # subdirectory
- if directory == 'static':
- return File(self.config.static_directory)
- if directory == 'fckeditor':
- return File(uiprops['FCKEDITOR_PATH'])
- if directory != 'data':
- # versioned directory, use specific file with http cache
- # headers so their are cached for a very long time
- cls = LongTimeExpiringFile
- else:
- cls = File
- if path == 'fckeditor':
- return cls(uiprops['FCKEDITOR_PATH'])
- if path == directory: # recurse
- return self
- datadir, path = self.config.locate_resource(path)
- if datadir is None:
- return self # recurse
- self.debug('static file %s from %s', path, datadir)
- return cls(join(datadir, path))
- # Otherwise we use this single resource
return self
def render(self, request):
@@ -208,7 +309,7 @@
# so we deferred that part to the cubicweb thread
request.process_multipart()
return self._render_request(request)
- except:
+ except Exception:
errorstream = StringIO()
traceback.print_exc(file=errorstream)
            return HTTPResponse(stream='<pre>%s</pre>' % errorstream.getvalue(),
@@ -302,6 +403,13 @@
stream=content, code=code,
headers=request.headers_out)
+ # these are overridden by set_log_methods below
+ # only defining here to prevent pylint from complaining
+ @classmethod
+ def debug(cls, msg, *a, **kw):
+ pass
+ info = warning = error = critical = exception = debug
+
JSON_PATHS = set(('json',))
FRAME_POST_PATHS = set(('validateform',))
@@ -409,6 +517,7 @@
# serve it via standard HTTP on port set in the configuration
port = config['port'] or 8080
interface = config['interface']
+ reactor.suggestThreadPoolSize(config['webserver-threadpool-size'])
reactor.listenTCP(port, website, interface=interface)
if not config.debugmode:
if sys.platform == 'win32':
@@ -421,12 +530,8 @@
return whichproc # parent process
root_resource.init_publisher() # before changing uid
if config['uid'] is not None:
- try:
- uid = int(config['uid'])
- except ValueError:
- from pwd import getpwnam
- uid = getpwnam(config['uid']).pw_uid
- os.setuid(uid)
+ from logilab.common.daemon import setugid
+ setugid(config['uid'])
root_resource.start_service()
LOGGER.info('instance started on %s', root_resource.base_url)
    # avoid annoying warning if not in Main Thread
diff -r d8bb8f631d41 -r a4e667270dd4 etwist/test/unittest_server.py
--- a/etwist/test/unittest_server.py Mon Sep 26 18:37:23 2011 +0200
+++ b/etwist/test/unittest_server.py Fri Dec 09 12:08:27 2011 +0100
@@ -15,8 +15,12 @@
#
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+
+import os, os.path as osp, glob
+
from cubicweb.devtools.testlib import CubicWebTC
-from cubicweb.etwist.server import host_prefixed_baseurl
+from cubicweb.etwist.server import (host_prefixed_baseurl, ConcatFiles,
+ ConcatFileNotFoundError)
class HostPrefixedBaseURLTC(CubicWebTC):
@@ -50,3 +54,30 @@
self._check('http://localhost:8080/hg/', 'code.cubicweb.org',
'http://localhost:8080/hg/')
+
+class ConcatFilesTC(CubicWebTC):
+
+ def tearDown(self):
+ super(ConcatFilesTC, self).tearDown()
+ self._cleanup_concat_cache()
+ self.config.debugmode = False
+
+ def _cleanup_concat_cache(self):
+ uicachedir = osp.join(self.config.apphome, 'uicache')
+ for fname in glob.glob(osp.join(uicachedir, 'cache_concat_*')):
+ os.unlink(osp.join(uicachedir, fname))
+
+ def test_cache(self):
+ concat = ConcatFiles(self.config, ('cubicweb.ajax.js', 'jquery.js'))
+ self.failUnless(osp.isfile(concat.path))
+
+ def test_404(self):
+ # when not in debug mode, should not crash
+ ConcatFiles(self.config, ('cubicweb.ajax.js', 'dummy.js'))
+ # in debug mode, raise error
+ self.config.debugmode = True
+ try:
+ self.assertRaises(ConcatFileNotFoundError, ConcatFiles, self.config,
+ ('cubicweb.ajax.js', 'dummy.js'))
+ finally:
+ self.config.debugmode = False
diff -r d8bb8f631d41 -r a4e667270dd4 etwist/twconfig.py
--- a/etwist/twconfig.py Mon Sep 26 18:37:23 2011 +0200
+++ b/etwist/twconfig.py Fri Dec 09 12:08:27 2011 +0100
@@ -90,6 +90,13 @@
'help': 'run a pyro server',
'group': 'main', 'level': 1,
}),
+ ('webserver-threadpool-size',
+ {'type': 'int',
+ 'default': 4,
+      'help': "size of twisted's reactor threadpool. It should probably not be \
+much greater than connection-poolsize",
+ 'group': 'web', 'level': 3,
+ }),
) + WebConfiguration.options)
def server_file(self):
diff -r d8bb8f631d41 -r a4e667270dd4 etwist/twctl.py
--- a/etwist/twctl.py Mon Sep 26 18:37:23 2011 +0200
+++ b/etwist/twctl.py Fri Dec 09 12:08:27 2011 +0100
@@ -17,6 +17,10 @@
# with CubicWeb. If not, see .
"""cubicweb-clt handlers for twisted"""
+from os.path import join
+
+from logilab.common.shellutils import rm
+
from cubicweb.toolsutils import CommandHandler
from cubicweb.web.webctl import WebCreateHandler
@@ -32,6 +36,9 @@
def start_server(self, config):
from cubicweb.etwist import server
+ config.info('clear ui caches')
+ for cachedir in ('uicache', 'uicachehttps'):
+ rm(join(config.appdatahome, cachedir, '*'))
return server.run(config)
class TWStopHandler(CommandHandler):
diff -r d8bb8f631d41 -r a4e667270dd4 ext/rest.py
--- a/ext/rest.py Mon Sep 26 18:37:23 2011 +0200
+++ b/ext/rest.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -71,7 +71,7 @@
try:
try:
eid_num, rest = text.split(u':', 1)
- except:
+ except ValueError:
eid_num, rest = text, '#'+text
eid_num = int(eid_num)
if eid_num < 0:
@@ -105,11 +105,17 @@
else:
rql, vid = text, None
_cw.ensure_ro_rql(rql)
- rset = _cw.execute(rql, {'userid': _cw.user.eid})
- if vid is None:
- vid = vid_from_rset(_cw, rset, _cw.vreg.schema)
- view = _cw.vreg['views'].select(vid, _cw, rset=rset)
- content = view.render()
+ try:
+ rset = _cw.execute(rql, {'userid': _cw.user.eid})
+ if rset:
+ if vid is None:
+ vid = vid_from_rset(_cw, rset, _cw.vreg.schema)
+ else:
+ vid = 'noresult'
+ view = _cw.vreg['views'].select(vid, _cw, rset=rset)
+ content = view.render()
+ except Exception, exc:
+        content = 'an error occurred while interpreting this rql directive: %r' % exc
set_classes(options)
return [nodes.raw('', content, format='html')], []
@@ -181,7 +187,7 @@
try:
from pygments import highlight
from pygments.lexers import get_lexer_by_name
- from pygments.formatters import HtmlFormatter
+ from pygments.formatters.html import HtmlFormatter
except ImportError:
pygments_directive = None
else:
@@ -200,7 +206,7 @@
context = state.document.settings.context
context._cw.add_css('pygments.css')
except AttributeError:
- # used outside cubicweb
+ # used outside cubicweb XXX use hasattr instead
pass
return [nodes.raw('', parsed, format='html')]
diff -r d8bb8f631d41 -r a4e667270dd4 ext/test/unittest_rest.py
--- a/ext/test/unittest_rest.py Mon Sep 26 18:37:23 2011 +0200
+++ b/ext/test/unittest_rest.py Fri Dec 09 12:08:27 2011 +0100
@@ -63,6 +63,16 @@
        self.assert_(out.endswith('anon</a>'
                                  '</td></tr>\n</tbody></table></div>\n</p>\n'))
+ def test_rql_role_with_vid_empty_rset(self):
+ context = self.context()
+ out = rest_publish(context, ':rql:`Any X WHERE X is CWUser, X login "nono":table`')
+        self.assert_(out.endswith('<p><div class="searchMessage"><strong>'
+                                  'No result matching query</strong></div>\n</p>\n'))
+
+ def test_rql_role_with_unknown_vid(self):
+ context = self.context()
+ out = rest_publish(context, ':rql:`Any X WHERE X is CWUser:toto`')
+        self.assert_(out.startswith("<p>an error occurred while interpreting this rql "
+                                    "directive: ObjectNotFound(u'toto',)</p>"))
+
def test_rql_role_without_vid(self):
context = self.context()
out = rest_publish(context, ':rql:`Any X WHERE X is CWUser`')
diff -r d8bb8f631d41 -r a4e667270dd4 hooks/__init__.py
--- a/hooks/__init__.py Mon Sep 26 18:37:23 2011 +0200
+++ b/hooks/__init__.py Fri Dec 09 12:08:27 2011 +0100
@@ -46,28 +46,26 @@
session.commit()
finally:
session.close()
- self.repo.looping_task(60*60*24, cleanup_old_transactions, self.repo)
+ if self.repo.config['undo-support']:
+ self.repo.looping_task(60*60*24, cleanup_old_transactions,
+ self.repo)
def update_feeds(repo):
- session = repo.internal_session()
- try:
- # don't iter on repo.sources which doesn't include copy based
- # sources (the one we're looking for)
- for source in repo.sources_by_eid.itervalues():
- if (not source.copy_based_source
- or not repo.config.source_enabled(source)
- or not source.config['synchronize']):
- continue
- try:
- stats = source.pull_data(session)
- if stats.get('created'):
- source.info('added %s entities', len(stats['created']))
- if stats.get('updated'):
- source.info('updated %s entities', len(stats['updated']))
- session.commit()
- except Exception, exc:
- session.exception('while trying to update feed %s', source)
- session.rollback()
- session.set_pool()
- finally:
- session.close()
+    # don't iter on repo.sources which doesn't include copy based
+    # sources (the ones we're looking for)
+ for source in repo.sources_by_eid.itervalues():
+ if (not source.copy_based_source
+ or not repo.config.source_enabled(source)
+ or not source.config['synchronize']):
+ continue
+ session = repo.internal_session(safe=True)
+ try:
+ stats = source.pull_data(session)
+ if stats.get('created'):
+ source.info('added %s entities', len(stats['created']))
+ if stats.get('updated'):
+ source.info('updated %s entities', len(stats['updated']))
+ except Exception, exc:
+ session.exception('while trying to update feed %s', source)
+ finally:
+ session.close()
self.repo.looping_task(60, update_feeds, self.repo)
diff -r d8bb8f631d41 -r a4e667270dd4 hooks/metadata.py
--- a/hooks/metadata.py Mon Sep 26 18:37:23 2011 +0200
+++ b/hooks/metadata.py Fri Dec 09 12:08:27 2011 +0100
@@ -23,6 +23,7 @@
from cubicweb.selectors import is_instance
from cubicweb.server import hook
+from cubicweb.server.edition import EditedEntity
class MetaDataHook(hook.Hook):
@@ -41,8 +42,10 @@
def __call__(self):
timestamp = datetime.now()
edited = self.entity.cw_edited
- edited.setdefault('creation_date', timestamp)
- edited.setdefault('modification_date', timestamp)
+ if not edited.get('creation_date'):
+ edited['creation_date'] = timestamp
+ if not edited.get('modification_date'):
+ edited['modification_date'] = timestamp
if not self._cw.get_shared_data('do-not-insert-cwuri'):
cwuri = u'%s%s' % (self._cw.base_url(), self.entity.eid)
edited.setdefault('cwuri', cwuri)
@@ -68,8 +71,9 @@
def precommit_event(self):
session = self.session
relations = [(eid, session.user.eid) for eid in self.get_data()
- # don't consider entities that have been created and
- # deleted in the same transaction
+ # don't consider entities that have been created and deleted in
+ # the same transaction, nor ones where created_by has been
+ # explicitly set
if not session.deleted_in_transaction(eid) and \
not session.entity_from_eid(eid).created_by]
session.add_relations([('created_by', relations)])
@@ -141,3 +145,87 @@
session.repo.system_source.index_entity(
session, session.entity_from_eid(self.eidto))
+
+
+# entity source handling #######################################################
+
+class ChangeEntityUpdateCaches(hook.Operation):
+ oldsource = newsource = entity = None # make pylint happy
+
+ def postcommit_event(self):
+ self.oldsource.reset_caches()
+ repo = self.session.repo
+ entity = self.entity
+ extid = entity.cw_metainformation()['extid']
+ repo._type_source_cache[entity.eid] = (
+ entity.__regid__, self.newsource.uri, None, self.newsource.uri)
+ if self.oldsource.copy_based_source:
+ uri = 'system'
+ else:
+ uri = self.oldsource.uri
+ repo._extid_cache[(extid, uri)] = -entity.eid
+
+class ChangeEntitySourceDeleteHook(MetaDataHook):
+ """support for moving an entity from an external source by watching 'Any
+ cw_source CWSource' relation
+ """
+
+ __regid__ = 'cw.metadata.source-change'
+ __select__ = MetaDataHook.__select__ & hook.match_rtype('cw_source')
+ events = ('before_delete_relation',)
+
+ def __call__(self):
+ if (self._cw.deleted_in_transaction(self.eidfrom)
+ or self._cw.deleted_in_transaction(self.eidto)):
+ return
+ schange = self._cw.transaction_data.setdefault('cw_source_change', {})
+ schange[self.eidfrom] = self.eidto
+
+
+class ChangeEntitySourceAddHook(MetaDataHook):
+ __regid__ = 'cw.metadata.source-change'
+ __select__ = MetaDataHook.__select__ & hook.match_rtype('cw_source')
+ events = ('before_add_relation',)
+
+ def __call__(self):
+ schange = self._cw.transaction_data.get('cw_source_change')
+ if schange is not None and self.eidfrom in schange:
+ newsource = self._cw.entity_from_eid(self.eidto)
+ if newsource.name != 'system':
+ raise Exception('changing source to something else than the '
+ 'system source is unsupported')
+ syssource = newsource.repo_source
+ oldsource = self._cw.entity_from_eid(schange[self.eidfrom])
+ entity = self._cw.entity_from_eid(self.eidfrom)
+ # copy entity if necessary
+ if not oldsource.repo_source.copy_based_source:
+ entity.complete(skip_bytes=False)
+ if not entity.creation_date:
+ entity.cw_attr_cache['creation_date'] = datetime.now()
+ if not entity.modification_date:
+ entity.cw_attr_cache['modification_date'] = datetime.now()
+ entity.cw_attr_cache['cwuri'] = u'%s%s' % (self._cw.base_url(), entity.eid)
+ for rschema, attrschema in entity.e_schema.attribute_definitions():
+ if attrschema == 'Password' and \
+ rschema.rdef(entity.e_schema, attrschema).cardinality[0] == '1':
+ from logilab.common.shellutils import generate_password
+ entity.cw_attr_cache[rschema.type] = generate_password()
+ entity.cw_edited = EditedEntity(entity, **entity.cw_attr_cache)
+ syssource.add_entity(self._cw, entity)
+ # we don't want the moved entity to be reimported later. To
+ # distinguish this state, the trick is to change the associated
+ # record in the 'entities' system table with eid=-eid while leaving
+ # other fields unchanged, and to add a new record with eid=eid,
+            # source='system'. External sources will then consider the case
+            # where `extid2eid` returns a negative eid as 'this entity was known
+ # but has been moved, ignore it'.
+ self._cw.system_sql('UPDATE entities SET eid=-eid WHERE eid=%(eid)s',
+ {'eid': self.eidfrom})
+ attrs = {'type': entity.__regid__, 'eid': entity.eid, 'extid': None,
+ 'source': 'system', 'asource': 'system',
+ 'mtime': datetime.now()}
+ self._cw.system_sql(syssource.sqlgen.insert('entities', attrs), attrs)
+ # register an operation to update repository/sources caches
+ ChangeEntityUpdateCaches(self._cw, entity=entity,
+ oldsource=oldsource.repo_source,
+ newsource=syssource)
diff -r d8bb8f631d41 -r a4e667270dd4 hooks/notification.py
--- a/hooks/notification.py Mon Sep 26 18:37:23 2011 +0200
+++ b/hooks/notification.py Fri Dec 09 12:08:27 2011 +0100
@@ -28,6 +28,8 @@
class RenderAndSendNotificationView(hook.Operation):
"""delay rendering of notification view until precommit"""
+ view = None # make pylint happy
+
def precommit_event(self):
view = self.view
if view.cw_rset is not None and not view.cw_rset:
@@ -191,7 +193,7 @@
def _call(self):
try:
title = self.entity.dc_title()
- except:
+ except Exception:
# may raise an error during deletion process, for instance due to
# missing required relation
title = '#%s' % self.entity.eid
diff -r d8bb8f631d41 -r a4e667270dd4 hooks/syncschema.py
--- a/hooks/syncschema.py Mon Sep 26 18:37:23 2011 +0200
+++ b/hooks/syncschema.py Fri Dec 09 12:08:27 2011 +0100
@@ -42,12 +42,15 @@
TYPE_CONVERTER = { # XXX
'Boolean': bool,
'Int': int,
+ 'BigInt': int,
'Float': float,
'Password': str,
'String': unicode,
'Date' : unicode,
'Datetime' : unicode,
'Time' : unicode,
+ 'TZDatetime' : unicode,
+ 'TZTime' : unicode,
}
# core entity and relation types which can't be removed
@@ -84,7 +87,7 @@
session.system_sql(str('ALTER TABLE %s ADD %s integer'
% (table, column)), rollback_on_failure=False)
session.info('added column %s to table %s', column, table)
- except:
+ except Exception:
# silent exception here, if this error has not been raised because the
# column already exists, index creation will fail anyway
session.exception('error while adding column %s to table %s',
@@ -92,7 +95,7 @@
# create index before alter table which may expectingly fail during test
# (sqlite) while index creation should never fail (test for index existence
# is done by the dbhelper)
- session.pool.source('system').create_index(session, table, column)
+ session.cnxset.source('system').create_index(session, table, column)
session.info('added index on %s(%s)', table, column)
@@ -218,8 +221,8 @@
cwuser_cls = self.session.vreg['etypes'].etype_class('CWUser')
for session in repo._sessions.values():
session.user.__class__ = cwuser_cls
- except:
- self.critical('error while setting schmea', exc_info=True)
+ except Exception:
+ self.critical('error while setting schema', exc_info=True)
def rollback_event(self):
self.precommit_event()
@@ -243,6 +246,7 @@
CWAttribute entities
* add owned_by relation by creating the necessary CWRelation entity
"""
+ entity = None # make pylint happy
def precommit_event(self):
session = self.session
@@ -252,7 +256,7 @@
description=entity.description)
eschema = schema.add_entity_type(etype)
# create the necessary table
- tablesql = y2sql.eschema2sql(session.pool.source('system').dbhelper,
+ tablesql = y2sql.eschema2sql(session.cnxset.source('system').dbhelper,
eschema, prefix=SQL_PREFIX)
for sql in tablesql.split(';'):
if sql.strip():
@@ -289,7 +293,7 @@
self.session.vreg.schema.rename_entity_type(oldname, newname)
# we need sql to operate physical changes on the system database
sqlexec = self.session.system_sql
- dbhelper= self.session.pool.source('system').dbhelper
+ dbhelper= self.session.cnxset.source('system').dbhelper
sql = dbhelper.sql_rename_table(SQL_PREFIX+oldname,
SQL_PREFIX+newname)
sqlexec(sql)
@@ -433,7 +437,7 @@
# update the in-memory schema first
rdefdef = self.init_rdef(**props)
# then make necessary changes to the system source database
- syssource = session.pool.source('system')
+ syssource = session.cnxset.source('system')
attrtype = y2sql.type_from_constraints(
syssource.dbhelper, rdefdef.object, rdefdef.constraints)
# XXX should be moved somehow into lgdb: sqlite doesn't support to
@@ -606,7 +610,7 @@
self.oldvalues = dict( (attr, getattr(rdef, attr)) for attr in self.values)
rdef.update(self.values)
# then make necessary changes to the system source database
- syssource = session.pool.source('system')
+ syssource = session.cnxset.source('system')
if 'indexed' in self.values:
syssource.update_rdef_indexed(session, rdef)
self.indexed_changed = True
@@ -624,7 +628,7 @@
# revert changes on in memory schema
self.rdef.update(self.oldvalues)
# revert changes on database
- syssource = self.session.pool.source('system')
+ syssource = self.session.cnxset.source('system')
if self.indexed_changed:
syssource.update_rdef_indexed(self.session, self.rdef)
if self.null_allowed_changed:
@@ -652,7 +656,7 @@
rdef.constraints.remove(self.oldcstr)
# then update database: alter the physical schema on size/unique
# constraint changes
- syssource = session.pool.source('system')
+ syssource = session.cnxset.source('system')
cstrtype = self.oldcstr.type()
if cstrtype == 'SizeConstraint':
syssource.update_rdef_column(session, rdef)
@@ -668,7 +672,7 @@
if self.oldcstr is not None:
self.rdef.constraints.append(self.oldcstr)
# revert changes on database
- syssource = self.session.pool.source('system')
+ syssource = self.session.cnxset.source('system')
if self.size_cstr_changed:
syssource.update_rdef_column(self.session, self.rdef)
if self.unique_changed:
@@ -699,7 +703,7 @@
rdef.constraints.append(newcstr)
# then update database: alter the physical schema on size/unique
# constraint changes
- syssource = session.pool.source('system')
+ syssource = session.cnxset.source('system')
if cstrtype == 'SizeConstraint' and (oldcstr is None or
oldcstr.max != newcstr.max):
syssource.update_rdef_column(session, rdef)
@@ -716,7 +720,7 @@
prefix = SQL_PREFIX
table = '%s%s' % (prefix, self.entity.constraint_of[0].name)
cols = ['%s%s' % (prefix, r.name) for r in self.entity.relations]
- dbhelper= session.pool.source('system').dbhelper
+ dbhelper = session.cnxset.source('system').dbhelper
sqls = dbhelper.sqls_create_multicol_unique_index(table, cols)
for sql in sqls:
session.system_sql(sql)
@@ -736,7 +740,7 @@
session = self.session
prefix = SQL_PREFIX
table = '%s%s' % (prefix, self.entity.type)
- dbhelper= session.pool.source('system').dbhelper
+ dbhelper = session.cnxset.source('system').dbhelper
cols = ['%s%s' % (prefix, c) for c in self.cols]
sqls = dbhelper.sqls_drop_multicol_unique_index(table, cols)
for sql in sqls:
@@ -756,6 +760,8 @@
class MemSchemaCWETypeDel(MemSchemaOperation):
"""actually remove the entity type from the instance's schema"""
+ etype = None # make pylint happy
+
def postcommit_event(self):
# del_entity_type also removes entity's relations
self.session.vreg.schema.del_entity_type(self.etype)
@@ -763,6 +769,8 @@
class MemSchemaCWRTypeAdd(MemSchemaOperation):
"""actually add the relation type to the instance's schema"""
+ rtypedef = None # make pylint happy
+
def precommit_event(self):
self.session.vreg.schema.add_relation_type(self.rtypedef)
@@ -772,6 +780,8 @@
class MemSchemaCWRTypeDel(MemSchemaOperation):
"""actually remove the relation type from the instance's schema"""
+ rtype = None # make pylint happy
+
def postcommit_event(self):
try:
self.session.vreg.schema.del_relation_type(self.rtype)
@@ -783,9 +793,10 @@
class MemSchemaPermissionAdd(MemSchemaOperation):
"""synchronize schema when a *_permission relation has been added on a group
"""
+ eid = action = group_eid = expr = None # make pylint happy
def precommit_event(self):
- """the observed connections pool has been commited"""
+ """the observed connections set has been committed"""
try:
erschema = self.session.vreg.schema.schema_by_eid(self.eid)
except KeyError:
@@ -793,7 +804,7 @@
self.warning('no schema for %s', self.eid)
return
perms = list(erschema.action_permissions(self.action))
- if hasattr(self, 'group_eid'):
+ if self.group_eid is not None:
perm = self.session.entity_from_eid(self.group_eid).name
else:
perm = erschema.rql_expression(self.expr)
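
The switch from `hasattr(self, 'group_eid')` to `self.group_eid is not None` is forced by the class-level defaults introduced above: once the attribute always exists on the class, hasattr can no longer distinguish "not passed to the operation". Illustration:

    class Op(object):
        group_eid = None  # default added for pylint's benefit

    op = Op()
    print(hasattr(op, 'group_eid'))  # True, although nothing was ever set
    print(op.group_eid is not None)  # False: still detects the unset case
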
@@ -814,7 +825,7 @@
"""
def precommit_event(self):
- """the observed connections pool has been commited"""
+ """the observed connections set has been committed"""
try:
erschema = self.session.vreg.schema.schema_by_eid(self.eid)
except KeyError:
@@ -827,7 +838,7 @@
self.action in ('delete', 'add'): # XXX 3.6.1 migration
return
perms = list(erschema.action_permissions(self.action))
- if hasattr(self, 'group_eid'):
+ if self.group_eid is not None:
perm = self.session.entity_from_eid(self.group_eid).name
else:
perm = erschema.rql_expression(self.expr)
@@ -842,6 +853,7 @@
class MemSchemaSpecializesAdd(MemSchemaOperation):
+ etypeeid = parentetypeeid = None # make pylint happy
def precommit_event(self):
eschema = self.session.vreg.schema.schema_by_eid(self.etypeeid)
@@ -853,6 +865,7 @@
class MemSchemaSpecializesDel(MemSchemaOperation):
+ etypeeid = parentetypeeid = None # make pylint happy
def precommit_event(self):
try:
@@ -1228,7 +1241,7 @@
source.fti_index_entities(session, [container])
if to_reindex:
# Transaction has already been committed
- session.pool.commit()
+ session.cnxset.commit()
diff -r d8bb8f631d41 -r a4e667270dd4 hooks/syncsession.py
--- a/hooks/syncsession.py Mon Sep 26 18:37:23 2011 +0200
+++ b/hooks/syncsession.py Fri Dec 09 12:08:27 2011 +0100
@@ -40,7 +40,8 @@
class _GroupOperation(hook.Operation):
"""base class for group operation"""
- geid = None
+ cnxuser = None # make pylint happy
+
def __init__(self, session, *args, **kwargs):
"""override to get the group name before actual groups manipulation:
@@ -55,8 +56,9 @@
class _DeleteGroupOp(_GroupOperation):
"""synchronize user when a in_group relation has been deleted"""
+
def postcommit_event(self):
- """the observed connections pool has been commited"""
+ """the observed connections set has been committed"""
groups = self.cnxuser.groups
try:
groups.remove(self.group)
@@ -67,7 +69,7 @@
class _AddGroupOp(_GroupOperation):
"""synchronize user when a in_group relation has been added"""
def postcommit_event(self):
- """the observed connections pool has been commited"""
+ """the observed connections set has been committed"""
groups = self.cnxuser.groups
if self.group in groups:
self.warning('user %s already in group %s', self.cnxuser,
@@ -97,7 +99,7 @@
hook.Operation.__init__(self, session)
def postcommit_event(self):
- """the observed connections pool has been commited"""
+ """the observed connections set has been committed"""
try:
self.session.repo.close(self.cnxid)
except BadConnectionId:
@@ -117,12 +119,12 @@
# CWProperty hooks #############################################################
-
class _DelCWPropertyOp(hook.Operation):
"""a user's custom property has been deleted"""
+ cwpropdict = key = None # make pylint happy
def postcommit_event(self):
- """the observed connections pool has been commited"""
+ """the observed connections set has been committed"""
try:
del self.cwpropdict[self.key]
except KeyError:
@@ -131,17 +133,19 @@
class _ChangeCWPropertyOp(hook.Operation):
"""a user's custom property has been added/changed"""
+ cwpropdict = key = value = None # make pylint happy
def postcommit_event(self):
- """the observed connections pool has been commited"""
+ """the observed connections set has been committed"""
self.cwpropdict[self.key] = self.value
class _AddCWPropertyOp(hook.Operation):
"""a user's custom property has been added/changed"""
+ cwprop = None # make pylint happy
def postcommit_event(self):
- """the observed connections pool has been commited"""
+ """the observed connections set has been committed"""
cwprop = self.cwprop
if not cwprop.for_user:
self.session.vreg['propertyvalues'][cwprop.pkey] = cwprop.value
diff -r d8bb8f631d41 -r a4e667270dd4 hooks/syncsources.py
--- a/hooks/syncsources.py Mon Sep 26 18:37:23 2011 +0200
+++ b/hooks/syncsources.py Fri Dec 09 12:08:27 2011 +0100
@@ -19,6 +19,7 @@
from socket import gethostname
+from logilab.common.decorators import clear_cache
from yams.schema import role_name
from cubicweb import ValidationError
@@ -30,7 +31,10 @@
category = 'cw.sources'
+# repo sources synchronization #################################################
+
class SourceAddedOp(hook.Operation):
+ entity = None # make pylint happy
def postcommit_event(self):
self.session.repo.add_source(self.entity)
@@ -51,7 +55,8 @@
class SourceRemovedOp(hook.Operation):
- def precommit_event(self):
+ uri = None # make pylint happy
+ def postcommit_event(self):
self.session.repo.remove_source(self.uri)
class SourceRemovedHook(SourceHook):
@@ -64,25 +69,59 @@
SourceRemovedOp(self._cw, uri=self.entity.name)
-class SourceUpdatedOp(hook.DataOperationMixIn, hook.Operation):
+class SourceConfigUpdatedOp(hook.DataOperationMixIn, hook.Operation):
def precommit_event(self):
self.__processed = []
for source in self.get_data():
- conf = source.repo_source.check_config(source)
- self.__processed.append( (source, conf) )
+ if not self.session.deleted_in_transaction(source.eid):
+ conf = source.repo_source.check_config(source)
+ self.__processed.append( (source, conf) )
def postcommit_event(self):
for source, conf in self.__processed:
source.repo_source.update_config(source, conf)
+
+class SourceRenamedOp(hook.LateOperation):
+ oldname = newname = None # make pylint happy
+
+ def precommit_event(self):
+ source = self.session.repo.sources_by_uri[self.oldname]
+ if source.copy_based_source:
+ sql = 'UPDATE entities SET asource=%(newname)s WHERE asource=%(oldname)s'
+ else:
+ sql = 'UPDATE entities SET source=%(newname)s, asource=%(newname)s WHERE source=%(oldname)s'
+ self.session.system_sql(sql, {'oldname': self.oldname,
+ 'newname': self.newname})
+
+ def postcommit_event(self):
+ repo = self.session.repo
+ # XXX race condition
+ source = repo.sources_by_uri.pop(self.oldname)
+ source.uri = self.newname
+ source.public_config['uri'] = self.newname
+ repo.sources_by_uri[self.newname] = source
+ repo._type_source_cache.clear()
+ clear_cache(repo, 'source_defs')
+ if not source.copy_based_source:
+ repo._extid_cache.clear()
+ repo._clear_planning_caches()
+ for cnxset in repo.cnxsets:
+ cnxset.source_cnxs[self.oldname] = cnxset.source_cnxs.pop(self.oldname)
+
+
class SourceUpdatedHook(SourceHook):
__regid__ = 'cw.sources.configupdate'
__select__ = SourceHook.__select__ & is_instance('CWSource')
- events = ('after_update_entity',)
+ events = ('before_update_entity',)
def __call__(self):
if 'config' in self.entity.cw_edited:
- SourceUpdatedOp.get_instance(self._cw).add_data(self.entity)
+ SourceConfigUpdatedOp.get_instance(self._cw).add_data(self.entity)
+ if 'name' in self.entity.cw_edited:
+ oldname, newname = self.entity.cw_edited.oldnewvalue('name')
+ SourceRenamedOp(self._cw, oldname=oldname, newname=newname)
+
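
Moving the hook to `before_update_entity` is what makes `oldnewvalue()` usable: at that point the edited-attributes mapping still knows both the previous and the pending value. A rough sketch with a hypothetical stand-in for `cw_edited`:

    class FakeEdited(dict):
        """hypothetical stand-in for entity.cw_edited in a before_update hook"""
        def __init__(self, attr, old, new):
            dict.__init__(self, {attr: new})
            self._old = {attr: old}
        def oldnewvalue(self, attr):
            return self._old[attr], self[attr]

    edited = FakeEdited('name', 'myfeed', 'myfeed-renamed')
    if 'name' in edited:
        oldname, newname = edited.oldnewvalue('name')
        print('rename source %s to %s' % (oldname, newname))
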
class SourceHostConfigUpdatedHook(SourceHook):
__regid__ = 'cw.sources.hostconfigupdate'
@@ -94,21 +133,23 @@
not 'config' in self.entity.cw_edited:
return
try:
- SourceUpdatedOp.get_instance(self._cw).add_data(self.entity.cwsource)
+ SourceConfigUpdatedOp.get_instance(self._cw).add_data(self.entity.cwsource)
except IndexError:
# XXX no source linked to the host config yet
pass
-# source mapping synchronization. Expect cw_for_source/cw_schema are immutable
-# relations (i.e. can't change from a source or schema to another).
+# source mapping synchronization ###############################################
+#
+# Expect cw_for_source/cw_schema are immutable relations (i.e. can't change from
+# a source or schema to another).
-class SourceMappingDeleteHook(SourceHook):
+class SourceMappingImmutableHook(SourceHook):
"""check cw_for_source and cw_schema are immutable relations
XXX empty delete perms would be enough?
"""
- __regid__ = 'cw.sources.delschemaconfig'
+ __regid__ = 'cw.sources.mapping.immutable'
__select__ = SourceHook.__select__ & hook.match_rtype('cw_for_source', 'cw_schema')
events = ('before_add_relation',)
def __call__(self):
diff -r d8bb8f631d41 -r a4e667270dd4 hooks/test/unittest_syncschema.py
--- a/hooks/test/unittest_syncschema.py Mon Sep 26 18:37:23 2011 +0200
+++ b/hooks/test/unittest_syncschema.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -36,9 +36,9 @@
self.__class__.schema_eids = schema_eids_idx(self.repo.schema)
def index_exists(self, etype, attr, unique=False):
- self.session.set_pool()
- dbhelper = self.session.pool.source('system').dbhelper
- sqlcursor = self.session.pool['system']
+ self.session.set_cnxset()
+ dbhelper = self.session.cnxset.source('system').dbhelper
+ sqlcursor = self.session.cnxset['system']
return dbhelper.index_exists(sqlcursor, SQL_PREFIX + etype, SQL_PREFIX + attr, unique=unique)
def _set_perms(self, eid):
@@ -57,9 +57,9 @@
def test_base(self):
schema = self.repo.schema
- self.session.set_pool()
- dbhelper = self.session.pool.source('system').dbhelper
- sqlcursor = self.session.pool['system']
+ self.session.set_cnxset()
+ dbhelper = self.session.cnxset.source('system').dbhelper
+ sqlcursor = self.session.cnxset['system']
self.failIf(schema.has_entity('Societe2'))
self.failIf(schema.has_entity('concerne2'))
# schema should be update on insertion (after commit)
@@ -170,9 +170,9 @@
# schema modification hooks tests #########################################
def test_uninline_relation(self):
- self.session.set_pool()
- dbhelper = self.session.pool.source('system').dbhelper
- sqlcursor = self.session.pool['system']
+ self.session.set_cnxset()
+ dbhelper = self.session.cnxset.source('system').dbhelper
+ sqlcursor = self.session.cnxset['system']
self.failUnless(self.schema['state_of'].inlined)
try:
self.execute('SET X inlined FALSE WHERE X name "state_of"')
@@ -182,7 +182,7 @@
self.failIf(self.index_exists('State', 'state_of'))
rset = self.execute('Any X, Y WHERE X state_of Y')
self.assertEqual(len(rset), 2) # user states
- except:
+ except Exception:
import traceback
traceback.print_exc()
finally:
@@ -195,9 +195,9 @@
self.assertEqual(len(rset), 2)
def test_indexed_change(self):
- self.session.set_pool()
- dbhelper = self.session.pool.source('system').dbhelper
- sqlcursor = self.session.pool['system']
+ self.session.set_cnxset()
+ dbhelper = self.session.cnxset.source('system').dbhelper
+ sqlcursor = self.session.cnxset['system']
try:
self.execute('SET X indexed FALSE WHERE X relation_type R, R name "name"')
self.failUnless(self.schema['name'].rdef('Workflow', 'String').indexed)
@@ -214,9 +214,9 @@
self.failUnless(self.index_exists('Workflow', 'name'))
def test_unique_change(self):
- self.session.set_pool()
- dbhelper = self.session.pool.source('system').dbhelper
- sqlcursor = self.session.pool['system']
+ self.session.set_cnxset()
+ dbhelper = self.session.cnxset.source('system').dbhelper
+ sqlcursor = self.session.cnxset['system']
try:
self.execute('INSERT CWConstraint X: X cstrtype CT, DEF constrained_by X '
'WHERE CT name "UniqueConstraint", DEF relation_type RT, DEF from_entity E,'
diff -r d8bb8f631d41 -r a4e667270dd4 hooks/workflow.py
--- a/hooks/workflow.py Mon Sep 26 18:37:23 2011 +0200
+++ b/hooks/workflow.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -45,6 +45,7 @@
class _SetInitialStateOp(hook.Operation):
"""make initial state be a default state"""
+ entity = None # make pylint happy
def precommit_event(self):
session = self.session
@@ -61,6 +62,7 @@
class _FireAutotransitionOp(hook.Operation):
"""try to fire auto transition after state changes"""
+ entity = None # make pylint happy
def precommit_event(self):
entity = self.entity
@@ -73,6 +75,7 @@
class _WorkflowChangedOp(hook.Operation):
"""fix entity current state when changing its workflow"""
+ eid = wfeid = None # make pylint happy
def precommit_event(self):
# notice that enforcement that new workflow apply to the entity's type is
@@ -109,6 +112,7 @@
class _CheckTrExitPoint(hook.Operation):
+ treid = None # make pylint happy
def precommit_event(self):
tr = self.session.entity_from_eid(self.treid)
@@ -122,6 +126,7 @@
class _SubWorkflowExitOp(hook.Operation):
+ forentity = trinfo = None # make pylint happy
def precommit_event(self):
session = self.session
@@ -148,7 +153,7 @@
class WorkflowHook(hook.Hook):
__abstract__ = True
- category = 'workflow'
+ category = 'metadata'
class SetInitialStateHook(WorkflowHook):
@@ -160,21 +165,15 @@
_SetInitialStateOp(self._cw, entity=self.entity)
-class PrepareStateChangeHook(WorkflowHook):
- """record previous state information"""
- __regid__ = 'cwdelstate'
- __select__ = WorkflowHook.__select__ & hook.match_rtype('in_state')
- events = ('before_delete_relation',)
+class FireTransitionHook(WorkflowHook):
+ """check the transition is allowed and add missing information into the
+ TrInfo entity.
- def __call__(self):
- self._cw.transaction_data.setdefault('pendingrelations', []).append(
- (self.eidfrom, self.rtype, self.eidto))
-
-
-class FireTransitionHook(WorkflowHook):
- """check the transition is allowed, add missing information. Expect that:
+ Expect that:
* wf_info_for inlined relation is set
* by_transition or to_state (managers only) inlined relation is set
+
+ Check whether an automatic transition should be fired at the end
"""
__regid__ = 'wffiretransition'
__select__ = WorkflowHook.__select__ & is_instance('TrInfo')
@@ -273,7 +272,7 @@
class FiredTransitionHook(WorkflowHook):
- """change related entity state"""
+ """change related entity state and handle exit of subworkflow"""
__regid__ = 'wffiretransition'
__select__ = WorkflowHook.__select__ & is_instance('TrInfo')
events = ('after_add_entity',)
@@ -296,6 +295,7 @@
__regid__ = 'wfcheckinstate'
__select__ = WorkflowHook.__select__ & hook.match_rtype('in_state')
events = ('before_add_relation',)
+ category = 'integrity'
def __call__(self):
session = self._cw
diff -r d8bb8f631d41 -r a4e667270dd4 i18n/de.po
--- a/i18n/de.po Mon Sep 26 18:37:23 2011 +0200
+++ b/i18n/de.po Fri Dec 09 12:08:27 2011 +0100
@@ -162,6 +162,11 @@
msgstr ""
#, python-format
+msgid ""
+"'%s' action for in_state relation should at least have 'linkattr=name' option"
+msgstr ""
+
+#, python-format
msgid "'%s' action requires 'linkattr' option"
msgstr ""
@@ -220,6 +225,9 @@
"können ein vollständiges Schema mit Meta-Daten anzeigen."
"</div>"
+msgid ""
+msgstr ""
+
msgid ""
msgstr ""
@@ -256,6 +264,12 @@
msgid "BaseTransition_plural"
msgstr "Übergänge (abstrakt)"
+msgid "BigInt"
+msgstr ""
+
+msgid "BigInt_plural"
+msgstr ""
+
msgid "Bookmark"
msgstr "Lesezeichen"
@@ -1073,12 +1087,6 @@
msgid "add a CWRType"
msgstr "einen Relationstyp hinzufügen"
-msgid "add a CWSource"
-msgstr ""
-
-msgid "add a CWSourceSchemaConfig"
-msgstr ""
-
msgctxt "inlined:CWUser.use_email.subject"
msgid "add a EmailAddress"
msgstr "Email-Adresse hinzufügen"
@@ -1153,6 +1161,10 @@
msgid "allowed transitions from this state"
msgstr "erlaubte Übergänge von diesem Zustand"
+#, python-format
+msgid "allowed values for \"action\" are %s"
+msgstr ""
+
msgid "allowed_transition"
msgstr "erlaubter Übergang"
@@ -1235,6 +1247,9 @@
msgid "auto"
msgstr "automatisch"
+msgid "autocomputed attribute used to ensure transition coherency"
+msgstr ""
+
msgid "automatic"
msgstr "automatisch"
@@ -1787,10 +1802,10 @@
msgid "ctxcomponents_edit_box_description"
msgstr "Box mit verfügbaren Aktionen für die angezeigten Daten"
-msgid "ctxcomponents_facet.filters"
+msgid "ctxcomponents_facet.filterbox"
msgstr "Filter"
-msgid "ctxcomponents_facet.filters_description"
+msgid "ctxcomponents_facet.filterbox_description"
msgstr "Box mit Filter für aktuelle Suchergebnis-Funktionalität"
msgid "ctxcomponents_logo"
@@ -1896,10 +1911,6 @@
msgid "cw_schema_object"
msgstr ""
-msgctxt "CWAttribute"
-msgid "cw_schema_object"
-msgstr ""
-
msgctxt "CWEType"
msgid "cw_schema_object"
msgstr ""
@@ -1957,6 +1968,9 @@
msgid "data directory url"
msgstr "URL des Daten-Pools"
+msgid "data model schema"
+msgstr "Schema der Website"
+
msgid "data sources"
msgstr ""
@@ -2274,9 +2288,6 @@
msgid "eid"
msgstr ""
-msgid "email address to use for notification"
-msgstr "E-Mail-Adresse für Mitteilungen."
-
msgid "emails successfully sent"
msgstr "E-Mails erfolgreich versandt."
@@ -2388,6 +2399,9 @@
msgid "external page"
msgstr "externe Seite"
+msgid "facet-loading-msg"
+msgstr ""
+
msgid "facet.filters"
msgstr ""
@@ -2572,9 +2586,6 @@
"generische Relation, die anzeigt, dass eine Entität mit einer anderen Web-"
"Ressource identisch ist (siehe http://www.w3.org/TR/owl-ref/#sameAs-def)."
-msgid "go back to the index page"
-msgstr "Zurück zur Index-Seite"
-
msgid "granted to groups"
msgstr "an Gruppen gewährt"
@@ -2605,6 +2616,18 @@
msgid "groups"
msgstr "Gruppen"
+msgid "groups allowed to add entities/relations of this type"
+msgstr ""
+
+msgid "groups allowed to delete entities/relations of this type"
+msgstr ""
+
+msgid "groups allowed to read entities/relations of this type"
+msgstr ""
+
+msgid "groups allowed to update entities/relations of this type"
+msgstr ""
+
msgid "groups grant permissions to the user"
msgstr "die Gruppen geben dem Nutzer Rechte"
@@ -2727,6 +2750,13 @@
msgid "in_state_object"
msgstr "Zustand von"
+msgid "in_synchronization"
+msgstr ""
+
+msgctxt "CWSource"
+msgid "in_synchronization"
+msgstr ""
+
msgid "incontext"
msgstr "im Kontext"
@@ -3184,6 +3214,15 @@
msgid "no associated permissions"
msgstr "keine entsprechende Berechtigung"
+msgid "no content next link"
+msgstr ""
+
+msgid "no content prev link"
+msgstr ""
+
+msgid "no edited fields specified"
+msgstr ""
+
#, python-format
msgid "no edited fields specified for entity %s"
msgstr "kein Eingabefeld spezifiziert für Entität %s"
@@ -3581,6 +3620,18 @@
msgid "right"
msgstr "rechts"
+msgid "rql expression allowing to add entities/relations of this type"
+msgstr ""
+
+msgid "rql expression allowing to delete entities/relations of this type"
+msgstr ""
+
+msgid "rql expression allowing to read entities/relations of this type"
+msgstr ""
+
+msgid "rql expression allowing to update entities/relations of this type"
+msgstr ""
+
msgid "rql expressions"
msgstr "RQL-Ausdrücke"
@@ -3617,9 +3668,6 @@
msgid "searching for"
msgstr "Suche nach"
-msgid "secondary"
-msgstr "sekundär"
-
msgid "security"
msgstr "Sicherheit"
@@ -3708,9 +3756,6 @@
msgid "site documentation"
msgstr "Dokumentation der Website"
-msgid "site schema"
-msgstr "Schema der Website"
-
msgid "site title"
msgstr "Titel der Website"
@@ -3722,9 +3767,6 @@
msgid "siteinfo"
msgstr ""
-msgid "some errors occurred:"
-msgstr "Einige Fehler sind aufgetreten"
-
msgid "some later transaction(s) touch entity, undo them first"
msgstr ""
"Eine oder mehrere frühere Transaktion(en) betreffen die Entität. Machen Sie "
@@ -3762,6 +3804,11 @@
msgid "specifying %s is mandatory"
msgstr ""
+msgid ""
+"start timestamp of the currently in synchronization, or NULL when no "
+"synchronization in progress."
+msgstr ""
+
msgid "startup views"
msgstr "Start-Ansichten"
@@ -3935,6 +3982,12 @@
msgstr ""
"Der Wert \"%s\" wird bereits benutzt, bitte verwenden Sie einen anderen Wert"
+msgid "there is no next page"
+msgstr ""
+
+msgid "there is no previous page"
+msgstr ""
+
msgid "this action is not reversible!"
msgstr "Achtung! Diese Aktion ist unumkehrbar."
@@ -4035,7 +4088,7 @@
msgstr ""
msgid "transaction undone"
-msgstr "Transaktion rückgängig gemacht"
+msgstr ""
#, python-format
msgid "transition %(tr)s isn't allowed from %(st)s"
@@ -4290,6 +4343,9 @@
msgid "user preferences"
msgstr "Nutzereinstellungen"
+msgid "user's email account"
+msgstr ""
+
msgid "users"
msgstr "Nutzer"
@@ -4319,30 +4375,30 @@
msgid "value"
msgstr "Wert"
+#, python-format
+msgid "value %(value)s must be %(op)s %(boundary)s"
+msgstr ""
+
+#, python-format
+msgid "value %(value)s must be <= %(boundary)s"
+msgstr ""
+
+#, python-format
+msgid "value %(value)s must be >= %(boundary)s"
+msgstr ""
+
msgid "value associated to this key is not editable manually"
msgstr ""
"Der mit diesem Schlüssel verbundene Wert kann nicht manuell geändert "
"werden."
#, python-format
-msgid "value must be %(op)s %(boundary)s"
-msgstr "Der Wert muss %(op)s %(boundary)s sein."
-
-#, python-format
-msgid "value must be <= %(boundary)s"
-msgstr "Der Wert muss <= %(boundary)s sein."
+msgid "value should have maximum size of %s but found %s"
+msgstr ""
#, python-format
-msgid "value must be >= %(boundary)s"
-msgstr "Der Wert muss >= %(boundary)s sein."
-
-#, python-format
-msgid "value should have maximum size of %s"
-msgstr "Der Wert darf höchstens %s betragen."
-
-#, python-format
-msgid "value should have minimum size of %s"
-msgstr "Der Wert muss mindestens %s betragen."
+msgid "value should have minimum size of %s but found %s"
+msgstr ""
msgid "vcard"
msgstr "VCard"
@@ -4488,76 +4544,3 @@
#, python-format
msgid "you should un-inline relation %s which is supported and may be crossed "
msgstr ""
-
-#~ msgid "Attributes with non default permissions:"
-#~ msgstr "Attribute mit nicht-standard-Berechtigungen"
-
-#~ msgid "Entity types"
-#~ msgstr "Entitätstypen"
-
-#~ msgid "Index"
-#~ msgstr "Index"
-
-#~ msgid "Permissions for entity types"
-#~ msgstr "Berechtigungen für Entitätstypen"
-
-#~ msgid "Permissions for relations"
-#~ msgstr "Berechtigungen für Relationen"
-
-#~ msgid "Relation types"
-#~ msgstr "Relationstypen"
-
-#~ msgid "am/pm calendar (month)"
-#~ msgstr "am/pm Kalender (Monat)"
-
-#~ msgid "am/pm calendar (semester)"
-#~ msgstr "am/pm Kalender (Halbjahr)"
-
-#~ msgid "am/pm calendar (week)"
-#~ msgstr "am/pm Kalender (Woche)"
-
-#~ msgid "am/pm calendar (year)"
-#~ msgstr "am/pm Kalender (Jahr)"
-
-#~ msgid "application entities"
-#~ msgstr "Anwendungs-Entitäten"
-
-#~ msgid "calendar (month)"
-#~ msgstr "Kalender (monatlich)"
-
-#~ msgid "calendar (semester)"
-#~ msgstr "Kalender (halbjährlich)"
-
-#~ msgid "calendar (week)"
-#~ msgstr "Kalender (wöchentlich)"
-
-#~ msgid "calendar (year)"
-#~ msgstr "Kalender (jährlich)"
-
-#~ msgid ""
-#~ "can't set inlined=%(inlined)s, %(stype)s %(rtype)s %(otype)s has "
-#~ "cardinality=%(card)s"
-#~ msgstr ""
-#~ "Kann 'inlined' = %(inlined)s nicht zuweisen, %(stype)s %(rtype)s %(otype)"
-#~ "s hat die Kardinalität %(card)s"
-
-#~ msgid "create an index page"
-#~ msgstr "Eine Index-Seite anlegen"
-
-#~ msgid "edit the index page"
-#~ msgstr "Index-Seite bearbeiten"
-
-#~ msgid "schema entities"
-#~ msgstr "Entitäten, die das Schema definieren"
-
-#~ msgid "schema-security"
-#~ msgstr "Rechte"
-
-#~ msgid "system entities"
-#~ msgstr "System-Entitäten"
-
-#~ msgid "timestamp of the latest source synchronization."
-#~ msgstr "Zeitstempel der letzten Synchronisierung mit der Quelle."
-
-#~ msgid "up"
-#~ msgstr "nach oben"
diff -r d8bb8f631d41 -r a4e667270dd4 i18n/en.po
--- a/i18n/en.po Mon Sep 26 18:37:23 2011 +0200
+++ b/i18n/en.po Fri Dec 09 12:08:27 2011 +0100
@@ -5,7 +5,7 @@
msgstr ""
"Project-Id-Version: 2.0\n"
"POT-Creation-Date: 2006-01-12 17:35+CET\n"
-"PO-Revision-Date: 2010-09-15 14:55+0200\n"
+"PO-Revision-Date: 2011-04-29 12:57+0200\n"
"Last-Translator: Sylvain Thenault \n"
"Language-Team: English \n"
"Language: en\n"
@@ -154,6 +154,11 @@
msgstr ""
#, python-format
+msgid ""
+"'%s' action for in_state relation should at least have 'linkattr=name' option"
+msgstr ""
+
+#, python-format
msgid "'%s' action requires 'linkattr' option"
msgstr ""
@@ -209,6 +214,9 @@
"can also display a complete schema with meta-data."
msgstr ""
+msgid ""
+msgstr ""
+
msgid ""
msgstr ""
@@ -245,6 +253,12 @@
msgid "BaseTransition_plural"
msgstr "Transitions (abstract)"
+msgid "BigInt"
+msgstr "Big integer"
+
+msgid "BigInt_plural"
+msgstr "Big integers"
+
msgid "Bookmark"
msgstr "Bookmark"
@@ -503,7 +517,7 @@
msgstr "Interval"
msgid "IntervalBoundConstraint"
-msgstr "interval constraint"
+msgstr "Interval constraint"
msgid "Interval_plural"
msgstr "Intervals"
@@ -1033,12 +1047,6 @@
msgid "add a CWRType"
msgstr "add a relation type"
-msgid "add a CWSource"
-msgstr "add a source"
-
-msgid "add a CWSourceSchemaConfig"
-msgstr "add an item to mapping "
-
msgctxt "inlined:CWUser.use_email.subject"
msgid "add a EmailAddress"
msgstr "add an email address"
@@ -1111,6 +1119,10 @@
msgid "allowed transitions from this state"
msgstr ""
+#, python-format
+msgid "allowed values for \"action\" are %s"
+msgstr ""
+
msgid "allowed_transition"
msgstr "allowed transition"
@@ -1190,6 +1202,9 @@
msgid "auto"
msgstr "automatic"
+msgid "autocomputed attribute used to ensure transition coherency"
+msgstr ""
+
msgid "automatic"
msgstr ""
@@ -1742,10 +1757,10 @@
msgid "ctxcomponents_edit_box_description"
msgstr "box listing the applicable actions on the displayed data"
-msgid "ctxcomponents_facet.filters"
+msgid "ctxcomponents_facet.filterbox"
msgstr "facets box"
-msgid "ctxcomponents_facet.filters_description"
+msgid "ctxcomponents_facet.filterbox_description"
msgstr "box providing filter within current search results functionality"
msgid "ctxcomponents_logo"
@@ -1851,10 +1866,6 @@
msgid "cw_schema_object"
msgstr "mapped by"
-msgctxt "CWAttribute"
-msgid "cw_schema_object"
-msgstr "mapped by"
-
msgctxt "CWEType"
msgid "cw_schema_object"
msgstr "mapped by"
@@ -1912,6 +1923,9 @@
msgid "data directory url"
msgstr ""
+msgid "data model schema"
+msgstr ""
+
msgid "data sources"
msgstr ""
@@ -2219,9 +2233,6 @@
msgid "eid"
msgstr ""
-msgid "email address to use for notification"
-msgstr ""
-
msgid "emails successfully sent"
msgstr ""
@@ -2330,6 +2341,9 @@
msgid "external page"
msgstr ""
+msgid "facet-loading-msg"
+msgstr "processing, please wait"
+
msgid "facet.filters"
msgstr "filter"
@@ -2512,9 +2526,6 @@
"object as a local one: http://www.w3.org/TR/owl-ref/#sameAs-def"
msgstr ""
-msgid "go back to the index page"
-msgstr ""
-
msgid "granted to groups"
msgstr ""
@@ -2540,6 +2551,18 @@
msgid "groups"
msgstr ""
+msgid "groups allowed to add entities/relations of this type"
+msgstr ""
+
+msgid "groups allowed to delete entities/relations of this type"
+msgstr ""
+
+msgid "groups allowed to read entities/relations of this type"
+msgstr ""
+
+msgid "groups allowed to update entities/relations of this type"
+msgstr ""
+
msgid "groups grant permissions to the user"
msgstr ""
@@ -2654,6 +2677,13 @@
msgid "in_state_object"
msgstr "state of"
+msgid "in_synchronization"
+msgstr "in synchronization"
+
+msgctxt "CWSource"
+msgid "in_synchronization"
+msgstr "in synchronization"
+
msgid "incontext"
msgstr "in-context"
@@ -3095,6 +3125,15 @@
msgid "no associated permissions"
msgstr ""
+msgid "no content next link"
+msgstr ""
+
+msgid "no content prev link"
+msgstr ""
+
+msgid "no edited fields specified"
+msgstr ""
+
#, python-format
msgid "no edited fields specified for entity %s"
msgstr ""
@@ -3488,6 +3527,18 @@
msgid "right"
msgstr ""
+msgid "rql expression allowing to add entities/relations of this type"
+msgstr ""
+
+msgid "rql expression allowing to delete entities/relations of this type"
+msgstr ""
+
+msgid "rql expression allowing to read entities/relations of this type"
+msgstr ""
+
+msgid "rql expression allowing to update entities/relations of this type"
+msgstr ""
+
msgid "rql expressions"
msgstr ""
@@ -3524,9 +3575,6 @@
msgid "searching for"
msgstr ""
-msgid "secondary"
-msgstr ""
-
msgid "security"
msgstr ""
@@ -3612,9 +3660,6 @@
msgid "site documentation"
msgstr ""
-msgid "site schema"
-msgstr ""
-
msgid "site title"
msgstr ""
@@ -3624,9 +3669,6 @@
msgid "siteinfo"
msgstr "site information"
-msgid "some errors occurred:"
-msgstr ""
-
msgid "some later transaction(s) touch entity, undo them first"
msgstr ""
@@ -3662,6 +3704,11 @@
msgid "specifying %s is mandatory"
msgstr ""
+msgid ""
+"start timestamp of the currently in synchronization, or NULL when no "
+"synchronization in progress."
+msgstr ""
+
msgid "startup views"
msgstr ""
@@ -3830,6 +3877,12 @@
msgid "the value \"%s\" is already used, use another one"
msgstr ""
+msgid "there is no next page"
+msgstr ""
+
+msgid "there is no previous page"
+msgstr ""
+
msgid "this action is not reversible!"
msgstr ""
@@ -4176,6 +4229,9 @@
msgid "user preferences"
msgstr ""
+msgid "user's email account"
+msgstr ""
+
msgid "users"
msgstr ""
@@ -4205,27 +4261,27 @@
msgid "value"
msgstr ""
+#, python-format
+msgid "value %(value)s must be %(op)s %(boundary)s"
+msgstr ""
+
+#, python-format
+msgid "value %(value)s must be <= %(boundary)s"
+msgstr ""
+
+#, python-format
+msgid "value %(value)s must be >= %(boundary)s"
+msgstr ""
+
msgid "value associated to this key is not editable manually"
msgstr ""
#, python-format
-msgid "value must be %(op)s %(boundary)s"
-msgstr ""
-
-#, python-format
-msgid "value must be <= %(boundary)s"
+msgid "value should have maximum size of %s but found %s"
msgstr ""
#, python-format
-msgid "value must be >= %(boundary)s"
-msgstr ""
-
-#, python-format
-msgid "value should have maximum size of %s"
-msgstr ""
-
-#, python-format
-msgid "value should have minimum size of %s"
+msgid "value should have minimum size of %s but found %s"
msgstr ""
msgid "vcard"
@@ -4370,10 +4426,3 @@
#, python-format
msgid "you should un-inline relation %s which is supported and may be crossed "
msgstr ""
-
-#~ msgctxt "CWAttribute"
-#~ msgid "relations_object"
-#~ msgstr "constrained by"
-
-#~ msgid "schema-security"
-#~ msgstr "permissions"
diff -r d8bb8f631d41 -r a4e667270dd4 i18n/es.po
--- a/i18n/es.po Mon Sep 26 18:37:23 2011 +0200
+++ b/i18n/es.po Fri Dec 09 12:08:27 2011 +0100
@@ -163,6 +163,11 @@
msgstr "la acción '%s' no acepta opciones"
#, python-format
+msgid ""
+"'%s' action for in_state relation should at least have 'linkattr=name' option"
+msgstr ""
+
+#, python-format
msgid "'%s' action requires 'linkattr' option"
msgstr "la acción '%s' requiere una opción 'linkattr'"
@@ -221,6 +226,9 @@
"pero se puede ver a un modelo completo con meta-datos."
"</div>"
+msgid ""
+msgstr ""
+
msgid ""
msgstr ""
@@ -257,6 +265,12 @@
msgid "BaseTransition_plural"
msgstr "Transiciones (abstractas)"
+msgid "BigInt"
+msgstr ""
+
+msgid "BigInt_plural"
+msgstr ""
+
msgid "Bookmark"
msgstr "Favorito"
@@ -1083,12 +1097,6 @@
msgid "add a CWRType"
msgstr "Agregar un tipo de relación"
-msgid "add a CWSource"
-msgstr "agregar una fuente"
-
-msgid "add a CWSourceSchemaConfig"
-msgstr "agregar una parte de mapeo"
-
msgctxt "inlined:CWUser.use_email.subject"
msgid "add a EmailAddress"
msgstr "Agregar correo electrónico"
@@ -1163,6 +1171,10 @@
msgid "allowed transitions from this state"
msgstr "transiciones autorizadas desde este estado"
+#, python-format
+msgid "allowed values for \"action\" are %s"
+msgstr ""
+
msgid "allowed_transition"
msgstr "transiciones autorizadas"
@@ -1246,6 +1258,9 @@
msgid "auto"
msgstr "Automático"
+msgid "autocomputed attribute used to ensure transition coherency"
+msgstr ""
+
msgid "automatic"
msgstr "Automático"
@@ -1812,10 +1827,10 @@
msgid "ctxcomponents_edit_box_description"
msgstr "Muestra las acciones posibles a ejecutar para los datos seleccionados"
-msgid "ctxcomponents_facet.filters"
+msgid "ctxcomponents_facet.filterbox"
msgstr "Filtros"
-msgid "ctxcomponents_facet.filters_description"
+msgid "ctxcomponents_facet.filterbox_description"
msgstr "Muestra los filtros aplicables a una búsqueda realizada"
msgid "ctxcomponents_logo"
@@ -1925,10 +1940,6 @@
msgid "cw_schema_object"
msgstr "mapeado por"
-msgctxt "CWAttribute"
-msgid "cw_schema_object"
-msgstr "mapeado por"
-
msgctxt "CWEType"
msgid "cw_schema_object"
msgstr "mapeado por"
@@ -1986,6 +1997,9 @@
msgid "data directory url"
msgstr "Url del repertorio de datos"
+msgid "data model schema"
+msgstr "Esquema del Sistema"
+
msgid "data sources"
msgstr "fuente de datos"
@@ -2313,9 +2327,6 @@
msgid "eid"
msgstr "eid"
-msgid "email address to use for notification"
-msgstr "Dirección electrónica a utilizarse para notificar"
-
msgid "emails successfully sent"
msgstr "Mensajes enviados con éxito"
@@ -2431,6 +2442,9 @@
msgid "external page"
msgstr "Página externa"
+msgid "facet-loading-msg"
+msgstr ""
+
msgid "facet.filters"
msgstr "Filtros"
@@ -2615,9 +2629,6 @@
"Relación genérica que indicar que una entidad es idéntica a otro recurso web "
"(ver http://www.w3.org/TR/owl-ref/#sameAs-def)."
-msgid "go back to the index page"
-msgstr "Regresar a la página de inicio"
-
msgid "granted to groups"
msgstr "Otorgado a los grupos"
@@ -2647,6 +2658,18 @@
msgid "groups"
msgstr "Grupos"
+msgid "groups allowed to add entities/relations of this type"
+msgstr ""
+
+msgid "groups allowed to delete entities/relations of this type"
+msgstr ""
+
+msgid "groups allowed to read entities/relations of this type"
+msgstr ""
+
+msgid "groups allowed to update entities/relations of this type"
+msgstr ""
+
msgid "groups grant permissions to the user"
msgstr "Los grupos otorgan los permisos al usuario"
@@ -2770,6 +2793,13 @@
msgid "in_state_object"
msgstr "Estado de"
+msgid "in_synchronization"
+msgstr ""
+
+msgctxt "CWSource"
+msgid "in_synchronization"
+msgstr ""
+
msgid "incontext"
msgstr "En el contexto"
@@ -3152,11 +3182,11 @@
msgctxt "CWSource"
msgid "name"
-msgstr "nombre"
+msgstr ""
msgctxt "State"
msgid "name"
-msgstr "Nombre"
+msgstr "nombre"
msgctxt "Transition"
msgid "name"
@@ -3225,6 +3255,15 @@
msgid "no associated permissions"
msgstr "No existe permiso asociado"
+msgid "no content next link"
+msgstr ""
+
+msgid "no content prev link"
+msgstr ""
+
+msgid "no edited fields specified"
+msgstr ""
+
#, python-format
msgid "no edited fields specified for entity %s"
msgstr "Ningún campo editable especificado para la entidad %s"
@@ -3631,6 +3670,18 @@
msgid "right"
msgstr "Derecha"
+msgid "rql expression allowing to add entities/relations of this type"
+msgstr ""
+
+msgid "rql expression allowing to delete entities/relations of this type"
+msgstr ""
+
+msgid "rql expression allowing to read entities/relations of this type"
+msgstr ""
+
+msgid "rql expression allowing to update entities/relations of this type"
+msgstr ""
+
msgid "rql expressions"
msgstr "Expresiones RQL"
@@ -3667,9 +3718,6 @@
msgid "searching for"
msgstr "Buscando"
-msgid "secondary"
-msgstr "Secundaria"
-
msgid "security"
msgstr "Seguridad"
@@ -3759,9 +3807,6 @@
msgid "site documentation"
msgstr "Documentación Sistema"
-msgid "site schema"
-msgstr "Esquema del Sistema"
-
msgid "site title"
msgstr "Nombre del Sistema"
@@ -3771,9 +3816,6 @@
msgid "siteinfo"
msgstr "información"
-msgid "some errors occurred:"
-msgstr "Algunos errores encontrados :"
-
msgid "some later transaction(s) touch entity, undo them first"
msgstr ""
"Las transacciones más recientes modificaron esta entidad, anúlelas primero"
@@ -3812,6 +3854,11 @@
msgid "specifying %s is mandatory"
msgstr "especificar %s es obligatorio"
+msgid ""
+"start timestamp of the currently in synchronization, or NULL when no "
+"synchronization in progress."
+msgstr ""
+
msgid "startup views"
msgstr "Vistas de inicio"
@@ -3985,6 +4032,12 @@
msgid "the value \"%s\" is already used, use another one"
msgstr "El valor \"%s\" ya esta en uso, favor de utilizar otro"
+msgid "there is no next page"
+msgstr ""
+
+msgid "there is no previous page"
+msgstr ""
+
msgid "this action is not reversible!"
msgstr "Esta acción es irreversible!."
@@ -4085,7 +4138,7 @@
msgstr "n° de transición"
msgid "transaction undone"
-msgstr "Transacciones Anuladas"
+msgstr ""
#, python-format
msgid "transition %(tr)s isn't allowed from %(st)s"
@@ -4340,6 +4393,9 @@
msgid "user preferences"
msgstr "Preferencias"
+msgid "user's email account"
+msgstr ""
+
msgid "users"
msgstr "Usuarios"
@@ -4369,28 +4425,28 @@
msgid "value"
msgstr "Valor"
+#, python-format
+msgid "value %(value)s must be %(op)s %(boundary)s"
+msgstr ""
+
+#, python-format
+msgid "value %(value)s must be <= %(boundary)s"
+msgstr ""
+
+#, python-format
+msgid "value %(value)s must be >= %(boundary)s"
+msgstr ""
+
msgid "value associated to this key is not editable manually"
msgstr "El valor asociado a este elemento no es editable manualmente"
#, python-format
-msgid "value must be %(op)s %(boundary)s"
-msgstr "El valor debe ser %(op)s %(boundary)s"
-
-#, python-format
-msgid "value must be <= %(boundary)s"
-msgstr "El valor debe ser <= %(boundary)s"
+msgid "value should have maximum size of %s but found %s"
+msgstr ""
#, python-format
-msgid "value must be >= %(boundary)s"
-msgstr "El valor debe ser >= %(boundary)s"
-
-#, python-format
-msgid "value should have maximum size of %s"
-msgstr "El valor no debe exceder de %s"
-
-#, python-format
-msgid "value should have minimum size of %s"
-msgstr "El valor no puede ser menor a %s"
+msgid "value should have minimum size of %s but found %s"
+msgstr ""
msgid "vcard"
msgstr "vcard"
@@ -4539,6 +4595,3 @@
msgstr ""
"usted debe quitar la puesta en línea de la relación %s que es aceptada y "
"puede ser cruzada"
-
-#~ msgid "add a %s"
-#~ msgstr "agregar un %s"
diff -r d8bb8f631d41 -r a4e667270dd4 i18n/fr.po
--- a/i18n/fr.po Mon Sep 26 18:37:23 2011 +0200
+++ b/i18n/fr.po Fri Dec 09 12:08:27 2011 +0100
@@ -162,6 +162,13 @@
msgstr "l'action '%s' ne prend pas d'option"
#, python-format
+msgid ""
+"'%s' action for in_state relation should at least have 'linkattr=name' option"
+msgstr ""
+"l'action '%s' pour la relation in_state doit au moins avoir l'option "
+"'linkattr=name'"
+
+#, python-format
msgid "'%s' action requires 'linkattr' option"
msgstr "l'action '%s' nécessite une option 'linkattr'"
@@ -219,6 +226,9 @@
"<div>Ce schéma du modèle de données exclue les méta-données, mais "
"vous pouvez afficher un schéma complet.</div>"
+msgid ""
+msgstr ""
+
msgid ""
msgstr ""
@@ -255,6 +265,12 @@
msgid "BaseTransition_plural"
msgstr "Transitions (abstraites)"
+msgid "BigInt"
+msgstr "Entier long"
+
+msgid "BigInt_plural"
+msgstr "Entiers longs"
+
msgid "Bookmark"
msgstr "Signet"
@@ -1081,12 +1097,6 @@
msgid "add a CWRType"
msgstr "ajouter un type de relation"
-msgid "add a CWSource"
-msgstr "ajouter une source"
-
-msgid "add a CWSourceSchemaConfig"
-msgstr "ajouter une partie de mapping"
-
msgctxt "inlined:CWUser.use_email.subject"
msgid "add a EmailAddress"
msgstr "ajouter une adresse électronique"
@@ -1161,6 +1171,10 @@
msgid "allowed transitions from this state"
msgstr "transitions autorisées depuis cet état"
+#, python-format
+msgid "allowed values for \"action\" are %s"
+msgstr "les valeurs autorisées pour \"action\" sont %s"
+
msgid "allowed_transition"
msgstr "transitions autorisées"
@@ -1244,6 +1258,10 @@
msgid "auto"
msgstr "automatique"
+msgid "autocomputed attribute used to ensure transition coherency"
+msgstr ""
+"attribut calculé automatiquement pour assurer la cohérence de la transition"
+
msgid "automatic"
msgstr "automatique"
@@ -1815,10 +1833,10 @@
msgstr ""
"boîte affichant les différentes actions possibles sur les données affichées"
-msgid "ctxcomponents_facet.filters"
+msgid "ctxcomponents_facet.filterbox"
msgstr "boîte à facettes"
-msgid "ctxcomponents_facet.filters_description"
+msgid "ctxcomponents_facet.filterbox_description"
msgstr ""
"boîte permettant de filtrer parmi les résultats d'une recherche à l'aide de "
"facettes"
@@ -1928,10 +1946,6 @@
msgid "cw_schema_object"
msgstr "mappé par"
-msgctxt "CWAttribute"
-msgid "cw_schema_object"
-msgstr "mappé par"
-
msgctxt "CWEType"
msgid "cw_schema_object"
msgstr "mappé par"
@@ -1989,6 +2003,9 @@
msgid "data directory url"
msgstr "url du répertoire de données"
+msgid "data model schema"
+msgstr "schéma du modèle de données"
+
msgid "data sources"
msgstr "sources de données"
@@ -2312,9 +2329,6 @@
msgid "eid"
msgstr "eid"
-msgid "email address to use for notification"
-msgstr "adresse email à utiliser pour la notification"
-
msgid "emails successfully sent"
msgstr "courriels envoyés avec succès"
@@ -2429,6 +2443,9 @@
msgid "external page"
msgstr "page externe"
+msgid "facet-loading-msg"
+msgstr "en cours de traitement, merci de patienter"
+
msgid "facet.filters"
msgstr "facettes"
@@ -2613,9 +2630,6 @@
"relation générique permettant d'indiquer qu'une entité est identique à une "
"autre ressource web (voir http://www.w3.org/TR/owl-ref/#sameAs-def)."
-msgid "go back to the index page"
-msgstr "retourner sur la page d'accueil"
-
msgid "granted to groups"
msgstr "accordée aux groupes"
@@ -2646,6 +2660,18 @@
msgid "groups"
msgstr "groupes"
+msgid "groups allowed to add entities/relations of this type"
+msgstr "groupes autorisés à ajouter des entités/relations de ce type"
+
+msgid "groups allowed to delete entities/relations of this type"
+msgstr "groupes autorisés à supprimer des entités/relations de ce type"
+
+msgid "groups allowed to read entities/relations of this type"
+msgstr "groupes autorisés à lire des entités/relations de ce type"
+
+msgid "groups allowed to update entities/relations of this type"
+msgstr "groupes autorisés à mettre à jour des entités/relations de ce type"
+
msgid "groups grant permissions to the user"
msgstr "les groupes donnent des permissions à l'utilisateur"
@@ -2768,6 +2794,13 @@
msgid "in_state_object"
msgstr "état de"
+msgid "in_synchronization"
+msgstr "en cours de synchronisation"
+
+msgctxt "CWSource"
+msgid "in_synchronization"
+msgstr "en cours de synchronisation"
+
msgid "incontext"
msgstr "dans le contexte"
@@ -3207,7 +3240,7 @@
msgstr "nombre d'entités dans la vue primaire"
msgid "navigation.short-line-size"
-msgstr "description courtes"
+msgstr "taille des descriptions courtes"
msgid "navtop"
msgstr "haut de page du contenu principal"
@@ -3224,6 +3257,15 @@
msgid "no associated permissions"
msgstr "aucune permission associée"
+msgid "no content next link"
+msgstr "pas de lien 'suivant'"
+
+msgid "no content prev link"
+msgstr "pas de lien 'précédent'"
+
+msgid "no edited fields specified"
+msgstr "aucun champ à éditer spécifié"
+
#, python-format
msgid "no edited fields specified for entity %s"
msgstr "aucun champ à éditer spécifié pour l'entité %s"
@@ -3632,6 +3674,19 @@
msgid "right"
msgstr "droite"
+msgid "rql expression allowing to add entities/relations of this type"
+msgstr "expression rql autorisant à ajouter des entités/relations de ce type"
+
+msgid "rql expression allowing to delete entities/relations of this type"
+msgstr "expression rql autorisant à supprimer des entités/relations de ce type"
+
+msgid "rql expression allowing to read entities/relations of this type"
+msgstr "expression rql autorisant à lire des entités/relations de ce type"
+
+msgid "rql expression allowing to update entities/relations of this type"
+msgstr ""
+"expression rql autorisant à mettre à jour des entités/relations de ce type"
+
msgid "rql expressions"
msgstr "conditions rql"
@@ -3668,9 +3723,6 @@
msgid "searching for"
msgstr "Recherche de"
-msgid "secondary"
-msgstr "secondaire"
-
msgid "security"
msgstr "sécurité"
@@ -3759,9 +3811,6 @@
msgid "site documentation"
msgstr "documentation du site"
-msgid "site schema"
-msgstr "schéma du site"
-
msgid "site title"
msgstr "titre du site"
@@ -3771,9 +3820,6 @@
msgid "siteinfo"
msgstr "informations"
-msgid "some errors occurred:"
-msgstr "des erreurs sont survenues"
-
msgid "some later transaction(s) touch entity, undo them first"
msgstr ""
"des transactions plus récentes modifient cette entité, annulez les d'abord"
@@ -3813,6 +3859,12 @@
msgid "specifying %s is mandatory"
msgstr "spécifier %s est obligatoire"
+msgid ""
+"start timestamp of the currently in synchronization, or NULL when no "
+"synchronization in progress."
+msgstr ""
+"horodate de départ de la synchronisation en cours, ou NULL s'il n'y en a pas."
+
msgid "startup views"
msgstr "vues de départ"
@@ -3985,6 +4037,12 @@
msgid "the value \"%s\" is already used, use another one"
msgstr "la valeur \"%s\" est déjà utilisée, veuillez utiliser une autre valeur"
+msgid "there is no next page"
+msgstr "il n'y a pas de page suivante"
+
+msgid "there is no previous page"
+msgstr "il n'y a pas de page précédente"
+
msgid "this action is not reversible!"
msgstr ""
"Attention ! Cette opération va détruire les données de façon irréversible."
@@ -4339,6 +4397,9 @@
msgid "user preferences"
msgstr "préférences utilisateur"
+msgid "user's email account"
+msgstr "email de l'utilisateur"
+
msgid "users"
msgstr "utilisateurs"
@@ -4368,28 +4429,28 @@
msgid "value"
msgstr "valeur"
+#, python-format
+msgid "value %(value)s must be %(op)s %(boundary)s"
+msgstr "la valeur %(value)s doit être %(op)s %(boundary)s"
+
+#, python-format
+msgid "value %(value)s must be <= %(boundary)s"
+msgstr "la valeur %(value)s doit être <= %(boundary)s"
+
+#, python-format
+msgid "value %(value)s must be >= %(boundary)s"
+msgstr "la valeur %(value)s doit être >= %(boundary)s"
+
msgid "value associated to this key is not editable manually"
msgstr "la valeur associée à cette clé n'est pas éditable manuellement"
#, python-format
-msgid "value must be %(op)s %(boundary)s"
-msgstr "la valeur doit être %(op)s %(boundary)s"
-
-#, python-format
-msgid "value must be <= %(boundary)s"
-msgstr "la valeur doit être <= %(boundary)s"
+msgid "value should have maximum size of %s but found %s"
+msgstr "la taille maximum est %s mais cette valeur est de taille %s"
#, python-format
-msgid "value must be >= %(boundary)s"
-msgstr "la valeur doit être >= %(boundary)s"
-
-#, python-format
-msgid "value should have maximum size of %s"
-msgstr "la valeur doit être de taille %s au maximum"
-
-#, python-format
-msgid "value should have minimum size of %s"
-msgstr "la valeur doit être de taille %s au minimum"
+msgid "value should have minimum size of %s but found %s"
+msgstr "la taille minimum est %s mais cette valeur est de taille %s"
msgid "vcard"
msgstr "vcard"
@@ -4539,66 +4600,3 @@
msgstr ""
"vous devriez enlever la mise en ligne de la relation %s qui est supportée et "
"peut être croisée"
-
-#~ msgid "Attributes with non default permissions:"
-#~ msgstr "Attributs ayant des permissions non-standard"
-
-#~ msgid "Entity types"
-#~ msgstr "Types d'entités"
-
-#~ msgid "Permissions for entity types"
-#~ msgstr "Permissions pour les types d'entités"
-
-#~ msgid "Permissions for relations"
-#~ msgstr "Permissions pour les relations"
-
-#~ msgid "Relation types"
-#~ msgstr "Types de relation"
-
-#~ msgid "add a %s"
-#~ msgstr "ajouter un %s"
-
-#~ msgid "am/pm calendar (month)"
-#~ msgstr "calendrier am/pm (mois)"
-
-#~ msgid "am/pm calendar (semester)"
-#~ msgstr "calendrier am/pm (semestre)"
-
-#~ msgid "am/pm calendar (week)"
-#~ msgstr "calendrier am/pm (semaine)"
-
-#~ msgid "am/pm calendar (year)"
-#~ msgstr "calendrier am/pm (année)"
-
-#~ msgid "application entities"
-#~ msgstr "entités applicatives"
-
-#~ msgid "calendar (month)"
-#~ msgstr "calendrier (mensuel)"
-
-#~ msgid "calendar (semester)"
-#~ msgstr "calendrier (semestriel)"
-
-#~ msgid "calendar (week)"
-#~ msgstr "calendrier (hebdo)"
-
-#~ msgid "calendar (year)"
-#~ msgstr "calendrier (annuel)"
-
-#~ msgid "create an index page"
-#~ msgstr "créer une page d'accueil"
-
-#~ msgid "edit the index page"
-#~ msgstr "éditer la page d'accueil"
-
-#~ msgid "schema entities"
-#~ msgstr "entités définissant le schéma"
-
-#~ msgid "schema-security"
-#~ msgstr "permissions"
-
-#~ msgid "system entities"
-#~ msgstr "entités systèmes"
-
-#~ msgid "timestamp of the latest source synchronization."
-#~ msgstr "date de la dernière synchronisation avec la source."
diff -r d8bb8f631d41 -r a4e667270dd4 mail.py
--- a/mail.py Mon Sep 26 18:37:23 2011 +0200
+++ b/mail.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -21,10 +21,10 @@
from base64 import b64encode, b64decode
from time import time
-from email.MIMEMultipart import MIMEMultipart
-from email.MIMEText import MIMEText
-from email.MIMEImage import MIMEImage
-from email.Header import Header
+from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
+from email.mime.image import MIMEImage
+from email.header import Header
try:
from socket import gethostname
except ImportError:
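
The lowercase `email.mime.*` paths are the modern spelling of the old CamelCase modules, which disappear in Python 3. A standard-library-only sanity check of the new imports:

    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText
    from email.header import Header

    msg = MIMEMultipart()
    msg['Subject'] = Header('hello from cubicweb', 'utf-8')
    msg.attach(MIMEText('plain text body', 'plain', 'utf-8'))
    print(msg.as_string())
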
@@ -67,7 +67,7 @@
values = b64decode(str(values + '='*padding), '.-')
values = dict(v.split('=') for v in values.split('&'))
fromappid, host = qualif.split('.', 1)
- except:
+ except Exception:
return None
if appid != fromappid or host != gethostname():
return None
@@ -156,6 +156,10 @@
msgid_timestamp = True
+ # to be defined on concrete sub-classes
+ content = None # body of the mail
+ message = None # action verb of the subject
+
# this is usually the method to call
def render_and_send(self, **kwargs):
"""generate and send an email message for this view"""
diff -r d8bb8f631d41 -r a4e667270dd4 md5crypt.py
--- a/md5crypt.py Mon Sep 26 18:37:23 2011 +0200
+++ b/md5crypt.py Fri Dec 09 12:08:27 2011 +0100
@@ -41,7 +41,7 @@
MAGIC = '$1$' # Magic string
ITOA64 = "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
-import hashlib as md5
+from hashlib import md5 # pylint: disable=E0611
def to64 (v, n):
ret = ''
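
`hashlib.md5` is the supported replacement for the standalone md5 module, deprecated since Python 2.5 and removed in 3.x; the call sites below only need the constructor. For instance:

    from hashlib import md5  # pylint: disable=E0611

    final = md5('password' + '$1$' + 'salt').digest()
    print(len(final))  # 16 raw bytes, as the md5crypt algorithm expects
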
@@ -63,7 +63,7 @@
salt = salt.split('$', 1)[0]
salt = salt[:8]
ctx = pw + magic + salt
- final = md5.md5(pw + salt + pw).digest()
+ final = md5(pw + salt + pw).digest()
for pl in xrange(len(pw), 0, -16):
if pl > 16:
ctx = ctx + final[:16]
@@ -77,7 +77,7 @@
else:
ctx = ctx + pw[0]
i = i >> 1
- final = md5.md5(ctx).digest()
+ final = md5(ctx).digest()
# The following is supposed to make
# things run slower.
# my question: WTF???
@@ -95,7 +95,7 @@
ctx1 = ctx1 + final[:16]
else:
ctx1 = ctx1 + pw
- final = md5.md5(ctx1).digest()
+ final = md5(ctx1).digest()
# Final xform
passwd = ''
passwd = passwd + to64((int(ord(final[0])) << 16)
diff -r d8bb8f631d41 -r a4e667270dd4 migration.py
--- a/migration.py Mon Sep 26 18:37:23 2011 +0200
+++ b/migration.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -201,8 +201,8 @@
if not ask_confirm or self.confirm(msg):
return meth(*args, **kwargs)
- def confirm(self, question, shell=True, abort=True, retry=False, pdb=False,
- default='y'):
+ def confirm(self, question, # pylint: disable=E0202
+ shell=True, abort=True, retry=False, pdb=False, default='y'):
"""ask for confirmation and return true on positive answer
if `retry` is true the r[etry] answer may return 2
@@ -269,7 +269,10 @@
def unicode_raw_input(prompt):
return unicode(raw_input(prompt), sys.stdin.encoding)
interact(banner, readfunc=unicode_raw_input, local=local_ctx)
- readline.write_history_file(histfile)
+ try:
+ readline.write_history_file(histfile)
+ except IOError:
+ pass
# delete instance's confirm attribute to avoid questions
del self.confirm
self.need_wrap = True
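
The new guard treats shell-history persistence as best-effort: an unwritable history file (read-only home directory, sandboxed test run) must not abort the migration shell. The standalone behaviour, assuming a platform that ships the readline module:

    import readline

    try:
        readline.write_history_file('/nonexistent-dir/history')
    except IOError:
        pass  # losing interactive history is acceptable; aborting is not
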
@@ -411,7 +414,7 @@
basecubes = [c for c in origcubes if not c in toremove]
self.config._cubes = tuple(self.config.expand_cubes(basecubes))
removed = [p for p in origcubes if not p in self.config._cubes]
- if not cube in removed:
+ if not cube in removed and cube in origcubes:
raise ConfigurationError("can't remove cube %s, "
"used as a dependency" % cube)
return removed
@@ -488,7 +491,7 @@
try:
oper, version = constraint.split()
self.reverse_dependencies[name].add( (oper, version, cube) )
- except:
+ except Exception:
self.warnings.append(
'cube %s depends on %s but constraint badly '
'formatted: %s' % (cube, name, constraint))
diff -r d8bb8f631d41 -r a4e667270dd4 misc/cwdesklets/rqlsensor/__init__.py
--- a/misc/cwdesklets/rqlsensor/__init__.py Mon Sep 26 18:37:23 2011 +0200
+++ b/misc/cwdesklets/rqlsensor/__init__.py Fri Dec 09 12:08:27 2011 +0100
@@ -15,9 +15,6 @@
#
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see .
-"""
-
-"""
import webbrowser
reload(webbrowser)
@@ -89,7 +86,7 @@
cursor = cnx.cursor()
try:
rset = cursor.execute(rql)
- except:
+ except Exception:
del self._v_cnx
raise
self._urls = []
@@ -101,7 +98,7 @@
output.set('resultbg[%s]' % i, 'black')
try:
self._urls.append(base % 'Any X WHERE X eid %s' % line[0])
- except:
+ except Exception:
self._urls.append('')
i += 1
diff -r d8bb8f631d41 -r a4e667270dd4 misc/migration/3.12.9_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/misc/migration/3.12.9_Any.py Fri Dec 09 12:08:27 2011 +0100
@@ -0,0 +1,1 @@
+sync_schema_props_perms('cw_source')
diff -r d8bb8f631d41 -r a4e667270dd4 misc/migration/3.13.0_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/misc/migration/3.13.0_Any.py Fri Dec 09 12:08:27 2011 +0100
@@ -0,0 +1,3 @@
+sync_schema_props_perms('cw_source', syncprops=False)
+if schema['BigInt'].eid is None:
+ add_entity_type('BigInt')
diff -r d8bb8f631d41 -r a4e667270dd4 misc/migration/3.13.3_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/misc/migration/3.13.3_Any.py Fri Dec 09 12:08:27 2011 +0100
@@ -0,0 +1,2 @@
+drop_relation_definition('CWSourceSchemaConfig', 'cw_schema', 'CWAttribute')
+sync_schema_props_perms('cw_schema')
diff -r d8bb8f631d41 -r a4e667270dd4 misc/migration/3.13.6_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/misc/migration/3.13.6_Any.py Fri Dec 09 12:08:27 2011 +0100
@@ -0,0 +1,1 @@
+sync_schema_props_perms('CWSourceSchemaConfig')
diff -r d8bb8f631d41 -r a4e667270dd4 misc/migration/3.13.8_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/misc/migration/3.13.8_Any.py Fri Dec 09 12:08:27 2011 +0100
@@ -0,0 +1,5 @@
+change_attribute_type('CWCache', 'timestamp', 'TZDatetime')
+change_attribute_type('CWUser', 'last_login_time', 'TZDatetime')
+change_attribute_type('CWSource', 'latest_retrieval', 'TZDatetime')
+drop_attribute('CWSource', 'synchronizing')
+add_attribute('CWSource', 'in_synchronization')
diff -r d8bb8f631d41 -r a4e667270dd4 misc/migration/bootstrapmigration_repository.py
--- a/misc/migration/bootstrapmigration_repository.py Mon Sep 26 18:37:23 2011 +0200
+++ b/misc/migration/bootstrapmigration_repository.py Fri Dec 09 12:08:27 2011 +0100
@@ -35,6 +35,12 @@
ss.execschemarql(rql, rdef, ss.rdef2rql(rdef, CSTRMAP, groupmap=None))
commit(ask_confirm=False)
+if applcubicwebversion <= (3, 13, 0) and cubicwebversion >= (3, 13, 1):
+ sql('ALTER TABLE entities ADD asource VARCHAR(64)')
+ sql('UPDATE entities SET asource=cw_name '
+ 'FROM cw_CWSource, cw_source_relation '
+ 'WHERE entities.eid=cw_source_relation.eid_from AND cw_source_relation.eid_to=cw_CWSource.cw_eid')
+
if applcubicwebversion == (3, 6, 0) and cubicwebversion >= (3, 6, 0):
CSTRMAP = dict(rql('Any T, X WHERE X is CWConstraintType, X name T',
ask_confirm=False))
@@ -49,7 +55,7 @@
elif applcubicwebversion < (3, 6, 0) and cubicwebversion >= (3, 6, 0):
CSTRMAP = dict(rql('Any T, X WHERE X is CWConstraintType, X name T',
ask_confirm=False))
- session.set_pool()
+ session.set_cnxset()
permsdict = ss.deserialize_ertype_permissions(session)
with hooks_control(session, session.HOOKS_ALLOW_ALL, 'integrity'):
diff -r d8bb8f631d41 -r a4e667270dd4 misc/migration/postcreate.py
diff -r d8bb8f631d41 -r a4e667270dd4 misc/scripts/drop_external_entities.py
--- a/misc/scripts/drop_external_entities.py Mon Sep 26 18:37:23 2011 +0200
+++ b/misc/scripts/drop_external_entities.py Fri Dec 09 12:08:27 2011 +0100
@@ -3,7 +3,7 @@
sql("DELETE FROM entities WHERE type='Int'")
-ecnx = session.pool.connection(source)
+ecnx = session.cnxset.connection(source)
for e in rql('Any X WHERE X cw_source S, S name %(name)s', {'name': source}).entities():
meta = e.cw_metainformation()
assert meta['source']['uri'] == source
@@ -15,7 +15,7 @@
if suri != 'system':
try:
print 'deleting', e.__regid__, e.eid, suri, e.dc_title().encode('utf8')
- repo.delete_info(session, e, suri, meta['extid'], scleanup=True)
+ repo.delete_info(session, e, suri, scleanup=e.eid)
except UnknownEid:
print ' cant delete', e.__regid__, e.eid, meta
diff -r d8bb8f631d41 -r a4e667270dd4 misc/scripts/pyroforge2datafeed.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/misc/scripts/pyroforge2datafeed.py Fri Dec 09 12:08:27 2011 +0100
@@ -0,0 +1,134 @@
+"""turn a pyro source into a datafeed source
+
+Once this script is run, execute c-c db-check to clean up relation tables.
+"""
+import sys
+
+try:
+ source_name, = __args__
+ source = repo.sources_by_uri[source_name]
+except ValueError:
+ print('you should specify the source name as script argument (i.e. after --'
+ ' on the command line)')
+ sys.exit(1)
+except KeyError:
+ print '%s is not an active source' % source_name
+ sys.exit(1)
+
+# check source is reachable before doing anything
+try:
+ source.get_connection()._repo
+except AttributeError:
+ print '%s is not reachable. Fix this before running this script' % source_name
+ sys.exit(1)
+
+raw_input('Ensure you have shut down all instances of this application before continuing.'
+ ' Type enter when ready.')
+
+system_source = repo.system_source
+
+from base64 import b64encode
+from cubicweb.server.edition import EditedEntity
+
+DONT_GET_BACK_ETYPES = set(( # XXX edit as desired
+ 'State',
+ 'RecipeStep', 'RecipeStepInput', 'RecipeStepOutput',
+ 'RecipeTransition', 'RecipeTransitionCondition',
+ 'NarvalConditionExpression', 'Recipe',
+ # XXX TestConfig
+ ))
+
+
+session.mode = 'write' # hold onto the connections set
+
+print '******************** backport entity content ***************************'
+
+from cubicweb.server import debugged
+todelete = {}
+host = source.config['base-url'].split('://')[1]
+for entity in rql('Any X WHERE X cw_source S, S eid %(s)s', {'s': source.eid}).entities():
+ etype = entity.__regid__
+ if not source.support_entity(etype):
+ print "source doesn't support %s, delete %s" % (etype, entity.eid)
+ elif etype in DONT_GET_BACK_ETYPES:
+ print 'ignore %s, delete %s' % (etype, entity.eid)
+ else:
+ try:
+ entity.complete()
+ if not host in entity.cwuri:
+ print 'SKIP foreign entity', entity.cwuri, source.config['base-url']
+ continue
+ except Exception:
+            print '%s %s most probably deleted, delete it (extid %s)' % (
+ etype, entity.eid, entity.cw_metainformation()['extid'])
+ else:
+ print 'get back', etype, entity.eid
+ entity.cw_edited = EditedEntity(entity, **entity.cw_attr_cache)
+ system_source.add_entity(session, entity)
+ sql("UPDATE entities SET asource=%(asource)s, source='system', extid=%(extid)s "
+ "WHERE eid=%(eid)s", {'asource': source_name,
+ 'extid': b64encode(entity.cwuri),
+ 'eid': entity.eid})
+ continue
+ todelete.setdefault(etype, []).append(entity)
+
+# only cleanup entities table, remaining stuff should be cleaned by a c-c
+# db-check to be run after this script
+for entities in todelete.values():
+ system_source.delete_info_multi(session, entities, source_name)
+
+
+print '******************** backport mapping **********************************'
+session.disable_hook_categories('cw.sources')
+mapping = []
+for mappart in rql('Any X,SCH WHERE X cw_schema SCH, X cw_for_source S, S eid %(s)s',
+ {'s': source.eid}).entities():
+ schemaent = mappart.cw_schema[0]
+ if schemaent.__regid__ != 'CWEType':
+ assert schemaent.__regid__ == 'CWRType'
+ sch = schema._eid_index[schemaent.eid]
+ for rdef in sch.rdefs.values():
+ if not source.support_entity(rdef.subject) \
+ or not source.support_entity(rdef.object):
+ continue
+ if rdef.subject in DONT_GET_BACK_ETYPES \
+ and rdef.object in DONT_GET_BACK_ETYPES:
+ print 'dont map', rdef
+ continue
+ if rdef.subject in DONT_GET_BACK_ETYPES:
+ options = u'action=link\nlinkattr=name'
+ roles = 'object',
+ elif rdef.object in DONT_GET_BACK_ETYPES:
+ options = u'action=link\nlinkattr=name'
+ roles = 'subject',
+ else:
+ options = u'action=copy'
+ if rdef.rtype in ('use_environment',):
+ roles = 'object',
+ else:
+ roles = 'subject',
+ print 'map', rdef, options, roles
+ for role in roles:
+ mapping.append( (
+ (str(rdef.subject), str(rdef.rtype), str(rdef.object)),
+ options + '\nrole=%s' % role) )
+ mappart.cw_delete()
+
+source_ent = rql('CWSource S WHERE S eid %(s)s', {'s': source.eid}).get_entity(0, 0)
+source_ent.init_mapping(mapping)
+
+# change source properties
+config = u'''synchronize=yes
+synchronization-interval=10min
+delete-entities=no
+'''
+rql('SET X type "datafeed", X parser "cw.entityxml", X url %(url)s, X config %(config)s '
+ 'WHERE X eid %(x)s',
+ {'x': source.eid, 'config': config,
+ 'url': source.config['base-url']+'/project'})
+
+
+commit()
+
+from cubes.apycot import recipes
+recipes.create_quick_recipe(session)
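
Note: such scripts are meant to be run through the cubicweb-ctl shell, with the
source name given after `--` as the script's own error message indicates; the
instance and source names below are hypothetical:

    cubicweb-ctl shell myinstance misc/scripts/pyroforge2datafeed.py -- mysource
    cubicweb-ctl db-check myinstance    # then clean up relation tables (c-c db-check)
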
diff -r d8bb8f631d41 -r a4e667270dd4 mttransforms.py
--- a/mttransforms.py Mon Sep 26 18:37:23 2011 +0200
+++ b/mttransforms.py Fri Dec 09 12:08:27 2011 +0100
@@ -99,10 +99,10 @@
def patch_convert(cls):
def _convert(self, trdata, origconvert=cls._convert):
- try:
- trdata.appobject._cw.add_css('pygments.css')
- except AttributeError: # session has no add_css, only http request
- pass
+ add_css = getattr(trdata.appobject._cw, 'add_css', None)
+ if add_css is not None:
+ # session has no add_css, only http request
+ add_css('pygments.css')
return origconvert(self, trdata)
cls._convert = _convert
patch_convert(pygmentstransforms.PygmentsHTMLTransform)
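
The getattr-based rewrite above narrows the guard: the former try/except would
also have swallowed an AttributeError raised from inside add_css() itself. A
minimal sketch of the difference, with a hypothetical obj:

    try:
        obj.add_css('pygments.css')   # an AttributeError raised *inside* add_css
    except AttributeError:            # ... would be silently swallowed here
        pass

    add_css = getattr(obj, 'add_css', None)
    if add_css is not None:           # only tests for the attribute's presence
        add_css('pygments.css')
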
diff -r d8bb8f631d41 -r a4e667270dd4 pylintext.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/pylintext.py Fri Dec 09 12:08:27 2011 +0100
@@ -0,0 +1,41 @@
+"""https://pastebin.logilab.fr/show/860/"""
+
+from logilab.astng import MANAGER, nodes, scoped_nodes
+
+def turn_function_to_class(node):
+ """turn a Function node into a Class node (in-place)"""
+ node.__class__ = scoped_nodes.Class
+ node.bases = ()
+ # remove return nodes so that we don't get warned about 'return outside
+ # function' by pylint
+ for rnode in node.nodes_of_class(nodes.Return):
+ rnode.parent.body.remove(rnode)
+ # that seems to be enough :)
+
+
+def cubicweb_transform(module):
+ # handle objectify_selector decorator. Only look at module level functions,
+ # should be enough
+ for assnodes in module.locals.values():
+ for node in assnodes:
+ if isinstance(node, scoped_nodes.Function) and node.decorators:
+ for decorator in node.decorators.nodes:
+ for infered in decorator.infer():
+ if infered.name == 'objectify_selector':
+ turn_function_to_class(node)
+ break
+ else:
+ continue
+ break
+ # add yams base types into 'yams.buildobjs', astng doesn't grasp globals()
+ # magic in there
+ if module.name == 'yams.buildobjs':
+ from yams import BASE_TYPES
+ for etype in BASE_TYPES:
+ module.locals[etype] = [scoped_nodes.Class(etype, None)]
+
+MANAGER.register_transformer(cubicweb_transform)
+
+def register(linter):
+ """called when loaded by pylint --load-plugins, nothing to do here"""
+
diff -r d8bb8f631d41 -r a4e667270dd4 req.py
--- a/req.py Mon Sep 26 18:37:23 2011 +0200
+++ b/req.py Fri Dec 09 12:08:27 2011 +0100
@@ -29,7 +29,7 @@
from logilab.common.deprecation import deprecated
from logilab.common.date import ustrftime, strptime, todate, todatetime
-from cubicweb import Unauthorized, RegistryException, typed_eid
+from cubicweb import Unauthorized, NoSelectableObject, typed_eid
from cubicweb.rset import ResultSet
ONESECOND = timedelta(0, 1, 0)
@@ -66,7 +66,7 @@
self.vreg = vreg
try:
encoding = vreg.property_value('ui.encoding')
- except: # no vreg or property not registered
+ except Exception: # no vreg or property not registered
encoding = 'utf-8'
self.encoding = encoding
# cache result of execution for (rql expr / eids),
@@ -336,7 +336,7 @@
initargs.update(kwargs)
try:
view = self.vreg[__registry].select(__vid, self, rset=rset, **initargs)
- except RegistryException:
+ except NoSelectableObject:
if __fallback_oid is None:
raise
view = self.vreg[__registry].select(__fallback_oid, self,
@@ -409,7 +409,7 @@
# abstract methods to override according to the web front-end #############
- def describe(self, eid):
+ def describe(self, eid, asdict=False):
"""return a tuple (type, sourceuri, extid) for the entity with id """
raise NotImplementedError
diff -r d8bb8f631d41 -r a4e667270dd4 rqlrewrite.py
--- a/rqlrewrite.py Mon Sep 26 18:37:23 2011 +0200
+++ b/rqlrewrite.py Fri Dec 09 12:08:27 2011 +0100
@@ -119,6 +119,10 @@
return newsolutions
+def iter_relations(stinfo):
+    # this is a function so that tests may override it to return relations in a predictable order
+ return stinfo['relations'] - stinfo['rhsrelations']
+
class Unsupported(Exception):
"""raised when an rql expression can't be inserted in some rql query
    because it creates an unresolvable query (e.g. no solutions found)
@@ -337,43 +341,58 @@
"""introduce the given snippet in a subquery"""
subselect = stmts.Select()
snippetrqlst = n.Exists(transformedsnippet.copy(subselect))
+ get_rschema = self.schema.rschema
aliases = []
- rels_done = set()
- for i, (selectvar, snippetvar) in enumerate(varmap):
+ done = set()
+ for i, (selectvar, _) in enumerate(varmap):
+ need_null_test = False
subselectvar = subselect.get_variable(selectvar)
subselect.append_selected(n.VariableRef(subselectvar))
aliases.append(selectvar)
- vi = self.varinfos[i]
- need_null_test = False
- stinfo = vi['stinfo']
- for rel in stinfo['relations']:
- if rel in rels_done:
- continue
- rels_done.add(rel)
- rschema = self.schema.rschema(rel.r_type)
- if rschema.final or (rschema.inlined and
- not rel in stinfo['rhsrelations']):
- rel.children[0].name = selectvar # XXX explain why
- subselect.add_restriction(rel.copy(subselect))
- for vref in rel.children[1].iget_nodes(n.VariableRef):
- if isinstance(vref.variable, n.ColumnAlias):
- # XXX could probably be handled by generating the
- # subquery into the detected subquery
- raise BadSchemaDefinition(
- "cant insert security because of usage two inlined "
- "relations in this query. You should probably at "
- "least uninline %s" % rel.r_type)
- subselect.append_selected(vref.copy(subselect))
- aliases.append(vref.name)
- self.select.remove_node(rel)
- # when some inlined relation has to be copied in the
- # subquery, we need to test that either value is NULL or
- # that the snippet condition is satisfied
- if rschema.inlined and rel.optional:
- need_null_test = True
+ todo = [(selectvar, self.varinfos[i]['stinfo'])]
+ while todo:
+ varname, stinfo = todo.pop()
+ done.add(varname)
+ for rel in iter_relations(stinfo):
+ if rel in done:
+ continue
+ done.add(rel)
+ rschema = get_rschema(rel.r_type)
+ if rschema.final or rschema.inlined:
+ rel.children[0].name = varname # XXX explain why
+ subselect.add_restriction(rel.copy(subselect))
+ for vref in rel.children[1].iget_nodes(n.VariableRef):
+ if isinstance(vref.variable, n.ColumnAlias):
+ # XXX could probably be handled by generating the
+ # subquery into the detected subquery
+ raise BadSchemaDefinition(
+ "cant insert security because of usage two inlined "
+ "relations in this query. You should probably at "
+ "least uninline %s" % rel.r_type)
+ subselect.append_selected(vref.copy(subselect))
+ aliases.append(vref.name)
+ self.select.remove_node(rel)
+ # when some inlined relation has to be copied in the
+ # subquery and that relation is optional, we need to
+ # test that either value is NULL or that the snippet
+ # condition is satisfied
+ if varname == selectvar and rel.optional and rschema.inlined:
+ need_null_test = True
+ # also, if some attributes or inlined relation of the
+ # object variable are accessed, we need to get all those
+ # from the subquery as well
+ if vref.name not in done and rschema.inlined:
+                    # we can use vref here, defined in the for loop above
+ ostinfo = vref.variable.stinfo
+ for orel in iter_relations(ostinfo):
+ orschema = get_rschema(orel.r_type)
+ if orschema.final or orschema.inlined:
+ todo.append( (vref.name, ostinfo) )
+ break
if need_null_test:
snippetrqlst = n.Or(
- n.make_relation(subselectvar, 'is', (None, None), n.Constant,
+ n.make_relation(subselect.get_variable(selectvar), 'is',
+ (None, None), n.Constant,
operator='='),
snippetrqlst)
subselect.add_restriction(snippetrqlst)
@@ -619,7 +638,7 @@
def visit_mathexpression(self, node):
cmp_ = n.MathExpression(node.operator)
- for c in cmp.children:
+ for c in node.children:
cmp_.append(c.accept(self))
return cmp_
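
For illustration: when the need_null_test branch above triggers (an optional
inlined relation copied into the subquery), the restriction added for the
security snippet roughly has the shape

    (X is NULL) OR EXISTS(<security snippet involving X>)

so rows where the outer join produced no value still satisfy the rewritten
query.
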
diff -r d8bb8f631d41 -r a4e667270dd4 rset.py
--- a/rset.py Mon Sep 26 18:37:23 2011 +0200
+++ b/rset.py Fri Dec 09 12:08:27 2011 +0100
@@ -351,7 +351,8 @@
if offset <= entity.cw_row < stop:
entity.cw_row = entity.cw_row - offset
else:
- self.req.drop_entity_cache(entity.eid)
+ entity.cw_rset = entity.as_rset()
+ entity.cw_row = entity.cw_col = 0
else:
rset = self.copy(rows, descr)
if not offset:
@@ -475,43 +476,57 @@
entity.eid = eid
# cache entity
req.set_entity_cache(entity)
- eschema = entity.e_schema
# try to complete the entity if there are some additional columns
if len(rowvalues) > 1:
- rqlst = self.syntax_tree()
- if rqlst.TYPE == 'select':
- # UNION query, find the subquery from which this entity has been
- # found
- select, col = rqlst.locate_subquery(col, etype, self.args)
+ eschema = entity.e_schema
+ eid_col, attr_cols, rel_cols = self._rset_structure(eschema, col)
+ entity.eid = rowvalues[eid_col]
+ for attr, col_idx in attr_cols.items():
+ entity.cw_attr_cache[attr] = rowvalues[col_idx]
+ for (rtype, role), col_idx in rel_cols.items():
+ value = rowvalues[col_idx]
+ if value is None:
+ if role == 'subject':
+ rql = 'Any Y WHERE X %s Y, X eid %s'
+ else:
+ rql = 'Any Y WHERE Y %s X, X eid %s'
+ rrset = ResultSet([], rql % (rtype, entity.eid))
+ rrset.req = req
+ else:
+ rrset = self._build_entity(row, col_idx).as_rset()
+ entity.cw_set_relation_cache(rtype, role, rrset)
+ return entity
+
+ @cached
+ def _rset_structure(self, eschema, entity_col):
+ eid_col = col = entity_col
+ rqlst = self.syntax_tree()
+ attr_cols = {}
+ rel_cols = {}
+ if rqlst.TYPE == 'select':
+ # UNION query, find the subquery from which this entity has been
+ # found
+ select, col = rqlst.locate_subquery(entity_col, eschema.type, self.args)
+ else:
+ select = rqlst
+ # take care, due to outer join support, we may find None
+ # values for non final relation
+ for i, attr, role in attr_desc_iterator(select, col, entity_col):
+ if role == 'subject':
+ rschema = eschema.subjrels[attr]
else:
- select = rqlst
- # take care, due to outer join support, we may find None
- # values for non final relation
- for i, attr, role in attr_desc_iterator(select, col, entity.cw_col):
- if role == 'subject':
- rschema = eschema.subjrels[attr]
- if rschema.final:
- if attr == 'eid':
- entity.eid = rowvalues[i]
- else:
- entity.cw_attr_cache[attr] = rowvalues[i]
- continue
+ rschema = eschema.objrels[attr]
+ if rschema.final:
+ if attr == 'eid':
+ eid_col = i
else:
- rschema = eschema.objrels[attr]
+ attr_cols[attr] = i
+ else:
rdef = eschema.rdef(attr, role)
# only keep value if it can't be multivalued
if rdef.role_cardinality(role) in '1?':
- if rowvalues[i] is None:
- if role == 'subject':
- rql = 'Any Y WHERE X %s Y, X eid %s'
- else:
- rql = 'Any Y WHERE Y %s X, X eid %s'
- rrset = ResultSet([], rql % (attr, entity.eid))
- rrset.req = req
- else:
- rrset = self._build_entity(row, i).as_rset()
- entity.cw_set_relation_cache(attr, role, rrset)
- return entity
+ rel_cols[(attr, role)] = i
+ return eid_col, attr_cols, rel_cols
@cached
def syntax_tree(self):
@@ -680,7 +695,7 @@
continue
if rootvar.name == rootmainvar.name:
continue
- if select is not rootselect:
+ if select is not rootselect and isinstance(rootvar, nodes.ColumnAlias):
term = select.selection[root.subquery_selection_index(select, i)]
var = _get_variable(term)
if var is None:
diff -r d8bb8f631d41 -r a4e667270dd4 schema.py
--- a/schema.py Mon Sep 26 18:37:23 2011 +0200
+++ b/schema.py Fri Dec 09 12:08:27 2011 +0100
@@ -544,10 +544,11 @@
rschema = self.add_relation_type(ybo.RelationType('identity'))
rschema.final = False
+ etype_name_re = r'[A-Z][A-Za-z0-9]*[a-z]+[A-Za-z0-9]*$'
def add_entity_type(self, edef):
edef.name = edef.name.encode()
edef.name = bw_normalize_etype(edef.name)
- if not re.match(r'[A-Z][A-Za-z0-9]*[a-z]+[0-9]*$', edef.name):
+ if not re.match(self.etype_name_re, edef.name):
raise BadSchemaDefinition(
'%r is not a valid name for an entity type. It should start '
'with an upper cased letter and be followed by at least a '
@@ -665,6 +666,8 @@
# these are overridden by set_log_methods below
# only defining here to prevent pylint from complaining
info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
+ # to be defined in concrete classes
+ full_rql = None
def __init__(self, expression, mainvars, eid):
self.eid = eid # eid of the entity representing this rql expression
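
The widened pattern keeps the previous requirements (leading capital, at least
one lowercase letter) but now also accepts entity type names ending with
capitals or digits, which the former `[0-9]*$` tail rejected; for instance:

    import re
    old = r'[A-Z][A-Za-z0-9]*[a-z]+[0-9]*$'
    new = r'[A-Z][A-Za-z0-9]*[a-z]+[A-Za-z0-9]*$'
    print bool(re.match(old, 'DataXML'))   # False: trailing capitals rejected
    print bool(re.match(new, 'DataXML'))   # True
    print bool(re.match(new, 'HTML'))      # False: still needs a lowercase letter
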
diff -r d8bb8f631d41 -r a4e667270dd4 schemas/__init__.py
diff -r d8bb8f631d41 -r a4e667270dd4 schemas/base.py
--- a/schemas/base.py Mon Sep 26 18:37:23 2011 +0200
+++ b/schemas/base.py Fri Dec 09 12:08:27 2011 +0100
@@ -21,7 +21,9 @@
_ = unicode
from yams.buildobjs import (EntityType, RelationType, RelationDefinition,
- SubjectRelation, String, Datetime, Password, Interval)
+ SubjectRelation,
+ String, TZDatetime, Datetime, Password, Interval,
+ Boolean)
from cubicweb.schema import (
RQLConstraint, WorkflowableEntityType, ERQLExpression, RRQLExpression,
PUB_SYSTEM_ENTITY_PERMS, PUB_SYSTEM_REL_PERMS, PUB_SYSTEM_ATTR_PERMS)
@@ -40,13 +42,7 @@
upassword = Password(required=True) # password is a reserved word for mysql
firstname = String(maxsize=64)
surname = String(maxsize=64)
- last_login_time = Datetime(description=_('last connection date'))
- # allowing an email to be the primary email of multiple entities is necessary for
- # test at least :-/
- primary_email = SubjectRelation('EmailAddress', cardinality='??',
- description=_('email address to use for notification'))
- use_email = SubjectRelation('EmailAddress', cardinality='*?', composite='subject')
-
+ last_login_time = TZDatetime(description=_('last connection date'))
in_group = SubjectRelation('CWGroup', cardinality='+*',
constraints=[RQLConstraint('NOT O name "owners"')],
description=_('groups grant permissions to the user'))
@@ -70,17 +66,35 @@
to indicate which is the preferred form.'))
class use_email(RelationType):
- """ """
+ fulltext_container = 'subject'
+
+
+class use_email_relation(RelationDefinition):
+ """user's email account"""
+ name = "use_email"
__permissions__ = {
'read': ('managers', 'users', 'guests',),
'add': ('managers', RRQLExpression('U has_update_permission S'),),
'delete': ('managers', RRQLExpression('U has_update_permission S'),),
}
- fulltext_container = 'subject'
+ subject = "CWUser"
+ object = "EmailAddress"
+ cardinality = '*?'
+ composite = 'subject'
+
-class primary_email(RelationType):
+class primary_email(RelationDefinition):
"""the prefered email"""
- __permissions__ = use_email.__permissions__
+ __permissions__ = {
+ 'read': ('managers', 'users', 'guests',),
+ 'add': ('managers', RRQLExpression('U has_update_permission S'),),
+ 'delete': ('managers', RRQLExpression('U has_update_permission S'),),
+ }
+ subject = "CWUser"
+ object = "EmailAddress"
+ cardinality = '??'
+    constraints = [RQLConstraint('S use_email O')]
+
class prefered_form(RelationType):
__permissions__ = {
@@ -238,7 +252,7 @@
name = String(required=True, unique=True, maxsize=128,
description=_('name of the cache'))
- timestamp = Datetime(default='NOW')
+ timestamp = TZDatetime(default='NOW')
class CWSource(EntityType):
@@ -264,7 +278,8 @@
# may changes when sources are specified
url = String(description=_('URLs from which content will be imported. You can put one url per line'))
parser = String(description=_('parser to use to extract entities from content retrieved at given URLs.'))
- latest_retrieval = Datetime(description=_('latest synchronization time'))
+ latest_retrieval = TZDatetime(description=_('latest synchronization time'))
+    in_synchronization = TZDatetime(description=_('start timestamp of the synchronization currently in progress, or NULL when no synchronization is in progress.'))
ENTITY_MANAGERS_PERMISSIONS = {
@@ -307,8 +322,8 @@
class cw_source(RelationDefinition):
__permissions__ = {
'read': ('managers', 'users', 'guests'),
- 'add': (),
- 'delete': (),
+ 'add': ('managers',),
+ 'delete': ('managers',),
}
subject = '*'
object = 'CWSource'
@@ -317,17 +332,31 @@
class CWSourceSchemaConfig(EntityType):
__permissions__ = ENTITY_MANAGERS_PERMISSIONS
- __unique_together__ = [('cw_for_source', 'cw_schema')]
cw_for_source = SubjectRelation(
'CWSource', inlined=True, cardinality='1*', composite='object',
__permissions__=RELATION_MANAGERS_PERMISSIONS)
- cw_schema = SubjectRelation(
- ('CWEType', 'CWRType', 'CWAttribute', 'CWRelation'),
- inlined=True, cardinality='1*', composite='object',
- __permissions__=RELATION_MANAGERS_PERMISSIONS)
options = String(description=_('allowed options depends on the source type'))
+class rtype_cw_schema(RelationDefinition):
+ __permissions__ = RELATION_MANAGERS_PERMISSIONS
+ name = 'cw_schema'
+ subject = 'CWSourceSchemaConfig'
+ object = ('CWEType', 'CWRType')
+ inlined = True
+ cardinality = '1*'
+ composite = 'object'
+ constraints = [RQLConstraint('NOT O final TRUE')]
+
+class rdef_cw_schema(RelationDefinition):
+ __permissions__ = RELATION_MANAGERS_PERMISSIONS
+ name = 'cw_schema'
+ subject = 'CWSourceSchemaConfig'
+ object = 'CWRelation'
+ inlined = True
+ cardinality = '1*'
+ composite = 'object'
+
# "abtract" relation types, no definition in cubicweb itself ###################
class identical_to(RelationType):
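
The use_email / primary_email rework follows the usual yams split: a
RelationType carries the properties shared by every definition of the relation
(here fulltext_container), while each RelationDefinition carries the
per-(subject, object) properties such as cardinality, composite and
permissions. A minimal sketch of the pattern, with hypothetical types:

    from yams.buildobjs import RelationType, RelationDefinition

    class knows(RelationType):
        """properties shared by all 'knows' definitions"""
        symmetric = True

    class person_knows_person(RelationDefinition):
        name = 'knows'
        subject = 'Person'
        object = 'Person'
        cardinality = '**'
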
diff -r d8bb8f631d41 -r a4e667270dd4 schemas/workflow.py
diff -r d8bb8f631d41 -r a4e667270dd4 selectors.py
--- a/selectors.py Mon Sep 26 18:37:23 2011 +0200
+++ b/selectors.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -255,12 +255,19 @@
* if `entity` is specified, return score for this entity's class
- * elif `row` is specified, return score for the class of the entity
- found in the specified cell, using column specified by `col` or 0
+ * elif `rset`, `select` and `filtered_variable` are specified, return score
+ for the possible classes for variable in the given rql :class:`Select`
+ node
+
+ * elif `rset` and `row` are specified, return score for the class of the
+ entity found in the specified cell, using column specified by `col` or 0
- * else return the sum of scores for each entity class found in the column
- specified specified by the `col` argument or in column 0 if not specified,
- unless:
+    * elif `rset` is specified, return the score for each entity class found in
+      the column specified by the `col` argument, or in column 0 if not
+      specified
+
+ When there are several classes to be evaluated, return the sum of scores for
+ each entity class unless:
- `once_is_enough` is False (the default) and some entity class is scored
to 0, in which case 0 is returned
@@ -276,32 +283,37 @@
self.accept_none = accept_none
@lltrace
- def __call__(self, cls, req, rset=None, row=None, col=0, accept_none=None,
+ def __call__(self, cls, req, rset=None, row=None, col=0, entity=None,
+ select=None, filtered_variable=None,
+ accept_none=None,
**kwargs):
- if kwargs.get('entity'):
- return self.score_class(kwargs['entity'].__class__, req)
+ if entity is not None:
+ return self.score_class(entity.__class__, req)
if not rset:
return 0
- score = 0
- if row is None:
+ if select is not None and filtered_variable is not None:
+ etypes = set(sol[filtered_variable.name] for sol in select.solutions)
+ elif row is None:
if accept_none is None:
accept_none = self.accept_none
- if not accept_none:
- if any(rset[i][col] is None for i in xrange(len(rset))):
- return 0
- for etype in rset.column_types(col):
- if etype is None: # outer join
- return 0
- escore = self.score(cls, req, etype)
- if not escore and not self.once_is_enough:
- return 0
- elif self.once_is_enough:
- return escore
- score += escore
+ if not accept_none and \
+ any(rset[i][col] is None for i in xrange(len(rset))):
+ return 0
+ etypes = rset.column_types(col)
else:
etype = rset.description[row][col]
- if etype is not None:
- score = self.score(cls, req, etype)
+ # may have None in rset.description on outer join
+ if etype is None or rset.rows[row][col] is None:
+ return 0
+ etypes = (etype,)
+ score = 0
+ for etype in etypes:
+ escore = self.score(cls, req, etype)
+ if not escore and not self.once_is_enough:
+ return 0
+ elif self.once_is_enough:
+ return escore
+ score += escore
return score
def score(self, cls, req, etype):
@@ -909,6 +921,7 @@
# hack hack hack
def __call__(self, cls, req, **kwargs):
+ # hack hack hack
if self.strict:
return EntitySelector.__call__(self, cls, req, **kwargs)
return EClassSelector.__call__(self, cls, req, **kwargs)
diff -r d8bb8f631d41 -r a4e667270dd4 server/__init__.py
--- a/server/__init__.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/__init__.py Fri Dec 09 12:08:27 2011 +0100
@@ -210,9 +210,9 @@
handler = config.migration_handler(schema, interactive=False,
cnx=cnx, repo=repo)
# install additional driver specific sql files
- handler.install_custom_sql_scripts(join(CW_SOFTWARE_ROOT, 'schemas'), driver)
- for directory in reversed(config.cubes_path()):
- handler.install_custom_sql_scripts(join(directory, 'schema'), driver)
+ handler.cmd_install_custom_sql_scripts()
+ for cube in reversed(config.cubes()):
+ handler.cmd_install_custom_sql_scripts(cube)
# serialize the schema
initialize_schema(config, schema, handler)
# yoo !
@@ -231,8 +231,7 @@
from cubicweb.server.schemaserial import serialize_schema
from cubicweb.server.session import hooks_control
session = mhandler.session
- paths = [p for p in config.cubes_path() + [config.apphome]
- if exists(join(p, 'migration'))]
+ cubes = config.cubes()
# deactivate every hooks but those responsible to set metadata
# so, NO INTEGRITY CHECKS are done, to have quicker db creation.
    # Active integrity is kept else we may have problems such as two default
@@ -240,18 +239,22 @@
with hooks_control(session, session.HOOKS_DENY_ALL, 'metadata',
'activeintegrity'):
# execute cubicweb's pre script
- mhandler.exec_event_script('pre%s' % event)
+ mhandler.cmd_exec_event_script('pre%s' % event)
# execute cubes pre script if any
- for path in reversed(paths):
- mhandler.exec_event_script('pre%s' % event, path)
+ for cube in reversed(cubes):
+ mhandler.cmd_exec_event_script('pre%s' % event, cube)
+ # execute instance's pre script (useful in tests)
+ mhandler.cmd_exec_event_script('pre%s' % event, apphome=True)
# enter instance'schema into the database
- session.set_pool()
+ session.set_cnxset()
serialize_schema(session, schema)
# execute cubicweb's post script
- mhandler.exec_event_script('post%s' % event)
+ mhandler.cmd_exec_event_script('post%s' % event)
# execute cubes'post script if any
- for path in reversed(paths):
- mhandler.exec_event_script('post%s' % event, path)
+ for cube in reversed(cubes):
+ mhandler.cmd_exec_event_script('post%s' % event, cube)
+ # execute instance's post script (useful in tests)
+ mhandler.cmd_exec_event_script('post%s' % event, apphome=True)
# sqlite'stored procedures have to be registered at connection opening time
diff -r d8bb8f631d41 -r a4e667270dd4 server/checkintegrity.py
--- a/server/checkintegrity.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/checkintegrity.py Fri Dec 09 12:08:27 2011 +0100
@@ -47,7 +47,7 @@
sqlcursor.execute('SELECT type, source FROM entities WHERE eid=%s' % eid)
try:
etype, source = sqlcursor.fetchone()
- except:
+ except Exception:
eids[eid] = False
return False
if source and source != 'system':
@@ -58,7 +58,7 @@
{'x': eid}):
eids[eid] = True
return True
- except: # TypeResolverError, Unauthorized...
+ except Exception: # TypeResolverError, Unauthorized...
pass
eids[eid] = False
return False
@@ -101,7 +101,7 @@
# deactivate modification_date hook since we don't want them
# to be updated due to the reindexation
repo = session.repo
- cursor = session.pool['system']
+ cursor = session.cnxset['system']
dbhelper = session.repo.system_source.dbhelper
if not dbhelper.has_fti_table(cursor):
print 'no text index table'
@@ -188,6 +188,18 @@
if fix:
session.system_sql('DELETE FROM entities WHERE eid=%s;' % eid)
notify_fixed(fix)
+ session.system_sql('INSERT INTO cw_source_relation (eid_from, eid_to) '
+ 'SELECT e.eid, s.cw_eid FROM entities as e, cw_CWSource as s '
+ 'WHERE s.cw_name=e.asource AND NOT EXISTS(SELECT 1 FROM cw_source_relation as cs '
+ ' WHERE cs.eid_from=e.eid AND cs.eid_to=s.cw_eid)')
+ session.system_sql('INSERT INTO is_relation (eid_from, eid_to) '
+ 'SELECT e.eid, s.cw_eid FROM entities as e, cw_CWEType as s '
+ 'WHERE s.cw_name=e.type AND NOT EXISTS(SELECT 1 FROM is_relation as cs '
+ ' WHERE cs.eid_from=e.eid AND cs.eid_to=s.cw_eid)')
+ session.system_sql('INSERT INTO is_instance_of_relation (eid_from, eid_to) '
+ 'SELECT e.eid, s.cw_eid FROM entities as e, cw_CWEType as s '
+ 'WHERE s.cw_name=e.type AND NOT EXISTS(SELECT 1 FROM is_instance_of_relation as cs '
+ ' WHERE cs.eid_from=e.eid AND cs.eid_to=s.cw_eid)')
print 'Checking entities tables'
for eschema in schema.entities():
if eschema.final:
@@ -283,10 +295,10 @@
rql = 'Any X WHERE NOT Y %s X, X is %s' % (rschema, etype)
for entity in session.execute(rql).entities():
print >> sys.stderr, '%s #%s is missing mandatory %s relation %s' % (
- entity.__regid__, entity.eid, role, rschema)
+ entity.__regid__, entity.eid, role, rschema),
if fix:
#if entity.cw_describe()['source']['uri'] == 'system': XXX
- entity.delete()
+ entity.cw_delete()
notify_fixed(fix)
@@ -304,9 +316,9 @@
rschema, rdef.subject)
for entity in session.execute(rql).entities():
print >> sys.stderr, '%s #%s is missing mandatory attribute %s' % (
- entity.__regid__, entity.eid, rschema)
+ entity.__regid__, entity.eid, rschema),
if fix:
- entity.delete()
+ entity.cw_delete()
notify_fixed(fix)
@@ -333,22 +345,6 @@
% (table, column, eidcolumn, eid),
{'v': default})
notify_fixed(fix)
- cursor = session.system_sql('SELECT MIN(%s) FROM %sCWUser;' % (eidcolumn,
- SQL_PREFIX))
- default_user_eid = cursor.fetchone()[0]
- assert default_user_eid is not None, 'no user defined !'
- for rel, default in ( ('owned_by', default_user_eid), ):
- cursor = session.system_sql("SELECT eid, type FROM entities "
- "WHERE source='system' AND NOT EXISTS "
- "(SELECT 1 FROM %s_relation WHERE eid_from=eid);"
- % rel)
- for eid, etype in cursor.fetchall():
- msg = ' %s with eid %s has no %s relation'
- print >> sys.stderr, msg % (etype, eid, rel),
- if fix:
- session.system_sql('INSERT INTO %s_relation VALUES (%s, %s) ;'
- % (rel, eid, default))
- notify_fixed(fix)
def check(repo, cnx, checks, reindex, fix, withpb=True):
@@ -356,11 +352,11 @@
using given user and password to locally connect to the repository
(no running cubicweb server needed)
"""
- session = repo._get_session(cnx.sessionid, setpool=True)
+ session = repo._get_session(cnx.sessionid, setcnxset=True)
# yo, launch checks
if checks:
eids_cache = {}
- with security_enabled(session, read=False): # ensure no read security
+ with security_enabled(session, read=False, write=False): # ensure no read security
for check in checks:
check_func = globals()['check_%s' % check]
check_func(repo.schema, session, eids_cache, fix=fix)
@@ -372,6 +368,6 @@
print 'WARNING: Diagnostic run, nothing has been corrected'
if reindex:
cnx.rollback()
- session.set_pool()
+ session.set_cnxset()
reindex_entities(repo.schema, session, withpb=withpb)
cnx.commit()
diff -r d8bb8f631d41 -r a4e667270dd4 server/edition.py
--- a/server/edition.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/edition.py Fri Dec 09 12:08:27 2011 +0100
@@ -68,6 +68,11 @@
super(EditedEntity, self).__delitem__(attr)
self.entity.cw_attr_cache.pop(attr, None)
+ def __copy__(self):
+ # default copy protocol fails in EditedEntity.__setitem__ because
+ # copied entity has no skip_security attribute at this point
+ return EditedEntity(self.entity, **self)
+
def pop(self, attr, *args):
# don't update skip_security by design (think to storage api)
assert not self.saved, 'too late to modify edited attributes'
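
With __copy__ defined, the standard copy protocol can be used on an
EditedEntity without tripping on the not-yet-set skip_security attribute; a
hedged sketch, assuming an `entity` at hand:

    from copy import copy

    edited = EditedEntity(entity, name=u'foo')
    clone = copy(edited)        # goes through EditedEntity.__copy__
    assert clone is not edited  # same edited attributes, distinct object
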
diff -r d8bb8f631d41 -r a4e667270dd4 server/hook.py
--- a/server/hook.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/hook.py Fri Dec 09 12:08:27 2011 +0100
@@ -69,12 +69,19 @@
~~~~~~~~~~
Operations are subclasses of the :class:`~cubicweb.server.hook.Operation` class
-that may be created by hooks and scheduled to happen just before (or after) the
-`precommit`, `postcommit` or `rollback` event. Hooks are being fired immediately
-on data operations, and it is sometime necessary to delay the actual work down
-to a time where all other hooks have run. Also while the order of execution of
-hooks is data dependant (and thus hard to predict), it is possible to force an
-order on operations.
+that may be created by hooks and scheduled to happen on `precommit`,
+`postcommit` or `rollback` event (i.e. respectively before/after a commit or
+before a rollback of a transaction).
+
+Hooks are fired immediately on data operations, and it is sometimes
+necessary to delay the actual work down to a time where we can expect all
+information to be there, or when all other hooks have run (though take care
+since operations may themselves trigger hooks). Also, while the order of
+execution of hooks is data dependent (and thus hard to predict), it is
+possible to force an order on operations.
+
+So, for cases where you may miss some information that is only set later in
+the transaction, you should instantiate an operation in the hook.
Operations may be used to:
@@ -248,7 +255,7 @@
from logging import getLogger
from itertools import chain
-from logilab.common.decorators import classproperty
+from logilab.common.decorators import classproperty, cached
from logilab.common.deprecation import deprecated, class_renamed
from logilab.common.logging_ext import set_log_methods
@@ -257,7 +264,7 @@
from cubicweb.cwvreg import CWRegistry, VRegistry
from cubicweb.selectors import (objectify_selector, lltrace, ExpectedValueSelector,
is_instance)
-from cubicweb.appobject import AppObject
+from cubicweb.appobject import AppObject, NotSelector, OrSelector
from cubicweb.server.session import security_enabled
ENTITIES_HOOKS = set(('before_add_entity', 'after_add_entity',
@@ -318,15 +325,83 @@
else:
entities = []
eids_from_to = []
+ pruned = self.get_pruned_hooks(session, event,
+ entities, eids_from_to, kwargs)
# by default, hooks are executed with security turned off
with security_enabled(session, read=False):
for _kwargs in _iter_kwargs(entities, eids_from_to, kwargs):
- hooks = sorted(self.possible_objects(session, **_kwargs),
+ hooks = sorted(self.filtered_possible_objects(pruned, session, **_kwargs),
key=lambda x: x.order)
with security_enabled(session, write=False):
for hook in hooks:
- #print hook.category, hook.__regid__
- hook()
+ hook()
+
+ def get_pruned_hooks(self, session, event, entities, eids_from_to, kwargs):
+ """return a set of hooks that should not be considered by filtered_possible objects
+
+        the idea is to make a first pass over all the hooks in the
+        registry and to put some of them in a pruned set. The
+        pruned hooks are the ones which:
+
+ * are disabled at the session level
+ * have a match_rtype or an is_instance selector which does not
+ match the rtype / etype of the relations / entities for
+ which we are calling the hooks. This works because the
+ repository calls the hooks grouped by rtype or by etype when
+          using the entities or eids_from_to keyword arguments
+
+ Only hooks with a simple selector or an AndSelector of simple
+ selectors are considered for disabling.
+
+ """
+ if 'entity' in kwargs:
+ entities = [kwargs['entity']]
+ if len(entities):
+ look_for_selector = is_instance
+ etype = entities[0].__regid__
+ elif 'rtype' in kwargs:
+ look_for_selector = match_rtype
+ etype = None
+ else: # nothing to prune, how did we get there ???
+ return set()
+ cache_key = (event, kwargs.get('rtype'), etype)
+ pruned = session.pruned_hooks_cache.get(cache_key)
+ if pruned is not None:
+ return pruned
+ pruned = set()
+ session.pruned_hooks_cache[cache_key] = pruned
+ if look_for_selector is not None:
+ for id, hooks in self.iteritems():
+ for hook in hooks:
+ enabled_cat, main_filter = hook.filterable_selectors()
+ if enabled_cat is not None:
+ if not enabled_cat(hook, session):
+ pruned.add(hook)
+ continue
+ if main_filter is not None:
+ if isinstance(main_filter, match_rtype) and \
+ (main_filter.frometypes is not None or \
+ main_filter.toetypes is not None):
+ continue
+ first_kwargs = _iter_kwargs(entities, eids_from_to, kwargs).next()
+ if not main_filter(hook, session, **first_kwargs):
+ pruned.add(hook)
+ return pruned
+
+
+ def filtered_possible_objects(self, pruned, *args, **kwargs):
+ for appobjects in self.itervalues():
+ if pruned:
+ filtered_objects = [obj for obj in appobjects if obj not in pruned]
+ if not filtered_objects:
+ continue
+ else:
+ filtered_objects = appobjects
+ obj = self._select_best(filtered_objects,
+ *args, **kwargs)
+ if obj is None:
+ continue
+ yield obj
class HooksManager(object):
def __init__(self, vreg):
@@ -462,8 +537,17 @@
# XXX deprecated
enabled = True
# stop pylint from complaining about missing attributes in Hooks classes
- eidfrom = eidto = entity = rtype = None
+ eidfrom = eidto = entity = rtype = repo = None
+ @classmethod
+ @cached
+ def filterable_selectors(cls):
+ search = cls.__select__.search_selector
+ if search((NotSelector, OrSelector)):
+ return None, None
+ enabled_cat = search(enabled_category)
+ main_filter = search((is_instance, match_rtype))
+ return enabled_cat, main_filter
@classmethod
def check_events(cls):
@@ -496,7 +580,7 @@
warn('[3.6] %s: accepts is deprecated, define proper __select__'
% classid(cls), DeprecationWarning)
rtypes = []
- for ertype in cls.accepts:
+ for ertype in cls.accepts: # pylint: disable=E1101
if ertype.islower():
rtypes.append(ertype)
else:
@@ -517,6 +601,7 @@
if hasattr(self, 'call'):
warn('[3.6] %s: call is deprecated, implement __call__'
% classid(self.__class__), DeprecationWarning)
+ # pylint: disable=E1101
if self.event.endswith('_relation'):
self.call(self._cw, self.eidfrom, self.rtype, self.eidto)
elif 'delete' in self.event:
@@ -544,7 +629,7 @@
    Notice there is no default behaviour defined when a watched relation is
deleted, you'll have to handle this by yourself.
- You usually want to use the :class:`match_rtype_sets` selector on concret
+ You usually want to use the :class:`match_rtype_sets` selector on concrete
classes.
"""
events = ('after_add_relation',)
@@ -653,8 +738,8 @@
operation. These keyword arguments will be accessible as attributes from the
operation instance.
- An operation is triggered on connections pool events related to
- commit / rollback transations. Possible events are:
+ An operation is triggered on connections set events related to commit /
+    rollback transactions. Possible events are:
* `precommit`:
@@ -724,11 +809,11 @@
if event == 'postcommit_event' and hasattr(self, 'commit_event'):
warn('[3.10] %s: commit_event method has been replaced by postcommit_event'
% classid(self.__class__), DeprecationWarning)
- self.commit_event()
+ self.commit_event() # pylint: disable=E1101
getattr(self, event)()
def precommit_event(self):
- """the observed connections pool is preparing a commit"""
+ """the observed connections set is preparing a commit"""
def revertprecommit_event(self):
"""an error went when pre-commiting this operation or a later one
@@ -738,14 +823,13 @@
"""
def rollback_event(self):
- """the observed connections pool has been rollbacked
+ """the observed connections set has been rollbacked
- do nothing by default, the operation will just be removed from the pool
- operation list
+ do nothing by default
"""
def postcommit_event(self):
- """the observed connections pool has committed"""
+ """the observed connections set has committed"""
@property
@deprecated('[3.6] use self.session.user')
@@ -1009,6 +1093,9 @@
class RQLPrecommitOperation(Operation):
+ # to be defined in concrete classes
+ rqls = None
+
def precommit_event(self):
execute = self.session.execute
for rql in self.rqls:
@@ -1028,7 +1115,7 @@
data_key = 'neweids'
def rollback_event(self):
- """the observed connections pool has been rollbacked,
+ """the observed connections set has been rollbacked,
remove inserted eid from repository type/source cache
"""
try:
@@ -1042,7 +1129,7 @@
"""
data_key = 'pendingeids'
def postcommit_event(self):
- """the observed connections pool has been rollbacked,
+ """the observed connections set has been rollbacked,
remove inserted eid from repository type/source cache
"""
try:
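
As the reworked docstring above advises, a hook that needs information only
available at the end of the transaction should instantiate an operation rather
than doing the work inline. A minimal sketch, assuming the documented
Operation(session, **kwargs) signature (keyword arguments become attributes on
the operation instance); the hook and regid are hypothetical:

    from cubicweb.server.hook import Hook, Operation
    from cubicweb.selectors import is_instance

    class CheckEntityOp(Operation):
        def precommit_event(self):
            # runs once, just before commit, after all hooks have fired
            self.session.execute('Any X WHERE X eid %(x)s',
                                 {'x': self.entity.eid})

    class MyHook(Hook):
        __regid__ = 'myapp.myhook'
        __select__ = Hook.__select__ & is_instance('CWUser')
        events = ('after_add_entity',)
        def __call__(self):
            CheckEntityOp(self._cw, entity=self.entity)
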
diff -r d8bb8f631d41 -r a4e667270dd4 server/migractions.py
--- a/server/migractions.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/migractions.py Fri Dec 09 12:08:27 2011 +0100
@@ -50,7 +50,7 @@
from yams.schema2sql import eschema2sql, rschema2sql
from yams.schema import RelationDefinitionSchema
-from cubicweb import AuthenticationError, ExecutionError
+from cubicweb import CW_SOFTWARE_ROOT, AuthenticationError, ExecutionError
from cubicweb.selectors import is_instance
from cubicweb.schema import (ETYPE_NAME_MAP, META_RTYPES, VIRTUAL_RTYPES,
PURE_VIRTUAL_RTYPES,
@@ -153,7 +153,7 @@
migrscript, funcname, *args, **kwargs)
except ExecutionError, err:
print >> sys.stderr, "-> %s" % err
- except:
+ except BaseException:
self.rollback()
raise
@@ -201,7 +201,6 @@
versions = repo.get_versions()
for cube, version in versions.iteritems():
version_file.write('%s %s\n' % (cube, version))
-
if not failed:
bkup = tarfile.open(backupfile, 'w|gz')
for filename in os.listdir(tmpdir):
@@ -242,7 +241,7 @@
written_format = format_file.readline().strip()
if written_format in ('portable', 'native'):
format = written_format
- self.config.open_connections_pools = False
+ self.config.init_cnxset_pool = False
repo = self.repo_connect()
for source in repo.sources:
if systemonly and source.uri != 'system':
@@ -255,7 +254,7 @@
raise SystemExit(1)
shutil.rmtree(tmpdir)
# call hooks
- repo.open_connections_pools()
+ repo.init_cnxset_pool()
repo.hm.call_hooks('server_restore', repo=repo, timestamp=backupfile)
print '-> database restored.'
@@ -288,7 +287,7 @@
except (KeyboardInterrupt, EOFError):
print 'aborting...'
sys.exit(0)
- self.session.keep_pool_mode('transaction')
+ self.session.keep_cnxset_mode('transaction')
self.session.data['rebuild-infered'] = False
return self._cnx
@@ -296,10 +295,10 @@
def session(self):
if self.config is not None:
session = self.repo._get_session(self.cnx.sessionid)
- if session.pool is None:
+ if session.cnxset is None:
session.set_read_security(False)
session.set_write_security(False)
- session.set_pool()
+ session.set_cnxset()
return session
# no access to session on remote instance
return None
@@ -308,13 +307,13 @@
if hasattr(self, '_cnx'):
self._cnx.commit()
if self.session:
- self.session.set_pool()
+ self.session.set_cnxset()
def rollback(self):
if hasattr(self, '_cnx'):
self._cnx.rollback()
if self.session:
- self.session.set_pool()
+ self.session.set_cnxset()
def rqlexecall(self, rqliter, ask_confirm=False):
for rql, kwargs in rqliter:
@@ -351,10 +350,17 @@
"""cached constraint types mapping"""
return ss.cstrtype_mapping(self._cw)
- def exec_event_script(self, event, cubepath=None, funcname=None,
- *args, **kwargs):
- if cubepath:
+ def cmd_exec_event_script(self, event, cube=None, funcname=None,
+ *args, **kwargs):
+ """execute a cube event scripts `migration/.py` where event
+ is one of 'precreate', 'postcreate', 'preremove' and 'postremove'.
+ """
+ assert event in ('precreate', 'postcreate', 'preremove', 'postremove')
+ if cube:
+ cubepath = self.config.cube_dir(cube)
apc = osp.join(cubepath, 'migration', '%s.py' % event)
+ elif kwargs.pop('apphome', False):
+ apc = osp.join(self.config.apphome, 'migration', '%s.py' % event)
else:
apc = osp.join(self.config.migration_scripts_dir(), '%s.py' % event)
if osp.exists(apc):
@@ -373,19 +379,31 @@
if self.config.free_wheel:
self.cmd_reactivate_verification_hooks()
- def install_custom_sql_scripts(self, directory, driver):
+ def cmd_install_custom_sql_scripts(self, cube=None):
+ """install a cube custom sql scripts `schema/*..sql` where
+ depends on the instance main database backend (eg 'postgres',
+ 'mysql'...)
+ """
+ driver = self.repo.system_source.dbdriver
+ if cube is None:
+ directory = osp.join(CW_SOFTWARE_ROOT, 'schemas')
+ else:
+ directory = osp.join(self.config.cube_dir(cube), 'schema')
+ sql_scripts = []
for fpath in glob(osp.join(directory, '*.sql.%s' % driver)):
newname = osp.basename(fpath).replace('.sql.%s' % driver,
'.%s.sql' % driver)
warn('[3.5.6] rename %s into %s' % (fpath, newname),
DeprecationWarning)
+ sql_scripts.append(fpath)
+ sql_scripts += glob(osp.join(directory, '*.%s.sql' % driver))
+ for fpath in sql_scripts:
print '-> installing', fpath
- sqlexec(open(fpath).read(), self.session.system_sql, False,
- delimiter=';;')
- for fpath in glob(osp.join(directory, '*.%s.sql' % driver)):
- print '-> installing', fpath
- sqlexec(open(fpath).read(), self.session.system_sql, False,
- delimiter=';;')
+ try:
+ sqlexec(open(fpath).read(), self.session.system_sql, False,
+ delimiter=';;')
+ except Exception, exc:
+ print '-> ERROR:', exc, ', skipping', fpath
# schema synchronization internals ########################################
@@ -657,10 +675,9 @@
new = set()
# execute pre-create files
driver = self.repo.system_source.dbdriver
- for pack in reversed(newcubes):
- cubedir = self.config.cube_dir(pack)
- self.install_custom_sql_scripts(osp.join(cubedir, 'schema'), driver)
- self.exec_event_script('precreate', cubedir)
+ for cube in reversed(newcubes):
+ self.cmd_install_custom_sql_scripts(cube)
+ self.cmd_exec_event_script('precreate', cube)
# add new entity and relation types
for rschema in newcubes_schema.relations():
if not rschema in self.repo.schema:
@@ -683,8 +700,8 @@
self.cmd_add_relation_definition(str(fromtype), rschema.type,
str(totype))
# execute post-create files
- for pack in reversed(newcubes):
- self.exec_event_script('postcreate', self.config.cube_dir(pack))
+ for cube in reversed(newcubes):
+ self.cmd_exec_event_script('postcreate', cube)
self.commit()
def cmd_remove_cube(self, cube, removedeps=False):
@@ -696,8 +713,8 @@
removedcubes_schema = self.config.load_schema(construction_mode='non-strict')
reposchema = self.repo.schema
# execute pre-remove files
- for pack in reversed(removedcubes):
- self.exec_event_script('preremove', self.config.cube_dir(pack))
+ for cube in reversed(removedcubes):
+ self.cmd_exec_event_script('preremove', cube)
# remove cubes'entity and relation types
for rschema in fsschema.relations():
if not rschema in removedcubes_schema and rschema in reposchema:
@@ -718,7 +735,7 @@
str(fromtype), rschema.type, str(totype))
# execute post-remove files
for cube in reversed(removedcubes):
- self.exec_event_script('postremove', self.config.cube_dir(cube))
+ self.cmd_exec_event_script('postremove', cube)
self.rqlexec('DELETE CWProperty X WHERE X pkey %(pk)s',
{'pk': u'system.version.'+cube}, ask_confirm=False)
self.commit()
@@ -1364,7 +1381,7 @@
prop = self.rqlexec(
'CWProperty X WHERE X pkey %(k)s, NOT X for_user U',
{'k': pkey}, ask_confirm=False).get_entity(0, 0)
- except:
+ except Exception:
self.cmd_create_entity('CWProperty', pkey=unicode(pkey), value=value)
else:
prop.set_attributes(value=value)
@@ -1375,7 +1392,7 @@
def _cw(self):
session = self.session
if session is not None:
- session.set_pool()
+ session.set_cnxset()
return session
return self.cnx.request()
@@ -1482,14 +1499,14 @@
if not ask_confirm or self.confirm('Execute sql: %s ?' % sql):
try:
cu = self.session.system_sql(sql, args)
- except:
+ except Exception:
ex = sys.exc_info()[1]
if self.confirm('Error: %s\nabort?' % ex, pdb=True):
raise
return
try:
return cu.fetchall()
- except:
+ except Exception:
# no result to fetch
return
@@ -1530,15 +1547,16 @@
"""
rschema = self.repo.schema.rschema(attr)
oldtype = rschema.objects(etype)[0]
- rdefeid = rschema.rproperty(etype, oldtype, 'eid')
+ rdefeid = rschema.rdef(etype, oldtype).eid
+ allownull = rschema.rdef(etype, oldtype).cardinality[0] != '1'
sql = ("UPDATE cw_CWAttribute "
"SET cw_to_entity=(SELECT cw_eid FROM cw_CWEType WHERE cw_name='%s')"
"WHERE cw_eid=%s") % (newtype, rdefeid)
self.sqlexec(sql, ask_confirm=False)
dbhelper = self.repo.system_source.dbhelper
sqltype = dbhelper.TYPE_MAPPING[newtype]
- sql = 'ALTER TABLE cw_%s ALTER COLUMN cw_%s TYPE %s' % (etype, attr, sqltype)
- self.sqlexec(sql, ask_confirm=False)
+ cursor = self.session.cnxset[self.repo.system_source.uri]
+ dbhelper.change_col_type(cursor, 'cw_%s' % etype, 'cw_%s' % attr, sqltype, allownull)
if commit:
self.commit()
@@ -1561,8 +1579,7 @@
This may be useful on accidental desync between the repository schema
and a sql database
"""
- dbhelper = self.repo.system_source.dbhelper
- tablesql = rschema2sql(dbhelper, self.repo.schema.rschema(rtype))
+ tablesql = rschema2sql(self.repo.schema.rschema(rtype))
for sql in tablesql.split(';'):
if sql.strip():
self.sqlexec(sql)
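
The exec_event_script / install_custom_sql_scripts renames add the cmd_ prefix
which, by the migration handler convention, exposes the methods to migration
scripts without the prefix. Assuming that convention, a migration script could
call (cube name hypothetical):

    exec_event_script('postcreate', cube='blog')
    install_custom_sql_scripts(cube='blog')
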
diff -r d8bb8f631d41 -r a4e667270dd4 server/msplanner.py
--- a/server/msplanner.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/msplanner.py Fri Dec 09 12:08:27 2011 +0100
@@ -291,6 +291,8 @@
self.sourcesterms = self._sourcesterms = {}
# source : {relation: set(child variable and constant)}
self._crossrelations = {}
+ # term : set(sources)
+ self._discarded_sources = {}
# dictionary of variables and constants which are linked to each other
# using a non final relation supported by multiple sources (crossed or
# not).
@@ -370,7 +372,7 @@
eid = const.eval(self.plan.args)
source = self._session.source_from_eid(eid)
if (source is self.system_source
- or (hasrel and
+ or (hasrel and varobj._q_invariant and
not any(source.support_relation(r.r_type)
for r in varobj.stinfo['relations']
if not r is rel))):
@@ -539,6 +541,7 @@
if invariant and source is self.system_source:
continue
self._remove_source_term(source, lhs)
+ self._discarded_sources.setdefault(lhs, set()).add(source)
usesys = self.system_source not in sources
else:
for source, terms in sourcesterms.items():
@@ -546,6 +549,7 @@
if invariant and source is self.system_source:
continue
self._remove_source_term(source, lhs)
+ self._discarded_sources.setdefault(lhs, set()).add(source)
usesys = self.system_source in sources
if rel is None or (len(var.stinfo['relations']) == 2 and
not var.stinfo['selected']):
@@ -697,6 +701,12 @@
rel in self._crossrelations[s]))
if invalid_sources:
self._remove_sources(term, invalid_sources)
+ discarded = self._discarded_sources.get(term)
+ if discarded is not None and not any(x[0] for x in (termsources-invalid_sources)
+ if not x[0] in discarded):
+                raise BadRQLQuery('relation %s cannot be crossed but %s and %s should '
+                                  'come from different sources' %
+ (rel.r_type, term.as_string(), oterm.as_string()))
# if term is a rewritten const, we can apply the same changes to
# all other consts inserted from the same original variable
for const in self._const_vars.get(term, ()):
@@ -1438,7 +1448,7 @@
for step in steps
for select in step.union.children):
if temptable:
- step = IntersectFetchStep(plan) # XXX not implemented
+ raise NotImplementedError('oops') # IntersectFetchStep(plan)
else:
step = IntersectStep(plan)
else:
@@ -1623,17 +1633,7 @@
def visit_relation(self, node, newroot, terms):
if not node.is_types_restriction():
if not node in terms and node in self.skip and self.solindices.issubset(self.skip[node]):
- if not self.schema.rschema(node.r_type).final:
- # can't really skip the relation if one variable is selected
- # and only referenced by this relation
- for vref in node.iget_nodes(VariableRef):
- stinfo = vref.variable.stinfo
- if stinfo['selected'] and len(stinfo['relations']) == 1:
- break
- else:
- return None, node
- else:
- return None, node
+ return None, node
if not self._relation_supported(node):
raise UnsupportedBranch()
# don't copy type restriction unless this is the only supported relation
@@ -1650,7 +1650,7 @@
self._pending_vrefs = []
try:
res = self.visit_default(node, newroot, terms)[0]
- except:
+ except Exception:
# when a relation isn't supported, we should dereference potentially
# introduced variable refs
for vref in self._pending_vrefs:
diff -r d8bb8f631d41 -r a4e667270dd4 server/pool.py
--- a/server/pool.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/pool.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -15,19 +15,18 @@
#
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see .
-"""CubicWeb server connections pool : the repository has a limited number of
-connections pools, each of them dealing with a set of connections on each source
-used by the repository. A connections pools (`ConnectionsPool`) is an
-abstraction for a group of connection to each source.
+"""CubicWeb server connections set : the repository has a limited number of
+:class:`ConnectionsSet` (defined in configuration, default to 4). Each of them
+hold a connection for each source used by the repository.
"""
__docformat__ = "restructuredtext en"
import sys
-class ConnectionsPool(object):
+class ConnectionsSet(object):
"""handle connections on a set of sources, at some point associated to a
- user session
+ :class:`Session`
"""
def __init__(self, sources):
@@ -74,40 +73,40 @@
# catch exceptions, rollback other sources anyway
try:
cnx.rollback()
- except:
+ except Exception:
source.critical('rollback error', exc_info=sys.exc_info())
# error on rollback, the connection is much probably in a really
# bad state. Replace it by a new one.
self.reconnect(source)
def close(self, i_know_what_i_do=False):
- """close all connections in the pool"""
+ """close all connections in the set"""
if i_know_what_i_do is not True: # unexpected closing safety belt
- raise RuntimeError('pool shouldn\'t be closed')
+ raise RuntimeError('connections set shouldn\'t be closed')
for cu in self._cursors.values():
try:
cu.close()
- except:
+ except Exception:
continue
for _, cnx in self.source_cnxs.values():
try:
cnx.close()
- except:
+ except Exception:
continue
# internals ###############################################################
- def pool_set(self):
- """pool is being set"""
+ def cnxset_set(self):
+ """connections set is being set on a session"""
self.check_connections()
- def pool_reset(self):
- """pool is being reseted"""
+ def cnxset_freed(self):
+ """connections set is being freed from a session"""
for source, cnx in self.source_cnxs.values():
- source.pool_reset(cnx)
+ source.cnxset_freed(cnx)
def sources(self):
- """return the source objects handled by this pool"""
+ """return the source objects handled by this connections set"""
# implementation details of flying insert requires the system source
# first
yield self.source_cnxs['system'][0]
@@ -136,7 +135,7 @@
try:
# properly close existing connection if any
self.source_cnxs[source.uri][1].close()
- except:
+ except Exception:
pass
source.info('trying to reconnect')
self.source_cnxs[source.uri] = (source, source.get_connection())
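
The pooling mechanics behind these renamed classes appear in the repository.py
hunks below: connections sets are parked in a Queue and borrowed/returned by
sessions. A condensed sketch of that behaviour, assuming only the standard
library (MiniRepo and the plain-object sources are illustrative, not CubicWeb
API):

    import Queue

    class ConnectionsSet(object):
        """hold one connection per source"""
        def __init__(self, sources):
            # sources are assumed to expose uri / get_connection()
            self.source_cnxs = dict((s.uri, (s, s.get_connection()))
                                    for s in sources)

    class MiniRepo(object):
        def __init__(self, sources, size=4):
            # limited number of connections sets, as described above
            self._cnxsets_pool = Queue.Queue()
            for _ in xrange(size):
                self._cnxsets_pool.put_nowait(ConnectionsSet(sources))

        def _get_cnxset(self):
            try:
                # block at most 5 seconds, like the repository does
                return self._cnxsets_pool.get(True, timeout=5)
            except Queue.Empty:
                raise Exception('no connections set available after 5 secs')

        def _free_cnxset(self, cnxset):
            self._cnxsets_pool.put_nowait(cnxset)
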
diff -r d8bb8f631d41 -r a4e667270dd4 server/querier.py
--- a/server/querier.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/querier.py Fri Dec 09 12:08:27 2011 +0100
@@ -169,7 +169,7 @@
# session executing the query
self.session = session
# quick reference to the system source
- self.syssource = session.pool.source('system')
+ self.syssource = session.cnxset.source('system')
# execution steps
self.steps = []
# index of temporary tables created during execution
@@ -666,7 +666,7 @@
if server.DEBUG & (server.DBG_RQL | server.DBG_SQL):
if server.DEBUG & (server.DBG_MORE | server.DBG_SQL):
print '*'*80
- print 'querier input', rql, args
+ print 'querier input', repr(rql), repr(args)
# parse the query and binds variables
cachekey = rql
try:
@@ -734,8 +734,8 @@
# transaction must have been rolled back
#
# notes:
- # * we should not reset the pool here, since we don't want the
- # session to loose its pool during processing
+ # * we should not reset the connections set here, since we don't want the
+    #   session to lose it during processing
# * don't rollback if we're in the commit process, will be handled
# by the session
if session.commit_state is None:
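
The repr() change above matters because rql strings are unicode and may carry
trailing whitespace that a bare print would hide. A quick illustration with
hypothetical values:

    rql = u'Any X WHERE X eid %(x)s '
    args = {'x': 42}
    print 'querier input', rql, args
    # -> querier input Any X WHERE X eid %(x)s  {'x': 42}
    print 'querier input', repr(rql), repr(args)
    # -> querier input u'Any X WHERE X eid %(x)s ' {'x': 42}
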
diff -r d8bb8f631d41 -r a4e667270dd4 server/repository.py
--- a/server/repository.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/repository.py Fri Dec 09 12:08:27 2011 +0100
@@ -154,13 +154,13 @@
self.sources_by_uri = {'system': self.system_source}
# querier helper, need to be created after sources initialization
self.querier = querier.QuerierHelper(self, self.schema)
- # cache eid -> type / source
+ # cache eid -> (type, physical source, extid, actual source)
self._type_source_cache = {}
# cache (extid, source uri) -> eid
self._extid_cache = {}
- # open some connections pools
- if config.open_connections_pools:
- self.open_connections_pools()
+ # open some connections set
+ if config.init_cnxset_pool:
+ self.init_cnxset_pool()
@onevent('after-registry-reload', self)
def fix_user_classes(self):
usercls = self.vreg['etypes'].etype_class('CWUser')
@@ -168,10 +168,10 @@
if not isinstance(session.user, InternalManager):
session.user.__class__ = usercls
- def open_connections_pools(self):
+ def init_cnxset_pool(self):
config = self.config
- self._available_pools = Queue.Queue()
- self._available_pools.put_nowait(pool.ConnectionsPool(self.sources))
+ self._cnxsets_pool = Queue.Queue()
+ self._cnxsets_pool.put_nowait(pool.ConnectionsSet(self.sources))
if config.quick_start:
# quick start, usually only to get a minimal repository to get cubes
# information (eg dump/restore/...)
@@ -213,14 +213,14 @@
# configurate tsearch according to postgres version
for source in self.sources:
source.init_creating()
- # close initialization pool and reopen fresh ones for proper
+        # close initialization connections set and reopen fresh ones for proper
# initialization now that we know cubes
- self._get_pool().close(True)
- # list of available pools (we can't iterate on Queue instance)
- self.pools = []
+ self._get_cnxset().close(True)
+        # list of available cnxsets (we can't iterate on a Queue instance)
+ self.cnxsets = []
for i in xrange(config['connections-pool-size']):
- self.pools.append(pool.ConnectionsPool(self.sources))
- self._available_pools.put_nowait(self.pools[-1])
+ self.cnxsets.append(pool.ConnectionsSet(self.sources))
+ self._cnxsets_pool.put_nowait(self.cnxsets[-1])
if config.quick_start:
config.init_cubes(self.get_cubes())
self.hm = hook.HooksManager(self.vreg)
@@ -244,7 +244,7 @@
self.sources_by_eid[sourceent.eid] = self.system_source
self.system_source.init(True, sourceent)
continue
- self.add_source(sourceent, add_to_pools=False)
+ self.add_source(sourceent, add_to_cnxsets=False)
finally:
session.close()
@@ -253,7 +253,7 @@
'can_cross_relation', 'rel_type_sources'):
clear_cache(self, cache)
- def add_source(self, sourceent, add_to_pools=True):
+ def add_source(self, sourceent, add_to_cnxsets=True):
source = self.get_source(sourceent.type, sourceent.name,
sourceent.host_config, sourceent.eid)
self.sources_by_eid[sourceent.eid] = source
@@ -261,15 +261,15 @@
if self.config.source_enabled(source):
# call source's init method to complete their initialisation if
# needed (for instance looking for persistent configuration using an
- # internal session, which is not possible until pools have been
+ # internal session, which is not possible until connections sets have been
# initialized)
source.init(True, sourceent)
if not source.copy_based_source:
self.sources.append(source)
self.querier.set_planner()
- if add_to_pools:
- for pool in self.pools:
- pool.add_source(source)
+ if add_to_cnxsets:
+ for cnxset in self.cnxsets:
+ cnxset.add_source(source)
else:
source.init(False, sourceent)
self._clear_planning_caches()
@@ -280,8 +280,8 @@
if self.config.source_enabled(source) and not source.copy_based_source:
self.sources.remove(source)
self.querier.set_planner()
- for pool in self.pools:
- pool.remove_source(source)
+ for cnxset in self.cnxsets:
+ cnxset.remove_source(source)
self._clear_planning_caches()
def get_source(self, type, uri, source_config, eid=None):
@@ -344,7 +344,7 @@
self.looping_task(cleanup_session_interval, self.clean_sessions)
assert isinstance(self._looping_tasks, list), 'already started'
for i, (interval, func, args) in enumerate(self._looping_tasks):
- self._looping_tasks[i] = task = utils.LoopTask(interval, func, args)
+ self._looping_tasks[i] = task = utils.LoopTask(self, interval, func, args)
self.info('starting task %s with interval %.2fs', task.name,
interval)
task.start()
@@ -368,25 +368,25 @@
t.start()
#@locked
- def _get_pool(self):
+ def _get_cnxset(self):
try:
- return self._available_pools.get(True, timeout=5)
+ return self._cnxsets_pool.get(True, timeout=5)
except Queue.Empty:
- raise Exception('no pool available after 5 secs, probably either a '
+ raise Exception('no connections set available after 5 secs, probably either a '
'bug in code (too many uncommited/rollbacked '
'connections) or too much load on the server (in '
'which case you can try to set a bigger '
- 'connections pools size)')
+ 'connections pool size)')
- def _free_pool(self, pool):
- self._available_pools.put_nowait(pool)
+ def _free_cnxset(self, cnxset):
+ self._cnxsets_pool.put_nowait(cnxset)
def pinfo(self):
- # XXX: session.pool is accessed from a local storage, would be interesting
- # to see if there is a pool set in any thread specific data)
- return '%s: %s (%s)' % (self._available_pools.qsize(),
+        # XXX: session.cnxset is accessed from thread-local storage; it would be
+        #      interesting to see if there is a cnxset set in any thread-specific data
+ return '%s: %s (%s)' % (self._cnxsets_pool.qsize(),
','.join(session.user.login for session in self._sessions.values()
- if session.pool),
+ if session.cnxset),
threading.currentThread())
def shutdown(self):
"""called on server stop event to properly close opened sessions and
@@ -409,12 +409,12 @@
or self.config.quick_start):
self.hm.call_hooks('server_shutdown', repo=self)
self.close_sessions()
- while not self._available_pools.empty():
- pool = self._available_pools.get_nowait()
+ while not self._cnxsets_pool.empty():
+ cnxset = self._cnxsets_pool.get_nowait()
try:
- pool.close(True)
- except:
- self.exception('error while closing %s' % pool)
+ cnxset.close(True)
+ except Exception:
+ self.exception('error while closing %s' % cnxset)
continue
if self.pyro_registered:
if self._use_pyrons():
@@ -496,7 +496,7 @@
results['nb_open_sessions'] = len(self._sessions)
results['nb_active_threads'] = threading.activeCount()
results['looping_tasks'] = ', '.join(str(t) for t in self._looping_tasks)
- results['available_pools'] = self._available_pools.qsize()
+ results['available_cnxsets'] = self._cnxsets_pool.qsize()
results['threads'] = ', '.join(sorted(str(t) for t in threading.enumerate()))
return results
@@ -505,12 +505,7 @@
This is a public method, not requiring a session id.
"""
- try:
- # necessary to support pickling used by pyro
- self.schema.__hashmode__ = 'pickle'
- return self.schema
- finally:
- self.schema.__hashmode__ = None
+ return self.schema
def get_cubes(self):
"""Return the list of cubes used by this instance.
@@ -535,12 +530,12 @@
# XXX we may want to check we don't give sensible information
if foreid is None:
return self.config[option]
- _, sourceuri, extid = self.type_and_source_from_eid(foreid)
+ _, sourceuri, extid, _ = self.type_and_source_from_eid(foreid)
if sourceuri == 'system':
return self.config[option]
- pool = self._get_pool()
+ cnxset = self._get_cnxset()
try:
- cnx = pool.connection(sourceuri)
+ cnx = cnxset.connection(sourceuri)
# needed to check connection is valid and usable by the current
# thread
newcnx = self.sources_by_uri[sourceuri].check_connection(cnx)
@@ -548,7 +543,7 @@
cnx = newcnx
return cnx.get_option_value(option, extid)
finally:
- self._free_pool(pool)
+ self._free_cnxset(cnxset)
@cached
def get_versions(self, checkversions=False):
@@ -721,7 +716,7 @@
* build_descr is a flag indicating if the description should be
built on select queries
"""
- session = self._get_session(sessionid, setpool=True, txid=txid)
+ session = self._get_session(sessionid, setcnxset=True, txid=txid)
try:
try:
rset = self.querier.execute(session, rqlstring, args,
@@ -747,21 +742,23 @@
self.exception('unexpected error while executing %s with %s', rqlstring, args)
raise
finally:
- session.reset_pool()
+ session.free_cnxset()
def describe(self, sessionid, eid, txid=None):
- """return a tuple (type, source, extid) for the entity with id """
- session = self._get_session(sessionid, setpool=True, txid=txid)
+ """return a tuple `(type, physical source uri, extid, actual source
+ uri)` for the entity of the given `eid`
+ """
+ session = self._get_session(sessionid, setcnxset=True, txid=txid)
try:
return self.type_and_source_from_eid(eid, session)
finally:
- session.reset_pool()
+ session.free_cnxset()
def check_session(self, sessionid):
"""raise `BadConnectionId` if the connection is no more valid, else
return its latest activity timestamp.
"""
- return self._get_session(sessionid, setpool=False).timestamp
+ return self._get_session(sessionid, setcnxset=False).timestamp
def get_shared_data(self, sessionid, key, default=None, pop=False, txdata=False):
"""return value associated to key in the session's data dictionary or
@@ -772,7 +769,7 @@
If key isn't defined in the dictionary, the value specified by the
`default` argument will be returned.
"""
- session = self._get_session(sessionid, setpool=False)
+ session = self._get_session(sessionid, setcnxset=False)
return session.get_shared_data(key, default, pop, txdata)
def set_shared_data(self, sessionid, key, value, txdata=False):
@@ -782,7 +779,7 @@
transaction's data which are cleared on commit/rollback of the current
transaction.
"""
- session = self._get_session(sessionid, setpool=False)
+ session = self._get_session(sessionid, setcnxset=False)
session.set_shared_data(key, value, txdata)
def commit(self, sessionid, txid=None):
@@ -794,7 +791,7 @@
return session.commit()
except (ValidationError, Unauthorized):
raise
- except:
+ except Exception:
self.exception('unexpected error')
raise
@@ -805,16 +802,16 @@
session = self._get_session(sessionid)
session.set_tx_data(txid)
session.rollback()
- except:
+ except Exception:
self.exception('unexpected error')
raise
def close(self, sessionid, txid=None, checkshuttingdown=True):
"""close the session with the given id"""
- session = self._get_session(sessionid, setpool=True, txid=txid,
+ session = self._get_session(sessionid, setcnxset=True, txid=txid,
checkshuttingdown=checkshuttingdown)
# operations uncommitted before close are rolled back before the hook is called
- session.rollback(reset_pool=False)
+ session.rollback(free_cnxset=False)
self.hm.call_hooks('session_close', session)
# commit session at this point in case write operation has been done
# during `session_close` hooks
@@ -829,7 +826,7 @@
* update user information on each user's request (i.e. groups and
custom properties)
"""
- session = self._get_session(sessionid, setpool=False)
+ session = self._get_session(sessionid, setcnxset=False)
if props is not None:
self.set_session_props(sessionid, props)
user = session.user
@@ -841,43 +838,43 @@
* update user information on each user's request (i.e. groups and
custom properties)
"""
- session = self._get_session(sessionid, setpool=False)
+ session = self._get_session(sessionid, setcnxset=False)
for prop, value in props.items():
session.change_property(prop, value)
def undoable_transactions(self, sessionid, ueid=None, txid=None,
**actionfilters):
"""See :class:`cubicweb.dbapi.Connection.undoable_transactions`"""
- session = self._get_session(sessionid, setpool=True, txid=txid)
+ session = self._get_session(sessionid, setcnxset=True, txid=txid)
try:
return self.system_source.undoable_transactions(session, ueid,
**actionfilters)
finally:
- session.reset_pool()
+ session.free_cnxset()
def transaction_info(self, sessionid, txuuid, txid=None):
"""See :class:`cubicweb.dbapi.Connection.transaction_info`"""
- session = self._get_session(sessionid, setpool=True, txid=txid)
+ session = self._get_session(sessionid, setcnxset=True, txid=txid)
try:
return self.system_source.tx_info(session, txuuid)
finally:
- session.reset_pool()
+ session.free_cnxset()
def transaction_actions(self, sessionid, txuuid, public=True, txid=None):
"""See :class:`cubicweb.dbapi.Connection.transaction_actions`"""
- session = self._get_session(sessionid, setpool=True, txid=txid)
+ session = self._get_session(sessionid, setcnxset=True, txid=txid)
try:
return self.system_source.tx_actions(session, txuuid, public)
finally:
- session.reset_pool()
+ session.free_cnxset()
def undo_transaction(self, sessionid, txuuid, txid=None):
"""See :class:`cubicweb.dbapi.Connection.undo_transaction`"""
- session = self._get_session(sessionid, setpool=True, txid=txid)
+ session = self._get_session(sessionid, setcnxset=True, txid=txid)
try:
return self.system_source.undo_transaction(session, txuuid)
finally:
- session.reset_pool()
+ session.free_cnxset()
# public (inter-repository) interface #####################################
@@ -893,7 +890,7 @@
deleted since the given timestamp
"""
session = self.internal_session()
- updatetime = datetime.now()
+ updatetime = datetime.utcnow()
try:
modentities, delentities = self.system_source.modified_entities(
session, etypes, mtime)
@@ -908,7 +905,7 @@
for sessionid in self._sessions.keys():
try:
self.close(sessionid, checkshuttingdown=False)
- except:
+ except Exception: # XXX BaseException?
self.exception('error while closing session %s' % sessionid)
def clean_sessions(self):
@@ -925,18 +922,20 @@
nbclosed += 1
return nbclosed
- def internal_session(self, cnxprops=None):
- """return a dbapi like connection/cursor using internal user which
- have every rights on the repository. You'll *have to* commit/rollback
- or close (rollback implicitly) the session once the job's done, else
- you'll leak connections pool up to the time where no more pool is
- available, causing irremediable freeze...
+ def internal_session(self, cnxprops=None, safe=False):
+ """return a dbapi like connection/cursor using internal user which have
+ every rights on the repository. The `safe` argument is a boolean flag
+ telling if integrity hooks should be activated or not.
+
+ *YOU HAVE TO* commit/rollback or close (rollback implicitly) the
+ session once the job's done, else you'll leak connections set up to the
+ time where no one is available, causing irremediable freeze...
"""
- session = InternalSession(self, cnxprops)
- session.set_pool()
+ session = InternalSession(self, cnxprops, safe)
+ session.set_cnxset()
return session
- def _get_session(self, sessionid, setpool=False, txid=None,
+ def _get_session(self, sessionid, setcnxset=False, txid=None,
checkshuttingdown=True):
"""return the user associated to the given session identifier"""
if checkshuttingdown and self.shutting_down:
@@ -945,9 +944,9 @@
session = self._sessions[sessionid]
except KeyError:
raise BadConnectionId('No such session %s' % sessionid)
- if setpool:
- session.set_tx_data(txid) # must be done before set_pool
- session.set_pool()
+ if setcnxset:
+ session.set_tx_data(txid) # must be done before set_cnxset
+ session.set_cnxset()
return session
# data sources handling ###################################################
@@ -955,7 +954,9 @@
# * correspondance between eid and local id (i.e. specific to a given source)
def type_and_source_from_eid(self, eid, session=None):
- """return a tuple (type, source, extid) for the entity with id """
+ """return a tuple `(type, physical source uri, extid, actual source
+ uri)` for the entity of the given `eid`
+ """
try:
eid = typed_eid(eid)
except ValueError:
@@ -965,19 +966,19 @@
except KeyError:
if session is None:
session = self.internal_session()
- reset_pool = True
+ free_cnxset = True
else:
- reset_pool = False
+ free_cnxset = False
try:
- etype, uri, extid = self.system_source.eid_type_source(session,
- eid)
+ etype, uri, extid, auri = self.system_source.eid_type_source(
+ session, eid)
finally:
- if reset_pool:
- session.reset_pool()
- self._type_source_cache[eid] = (etype, uri, extid)
- if uri != 'system':
- self._extid_cache[(extid, uri)] = eid
- return etype, uri, extid
+ if free_cnxset:
+ session.free_cnxset()
+ self._type_source_cache[eid] = (etype, uri, extid, auri)
+ if uri != 'system':
+ self._extid_cache[(extid, uri)] = eid
+ return etype, uri, extid, auri
def clear_caches(self, eids):
etcache = self._type_source_cache
@@ -985,7 +986,7 @@
rqlcache = self.querier._rql_cache
for eid in eids:
try:
- etype, uri, extid = etcache.pop(typed_eid(eid)) # may be a string in some cases
+ etype, uri, extid, auri = etcache.pop(typed_eid(eid)) # may be a string in some cases
rqlcache.pop('%s X WHERE X eid %s' % (etype, eid), None)
extidcache.pop((extid, uri), None)
except KeyError:
@@ -1019,31 +1020,52 @@
def eid2extid(self, source, eid, session=None):
"""get local id from an eid"""
- etype, uri, extid = self.type_and_source_from_eid(eid, session)
+ etype, uri, extid, _ = self.type_and_source_from_eid(eid, session)
if source.uri != uri:
# eid not from the given source
raise UnknownEid(eid)
return extid
def extid2eid(self, source, extid, etype, session=None, insert=True,
- sourceparams=None):
- """get eid from a local id. An eid is attributed if no record is found"""
+ complete=True, commit=True, sourceparams=None):
+ """Return eid from a local id. If the eid is a negative integer, that
+ means the entity is known but has been copied back to the system source
+ hence should be ignored.
+
+ If no record is found, ie the entity is not known yet:
+
+ 1. an eid is attributed
+
+ 2. the source's :meth:`before_entity_insertion` method is called to
+ build the entity instance
+
+        3. unless source's :attr:`should_call_hooks` tells otherwise,
+ 'before_add_entity' hooks are called
+
+ 4. record is added into the system source
+
+ 5. the source's :meth:`after_entity_insertion` method is called to
+ complete building of the entity instance
+
+        6. unless source's :attr:`should_call_hooks` tells otherwise,
+           'after_add_entity' hooks are called
+ """
uri = 'system' if source.copy_based_source else source.uri
cachekey = (extid, uri)
try:
return self._extid_cache[cachekey]
except KeyError:
pass
- reset_pool = False
+ free_cnxset = False
if session is None:
session = self.internal_session()
- reset_pool = True
+ free_cnxset = True
eid = self.system_source.extid2eid(session, uri, extid)
if eid is not None:
self._extid_cache[cachekey] = eid
- self._type_source_cache[eid] = (etype, uri, extid)
- if reset_pool:
- session.reset_pool()
+ self._type_source_cache[eid] = (etype, uri, extid, source.uri)
+ if free_cnxset:
+ session.free_cnxset()
return eid
if not insert:
return
@@ -1055,24 +1077,25 @@
# processing a commit, we have to use another one
if not session.is_internal_session:
session = self.internal_session()
- reset_pool = True
+ free_cnxset = True
try:
eid = self.system_source.create_eid(session)
self._extid_cache[cachekey] = eid
- self._type_source_cache[eid] = (etype, uri, extid)
+ self._type_source_cache[eid] = (etype, uri, extid, source.uri)
entity = source.before_entity_insertion(
session, extid, etype, eid, sourceparams)
if source.should_call_hooks:
self.hm.call_hooks('before_add_entity', session, entity=entity)
- # XXX call add_info with complete=False ?
- self.add_info(session, entity, source, extid)
+ self.add_info(session, entity, source, extid, complete=complete)
source.after_entity_insertion(session, extid, entity, sourceparams)
if source.should_call_hooks:
self.hm.call_hooks('after_add_entity', session, entity=entity)
- session.commit(reset_pool)
+ if commit or free_cnxset:
+ session.commit(free_cnxset)
return eid
- except:
- session.rollback(reset_pool)
+ except Exception:
+ if commit or free_cnxset:
+ session.rollback(free_cnxset)
raise
def add_info(self, session, entity, source, extid=None, complete=True):
@@ -1083,22 +1106,32 @@
hook.CleanupNewEidsCacheOp.get_instance(session).add_data(entity.eid)
self.system_source.add_info(session, entity, source, extid, complete)
- def delete_info(self, session, entity, sourceuri, extid, scleanup=None):
+ def delete_info(self, session, entity, sourceuri, scleanup=None):
"""called by external source when some entity known by the system source
has been deleted in the external source
"""
# mark eid as being deleted in session info and setup cache update
# operation
hook.CleanupDeletedEidsCacheOp.get_instance(session).add_data(entity.eid)
- self._delete_info(session, entity, sourceuri, extid, scleanup)
+ self._delete_info(session, entity, sourceuri, scleanup)
- def _delete_info(self, session, entity, sourceuri, extid, scleanup=None):
+ def _delete_info(self, session, entity, sourceuri, scleanup=None):
"""delete system information on deletion of an entity:
+
* delete all remaining relations from/to this entity
+
* call delete info on the system source which will transfer record from
the entities table to the deleted_entities table
+
+ When scleanup is specified, it's expected to be the source's eid, in
+ which case we'll specify the target's relation source so that this
+        source is ignored. E.g. we only want to delete relations stored
+        locally: since the deletion information comes from the external
+        source, it's its responsibility to have cleaned up its own relations.
"""
pendingrtypes = session.transaction_data.get('pendingrtypes', ())
+ if scleanup is not None:
+ source = self.sources_by_eid[scleanup]
# delete remaining relations: if user can delete the entity, he can
# delete all its relations without security checking
with security_enabled(session, read=False, write=False):
@@ -1114,6 +1147,13 @@
else:
rql = 'DELETE Y %s X WHERE X eid %%(x)s' % rtype
if scleanup is not None:
+ # if the relation can't be crossed, nothing to cleanup (we
+ # would get a BadRQLQuery from the multi-sources planner).
+ # This may still leave some junk if the mapping has changed
+ # at some point, but one can still run db-check to catch
+ # those
+ if not source in self.can_cross_relation(rtype):
+ continue
# source cleaning: only delete relations stored locally
# (here, scleanup
rql += ', NOT (Y cw_source S, S eid %(seid)s)'
@@ -1121,6 +1161,8 @@
session.execute(rql, {'x': eid, 'seid': scleanup},
build_descr=False)
except Exception:
+ if self.config.mode == 'test':
+ raise
self.exception('error while cascading delete for entity %s '
'from %s. RQL: %s', entity, sourceuri, rql)
self.system_source.delete_info_multi(session, [entity], sourceuri)
@@ -1130,11 +1172,12 @@
the same etype and belonging to the same source.
"""
pendingrtypes = session.transaction_data.get('pendingrtypes', ())
+ if scleanup is not None:
+ source = self.sources_by_eid[scleanup]
# delete remaining relations: if user can delete the entity, he can
# delete all its relations without security checking
with security_enabled(session, read=False, write=False):
- eids = [_e.eid for _e in entities]
- in_eids = ','.join((str(eid) for eid in eids))
+ in_eids = ','.join([str(_e.eid) for _e in entities])
for rschema, _, role in entities[0].e_schema.relation_definitions():
rtype = rschema.type
if rtype in schema.VIRTUAL_RTYPES or rtype in pendingrtypes:
@@ -1146,11 +1189,20 @@
else:
rql = 'DELETE Y %s X WHERE X eid IN (%s)' % (rtype, in_eids)
if scleanup is not None:
+ # if the relation can't be crossed, nothing to cleanup (we
+ # would get a BadRQLQuery from the multi-sources planner).
+ # This may still leave some junk if the mapping has changed
+ # at some point, but one can still run db-check to catch
+ # those
+ if not source in self.can_cross_relation(rtype):
+ continue
# source cleaning: only delete relations stored locally
rql += ', NOT (Y cw_source S, S eid %(seid)s)'
try:
session.execute(rql, {'seid': scleanup}, build_descr=False)
except Exception:
+ if self.config.mode == 'test':
+ raise
self.exception('error while cascading delete for entity %s '
'from %s. RQL: %s', entities, sourceuri, rql)
self.system_source.delete_info_multi(session, entities, sourceuri)
@@ -1195,7 +1247,8 @@
suri = 'system'
extid = source.get_extid(entity)
self._extid_cache[(str(extid), suri)] = entity.eid
- self._type_source_cache[entity.eid] = (entity.__regid__, suri, extid)
+ self._type_source_cache[entity.eid] = (entity.__regid__, suri, extid,
+ source.uri)
return extid
def glob_add_entity(self, session, edited):
@@ -1356,7 +1409,7 @@
# in setdefault, this should not be changed without profiling.
for eid in eids:
- etype, sourceuri, extid = self.type_and_source_from_eid(eid, session)
+ etype, sourceuri, extid, _ = self.type_and_source_from_eid(eid, session)
# XXX should cache entity's cw_metainformation
entity = session.entity_from_eid(eid, etype)
try:
@@ -1369,7 +1422,11 @@
source = self.sources_by_uri[sourceuri]
if source.should_call_hooks:
self.hm.call_hooks('before_delete_entity', session, entities=entities)
- self._delete_info_multi(session, entities, sourceuri)
+ if session.deleted_in_transaction(source.eid):
+ # source is being deleted, think to give scleanup argument
+ self._delete_info_multi(session, entities, sourceuri, scleanup=source.eid)
+ else:
+ self._delete_info_multi(session, entities, sourceuri)
source.delete_entities(session, entities)
if source.should_call_hooks:
self.hm.call_hooks('after_delete_entity', session, entities=entities)
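
The six-step protocol documented on extid2eid above can be summarized by the
following condensed sketch; all attribute and method names are taken from the
hunks above, but error handling, caching, negative eids and commit management
are deliberately omitted:

    def extid2eid_sketch(repo, source, extid, etype, session):
        # fast path: extid already known to the system source
        uri = 'system' if source.copy_based_source else source.uri
        eid = repo.system_source.extid2eid(session, uri, extid)
        if eid is not None:
            return eid
        # unknown entity: steps 1-6
        eid = repo.system_source.create_eid(session)              # 1
        entity = source.before_entity_insertion(                  # 2
            session, extid, etype, eid, None)
        if source.should_call_hooks:                              # 3
            repo.hm.call_hooks('before_add_entity', session, entity=entity)
        repo.add_info(session, entity, source, extid)             # 4
        source.after_entity_insertion(session, extid, entity, None)  # 5
        if source.should_call_hooks:                              # 6
            repo.hm.call_hooks('after_add_entity', session, entity=entity)
        return eid
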
diff -r d8bb8f631d41 -r a4e667270dd4 server/rqlannotation.py
--- a/server/rqlannotation.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/rqlannotation.py Fri Dec 09 12:08:27 2011 +0100
@@ -109,8 +109,9 @@
ostinfo = rhs.children[0].variable.stinfo
else:
ostinfo = lhs.variable.stinfo
- if not any(orel for orel in ostinfo['relations']
- if orel.optional and orel is not rel):
+ if not (ostinfo.get('optcomparisons') or
+ any(orel for orel in ostinfo['relations']
+ if orel.optional and orel is not rel)):
break
if rschema.final or (onlhs and rschema.inlined):
if rschema.type != 'has_text':
@@ -202,8 +203,8 @@
# since introduced duplicates will be removed
if scope.stmt.distinct and diffscope_rels:
return iter(_sort(diffscope_rels)).next()
- # XXX could use a relation for a different scope if it can't generate
- # duplicates, so we would have to check cardinality
+ # XXX could use a relation from a different scope if it can't generate
+    # duplicates, but then we would have to check cardinality
raise CantSelectPrincipal()
def _select_main_var(relations):
@@ -211,16 +212,22 @@
relation for the rhs variable
"""
principal = None
+ others = []
# sort for test predictability
for rel in sorted(relations, key=lambda x: (x.children[0].name, x.r_type)):
# only equality relation with a variable as rhs may be principal
if rel.operator() not in ('=', 'IS') \
or not isinstance(rel.children[1].children[0], VariableRef) or rel.neged(strict=True):
continue
+ if rel.optional:
+ others.append(rel)
+ continue
if rel.scope is rel.stmt:
return rel
principal = rel
if principal is None:
+ if others:
+ return others[0]
raise BadRQLQuery('unable to find principal in %s' % ', '.join(
r.as_string() for r in relations))
return principal
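
The _select_main_var change above stops discarding optional equality relations
outright: they are remembered and used as a last-resort principal. A generic
sketch of that selection policy, with relations modelled as simple dicts for
illustration only:

    def pick_principal(relations):
        principal = None
        fallback = []
        for rel in relations:
            if rel.get('optional'):
                fallback.append(rel)   # keep as last resort
                continue
            principal = rel            # non-optional candidates win
        if principal is None:
            if fallback:
                return fallback[0]
            raise ValueError('unable to find principal')
        return principal
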
diff -r d8bb8f631d41 -r a4e667270dd4 server/schemaserial.py
--- a/server/schemaserial.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/schemaserial.py Fri Dec 09 12:08:27 2011 +0100
@@ -88,7 +88,7 @@
repo = session.repo
dbhelper = repo.system_source.dbhelper
# XXX bw compat (3.6 migration)
- sqlcu = session.pool['system']
+ sqlcu = session.cnxset['system']
sqlcu.execute("SELECT * FROM cw_CWRType WHERE cw_name='symetric'")
if sqlcu.fetchall():
sql = dbhelper.sql_rename_col('cw_CWRType', 'cw_symetric', 'cw_symmetric',
@@ -135,11 +135,11 @@
try:
sqlexec('UPDATE deleted_entities SET type=%(n)s WHERE type=%(x)s',
{'x': etype, 'n': netype})
- except:
+ except Exception:
pass
tocleanup = [eid]
- tocleanup += (eid for eid, (eidetype, uri, extid) in repo._type_source_cache.items()
- if etype == eidetype)
+ tocleanup += (eid for eid, cached in repo._type_source_cache.iteritems()
+ if etype == cached[0])
repo.clear_caches(tocleanup)
session.commit(False)
if needcopy:
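
The rewritten cache cleanup above indexes cached[0] instead of unpacking a
fixed-size tuple; that keeps the loop working now that this changeset widens
_type_source_cache values from 3-tuples to 4-tuples. A small illustration with
made-up cache content:

    cache = {1: ('CWUser', 'system', None, 'system'),
             2: ('Card', 'system', None, 'system')}
    etype = 'Card'
    tocleanup = [eid for eid, cached in cache.iteritems()
                 if cached[0] == etype]
    print tocleanup
    # -> [2]
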
diff -r d8bb8f631d41 -r a4e667270dd4 server/server.py
--- a/server/server.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/server.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -129,6 +129,13 @@
signal.signal(signal.SIGINT, lambda x, y, s=self: s.quit())
signal.signal(signal.SIGTERM, lambda x, y, s=self: s.quit())
+
+ # these are overridden by set_log_methods below
+ # only defining here to prevent pylint from complaining
+ @classmethod
+ def info(cls, msg, *a, **kw):
+ pass
+
from logging import getLogger
from cubicweb import set_log_methods
LOGGER = getLogger('cubicweb.reposerver')
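
The no-op info() classmethod above exists only so pylint sees the attribute;
set_log_methods then rebinds real logger methods on the class. A minimal
sketch of that pattern, assuming nothing CubicWeb-specific (set_log_methods
here is a simplified stand-in for the logilab helper):

    import logging

    def set_log_methods(cls, logger):
        # overwrite the placeholder with the logger's methods
        for name in ('debug', 'info', 'warning', 'error', 'critical'):
            setattr(cls, name, staticmethod(getattr(logger, name)))

    class SomeServer(object):
        @classmethod
        def info(cls, msg, *a, **kw):
            pass  # placeholder, overridden by set_log_methods below

    set_log_methods(SomeServer, logging.getLogger('sketch'))
    SomeServer.info('now routed through the logging module')
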
diff -r d8bb8f631d41 -r a4e667270dd4 server/serverconfig.py
--- a/server/serverconfig.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/serverconfig.py Fri Dec 09 12:08:27 2011 +0100
@@ -130,7 +130,7 @@
('connections-pool-size',
{'type' : 'int',
'default': 4,
- 'help': 'size of the connections pools. Each source supporting multiple \
+ 'help': 'size of the connections pool. Each source supporting multiple \
connections will have this number of opened connections.',
'group': 'main', 'level': 3,
}),
@@ -209,9 +209,9 @@
}),
) + CubicWebConfiguration.options)
- # should we open connections pools (eg connect to sources). This is usually
- # necessary...
- open_connections_pools = True
+ # should we init the connections pool (eg connect to sources). This is
+ # usually necessary...
+ init_cnxset_pool = True
# read the schema from the database
read_instance_schema = True
@@ -255,7 +255,7 @@
# configuration file (#16102)
@cached
def read_sources_file(self):
- return read_config(self.sources_file())
+ return read_config(self.sources_file(), raise_if_unreadable=True)
def sources(self):
"""return a dictionnaries containing sources definitions indexed by
diff -r d8bb8f631d41 -r a4e667270dd4 server/serverctl.py
--- a/server/serverctl.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/serverctl.py Fri Dec 09 12:08:27 2011 +0100
@@ -24,6 +24,7 @@
# completion). So import locally in command helpers.
import sys
import os
+import logging
from logilab.common import nullobject
from logilab.common.configuration import Configuration
@@ -122,11 +123,10 @@
interactive=interactive)
# disable autocommit (isolation_level(1)) because DROP and
# CREATE DATABASE can't be executed in a transaction
- try:
- cnx.set_isolation_level(0)
- except AttributeError:
+ set_isolation_level = getattr(cnx, 'set_isolation_level', None)
+ if set_isolation_level is not None:
# set_isolation_level() is psycopg specific
- pass
+ set_isolation_level(0)
return cnx
def repo_cnx(config):
@@ -248,7 +248,7 @@
cursor.execute, 'DROP USER %s' % user) is not ERROR:
print '-> user %s dropped.' % user
cnx.commit()
- except:
+ except BaseException:
cnx.rollback()
raise
@@ -363,21 +363,28 @@
createdb(helper, source, dbcnx, cursor)
dbcnx.commit()
print '-> database %s created.' % dbname
- except:
+ except BaseException:
dbcnx.rollback()
raise
cnx = system_source_cnx(source, special_privs='CREATE LANGUAGE',
interactive=not automatic)
cursor = cnx.cursor()
helper.init_fti_extensions(cursor)
+ cnx.commit()
# postgres specific stuff
if driver == 'postgres':
- # install plpythonu/plpgsql language if not installed by the cube
- langs = sys.platform == 'win32' and ('plpgsql',) or ('plpythonu', 'plpgsql')
+ # install plpythonu/plpgsql languages
+ langs = ('plpythonu', 'plpgsql')
for extlang in langs:
- helper.create_language(cursor, extlang)
- cursor.close()
- cnx.commit()
+ if automatic or ASK.confirm('Create language %s ?' % extlang):
+ try:
+ helper.create_language(cursor, extlang)
+ except Exception, exc:
+ print '-> ERROR:', exc
+ print '-> could not create language %s, some stored procedures might be unusable' % extlang
+ cnx.rollback()
+ else:
+ cnx.commit()
print '-> database for instance %s created and necessary extensions installed.' % appid
print
if automatic:
@@ -560,6 +567,7 @@
"""
name = 'reset-admin-pwd'
arguments = '<instance>'
+ min_args = max_args = 1
options = (
('password',
{'short': 'p', 'type' : 'string', 'metavar' : '',
@@ -643,14 +651,13 @@
)
def run(self, args):
- from logilab.common.daemon import daemonize
+ from logilab.common.daemon import daemonize, setugid
from cubicweb.cwctl import init_cmdline_log_threshold
from cubicweb.server.server import RepositoryServer
appid = args[0]
debug = self['debug']
if sys.platform == 'win32' and not debug:
- from logging import getLogger
- logger = getLogger('cubicweb.ctl')
+ logger = logging.getLogger('cubicweb.ctl')
logger.info('Forcing debug mode on win32 platform')
debug = True
config = ServerConfiguration.config_for(appid, debugmode=debug)
@@ -668,12 +675,7 @@
return
uid = config['uid']
if uid is not None:
- try:
- uid = int(uid)
- except ValueError:
- from pwd import getpwnam
- uid = getpwnam(uid).pw_uid
- os.setuid(uid)
+ setugid(uid)
server.install_sig_handlers()
server.connect(config['host'], 0)
server.run()
@@ -982,7 +984,7 @@
appid = args[0]
config = ServerConfiguration.config_for(appid)
repo, cnx = repo_cnx(config)
- session = repo._get_session(cnx.sessionid, setpool=True)
+ session = repo._get_session(cnx.sessionid, setcnxset=True)
reindex_entities(repo.schema, session)
cnx.commit()
@@ -1007,11 +1009,43 @@
mih.cmd_synchronize_schema()
+class SynchronizeSourceCommand(Command):
+ """Force a source synchronization.
+
+    <instance>
+      the identifier of the instance
+
+    <source>
+      the name of the source to synchronize.
+ """
+ name = 'source-sync'
+    arguments = '<instance> <source>'
+ min_args = max_args = 2
+
+ def run(self, args):
+ config = ServerConfiguration.config_for(args[0])
+ config.global_set_option('log-file', None)
+ config.log_format = '%(levelname)s %(name)s: %(message)s'
+ logger = logging.getLogger('cubicweb.sources')
+ logger.setLevel(logging.INFO)
+ # only retrieve cnx to trigger authentication, close it right away
+ repo, cnx = repo_cnx(config)
+ cnx.close()
+ try:
+ source = repo.sources_by_uri[args[1]]
+ except KeyError:
+ raise ExecutionError('no source named %r' % args[1])
+ session = repo.internal_session()
+ stats = source.pull_data(session, force=True, raise_on_error=True)
+ for key, val in stats.iteritems():
+ if val:
+ print key, ':', val
+
+
for cmdclass in (CreateInstanceDBCommand, InitInstanceCommand,
GrantUserOnInstanceCommand, ResetAdminPasswordCommand,
StartRepositoryCommand,
DBDumpCommand, DBRestoreCommand, DBCopyCommand,
AddSourceCommand, CheckRepositoryCommand, RebuildFTICommand,
- SynchronizeInstanceSchemaCommand,
+ SynchronizeInstanceSchemaCommand, SynchronizeSourceCommand
):
CWCTL.register(cmdclass)
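
With SynchronizeSourceCommand registered above, a forced synchronization can
be triggered from the command line; the instance and source names below are of
course illustrative:

    cubicweb-ctl source-sync myinstance mydatafeed

The command then prints the non-empty entries of the stats dictionary returned
by pull_data once the pull completes.
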
diff -r d8bb8f631d41 -r a4e667270dd4 server/session.py
--- a/server/session.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/session.py Fri Dec 09 12:08:27 2011 +0100
@@ -61,6 +61,18 @@
description.append(term.get_type(solution, args))
return description
+def selection_idx_type(i, rqlst, args):
+ """try to return type of term at index `i` of the rqlst's selection"""
+ for select in rqlst.children:
+ term = select.selection[i]
+ for solution in select.solutions:
+ try:
+ ttype = term.get_type(solution, args)
+ if ttype is not None:
+ return ttype
+ except CoercionError:
+ return None
+
@objectify_selector
def is_user_session(cls, req, **kwargs):
"""repository side only selector returning 1 if the session is a regular
@@ -125,21 +137,13 @@
self.categories = categories
def __enter__(self):
- self.oldmode = self.session.set_hooks_mode(self.mode)
- if self.mode is self.session.HOOKS_DENY_ALL:
- self.changes = self.session.enable_hook_categories(*self.categories)
- else:
- self.changes = self.session.disable_hook_categories(*self.categories)
+ self.oldmode, self.changes = self.session.init_hooks_mode_categories(
+ self.mode, self.categories)
def __exit__(self, exctype, exc, traceback):
- if self.changes:
- if self.mode is self.session.HOOKS_DENY_ALL:
- self.session.disable_hook_categories(*self.changes)
- else:
- self.session.enable_hook_categories(*self.changes)
- self.session.set_hooks_mode(self.oldmode)
+ self.session.reset_hooks_mode_categories(self.oldmode, self.mode, self.changes)
-INDENT = ''
+
class security_enabled(object):
"""context manager to control security w/ session.execute, since by
default security is disabled on queries executed on the repository
@@ -151,33 +155,90 @@
self.write = write
def __enter__(self):
-# global INDENT
- if self.read is not None:
- self.oldread = self.session.set_read_security(self.read)
-# print INDENT + 'read', self.read, self.oldread
- if self.write is not None:
- self.oldwrite = self.session.set_write_security(self.write)
-# print INDENT + 'write', self.write, self.oldwrite
-# INDENT += ' '
+ self.oldread, self.oldwrite = self.session.init_security(
+ self.read, self.write)
def __exit__(self, exctype, exc, traceback):
-# global INDENT
-# INDENT = INDENT[:-2]
- if self.read is not None:
- self.session.set_read_security(self.oldread)
-# print INDENT + 'reset read to', self.oldread
- if self.write is not None:
- self.session.set_write_security(self.oldwrite)
-# print INDENT + 'reset write to', self.oldwrite
+ self.session.reset_security(self.oldread, self.oldwrite)
class TransactionData(object):
def __init__(self, txid):
self.transactionid = txid
+ self.ctx_count = 0
+
class Session(RequestSessionBase):
- """tie session id, user, connections pool and other session data all
- together
+ """Repository usersession, tie a session id, user, connections set and
+ other session data all together.
+
+ About session storage / transactions
+ ------------------------------------
+
+ Here is a description of internal session attributes. Besides :attr:`data`
+    and :attr:`transaction_data`, you should not have to use the attributes
+    described here directly; use the higher level APIs instead.
+
+ :attr:`data` is a dictionary containing shared data, used to communicate
+ extra information between the client and the repository
+
+    :attr:`_tx_data` is a dictionary of :class:`TransactionData` instances, one
+ for each running transaction. The key is the transaction id. By default
+ the transaction id is the thread name but it can be otherwise (per dbapi
+ cursor for instance, or per thread name *from another process*).
+
+ :attr:`__threaddata` is a thread local storage whose `txdata` attribute
+ refers to the proper instance of :class:`TransactionData` according to the
+ transaction.
+
+    :attr:`_threads_in_transaction` is a set of (thread, connections set)
+    pairs referencing threads that currently hold a connections set for the
+    session.
+
+    You should not have to use :attr:`_tx_data` or :attr:`__threaddata` directly,
+ simply access transaction data transparently through the :attr:`_threaddata`
+ property. Also, you usually don't have to access it directly since current
+ transaction's data may be accessed/modified through properties / methods:
+
+ :attr:`transaction_data`, similarly to :attr:`data`, is a dictionary
+ containing some shared data that should be cleared at the end of the
+ transaction. Hooks and operations may put arbitrary data in there, and
+ this may also be used as a communication channel between the client and
+ the repository.
+
+ :attr:`cnxset`, the connections set to use to execute queries on sources.
+    During a transaction, the connections set may be freed so that it may be
+ used by another session as long as no writing is done. This means we can
+ have multiple sessions with a reasonably low connections set pool size.
+
+ :attr:`mode`, string telling the connections set handling mode, may be one
+ of 'read' (connections set may be freed), 'write' (some write was done in
+ the connections set, it can't be freed before end of the transaction),
+    'transaction' (we want to keep the connections set for the whole
+    transaction, with or without writing)
+
+ :attr:`pending_operations`, ordered list of operations to be processed on
+ commit/rollback
+
+ :attr:`commit_state`, describing the transaction commit state, may be one
+ of None (not yet committing), 'precommit' (calling precommit event on
+ operations), 'postcommit' (calling postcommit event on operations),
+ 'uncommitable' (some :exc:`ValidationError` or :exc:`Unauthorized` error
+    has been raised during the transaction and so it must be rolled back).
+
+ :attr:`read_security` and :attr:`write_security`, boolean flags telling if
+ read/write security is currently activated.
+
+ :attr:`hooks_mode`, may be either `HOOKS_ALLOW_ALL` or `HOOKS_DENY_ALL`.
+
+ :attr:`enabled_hook_categories`, when :attr:`hooks_mode` is
+ `HOOKS_DENY_ALL`, this set contains hooks categories that are enabled.
+
+ :attr:`disabled_hook_categories`, when :attr:`hooks_mode` is
+ `HOOKS_ALLOW_ALL`, this set contains hooks categories that are disabled.
+
+
+ :attr:`running_dbapi_query`, boolean flag telling if the executing query
+ is coming from a dbapi connection or is a query from within the repository
"""
is_internal_session = False
@@ -246,7 +307,10 @@
"""return a fake request/session using specified user"""
session = Session(user, self.repo)
threaddata = session._threaddata
- threaddata.pool = self.pool
+ threaddata.cnxset = self.cnxset
+        # we attributed a connections set, so we need to update ctx_count,
+        # else it will be freed while still in use
+ threaddata.ctx_count = 1
# share pending_operations, else operation added in the hi-jacked
# session such as SendMailOp won't ever be processed
threaddata.pending_operations = self.pending_operations
@@ -388,14 +452,14 @@
"""return a sql cursor on the system database"""
if sql.split(None, 1)[0].upper() != 'SELECT':
self.mode = 'write'
- source = self.pool.source('system')
+ source = self.cnxset.source('system')
try:
return source.doexec(self, sql, args, rollback=rollback_on_failure)
except (source.OperationalError, source.InterfaceError):
if not rollback_on_failure:
raise
source.warning("trying to reconnect")
- self.pool.reconnect(source)
+ self.cnxset.reconnect(source)
return source.doexec(self, sql, args, rollback=rollback_on_failure)
def set_language(self, language):
@@ -446,6 +510,29 @@
def security_enabled(self, read=False, write=False):
return security_enabled(self, read=read, write=write)
+ def init_security(self, read, write):
+ if read is None:
+ oldread = None
+ else:
+ oldread = self.set_read_security(read)
+ if write is None:
+ oldwrite = None
+ else:
+ oldwrite = self.set_write_security(write)
+ self._threaddata.ctx_count += 1
+ return oldread, oldwrite
+
+ def reset_security(self, read, write):
+ txstore = self._threaddata
+ txstore.ctx_count -= 1
+ if txstore.ctx_count == 0:
+ self._clear_thread_storage(txstore)
+ else:
+ if read is not None:
+ self.set_read_security(read)
+ if write is not None:
+ self.set_write_security(write)
+
@property
def read_security(self):
"""return a boolean telling if read security is activated or not"""
@@ -498,7 +585,7 @@
return self.DEFAULT_SECURITY
try:
return txstore.write_security
- except:
+ except AttributeError:
txstore.write_security = self.DEFAULT_SECURITY
return txstore.write_security
@@ -546,6 +633,30 @@
self._threaddata.hooks_mode = mode
return oldmode
+ def init_hooks_mode_categories(self, mode, categories):
+ oldmode = self.set_hooks_mode(mode)
+ if mode is self.HOOKS_DENY_ALL:
+ changes = self.enable_hook_categories(*categories)
+ else:
+ changes = self.disable_hook_categories(*categories)
+ self._threaddata.ctx_count += 1
+ return oldmode, changes
+
+ def reset_hooks_mode_categories(self, oldmode, mode, categories):
+ txstore = self._threaddata
+ txstore.ctx_count -= 1
+ if txstore.ctx_count == 0:
+ self._clear_thread_storage(txstore)
+ else:
+ try:
+ if categories:
+ if mode is self.HOOKS_DENY_ALL:
+ return self.disable_hook_categories(*categories)
+ else:
+ return self.enable_hook_categories(*categories)
+ finally:
+ self.set_hooks_mode(oldmode)
+
@property
def disabled_hook_categories(self):
try:
@@ -569,17 +680,18 @@
- on HOOKS_ALLOW_ALL mode, ensure those categories are disabled
"""
changes = set()
+ self.pruned_hooks_cache.clear()
if self.hooks_mode is self.HOOKS_DENY_ALL:
- enablecats = self.enabled_hook_categories
+ enabledcats = self.enabled_hook_categories
for category in categories:
- if category in enablecats:
- enablecats.remove(category)
+ if category in enabledcats:
+ enabledcats.remove(category)
changes.add(category)
else:
- disablecats = self.disabled_hook_categories
+ disabledcats = self.disabled_hook_categories
for category in categories:
- if category not in disablecats:
- disablecats.add(category)
+ if category not in disabledcats:
+ disabledcats.add(category)
changes.add(category)
return tuple(changes)
@@ -590,17 +702,18 @@
- on HOOKS_ALLOW_ALL mode, ensure those categories are not disabled
"""
changes = set()
+ self.pruned_hooks_cache.clear()
if self.hooks_mode is self.HOOKS_DENY_ALL:
- enablecats = self.enabled_hook_categories
+ enabledcats = self.enabled_hook_categories
for category in categories:
- if category not in enablecats:
- enablecats.add(category)
+ if category not in enabledcats:
+ enabledcats.add(category)
changes.add(category)
else:
- disablecats = self.disabled_hook_categories
+ disabledcats = self.disabled_hook_categories
for category in categories:
- if category in self.disabled_hook_categories:
- disablecats.remove(category)
+ if category in disabledcats:
+ disabledcats.remove(category)
changes.add(category)
return tuple(changes)
@@ -620,19 +733,19 @@
# connection management ###################################################
- def keep_pool_mode(self, mode):
- """set pool_mode, e.g. how the session will keep its pool:
+ def keep_cnxset_mode(self, mode):
+ """set `mode`, e.g. how the session will keep its connections set:
- * if mode == 'write', the pool is freed after each ready query, but kept
- until the transaction's end (eg commit or rollback) when a write query
- is detected (eg INSERT/SET/DELETE queries)
+        * if mode == 'write', the connections set is freed after each read
+ query, but kept until the transaction's end (eg commit or rollback)
+ when a write query is detected (eg INSERT/SET/DELETE queries)
- * if mode == 'transaction', the pool is only freed after the
+ * if mode == 'transaction', the connections set is only freed after the
transaction's end
- notice that a repository has a limited set of pools, and a session has to
- wait for a free pool to run any rql query (unless it already has a pool
- set).
+        notice that a repository has a limited number of connections sets, and a
+ session has to wait for a free connections set to run any rql query
+ (unless it already has one set).
"""
assert mode in ('transaction', 'write')
if mode == 'transaction':
@@ -655,56 +768,58 @@
commit_state = property(get_commit_state, set_commit_state)
@property
- def pool(self):
- """connections pool, set according to transaction mode for each query"""
+ def cnxset(self):
+ """connections set, set according to transaction mode for each query"""
if self._closed:
- self.reset_pool(True)
- raise Exception('try to access pool on a closed session')
- return getattr(self._threaddata, 'pool', None)
+ self.free_cnxset(True)
+ raise Exception('try to access connections set on a closed session %s' % self.id)
+ return getattr(self._threaddata, 'cnxset', None)
- def set_pool(self):
- """the session need a pool to execute some queries"""
+ def set_cnxset(self):
+ """the session need a connections set to execute some queries"""
with self._closed_lock:
if self._closed:
- self.reset_pool(True)
- raise Exception('try to set pool on a closed session')
- if self.pool is None:
- # get pool first to avoid race-condition
- self._threaddata.pool = pool = self.repo._get_pool()
+ self.free_cnxset(True)
+ raise Exception('try to set connections set on a closed session %s' % self.id)
+ if self.cnxset is None:
+ # get connections set first to avoid race-condition
+ self._threaddata.cnxset = cnxset = self.repo._get_cnxset()
+ self._threaddata.ctx_count += 1
try:
- pool.pool_set()
- except:
- self._threaddata.pool = None
- self.repo._free_pool(pool)
+ cnxset.cnxset_set()
+ except Exception:
+ self._threaddata.cnxset = None
+ self.repo._free_cnxset(cnxset)
raise
self._threads_in_transaction.add(
- (threading.currentThread(), pool) )
- return self._threaddata.pool
+ (threading.currentThread(), cnxset) )
+ return self._threaddata.cnxset
- def _free_thread_pool(self, thread, pool, force_close=False):
+ def _free_thread_cnxset(self, thread, cnxset, force_close=False):
try:
- self._threads_in_transaction.remove( (thread, pool) )
+ self._threads_in_transaction.remove( (thread, cnxset) )
except KeyError:
- # race condition on pool freeing (freed by commit or rollback vs
+ # race condition on cnxset freeing (freed by commit or rollback vs
# close)
pass
else:
if force_close:
- pool.reconnect()
+ cnxset.reconnect()
else:
- pool.pool_reset()
- # free pool once everything is done to avoid race-condition
- self.repo._free_pool(pool)
+ cnxset.cnxset_freed()
+ # free cnxset once everything is done to avoid race-condition
+ self.repo._free_cnxset(cnxset)
- def reset_pool(self, ignoremode=False):
- """the session is no longer using its pool, at least for some time"""
- # pool may be none if no operation has been done since last commit
+ def free_cnxset(self, ignoremode=False):
+ """the session is no longer using its connections set, at least for some time"""
+ # cnxset may be none if no operation has been done since last commit
# or rollback
- pool = getattr(self._threaddata, 'pool', None)
- if pool is not None and (ignoremode or self.mode == 'read'):
+ cnxset = getattr(self._threaddata, 'cnxset', None)
+ if cnxset is not None and (ignoremode or self.mode == 'read'):
# even in read mode, we must release the current transaction
- self._free_thread_pool(threading.currentThread(), pool)
- del self._threaddata.pool
+ self._free_thread_cnxset(threading.currentThread(), cnxset)
+ del self._threaddata.cnxset
+ self._threaddata.ctx_count -= 1
def _touch(self):
"""update latest session usage timestamp and reset mode to read"""
@@ -770,9 +885,13 @@
def source_defs(self):
return self.repo.source_defs()
- def describe(self, eid):
+ def describe(self, eid, asdict=False):
"""return a tuple (type, sourceuri, extid) for the entity with id """
- return self.repo.type_and_source_from_eid(eid, self)
+ metas = self.repo.type_and_source_from_eid(eid, self)
+ if asdict:
+ return dict(zip(('type', 'source', 'extid', 'asource'), metas))
+ # XXX :-1 for cw compat, use asdict=True for full information
+ return metas[:-1]
# db-api like interface ###################################################
@@ -793,9 +912,9 @@
rset.req = self
return rset
- def _clear_thread_data(self, reset_pool=True):
- """remove everything from the thread local storage, except pool
- which is explicitly removed by reset_pool, and mode which is set anyway
+ def _clear_thread_data(self, free_cnxset=True):
+ """remove everything from the thread local storage, except connections set
+ which is explicitly removed by free_cnxset, and mode which is set anyway
by _touch
"""
try:
@@ -803,23 +922,38 @@
except AttributeError:
pass
else:
- if reset_pool:
- self._tx_data.pop(txstore.transactionid, None)
- try:
- del self.__threaddata.txdata
- except AttributeError:
- pass
+ if free_cnxset:
+ self.free_cnxset()
+ if txstore.ctx_count == 0:
+ self._clear_thread_storage(txstore)
+ else:
+ self._clear_tx_storage(txstore)
else:
- for name in ('commit_state', 'transaction_data',
- 'pending_operations', '_rewriter'):
- try:
- delattr(txstore, name)
- except AttributeError:
- continue
+ self._clear_tx_storage(txstore)
+
+ def _clear_thread_storage(self, txstore):
+ self._tx_data.pop(txstore.transactionid, None)
+ try:
+ del self.__threaddata.txdata
+ except AttributeError:
+ pass
- def commit(self, reset_pool=True):
+ def _clear_tx_storage(self, txstore):
+ for name in ('commit_state', 'transaction_data',
+ 'pending_operations', '_rewriter',
+ 'pruned_hooks_cache'):
+ try:
+ delattr(txstore, name)
+ except AttributeError:
+ continue
+
+ def commit(self, free_cnxset=True, reset_pool=None):
"""commit the current session's transaction"""
- if self.pool is None:
+ if reset_pool is not None:
+            warn('[3.13] use free_cnxset argument instead of reset_pool',
+ DeprecationWarning, stacklevel=2)
+ free_cnxset = reset_pool
+ if self.cnxset is None:
assert not self.pending_operations
self._clear_thread_data()
self._touch()
@@ -847,7 +981,7 @@
operation.handle_event('precommit_event')
self.pending_operations[:] = processed
self.debug('precommit session %s done', self.id)
- except:
+ except BaseException:
# if error on [pre]commit:
#
# * set .failed = True on the operation causing the failure
@@ -862,37 +996,41 @@
for operation in reversed(processed):
try:
operation.handle_event('revertprecommit_event')
- except:
+ except BaseException:
self.critical('error while reverting precommit',
exc_info=True)
# XXX use slice notation since self.pending_operations is a
# read-only property.
self.pending_operations[:] = processed + self.pending_operations
- self.rollback(reset_pool)
+ self.rollback(free_cnxset)
raise
- self.pool.commit()
+ self.cnxset.commit()
self.commit_state = 'postcommit'
while self.pending_operations:
operation = self.pending_operations.pop(0)
operation.processed = 'postcommit'
try:
operation.handle_event('postcommit_event')
- except:
+ except BaseException:
self.critical('error while postcommit',
exc_info=sys.exc_info())
self.debug('postcommit session %s done', self.id)
return self.transaction_uuid(set=False)
finally:
self._touch()
- if reset_pool:
- self.reset_pool(ignoremode=True)
- self._clear_thread_data(reset_pool)
+ if free_cnxset:
+ self.free_cnxset(ignoremode=True)
+ self._clear_thread_data(free_cnxset)
- def rollback(self, reset_pool=True):
+ def rollback(self, free_cnxset=True, reset_pool=None):
"""rollback the current session's transaction"""
- # don't use self.pool, rollback may be called with _closed == True
- pool = getattr(self._threaddata, 'pool', None)
- if pool is None:
+ if reset_pool is not None:
+            warn('[3.13] use free_cnxset argument instead of reset_pool',
+ DeprecationWarning, stacklevel=2)
+ free_cnxset = reset_pool
+ # don't use self.cnxset, rollback may be called with _closed == True
+ cnxset = getattr(self._threaddata, 'cnxset', None)
+ if cnxset is None:
self._clear_thread_data()
self._touch()
self.debug('rollback session %s done (no db activity)', self.id)
@@ -904,23 +1042,23 @@
try:
operation = self.pending_operations.pop(0)
operation.handle_event('rollback_event')
- except:
+ except BaseException:
self.critical('rollback error', exc_info=sys.exc_info())
continue
- pool.rollback()
+ cnxset.rollback()
self.debug('rollback for session %s done', self.id)
finally:
self._touch()
- if reset_pool:
- self.reset_pool(ignoremode=True)
- self._clear_thread_data(reset_pool)
+ if free_cnxset:
+ self.free_cnxset(ignoremode=True)
+ self._clear_thread_data(free_cnxset)
def close(self):
- """do not close pool on session close, since they are shared now"""
+ """do not close connections set on session close, since they are shared now"""
with self._closed_lock:
self._closed = True
# copy since _threads_in_transaction maybe modified while waiting
- for thread, pool in self._threads_in_transaction.copy():
+ for thread, cnxset in self._threads_in_transaction.copy():
if thread is threading.currentThread():
continue
self.info('waiting for thread %s', thread)
@@ -930,12 +1068,12 @@
for i in xrange(10):
thread.join(1)
if not (thread.isAlive() and
- (thread, pool) in self._threads_in_transaction):
+ (thread, cnxset) in self._threads_in_transaction):
break
else:
self.error('thread %s still alive after 10 seconds, will close '
'session anyway', thread)
- self._free_thread_pool(thread, pool, force_close=True)
+ self._free_thread_cnxset(thread, cnxset, force_close=True)
self.rollback()
del self.__threaddata
del self._tx_data
@@ -962,9 +1100,16 @@
self._threaddata.pending_operations = []
return self._threaddata.pending_operations
+ @property
+ def pruned_hooks_cache(self):
+ try:
+ return self._threaddata.pruned_hooks_cache
+ except AttributeError:
+ self._threaddata.pruned_hooks_cache = {}
+ return self._threaddata.pruned_hooks_cache
+
def add_operation(self, operation, index=None):
- """add an observer"""
- assert self.commit_state != 'commit'
+ """add an operation"""
if index is None:
self.pending_operations.append(operation)
else:
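
pruned_hooks_cache follows the session's usual pattern of per-thread data that is lazily created on first access. A self-contained sketch of the pattern, assuming a plain threading.local holder:

    import threading

    class PerThreadData(object):
        def __init__(self):
            self._threaddata = threading.local()

        @property
        def pruned_hooks_cache(self):
            # each thread gets its own dict, created on first access
            try:
                return self._threaddata.pruned_hooks_cache
            except AttributeError:
                self._threaddata.pruned_hooks_cache = {}
                return self._threaddata.pruned_hooks_cache
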
@@ -1023,20 +1168,13 @@
unstables = rqlst.get_variable_indices()
basedescr = []
todetermine = []
- sampleselect = rqlst.children[0]
- samplesols = sampleselect.solutions[0]
- for i, term in enumerate(sampleselect.selection):
- try:
- ttype = term.get_type(samplesols, args)
- except CoercionError:
+ for i in xrange(len(rqlst.children[0].selection)):
+ ttype = selection_idx_type(i, rqlst, args)
+ if ttype is None or ttype == 'Any':
ttype = None
isfinal = True
else:
- if ttype is None or ttype == 'Any':
- ttype = None
- isfinal = True
- else:
- isfinal = ttype in BASE_TYPES
+ isfinal = ttype in BASE_TYPES
if ttype is None or i in unstables:
basedescr.append(None)
todetermine.append( (i, isfinal) )
@@ -1049,7 +1187,8 @@
def _build_descr(self, result, basedescription, todetermine):
description = []
etype_from_eid = self.describe
- for row in result:
+ todel = []
+ for i, row in enumerate(result):
row_descr = basedescription[:]
for index, isfinal in todetermine:
value = row[index]
@@ -1063,10 +1202,14 @@
try:
row_descr[index] = etype_from_eid(value)[0]
except UnknownEid:
- self.critical('wrong eid %s in repository, you should '
- 'db-check the database' % value)
- row_descr[index] = row[index] = None
- description.append(tuple(row_descr))
+ self.error('wrong eid %s in repository, you should '
+ 'db-check the database' % value)
+ todel.append(i)
+ break
+ else:
+ description.append(tuple(row_descr))
+ for i in reversed(todel):
+ del result[i]
return description
# deprecated ###############################################################
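
_build_descr now collects the indices of rows whose description cannot be computed and deletes them afterwards, in reverse order so that earlier deletions do not shift the indices still to be removed. The idea in isolation:

    result = ['row0', 'row1', 'row2', 'row3']
    todel = [1, 3]              # rows with an unknown eid, say
    for i in reversed(todel):   # delete from the end to keep indices valid
        del result[i]
    print result                # -> ['row0', 'row2']
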
@@ -1075,6 +1218,18 @@
def schema_rproperty(self, rtype, eidfrom, eidto, rprop):
return getattr(self.rtype_eids_rdef(rtype, eidfrom, eidto), rprop)
+ @property
+ @deprecated("[3.13] use .cnxset attribute instead of .pool")
+ def pool(self):
+ return self.cnxset
+
+ @deprecated("[3.13] use .set_cnxset() method instead of .set_pool()")
+ def set_pool(self):
+ return self.set_cnxset()
+
+ @deprecated("[3.13] use .free_cnxset() method instead of .reset_pool()")
+ def reset_pool(self):
+ return self.free_cnxset()
@deprecated("[3.7] execute is now unsafe by default in hooks/operation. You"
" can also control security with the security_enabled context "
@@ -1133,20 +1288,21 @@
is_internal_session = True
running_dbapi_query = False
- def __init__(self, repo, cnxprops=None):
+ def __init__(self, repo, cnxprops=None, safe=False):
super(InternalSession, self).__init__(InternalManager(), repo, cnxprops,
_id='internal')
self.user._cw = self # XXX remove when "vreg = user._cw.vreg" hack in entity.py is gone
self.cnxtype = 'inmemory'
- self.disable_hook_categories('integrity')
+ if not safe:
+ self.disable_hook_categories('integrity')
@property
- def pool(self):
- """connections pool, set according to transaction mode for each query"""
+ def cnxset(self):
+ """connections set, set according to transaction mode for each query"""
if self.repo.shutting_down:
- self.reset_pool(True)
+ self.free_cnxset(True)
raise ShuttingDown('repository is shutting down')
- return getattr(self._threaddata, 'pool', None)
+ return getattr(self._threaddata, 'cnxset', None)
class InternalManager(object):
diff -r d8bb8f631d41 -r a4e667270dd4 server/sources/__init__.py
--- a/server/sources/__init__.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/sources/__init__.py Fri Dec 09 12:08:27 2011 +0100
@@ -25,6 +25,7 @@
from logging import getLogger
from logilab.common import configuration
+from logilab.common.deprecation import deprecated
from yams.schema import role_name
@@ -36,11 +37,11 @@
def dbg_st_search(uri, union, varmap, args, cachekey=None, prefix='rql for'):
if server.DEBUG & server.DBG_RQL:
- print ' %s %s source: %s' % (prefix, uri, union.as_string())
+ print ' %s %s source: %s' % (prefix, uri, repr(union.as_string()))
if varmap:
print ' using varmap', varmap
if server.DEBUG & server.DBG_MORE:
- print ' args', args
+ print ' args', repr(args)
print ' cache key', cachekey
print ' solutions', ','.join(str(s.solutions)
for s in union.children)
@@ -64,13 +65,13 @@
self.ttl = timedelta(seconds=ttl)
def __setitem__(self, key, value):
- dict.__setitem__(self, key, (datetime.now(), value))
+ dict.__setitem__(self, key, (datetime.utcnow(), value))
def __getitem__(self, key):
return dict.__getitem__(self, key)[1]
def clear_expired(self):
- now_ = datetime.now()
+ now_ = datetime.utcnow()
ttl = self.ttl
for key, (timestamp, value) in self.items():
if now_ - timestamp > ttl:
@@ -110,15 +111,24 @@
# force deactivation (configuration error for instance)
disabled = False
+    # boolean telling whether the cwuri of entities from this source is the
+    # url that should be used as the entity's absolute url
+ use_cwuri_as_url = False
+
# source configuration options
options = ()
+    # these are overridden by set_log_methods below; defined here only to
+    # prevent pylint from complaining
+    info = warning = error = critical = exception = debug = lambda msg, *a, **kw: None
+
def __init__(self, repo, source_config, eid=None):
self.repo = repo
self.set_schema(repo.schema)
self.support_relations['identity'] = False
self.eid = eid
self.public_config = source_config.copy()
+ self.public_config.setdefault('use-cwuri-as-url', self.use_cwuri_as_url)
self.remove_sensitive_information(self.public_config)
self.uri = source_config.pop('uri')
set_log_methods(self, getLogger('cubicweb.sources.'+self.uri))
@@ -171,7 +181,7 @@
# cw < 3.10 bw compat
try:
processed['adapter'] = confdict['adapter']
- except:
+ except KeyError:
pass
# check for unknown options
if confdict and not confdict.keys() == ['adapter']:
@@ -213,7 +223,7 @@
"""
pass
- PUBLIC_KEYS = ('type', 'uri')
+ PUBLIC_KEYS = ('type', 'uri', 'use-cwuri-as-url')
def remove_sensitive_information(self, sourcedef):
"""remove sensitive information such as login / password from source
definition
@@ -230,23 +240,23 @@
def check_connection(self, cnx):
"""Check connection validity, return None if the connection is still
- valid else a new connection (called when the pool using the given
- connection is being attached to a session). Do nothing by default.
+        valid else a new connection (called when the connections set holding
+        the given connection is being attached to a session). Do nothing by default.
"""
pass
- def close_pool_connections(self):
- for pool in self.repo.pools:
- pool._cursors.pop(self.uri, None)
- pool.source_cnxs[self.uri][1].close()
+ def close_source_connections(self):
+ for cnxset in self.repo.cnxsets:
+ cnxset._cursors.pop(self.uri, None)
+ cnxset.source_cnxs[self.uri][1].close()
- def open_pool_connections(self):
- for pool in self.repo.pools:
- pool.source_cnxs[self.uri] = (self, self.get_connection())
+ def open_source_connections(self):
+ for cnxset in self.repo.cnxsets:
+ cnxset.source_cnxs[self.uri] = (self, self.get_connection())
- def pool_reset(self, cnx):
- """the pool using the given connection is being reseted from its current
- attached session
+ def cnxset_freed(self, cnx):
+        """the connections set holding the given connection is being freed
+        from its currently attached session.
do nothing by default
"""
@@ -264,12 +274,6 @@
# external source api ######################################################
- def eid2extid(self, eid, session=None):
- return self.repo.eid2extid(self, eid, session)
-
- def extid2eid(self, value, etype, session=None, **kwargs):
- return self.repo.extid2eid(self, value, etype, session, **kwargs)
-
def support_entity(self, etype, write=False):
"""return true if the given entity's type is handled by this adapter
if write is true, return true only if it's a RW support
@@ -404,7 +408,7 @@
.executemany().
"""
res = self.syntax_tree_search(session, union, args, varmap=varmap)
- session.pool.source('system').manual_insert(res, table, session)
+ session.cnxset.source('system').manual_insert(res, table, session)
# write modification api ###################################################
# read-only sources don't have to implement methods below
@@ -517,6 +521,15 @@
pass
+ @deprecated('[3.13] use repo.eid2extid(source, eid, session)')
+ def eid2extid(self, eid, session=None):
+ return self.repo.eid2extid(self, eid, session)
+
+ @deprecated('[3.13] use extid2eid(source, value, etype, session, **kwargs)')
+ def extid2eid(self, value, etype, session=None, **kwargs):
+ return self.repo.extid2eid(self, value, etype, session, **kwargs)
+
+
class TrFunc(object):
"""lower, upper"""
def __init__(self, trname, index, attrname=None):
diff -r d8bb8f631d41 -r a4e667270dd4 server/sources/datafeed.py
--- a/server/sources/datafeed.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/sources/datafeed.py Fri Dec 09 12:08:27 2011 +0100
@@ -18,15 +18,24 @@
"""datafeed sources: copy data from an external data stream into the system
database
"""
+from __future__ import with_statement
+
+import urllib2
+import StringIO
from datetime import datetime, timedelta
from base64 import b64decode
+from cookielib import CookieJar
-from cubicweb import RegistryNotFound, ObjectNotFound, ValidationError
+from lxml import etree
+
+from cubicweb import RegistryNotFound, ObjectNotFound, ValidationError, UnknownEid
from cubicweb.server.sources import AbstractSource
from cubicweb.appobject import AppObject
+
class DataFeedSource(AbstractSource):
copy_based_source = True
+ use_cwuri_as_url = True
options = (
('synchronize',
@@ -46,6 +55,15 @@
'external source (default to 5 minutes, must be >= 1 min).'),
'group': 'datafeed-source', 'level': 2,
}),
+ ('max-lock-lifetime',
+ {'type' : 'time',
+ 'default': '1h',
+ 'help': ('Maximum time allowed for a synchronization to be run. '
+                   'Beyond that time, the synchronization will be considered '
+                   'as having failed without properly releasing the lock, '
+                   'hence the lock will be ignored'),
+ 'group': 'datafeed-source', 'level': 2,
+ }),
('delete-entities',
{'type' : 'yn',
'default': True,
@@ -71,7 +89,7 @@
def _entity_update(self, source_entity):
source_entity.complete()
- self.parser = source_entity.parser
+ self.parser_id = source_entity.parser
self.latest_retrieval = source_entity.latest_retrieval
self.urls = [url.strip() for url in source_entity.url.splitlines()
if url.strip()]
@@ -81,6 +99,7 @@
properly typed with defaults set
"""
self.synchro_interval = timedelta(seconds=typedconfig['synchronization-interval'])
+ self.max_lock_lifetime = timedelta(seconds=typedconfig['max-lock-lifetime'])
if source_entity is not None:
self._entity_update(source_entity)
self.config = typedconfig
@@ -88,12 +107,12 @@
def init(self, activated, source_entity):
if activated:
self._entity_update(source_entity)
- self.parser = source_entity.parser
+ self.parser_id = source_entity.parser
self.load_mapping(source_entity._cw)
def _get_parser(self, session, **kwargs):
return self.repo.vreg['parsers'].select(
- self.parser, session, source=self, **kwargs)
+ self.parser_id, session, source=self, **kwargs)
def load_mapping(self, session):
self.mapping = {}
@@ -121,27 +140,54 @@
return False
return datetime.utcnow() < (self.latest_retrieval + self.synchro_interval)
+ def update_latest_retrieval(self, session):
+ self.latest_retrieval = datetime.utcnow()
+ session.execute('SET X latest_retrieval %(date)s WHERE X eid %(x)s',
+ {'x': self.eid, 'date': self.latest_retrieval})
+
+ def acquire_synchronization_lock(self, session):
+ # XXX race condition until WHERE of SET queries is executed using
+ # 'SELECT FOR UPDATE'
+ now = datetime.utcnow()
+ if not session.execute(
+ 'SET X in_synchronization %(now)s WHERE X eid %(x)s, '
+ 'X in_synchronization NULL OR X in_synchronization < %(maxdt)s',
+ {'x': self.eid, 'now': now, 'maxdt': now - self.max_lock_lifetime}):
+ self.error('concurrent synchronization detected, skip pull')
+ session.commit(free_cnxset=False)
+ return False
+ session.commit(free_cnxset=False)
+ return True
+
+ def release_synchronization_lock(self, session):
+ session.set_cnxset()
+ session.execute('SET X in_synchronization NULL WHERE X eid %(x)s',
+ {'x': self.eid})
+ session.commit()
+
def pull_data(self, session, force=False, raise_on_error=False):
+ """Launch synchronization of the source if needed.
+
+        This method is responsible for handling commit/rollback on the given
+ session.
+ """
if not force and self.fresh():
return {}
+ if not self.acquire_synchronization_lock(session):
+ return {}
+ try:
+ with session.transaction(free_cnxset=False):
+ return self._pull_data(session, force, raise_on_error)
+ finally:
+ self.release_synchronization_lock(session)
+
+ def _pull_data(self, session, force=False, raise_on_error=False):
if self.config['delete-entities']:
myuris = self.source_cwuris(session)
else:
myuris = None
parser = self._get_parser(session, sourceuris=myuris)
- error = False
- self.info('pulling data for source %s', self.uri)
- for url in self.urls:
- try:
- if parser.process(url):
- error = True
- except IOError, exc:
- if raise_on_error:
- raise
- self.error('could not pull data while processing %s: %s',
- url, exc)
- error = True
- if error:
+ if self.process_urls(parser, self.urls, raise_on_error):
             self.warning("some error occurred, don't attempt to delete entities")
elif self.config['delete-entities'] and myuris:
byetype = {}
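
acquire_synchronization_lock relies on a conditional update: the SET only matches when in_synchronization is NULL or older than max-lock-lifetime, so exactly one concurrent puller can win, and a crashed one loses its lock once the lifetime expires. A sketch of the same optimistic locking against a plain DB-API cursor, with hypothetical table and column names, assuming a driver using the 'format' paramstyle (e.g. psycopg2):

    from datetime import datetime, timedelta

    def acquire_lock(cursor, source_eid, max_lifetime=timedelta(hours=1)):
        now = datetime.utcnow()
        cursor.execute('UPDATE cw_source SET in_synchronization=%s '
                       'WHERE eid=%s AND (in_synchronization IS NULL '
                       'OR in_synchronization<%s)',
                       (now, source_eid, now - max_lifetime))
        # 0 rows updated means another synchronization holds a live lock
        return cursor.rowcount == 1
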
@@ -151,11 +197,30 @@
for etype, eids in byetype.iteritems():
session.execute('DELETE %s X WHERE X eid IN (%s)'
% (etype, ','.join(eids)))
- self.latest_retrieval = datetime.utcnow()
- session.execute('SET X latest_retrieval %(date)s WHERE X eid %(x)s',
- {'x': self.eid, 'date': self.latest_retrieval})
+ self.update_latest_retrieval(session)
return parser.stats
+ def process_urls(self, parser, urls, raise_on_error=False):
+ error = False
+ for url in urls:
+ self.info('pulling data from %s', url)
+ try:
+ if parser.process(url, raise_on_error):
+ error = True
+ except IOError, exc:
+ if raise_on_error:
+ raise
+ self.error('could not pull data while processing %s: %s',
+ url, exc)
+ error = True
+ except Exception, exc:
+ if raise_on_error:
+ raise
+ self.exception('error while processing %s: %s',
+ url, exc)
+ error = True
+ return error
+
def before_entity_insertion(self, session, lid, etype, eid, sourceparams):
"""called by the repository when an eid has been attributed for an
entity stored here but the entity has not been inserted in the system
@@ -166,12 +231,9 @@
"""
entity = super(DataFeedSource, self).before_entity_insertion(
session, lid, etype, eid, sourceparams)
- entity.cw_edited['cwuri'] = unicode(lid)
+ entity.cw_edited['cwuri'] = lid.decode('utf-8')
entity.cw_edited.set_defaults()
sourceparams['parser'].before_entity_copy(entity, sourceparams)
- # avoid query to search full-text indexed attributes
- for attr in entity.e_schema.indexable_attributes():
- entity.cw_edited.setdefault(attr, u'')
return entity
def after_entity_insertion(self, session, lid, entity, sourceparams):
@@ -195,8 +257,8 @@
class DataFeedParser(AppObject):
__registry__ = 'parsers'
- def __init__(self, session, source, sourceuris=None):
- self._cw = session
+ def __init__(self, session, source, sourceuris=None, **kwargs):
+ super(DataFeedParser, self).__init__(session, **kwargs)
self.source = source
self.sourceuris = sourceuris
self.stats = {'created': set(),
@@ -213,14 +275,44 @@
raise ValidationError(schemacfg.eid, {None: msg})
def extid2entity(self, uri, etype, **sourceparams):
+ """return an entity for the given uri. May return None if it should be
+ skipped
+ """
+ session = self._cw
+        # if cwsource is specified and the repository has a source with the
+        # same name, call extid2eid on that source so the entity will be
+        # properly seen as coming from that source
+ source_uri = sourceparams.pop('cwsource', None)
+ if source_uri is not None and source_uri != 'system':
+ source = session.repo.sources_by_uri.get(source_uri, self.source)
+ else:
+ source = self.source
sourceparams['parser'] = self
- eid = self.source.extid2eid(str(uri), etype, self._cw,
- sourceparams=sourceparams)
+ if isinstance(uri, unicode):
+ uri = uri.encode('utf-8')
+ try:
+ eid = session.repo.extid2eid(source, str(uri), etype, session,
+ complete=False, commit=False,
+ sourceparams=sourceparams)
+ except ValidationError, ex:
+ self.source.error('error while creating %s: %s', etype, ex)
+ return None
+ if eid < 0:
+ # entity has been moved away from its original source
+ #
+ # Don't give etype to entity_from_eid so we get UnknownEid if the
+ # entity has been removed
+ try:
+ entity = session.entity_from_eid(-eid)
+ except UnknownEid:
+ return None
+ self.notify_updated(entity) # avoid later update from the source's data
+ return entity
if self.sourceuris is not None:
self.sourceuris.pop(str(uri), None)
- return self._cw.entity_from_eid(eid, etype)
+ return session.entity_from_eid(eid, etype)
- def process(self, url):
+ def process(self, url, partialcommit=True):
"""main callback: process the url"""
raise NotImplementedError
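
extid2entity handles the convention, introduced with this change, that a negative eid returned by extid2eid means the entity has been moved away from its original source, -eid being its current eid. The lookup rule in isolation, with a plain dict standing in for entity_from_eid:

    def resolve(eid, entities):
        # negative eid: entity moved away from its source, -eid is current
        return entities[-eid if eid < 0 else eid]

    entities = {12: 'Card #12', 13: 'Card #13'}
    print resolve(12, entities)   # -> Card #12
    print resolve(-13, entities)  # -> Card #13 (moved entity)
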
@@ -238,3 +330,66 @@
def notify_updated(self, entity):
return self.stats['updated'].add(entity.eid)
+
+
+class DataFeedXMLParser(DataFeedParser):
+
+ def process(self, url, raise_on_error=False, partialcommit=True):
+ """IDataFeedParser main entry point"""
+ try:
+ parsed = self.parse(url)
+ except Exception, ex:
+ if raise_on_error:
+ raise
+ self.source.error(str(ex))
+ return True
+ error = False
+ for args in parsed:
+ try:
+ self.process_item(*args)
+ if partialcommit:
+                # commit+set_cnxset instead of commit(free_cnxset=False) to
+                # give others a chance to get our connections set
+ self._cw.commit()
+ self._cw.set_cnxset()
+ except ValidationError, exc:
+ if raise_on_error:
+ raise
+ if partialcommit:
+ self.source.error('Skipping %s because of validation error %s' % (args, exc))
+ self._cw.rollback()
+ self._cw.set_cnxset()
+ error = True
+ else:
+ raise
+ return error
+
+ def parse(self, url):
+ if url.startswith('http'):
+ from cubicweb.sobjects.parsers import URL_MAPPING
+ for mappedurl in URL_MAPPING:
+ if url.startswith(mappedurl):
+ url = url.replace(mappedurl, URL_MAPPING[mappedurl], 1)
+ break
+ self.source.info('GET %s', url)
+ stream = _OPENER.open(url)
+ elif url.startswith('file://'):
+ stream = open(url[7:])
+ else:
+ stream = StringIO.StringIO(url)
+ return self.parse_etree(etree.parse(stream).getroot())
+
+ def parse_etree(self, document):
+ return [(document,)]
+
+ def process_item(self, *args):
+ raise NotImplementedError
+
+# use a cookie enabled opener to use session cookie if any
+_OPENER = urllib2.build_opener()
+try:
+ from logilab.common import urllib2ext
+ _OPENER.add_handler(urllib2ext.HTTPGssapiAuthHandler())
+except ImportError: # python-kerberos not available
+ pass
+_OPENER.add_handler(urllib2.HTTPCookieProcessor(CookieJar()))
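
The module-level opener gives every fetch done by datafeed parsers cookie persistence and, when python-kerberos is installed, GSSAPI authentication. A usage sketch of building and using such an opener on its own (network access assumed):

    import urllib2
    from cookielib import CookieJar

    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(CookieJar()))
    response = opener.open('http://www.cubicweb.org/')
    try:
        body = response.read()
    finally:
        response.close()
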
diff -r d8bb8f631d41 -r a4e667270dd4 server/sources/extlite.py
--- a/server/sources/extlite.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/sources/extlite.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -102,19 +102,19 @@
def backup(self, backupfile, confirm):
"""method called to create a backup of the source's data"""
- self.close_pool_connections()
+ self.close_source_connections()
try:
self.sqladapter.backup_to_file(backupfile, confirm)
finally:
- self.open_pool_connections()
+ self.open_source_connections()
def restore(self, backupfile, confirm, drop):
"""method called to restore a backup of source's data"""
- self.close_pool_connections()
+ self.close_source_connections()
try:
self.sqladapter.restore_from_file(backupfile, confirm, drop)
finally:
- self.open_pool_connections()
+ self.open_source_connections()
@property
def _sqlcnx(self):
@@ -174,15 +174,15 @@
def check_connection(self, cnx):
"""check connection validity, return None if the connection is still valid
- else a new connection (called when the pool using the given connection is
+ else a new connection (called when the connections set holding the given connection is
being attached to a session)
always return the connection to reset eventually cached cursor
"""
return cnx
- def pool_reset(self, cnx):
- """the pool using the given connection is being reseted from its current
+ def cnxset_freed(self, cnx):
+        """the connections set holding the given connection is being freed from its currently
attached session: release the connection lock if the connection wrapper
has a connection set
"""
@@ -286,7 +286,7 @@
"""
if server.DEBUG:
print 'exec', query, args
- cursor = session.pool[self.uri]
+ cursor = session.cnxset[self.uri]
try:
             # str(query) to avoid error if it's a unicode string
cursor.execute(str(query), args)
@@ -294,9 +294,9 @@
self.critical("sql: %r\n args: %s\ndbms message: %r",
query, args, ex.args[0])
try:
- session.pool.connection(self.uri).rollback()
+ session.cnxset.connection(self.uri).rollback()
self.critical('transaction has been rollbacked')
- except:
+ except Exception:
pass
raise
return cursor
diff -r d8bb8f631d41 -r a4e667270dd4 server/sources/ldapuser.py
--- a/server/sources/ldapuser.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/sources/ldapuser.py Fri Dec 09 12:08:27 2011 +0100
@@ -273,7 +273,7 @@
if self._conn is None:
try:
self._connect()
- except:
+ except Exception:
self.exception('unable to connect to ldap:')
return ConnectionWrapper(self._conn)
@@ -310,7 +310,11 @@
except Exception:
self.error('while trying to authenticate %s', user, exc_info=True)
raise AuthenticationError()
- return self.extid2eid(user['dn'], 'CWUser', session)
+ eid = self.repo.extid2eid(self, user['dn'], 'CWUser', session)
+ if eid < 0:
+ # user has been moved away from this source
+ raise AuthenticationError()
+ return eid
def ldap_name(self, var):
if var.stinfo['relations']:
@@ -392,7 +396,7 @@
break
assert mainvars, rqlst
columns, globtransforms = self.prepare_columns(mainvars, rqlst)
- eidfilters = []
+ eidfilters = [lambda x: x > 0]
allresults = []
generator = RQL2LDAPFilter(self, session, args, mainvars)
for mainvar in mainvars:
@@ -419,7 +423,7 @@
filteredres = []
for resdict in res:
# get sure the entity exists in the system table
- eid = self.extid2eid(resdict['dn'], 'CWUser', session)
+ eid = self.repo.extid2eid(self, resdict['dn'], 'CWUser', session)
for eidfilter in eidfilters:
if not eidfilter(eid):
break
@@ -524,21 +528,21 @@
"""make an ldap query"""
self.debug('ldap search %s %s %s %s %s', self.uri, base, scope,
searchstr, list(attrs))
- # XXX for now, we do not have connection pool support for LDAP, so
+ # XXX for now, we do not have connections set support for LDAP, so
# this is always self._conn
- cnx = session.pool.connection(self.uri).cnx
+ cnx = session.cnxset.connection(self.uri).cnx
try:
res = cnx.search_s(base, scope, searchstr, attrs)
except ldap.PARTIAL_RESULTS:
res = cnx.result(all=0)[1]
except ldap.NO_SUCH_OBJECT:
self.info('ldap NO SUCH OBJECT')
- eid = self.extid2eid(base, 'CWUser', session, insert=False)
+ eid = self.repo.extid2eid(self, base, 'CWUser', session, insert=False)
if eid:
self.warning('deleting ldap user with eid %s and dn %s',
eid, base)
entity = session.entity_from_eid(eid, 'CWUser')
- self.repo.delete_info(session, entity, self.uri, base)
+ self.repo.delete_info(session, entity, self.uri)
self.reset_caches()
return []
# except ldap.REFERRAL, e:
@@ -566,7 +570,7 @@
try:
for i in range(len(value)):
value[i] = unicode(value[i], 'utf8')
- except:
+ except Exception:
pass
if isinstance(value, list) and len(value) == 1:
rec_dict[key] = value = value[0]
@@ -642,6 +646,7 @@
"""generate an LDAP filter for a rql query"""
def __init__(self, source, session, args=None, mainvars=()):
self.source = source
+ self.repo = source.repo
self._ldap_attrs = source.user_rev_attrs
self._base_filters = source.base_filters
self._session = session
@@ -747,7 +752,7 @@
}[rhs.operator]
self._eidfilters.append(filter)
return
- dn = self.source.eid2extid(eid, self._session)
+ dn = self.repo.eid2extid(self.source, eid, self._session)
raise GotDN(dn)
try:
filter = '(%s%s)' % (self._ldap_attrs[relation.r_type],
diff -r d8bb8f631d41 -r a4e667270dd4 server/sources/native.py
--- a/server/sources/native.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/sources/native.py Fri Dec 09 12:08:27 2011 +0100
@@ -313,9 +313,9 @@
self.dbhelper.dbname = abspath(self.dbhelper.dbname)
self.get_connection = lambda: ConnectionWrapper(self)
self.check_connection = lambda cnx: cnx
- def pool_reset(cnx):
+ def cnxset_freed(cnx):
cnx.close()
- self.pool_reset = pool_reset
+ self.cnxset_freed = cnxset_freed
if self.dbdriver == 'sqlite':
self._create_eid = None
self.create_eid = self._create_eid_sqlite
@@ -355,21 +355,21 @@
"""execute the query and return its result"""
return self.process_result(self.doexec(session, sql, args))
- def init_creating(self, pool=None):
+ def init_creating(self, cnxset=None):
         # check full text index availability
if self.do_fti:
- if pool is None:
- _pool = self.repo._get_pool()
- _pool.pool_set()
+ if cnxset is None:
+ _cnxset = self.repo._get_cnxset()
+ _cnxset.cnxset_set()
else:
- _pool = pool
- if not self.dbhelper.has_fti_table(_pool['system']):
+ _cnxset = cnxset
+ if not self.dbhelper.has_fti_table(_cnxset['system']):
if not self.repo.config.creating:
self.critical('no text index table')
self.do_fti = False
- if pool is None:
- _pool.pool_reset()
- self.repo._free_pool(_pool)
+ if cnxset is None:
+ _cnxset.cnxset_freed()
+ self.repo._free_cnxset(_cnxset)
def backup(self, backupfile, confirm, format='native'):
"""method called to create a backup of the source's data"""
@@ -377,25 +377,25 @@
self.repo.fill_schema()
self.set_schema(self.repo.schema)
helper = DatabaseIndependentBackupRestore(self)
- self.close_pool_connections()
+ self.close_source_connections()
try:
helper.backup(backupfile)
finally:
- self.open_pool_connections()
+ self.open_source_connections()
elif format == 'native':
- self.close_pool_connections()
+ self.close_source_connections()
try:
self.backup_to_file(backupfile, confirm)
finally:
- self.open_pool_connections()
+ self.open_source_connections()
else:
raise ValueError('Unknown format %r' % format)
def restore(self, backupfile, confirm, drop, format='native'):
"""method called to restore a backup of source's data"""
- if self.repo.config.open_connections_pools:
- self.close_pool_connections()
+ if self.repo.config.init_cnxset_pool:
+ self.close_source_connections()
try:
if format == 'portable':
helper = DatabaseIndependentBackupRestore(self)
@@ -405,12 +405,18 @@
else:
raise ValueError('Unknown format %r' % format)
finally:
- if self.repo.config.open_connections_pools:
- self.open_pool_connections()
+ if self.repo.config.init_cnxset_pool:
+ self.open_source_connections()
def init(self, activated, source_entity):
- self.init_creating(source_entity._cw.pool)
+ self.init_creating(source_entity._cw.cnxset)
+ try:
+ # test if 'asource' column exists
+ query = self.dbhelper.sql_add_limit_offset('SELECT asource FROM entities', 1)
+ source_entity._cw.system_sql(query)
+        except Exception:
+ self.eid_type_source = self.eid_type_source_pre_131
def shutdown(self):
if self._eid_creation_cnx:
@@ -532,13 +538,13 @@
raise
# FIXME: better detection of deconnection pb
self.warning("trying to reconnect")
- session.pool.reconnect(self)
+ session.cnxset.reconnect(self)
cursor = self.doexec(session, sql, args)
except (self.DbapiError,), exc:
# We get this one with pyodbc and SQL Server when connection was reset
if exc.args[0] == '08S01' and session.mode != 'write':
self.warning("trying to reconnect")
- session.pool.reconnect(self)
+ session.cnxset.reconnect(self)
cursor = self.doexec(session, sql, args)
else:
raise
@@ -585,7 +591,7 @@
for table in temptables:
try:
self.doexec(session,'DROP TABLE %s' % table)
- except:
+ except Exception:
pass
try:
del self._temp_table_data[table]
@@ -727,9 +733,9 @@
"""Execute a query.
it's a function just so that it shows up in profiling
"""
- cursor = session.pool[self.uri]
+ cursor = session.cnxset[self.uri]
if server.DEBUG & server.DBG_SQL:
- cnx = session.pool.connection(self.uri)
+ cnx = session.cnxset.connection(self.uri)
# getattr to get the actual connection if cnx is a ConnectionWrapper
# instance
print 'exec', query, args, getattr(cnx, '_cnx', cnx)
@@ -744,7 +750,7 @@
query, args, ex.args[0])
if rollback:
try:
- session.pool.connection(self.uri).rollback()
+ session.cnxset.connection(self.uri).rollback()
if self.repo.config.mode != 'test':
self.critical('transaction has been rollbacked')
except Exception, ex:
@@ -773,7 +779,7 @@
"""
if server.DEBUG & server.DBG_SQL:
print 'execmany', query, 'with', len(args), 'arguments'
- cursor = session.pool[self.uri]
+ cursor = session.cnxset[self.uri]
try:
             # str(query) to avoid error if it's a unicode string
cursor.executemany(str(query), args)
@@ -784,10 +790,10 @@
self.critical("sql many: %r\n args: %s\ndbms message: %r",
query, args, ex.args[0])
try:
- session.pool.connection(self.uri).rollback()
+ session.cnxset.connection(self.uri).rollback()
if self.repo.config.mode != 'test':
self.critical('transaction has been rollbacked')
- except:
+ except Exception:
pass
raise
@@ -802,7 +808,7 @@
self.error("backend can't alter %s.%s to %s%s", table, column, coltype,
not allownull and 'NOT NULL' or '')
return
- self.dbhelper.change_col_type(LogCursor(session.pool[self.uri]),
+ self.dbhelper.change_col_type(LogCursor(session.cnxset[self.uri]),
table, column, coltype, allownull)
self.info('altered %s.%s: now %s%s', table, column, coltype,
not allownull and 'NOT NULL' or '')
@@ -817,7 +823,7 @@
return
table, column = rdef_table_column(rdef)
coltype, allownull = rdef_physical_info(self.dbhelper, rdef)
- self.dbhelper.set_null_allowed(LogCursor(session.pool[self.uri]),
+ self.dbhelper.set_null_allowed(LogCursor(session.cnxset[self.uri]),
table, column, coltype, allownull)
def update_rdef_indexed(self, session, rdef):
@@ -835,29 +841,49 @@
self.drop_index(session, table, column, unique=True)
def create_index(self, session, table, column, unique=False):
- cursor = LogCursor(session.pool[self.uri])
+ cursor = LogCursor(session.cnxset[self.uri])
self.dbhelper.create_index(cursor, table, column, unique)
def drop_index(self, session, table, column, unique=False):
- cursor = LogCursor(session.pool[self.uri])
+ cursor = LogCursor(session.cnxset[self.uri])
self.dbhelper.drop_index(cursor, table, column, unique)
# system source interface #################################################
- def eid_type_source(self, session, eid):
- """return a tuple (type, source, extid) for the entity with id """
- sql = 'SELECT type, source, extid FROM entities WHERE eid=%s' % eid
+ def _eid_type_source(self, session, eid, sql, _retry=True):
try:
res = self.doexec(session, sql).fetchone()
- except:
- assert session.pool, 'session has no pool set'
- raise UnknownEid(eid)
- if res is None:
- raise UnknownEid(eid)
- if res[-1] is not None:
+ if res is not None:
+ return res
+ except (self.OperationalError, self.InterfaceError):
+ if session.mode == 'read' and _retry:
+ self.warning("trying to reconnect (eid_type_source())")
+ session.cnxset.reconnect(self)
+ return self._eid_type_source(session, eid, sql, _retry=False)
+ except Exception:
+ assert session.cnxset, 'session has no connections set'
+ self.exception('failed to query entities table for eid %s', eid)
+ raise UnknownEid(eid)
+
+ def eid_type_source(self, session, eid): # pylint: disable=E0202
+        """return a (type, source, extid, asource) tuple for the entity with the given eid"""
+ sql = 'SELECT type, source, extid, asource FROM entities WHERE eid=%s' % eid
+ res = self._eid_type_source(session, eid, sql)
+ if res[-2] is not None:
if not isinstance(res, list):
res = list(res)
+ res[-2] = b64decode(res[-2])
+ return res
+
+ def eid_type_source_pre_131(self, session, eid):
+        """return a (type, source, extid, asource) tuple for the entity with
+        the given eid, for databases whose entities table predates the
+        asource column (< 3.13.1)"""
+ sql = 'SELECT type, source, extid FROM entities WHERE eid=%s' % eid
+ res = self._eid_type_source(session, eid, sql)
+ if not isinstance(res, list):
+ res = list(res)
+ if res[-1] is not None:
res[-1] = b64decode(res[-1])
+ res.append(res[1])
return res
def extid2eid(self, session, source_uri, extid):
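
init() probes the entities table for the new asource column and, when it is missing (database created before 3.13.1), rebinds eid_type_source on the instance to the pre-131 fallback which reuses the source column. A stand-alone sketch of that instance-level method swap, with stub data instead of SQL:

    class Source(object):
        def __init__(self, has_asource):
            if not has_asource:   # simulates the failed probe query
                self.eid_type_source = self.eid_type_source_pre_131

        def eid_type_source(self, eid):
            return ('Card', 'system', None, 'myfeed')

        def eid_type_source_pre_131(self, eid):
            res = ['Card', 'system', None]
            res.append(res[1])    # duplicate 'source' as 'asource'
            return tuple(res)

    print Source(True).eid_type_source(1)   # ('Card', 'system', None, 'myfeed')
    print Source(False).eid_type_source(1)  # ('Card', 'system', None, 'system')
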
@@ -874,7 +900,7 @@
result = cursor.fetchone()
if result:
return result[0]
- except:
+ except Exception:
pass
return None
@@ -898,13 +924,13 @@
return cursor.fetchone()[0]
- def create_eid(self, session):
+ def create_eid(self, session): # pylint: disable=E0202
# lock needed to prevent 'Connection is busy with results for another
# command (0)' errors with SQLServer
with self._eid_cnx_lock:
- return self._create_eid()
+ return self._create_eid() # pylint: disable=E1102
- def _create_eid(self):
+ def _create_eid(self): # pylint: disable=E0202
# internal function doing the eid creation without locking.
# needed for the recursive handling of disconnections (otherwise we
# deadlock on self._eid_cnx_lock
@@ -920,16 +946,16 @@
# FIXME: better detection of deconnection pb
self.warning("trying to reconnect create eid connection")
self._eid_creation_cnx = None
- return self._create_eid()
+ return self._create_eid() # pylint: disable=E1102
except (self.DbapiError,), exc:
# We get this one with pyodbc and SQL Server when connection was reset
if exc.args[0] == '08S01':
self.warning("trying to reconnect create eid connection")
self._eid_creation_cnx = None
- return self._create_eid()
+ return self._create_eid() # pylint: disable=E1102
else:
raise
- except: # WTF?
+ except Exception: # WTF?
cnx.rollback()
self._eid_creation_cnx = None
self.exception('create eid failed in an unforeseen way on SQL statement %s', sql)
@@ -946,7 +972,7 @@
extid = b64encode(extid)
uri = 'system' if source.copy_based_source else source.uri
attrs = {'type': entity.__regid__, 'eid': entity.eid, 'extid': extid,
- 'source': uri, 'mtime': datetime.now()}
+ 'source': uri, 'asource': source.uri, 'mtime': datetime.utcnow()}
self.doexec(session, self.sqlgen.insert('entities', attrs), attrs)
# insert core relations: is, is_instance_of and cw_source
try:
@@ -976,7 +1002,7 @@
self.index_entity(session, entity=entity)
# update entities.mtime.
# XXX Only if entity.__regid__ in self.multisources_etypes?
- attrs = {'eid': entity.eid, 'mtime': datetime.now()}
+ attrs = {'eid': entity.eid, 'mtime': datetime.utcnow()}
self.doexec(session, self.sqlgen.update('entities', attrs, ['eid']), attrs)
def delete_info_multi(self, session, entities, uri):
@@ -993,7 +1019,7 @@
if entities[0].__regid__ not in self.multisources_etypes:
return
attrs = {'type': entities[0].__regid__,
- 'source': uri, 'dtime': datetime.now()}
+ 'source': uri, 'dtime': datetime.utcnow()}
for entity in entities:
extid = entity.cw_metainformation()['extid']
if extid is not None:
@@ -1127,7 +1153,7 @@
important note: while undoing of a transaction, only hooks in the
'integrity', 'activeintegrity' and 'undo' categories are called.
"""
- # set mode so pool isn't released subsquently until commit/rollback
+        # set mode so the connections set isn't released until commit/rollback
session.mode = 'write'
errors = []
session.transaction_data['undoing_uuid'] = txuuid
@@ -1147,7 +1173,7 @@
table when some undoable transaction is started
"""
ueid = session.user.eid
- attrs = {'tx_uuid': uuid, 'tx_user': ueid, 'tx_time': datetime.now()}
+ attrs = {'tx_uuid': uuid, 'tx_user': ueid, 'tx_time': datetime.utcnow()}
self.doexec(session, self.sqlgen.insert('transactions', attrs), attrs)
def _save_attrs(self, session, entity, attrs):
@@ -1372,7 +1398,7 @@
def fti_unindex_entities(self, session, entities):
"""remove text content for entities from the full text index
"""
- cursor = session.pool['system']
+ cursor = session.cnxset['system']
cursor_unindex_object = self.dbhelper.cursor_unindex_object
try:
for entity in entities:
@@ -1385,7 +1411,7 @@
"""add text content of created/modified entities to the full text index
"""
cursor_index_object = self.dbhelper.cursor_index_object
- cursor = session.pool['system']
+ cursor = session.cnxset['system']
try:
# use cursor_index_object, not cursor_reindex_object since
# unindexing done in the FTIndexEntityOp
@@ -1434,6 +1460,7 @@
eid INTEGER PRIMARY KEY NOT NULL,
type VARCHAR(64) NOT NULL,
source VARCHAR(64) NOT NULL,
+ asource VARCHAR(64) NOT NULL,
mtime %s NOT NULL,
extid VARCHAR(256)
);;
@@ -1625,7 +1652,7 @@
return self._source.get_connection()
def backup(self, backupfile):
- archive=zipfile.ZipFile(backupfile, 'w')
+ archive=zipfile.ZipFile(backupfile, 'w', allowZip64=True)
self.cnx = self.get_connection()
try:
self.cursor = self.cnx.cursor()
@@ -1655,7 +1682,6 @@
prefix = 'cw_'
for etype in self.schema.entities():
eschema = self.schema.eschema(etype)
- print etype, eschema.final
if eschema.final:
continue
etype_tables.append('%s%s'%(prefix, etype))
@@ -1721,7 +1747,7 @@
return dumps((name, columns, rows), pickle.HIGHEST_PROTOCOL)
def restore(self, backupfile):
- archive = zipfile.ZipFile(backupfile, 'r')
+ archive = zipfile.ZipFile(backupfile, 'r', allowZip64=True)
self.cnx = self.get_connection()
self.cursor = self.cnx.cursor()
sequences, tables, table_chunks = self.read_metadata(archive, backupfile)
diff -r d8bb8f631d41 -r a4e667270dd4 server/sources/pyrorql.py
--- a/server/sources/pyrorql.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/sources/pyrorql.py Fri Dec 09 12:08:27 2011 +0100
@@ -191,7 +191,7 @@
self.support_entities[ertype] = 'write' in options
else: # CWRType
if ertype in ('is', 'is_instance_of', 'cw_source') or ertype in VIRTUAL_RTYPES:
- msg = schemacfg._cw._('%s relation should not be in mapped') % rtype
+ msg = schemacfg._cw._('%s relation should not be in mapped') % ertype
raise ValidationError(schemacfg.eid, {role_name('cw_for_schema', 'subject'): msg})
options = self._check_options(schemacfg, self.rtype_options)
if 'dontcross' in options:
@@ -226,7 +226,7 @@
self.cross_relations.remove(ertype)
else:
self.dont_cross_relations.remove(ertype)
- except:
+ except Exception:
self.error('while updating mapping consequently to removal of %s',
schemacfg)
@@ -235,10 +235,12 @@
if dexturi == 'system' or not (
dexturi in self.repo.sources_by_uri or self._skip_externals):
assert etype in self.support_entities, etype
- return self.repo.extid2eid(self, str(extid), etype, session), True
- if dexturi in self.repo.sources_by_uri:
+ eid = self.repo.extid2eid(self, str(extid), etype, session)
+ if eid > 0:
+ return eid, True
+ elif dexturi in self.repo.sources_by_uri:
source = self.repo.sources_by_uri[dexturi]
- cnx = session.pool.connection(source.uri)
+ cnx = session.cnxset.connection(source.uri)
eid = source.local_eid(cnx, dextid, session)[0]
return eid, False
return None, None
@@ -273,20 +275,22 @@
entity = rset.get_entity(0, 0)
entity.complete(entity.e_schema.indexable_attributes())
source.index_entity(session, entity)
- except:
+ except Exception:
self.exception('while updating %s with external id %s of source %s',
etype, extid, self.uri)
continue
for etype, extid in deleted:
try:
- eid = self.extid2eid(str(extid), etype, session,
- insert=False)
+ eid = self.repo.extid2eid(self, str(extid), etype, session,
+ insert=False)
# entity has been deleted from external repository but is not known here
if eid is not None:
entity = session.entity_from_eid(eid, etype)
- repo.delete_info(session, entity, self.uri, extid,
+ repo.delete_info(session, entity, self.uri,
scleanup=self.eid)
- except:
+ except Exception:
+ if self.repo.config.mode == 'test':
+ raise
self.exception('while updating %s with external id %s of source %s',
etype, extid, self.uri)
continue
@@ -322,7 +326,7 @@
else a new connection
"""
# we have to transfer manually thread ownership. This can be done safely
- # since the pool to which belong the connection is affected to one
+        # since the connections set holding the connection is assigned to one
# session/thread and can't be called simultaneously
try:
cnx._repo._transferThread(threading.currentThread())
@@ -359,7 +363,7 @@
if not args is None:
args = args.copy()
# get cached cursor anyway
- cu = session.pool[self.uri]
+ cu = session.cnxset[self.uri]
if cu is None:
# this is a ConnectionWrapper instance
msg = session._("can't connect to source %s, some data may be missing")
@@ -390,7 +394,7 @@
or uidtype(union, i, etype, args)):
needtranslation.append(i)
if needtranslation:
- cnx = session.pool.connection(self.uri)
+ cnx = session.cnxset.connection(self.uri)
for rowindex in xrange(rset.rowcount - 1, -1, -1):
row = rows[rowindex]
localrow = False
@@ -421,7 +425,7 @@
def _entity_relations_and_kwargs(self, session, entity):
relations = []
- kwargs = {'x': self.eid2extid(entity.eid, session)}
+ kwargs = {'x': self.repo.eid2extid(self, entity.eid, session)}
for key, val in entity.cw_attr_cache.iteritems():
relations.append('X %s %%(%s)s' % (key, key))
kwargs[key] = val
@@ -434,43 +438,52 @@
def update_entity(self, session, entity):
"""update an entity in the source"""
relations, kwargs = self._entity_relations_and_kwargs(session, entity)
- cu = session.pool[self.uri]
+ cu = session.cnxset[self.uri]
cu.execute('SET %s WHERE X eid %%(x)s' % ','.join(relations), kwargs)
self._query_cache.clear()
- entity.clear_all_caches()
+ entity.cw_clear_all_caches()
def delete_entity(self, session, entity):
"""delete an entity from the source"""
- cu = session.pool[self.uri]
+ if session.deleted_in_transaction(self.eid):
+ # source is being deleted, don't propagate
+ self._query_cache.clear()
+ return
+ cu = session.cnxset[self.uri]
cu.execute('DELETE %s X WHERE X eid %%(x)s' % entity.__regid__,
- {'x': self.eid2extid(entity.eid, session)})
+ {'x': self.repo.eid2extid(self, entity.eid, session)})
self._query_cache.clear()
def add_relation(self, session, subject, rtype, object):
"""add a relation to the source"""
- cu = session.pool[self.uri]
+ cu = session.cnxset[self.uri]
cu.execute('SET X %s Y WHERE X eid %%(x)s, Y eid %%(y)s' % rtype,
- {'x': self.eid2extid(subject, session),
- 'y': self.eid2extid(object, session)})
+ {'x': self.repo.eid2extid(self, subject, session),
+ 'y': self.repo.eid2extid(self, object, session)})
self._query_cache.clear()
- session.entity_from_eid(subject).clear_all_caches()
- session.entity_from_eid(object).clear_all_caches()
+ session.entity_from_eid(subject).cw_clear_all_caches()
+ session.entity_from_eid(object).cw_clear_all_caches()
def delete_relation(self, session, subject, rtype, object):
"""delete a relation from the source"""
- cu = session.pool[self.uri]
+ if session.deleted_in_transaction(self.eid):
+ # source is being deleted, don't propagate
+ self._query_cache.clear()
+ return
+ cu = session.cnxset[self.uri]
cu.execute('DELETE X %s Y WHERE X eid %%(x)s, Y eid %%(y)s' % rtype,
- {'x': self.eid2extid(subject, session),
- 'y': self.eid2extid(object, session)})
+ {'x': self.repo.eid2extid(self, subject, session),
+ 'y': self.repo.eid2extid(self, object, session)})
self._query_cache.clear()
- session.entity_from_eid(subject).clear_all_caches()
- session.entity_from_eid(object).clear_all_caches()
+ session.entity_from_eid(subject).cw_clear_all_caches()
+ session.entity_from_eid(object).cw_clear_all_caches()
class RQL2RQL(object):
"""translate a local rql query to be executed on a distant repository"""
def __init__(self, source):
self.source = source
+ self.repo = source.repo
self.current_operator = None
def _accept_children(self, node):
@@ -656,7 +669,7 @@
value = const.eval(self.kwargs)
try:
return None, self._const_var[value]
- except:
+ except Exception:
var = self._varmaker.next()
self.need_translation = True
restr = '%s eid %s' % (var, self.visit_constant(const))
@@ -666,7 +679,7 @@
def eid2extid(self, eid):
try:
- return self.source.eid2extid(eid, self._session)
+ return self.repo.eid2extid(self.source, eid, self._session)
except UnknownEid:
operator = self.current_operator
if operator is not None and operator != '=':
diff -r d8bb8f631d41 -r a4e667270dd4 server/sources/rql2sql.py
--- a/server/sources/rql2sql.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/sources/rql2sql.py Fri Dec 09 12:08:27 2011 +0100
@@ -56,6 +56,7 @@
from logilab.database import FunctionDescr, SQL_FUNCTIONS_REGISTRY
from rql import BadRQLQuery, CoercionError
+from rql.utils import common_parent
from rql.stmts import Union, Select
from rql.nodes import (SortTerm, VariableRef, Constant, Function, Variable, Or,
Not, Comparison, ColumnAlias, Relation, SubQuery, Exists)
@@ -669,7 +670,7 @@
else:
tocheck.append(compnode)
# tocheck hold a set of comparison not implying an aggregat function
- # put them in fakehaving if the don't share an Or node as ancestor
+ # put them in fakehaving if they don't share an Or node as ancestor
# with another comparison containing an aggregat function
for compnode in tocheck:
parents = set()
@@ -784,7 +785,20 @@
sorts = select.orderby
groups = select.groupby
having = select.having
- morerestr = extract_fake_having_terms(having)
+ for restr in extract_fake_having_terms(having):
+ scope = None
+ for vref in restr.get_nodes(VariableRef):
+ vscope = vref.variable.scope
+ if vscope is select:
+ continue # ignore select scope, so restriction is added to
+                         # the innermost scope possible
+ if scope is None:
+ scope = vscope
+ elif vscope is not scope:
+ scope = common_parent(scope, vscope).scope
+ if scope is None:
+ scope = select
+ scope.add_restriction(restr)
# remember selection, it may be changed and have to be restored
origselection = select.selection[:]
# check if the query will have union subquery, if it need sort term
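
When a fake-having restriction refers to variables from different scopes, the restriction is attached to the scope of their closest common ancestor. A simplified stand-in for rql.utils.common_parent, over nodes carrying a parent pointer rather than the real rql syntax tree:

    def common_parent(node1, node2):
        ancestors = set()
        while node1 is not None:    # collect node1 and all its ancestors
            ancestors.add(id(node1))
            node1 = node1.parent
        while node2 is not None:    # first hit walking up from node2 wins
            if id(node2) in ancestors:
                return node2
            node2 = node2.parent
        raise ValueError('nodes are not in the same tree')

    class Node(object):
        def __init__(self, parent=None):
            self.parent = parent

    root = Node()
    left = Node(root)
    l1, l2 = Node(left), Node(left)
    assert common_parent(l1, l2) is left
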
@@ -829,7 +843,7 @@
self._in_wrapping_query = False
self._state = state
try:
- sql = self._solutions_sql(select, morerestr, sols, distinct,
+ sql = self._solutions_sql(select, sols, distinct,
needalias or needwrap)
# generate groups / having before wrapping query selection to get
# correct column aliases
@@ -900,15 +914,13 @@
except KeyError:
continue
- def _solutions_sql(self, select, morerestr, solutions, distinct, needalias):
+ def _solutions_sql(self, select, solutions, distinct, needalias):
sqls = []
for solution in solutions:
self._state.reset(solution)
# visit restriction subtree
if select.where is not None:
self._state.add_restriction(select.where.accept(self))
- for restriction in morerestr:
- self._state.add_restriction(restriction.accept(self))
sql = [self._selection_sql(select.selection, distinct, needalias)]
if self._state.restrictions:
sql.append('WHERE %s' % ' AND '.join(self._state.restrictions))
@@ -1226,35 +1238,47 @@
def _visit_outer_join_inlined_relation(self, relation, rschema):
- leftvar, leftconst, rightvar, rightconst = relation_info(relation)
- assert not (leftconst and rightconst), "doesn't make sense"
- if relation.optional != 'right':
- leftvar, rightvar = rightvar, leftvar
- leftconst, rightconst = rightconst, leftconst
- outertype = 'FULL' if relation.optional == 'both' else 'LEFT'
- leftalias = self._var_table(leftvar)
+ lhsvar, lhsconst, rhsvar, rhsconst = relation_info(relation)
+ assert not (lhsconst and rhsconst), "doesn't make sense"
attr = 'eid' if relation.r_type == 'identity' else relation.r_type
- lhs, rhs = relation.get_variable_parts()
+ lhsalias = self._var_table(lhsvar)
+ rhsalias = rhsvar and self._var_table(rhsvar)
try:
- lhssql = self._varmap['%s.%s' % (lhs.name, attr)]
+ lhssql = self._varmap['%s.%s' % (lhsvar.name, attr)]
except KeyError:
- lhssql = '%s.%s%s' % (self._var_table(lhs.variable), SQL_PREFIX, attr)
- if rightvar is not None:
- rightalias = self._var_table(rightvar)
- if rightalias is None:
- if rightconst is not None:
- # inlined relation with invariant as rhs
- condition = '%s=%s' % (lhssql, rightconst.accept(self))
- if relation.r_type != 'identity':
- condition = '(%s OR %s IS NULL)' % (condition, lhssql)
- if not leftvar.stinfo.get('optrelations'):
- return condition
- self._state.add_outer_join_condition(leftalias, condition)
- return
- if leftalias is None:
- leftalias = leftvar._q_sql.split('.', 1)[0]
- self._state.replace_tables_by_outer_join(
- leftalias, rightalias, outertype, '%s=%s' % (lhssql, rhs.accept(self)))
+ if lhsalias is None:
+ lhssql = lhsconst.accept(self)
+ else:
+ lhssql = '%s.%s%s' % (lhsalias, SQL_PREFIX, attr)
+ condition = '%s=%s' % (lhssql, (rhsconst or rhsvar).accept(self))
+ # this is not a typo, rhs optional variable means lhs outer join and vice-versa
+ if relation.optional == 'left':
+ lhsvar, rhsvar = rhsvar, lhsvar
+ lhsconst, rhsconst = rhsconst, lhsconst
+ lhsalias, rhsalias = rhsalias, lhsalias
+ outertype = 'LEFT'
+ elif relation.optional == 'both':
+ outertype = 'FULL'
+ else:
+ outertype = 'LEFT'
+ if rhsalias is None:
+ if rhsconst is not None:
+ # inlined relation with invariant as rhs
+ if relation.r_type != 'identity':
+ condition = '(%s OR %s IS NULL)' % (condition, lhssql)
+ if not lhsvar.stinfo.get('optrelations'):
+ return condition
+ self._state.add_outer_join_condition(lhsalias, condition)
+ return
+ if lhsalias is None:
+ if lhsconst is not None and not rhsvar.stinfo.get('optrelations'):
+ return condition
+ lhsalias = lhsvar._q_sql.split('.', 1)[0]
+ if lhsalias == rhsalias:
+ self._state.add_outer_join_condition(lhsalias, condition)
+ else:
+ self._state.replace_tables_by_outer_join(
+ lhsalias, rhsalias, outertype, condition)
return ''
def _visit_var_attr_relation(self, relation, rhs_vars):
@@ -1280,9 +1304,16 @@
relation.r_type)
try:
self._state.ignore_varmap = True
- return '%s%s' % (lhssql, relation.children[1].accept(self))
+ sql = lhssql + relation.children[1].accept(self)
finally:
self._state.ignore_varmap = False
+ if relation.optional == 'right':
+ leftalias = self._var_table(principal.children[0].variable)
+ rightalias = self._var_table(relation.children[0].variable)
+ self._state.replace_tables_by_outer_join(
+ leftalias, rightalias, 'LEFT', sql)
+ return ''
+ return sql
return ''
def _visit_attribute_relation(self, rel):
@@ -1360,29 +1391,63 @@
def visit_comparison(self, cmp):
"""generate SQL for a comparison"""
+ optional = getattr(cmp, 'optional', None) # rql < 0.30
if len(cmp.children) == 2:
- # XXX occurs ?
+ # simplified expression from HAVING clause
lhs, rhs = cmp.children
else:
lhs = None
rhs = cmp.children[0]
+ assert not optional
+ sql = None
operator = cmp.operator
if operator in ('LIKE', 'ILIKE'):
if operator == 'ILIKE' and not self.dbhelper.ilike_support:
operator = ' LIKE '
else:
operator = ' %s ' % operator
+ elif operator == 'REGEXP':
+ sql = ' %s' % self.dbhelper.sql_regexp_match_expression(rhs.accept(self))
elif (operator == '=' and isinstance(rhs, Constant)
and rhs.eval(self._args) is None):
if lhs is None:
- return ' IS NULL'
- return '%s IS NULL' % lhs.accept(self)
+ sql = ' IS NULL'
+ else:
+ sql = '%s IS NULL' % lhs.accept(self)
elif isinstance(rhs, Function) and rhs.name == 'IN':
assert operator == '='
operator = ' '
- if lhs is None:
- return '%s%s'% (operator, rhs.accept(self))
- return '%s%s%s'% (lhs.accept(self), operator, rhs.accept(self))
+ if sql is None:
+ if lhs is None:
+ sql = '%s%s'% (operator, rhs.accept(self))
+ else:
+ sql = '%s%s%s'% (lhs.accept(self), operator, rhs.accept(self))
+ if optional is None:
+ return sql
+ leftvars = cmp.children[0].get_nodes(VariableRef)
+ assert len(leftvars) == 1
+ if leftvars[0].variable.stinfo['attrvar'] is None:
+ assert isinstance(leftvars[0].variable, ColumnAlias)
+ leftalias = leftvars[0].variable._q_sqltable
+ else:
+ leftalias = self._var_table(leftvars[0].variable.stinfo['attrvar'])
+ rightvars = cmp.children[1].get_nodes(VariableRef)
+ assert len(rightvars) == 1
+ if rightvars[0].variable.stinfo['attrvar'] is None:
+ assert isinstance(rightvars[0].variable, ColumnAlias)
+ rightalias = rightvars[0].variable._q_sqltable
+ else:
+ rightalias = self._var_table(rightvars[0].variable.stinfo['attrvar'])
+ if optional == 'right':
+ self._state.replace_tables_by_outer_join(
+ leftalias, rightalias, 'LEFT', sql)
+ elif optional == 'left':
+ self._state.replace_tables_by_outer_join(
+ rightalias, leftalias, 'LEFT', sql)
+ else:
+ self._state.replace_tables_by_outer_join(
+ leftalias, rightalias, 'FULL', sql)
+ return ''
def visit_mathexpression(self, mexpr):
"""generate SQL for a mathematic expression"""
@@ -1397,6 +1462,10 @@
pass
return '(%s %s %s)'% (lhs.accept(self), operator, rhs.accept(self))
+ def visit_unaryexpression(self, uexpr):
+ """generate SQL for a unary expression"""
+ return '%s%s'% (uexpr.operator, uexpr.children[0].accept(self))
+
def visit_function(self, func):
"""generate SQL name for a function"""
if func.name == 'FTIRANK':
@@ -1422,15 +1491,17 @@
if constant.type is None:
return 'NULL'
value = constant.value
- if constant.type == 'Int' and isinstance(constant.parent, SortTerm):
+ if constant.type == 'etype':
return value
+ if constant.type == 'Int': # XXX Float?
+ return str(value)
if constant.type in ('Date', 'Datetime'):
rel = constant.relation()
if rel is not None:
rel._q_needcast = value
return self.keyword_map[value]()
if constant.type == 'Boolean':
- value = self.dbhelper.boolean_value(value)
+ return str(self.dbhelper.boolean_value(value))
if constant.type == 'Substitute':
try:
# we may found constant from simplified var in varmap
@@ -1584,8 +1655,14 @@
scope = self._state.scopes[var.scope]
self._state.add_table(sql.split('.', 1)[0], scope=scope)
except KeyError:
- sql = '%s.%s%s' % (self._var_table(var), SQL_PREFIX, rtype)
- #self._state.done.add(var.name)
+            # rtype may be an attribute relation when called from
+            # _visit_var_attr_relation. Take care with the 'eid' rtype: in
+            # some cases we may use the `entities` table, in which case we
+            # have to use the variable's sql
+ if rtype == 'eid':
+ sql = var.accept(self)
+ else:
+ sql = '%s.%s%s' % (self._var_table(var), SQL_PREFIX, rtype)
return sql
def _linked_var_sql(self, variable):
diff -r d8bb8f631d41 -r a4e667270dd4 server/sources/storages.py
--- a/server/sources/storages.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/sources/storages.py Fri Dec 09 12:08:27 2011 +0100
@@ -211,7 +211,7 @@
"""return the current fs_path of the tribute.
Return None is the attr is not stored yet."""
- sysource = entity._cw.pool.source('system')
+ sysource = entity._cw.cnxset.source('system')
cu = sysource.doexec(entity._cw,
'SELECT cw_%s FROM cw_%s WHERE cw_eid=%s' % (
attr, entity.__regid__, entity.eid))
diff -r d8bb8f631d41 -r a4e667270dd4 server/sqlutils.py
--- a/server/sqlutils.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/sqlutils.py Fri Dec 09 12:08:27 2011 +0100
@@ -338,6 +338,17 @@
return _limit_size(text, maxsize)
cnx.create_function("TEXT_LIMIT_SIZE", 2, limit_size2)
+ from logilab.common.date import strptime
+ def weekday(ustr):
+ try:
+ dt = strptime(ustr, '%Y-%m-%d %H:%M:%S')
+        except ValueError:
+ dt = strptime(ustr, '%Y-%m-%d')
+        # python's weekday() returns 0 for monday; shift so that sunday is 0,
+        # monday 1, ..., saturday 6
+ return (dt.weekday() + 1) % 7
+ cnx.create_function("WEEKDAY", 1, weekday)
+
import yams.constraints
yams.constraints.patch_sqlite_decimal()
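
WEEKDAY becomes available to sqlite through create_function, like the other Python-level helpers registered here. A self-contained sketch of registering and calling it (2011-12-09 was a Friday, so python's weekday() gives 4 and the shifted value is 5):

    import sqlite3
    from datetime import datetime

    def weekday(ustr):
        try:
            dt = datetime.strptime(ustr, '%Y-%m-%d %H:%M:%S')
        except ValueError:
            dt = datetime.strptime(ustr, '%Y-%m-%d')
        return (dt.weekday() + 1) % 7

    cnx = sqlite3.connect(':memory:')
    cnx.create_function('WEEKDAY', 1, weekday)
    print cnx.execute("SELECT WEEKDAY('2011-12-09')").fetchone()[0]  # -> 5
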
diff -r d8bb8f631d41 -r a4e667270dd4 server/test/data/bootstrap_cubes
diff -r d8bb8f631d41 -r a4e667270dd4 server/test/data/site_cubicweb.py
--- a/server/test/data/site_cubicweb.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/test/data/site_cubicweb.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -20,14 +20,10 @@
from logilab.database.sqlite import register_sqlite_pyfunc
from rql.utils import register_function
-try:
- class DUMB_SORT(FunctionDescr):
- pass
+class DUMB_SORT(FunctionDescr):
+ pass
- register_function(DUMB_SORT)
- def dumb_sort(something):
- return something
- register_sqlite_pyfunc(dumb_sort)
-except:
- # already registered
- pass
+register_function(DUMB_SORT)
+def dumb_sort(something):
+ return something
+register_sqlite_pyfunc(dumb_sort)
diff -r d8bb8f631d41 -r a4e667270dd4 server/test/unittest_datafeed.py
--- a/server/test/unittest_datafeed.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/test/unittest_datafeed.py Fri Dec 09 12:08:27 2011 +0100
@@ -37,19 +37,21 @@
self.assertEqual(dfsource.synchro_interval, timedelta(seconds=60))
self.assertFalse(dfsource.fresh())
+
class AParser(datafeed.DataFeedParser):
__regid__ = 'testparser'
- def process(self, url):
+ def process(self, url, raise_on_error=False):
entity = self.extid2entity('http://www.cubicweb.org/', 'Card',
- item={'title': u'cubicweb.org',
- 'content': u'the cw web site'})
+ item={'title': u'cubicweb.org',
+ 'content': u'the cw web site'})
if not self.created_during_pull(entity):
self.notify_updated(entity)
def before_entity_copy(self, entity, sourceparams):
entity.cw_edited.update(sourceparams['item'])
with self.temporary_appobjects(AParser):
- stats = dfsource.pull_data(self.session, force=True)
+ session = self.repo.internal_session()
+ stats = dfsource.pull_data(session, force=True)
self.commit()
# test import stats
self.assertEqual(sorted(stats.keys()), ['created', 'updated'])
@@ -64,26 +66,29 @@
self.assertEqual(entity.cw_source[0].name, 'myfeed')
self.assertEqual(entity.cw_metainformation(),
{'type': 'Card',
- 'source': {'uri': 'system', 'type': 'native'},
+ 'source': {'uri': 'myfeed', 'type': 'datafeed', 'use-cwuri-as-url': True},
'extid': 'http://www.cubicweb.org/'}
)
+ self.assertEqual(entity.absolute_url(), 'http://www.cubicweb.org/')
# test repo cache keys
self.assertEqual(self.repo._type_source_cache[entity.eid],
- ('Card', 'system', 'http://www.cubicweb.org/'))
+ ('Card', 'system', 'http://www.cubicweb.org/', 'myfeed'))
self.assertEqual(self.repo._extid_cache[('http://www.cubicweb.org/', 'system')],
entity.eid)
# test repull
- stats = dfsource.pull_data(self.session, force=True)
+ session.set_cnxset()
+ stats = dfsource.pull_data(session, force=True)
self.assertEqual(stats['created'], set())
self.assertEqual(stats['updated'], set((entity.eid,)))
# test repull with caches reset
self.repo._type_source_cache.clear()
self.repo._extid_cache.clear()
- stats = dfsource.pull_data(self.session, force=True)
+ session.set_cnxset()
+ stats = dfsource.pull_data(session, force=True)
self.assertEqual(stats['created'], set())
self.assertEqual(stats['updated'], set((entity.eid,)))
self.assertEqual(self.repo._type_source_cache[entity.eid],
- ('Card', 'system', 'http://www.cubicweb.org/'))
+ ('Card', 'system', 'http://www.cubicweb.org/', 'myfeed'))
self.assertEqual(self.repo._extid_cache[('http://www.cubicweb.org/', 'system')],
entity.eid)
@@ -93,6 +98,30 @@
self.assertTrue(dfsource.latest_retrieval)
self.assertTrue(dfsource.fresh())
+ # test_rename_source
+ req = self.request()
+ req.execute('SET S name "myrenamedfeed" WHERE S is CWSource, S name "myfeed"')
+ self.commit()
+ entity = self.execute('Card X').get_entity(0, 0)
+ self.assertEqual(entity.cwuri, 'http://www.cubicweb.org/')
+ self.assertEqual(entity.cw_source[0].name, 'myrenamedfeed')
+ self.assertEqual(entity.cw_metainformation(),
+ {'type': 'Card',
+ 'source': {'uri': 'myrenamedfeed', 'type': 'datafeed', 'use-cwuri-as-url': True},
+ 'extid': 'http://www.cubicweb.org/'}
+ )
+ self.assertEqual(self.repo._type_source_cache[entity.eid],
+ ('Card', 'system', 'http://www.cubicweb.org/', 'myrenamedfeed'))
+ self.assertEqual(self.repo._extid_cache[('http://www.cubicweb.org/', 'system')],
+ entity.eid)
+
+ # test_delete_source
+ req = self.request()
+ req.execute('DELETE CWSource S WHERE S name "myrenamedfeed"')
+ self.commit()
+ self.failIf(self.execute('Card X WHERE X title "cubicweb.org"'))
+ self.failIf(self.execute('Any X WHERE X has_text "cubicweb.org"'))
+
if __name__ == '__main__':
from logilab.common.testlib import unittest_main
unittest_main()
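
Custom parsers must follow the process() signature change exercised above
(raise_on_error was added); a minimal sketch in the style of AParser, with an
illustrative regid, url and item payload:

    class AnotherParser(datafeed.DataFeedParser):
        __regid__ = 'anotherparser'

        def process(self, url, raise_on_error=False):
            # map an external identifier onto a local Card entity
            entity = self.extid2entity('http://example.org/doc', 'Card',
                                       item={'title': u'example doc',
                                             'content': u'fetched content'})
            if not self.created_during_pull(entity):
                self.notify_updated(entity)

        def before_entity_copy(self, entity, sourceparams):
            # push the fetched attributes onto the entity being written
            entity.cw_edited.update(sourceparams['item'])
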
diff -r d8bb8f631d41 -r a4e667270dd4 server/test/unittest_hook.py
--- a/server/test/unittest_hook.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/test/unittest_hook.py Fri Dec 09 12:08:27 2011 +0100
@@ -23,7 +23,7 @@
from logilab.common.testlib import TestCase, unittest_main, mock_object
-from cubicweb.devtools import TestServerConfiguration
+from cubicweb.devtools import TestServerConfiguration, fake
from cubicweb.devtools.testlib import CubicWebTC
from cubicweb.server import hook
from cubicweb.hooks import integrity, syncschema
@@ -124,10 +124,8 @@
def test_call_hook(self):
self.o.register(AddAnyHook)
dis = set()
- cw = mock_object(vreg=self.vreg,
- set_read_security=lambda *a,**k: None,
- set_write_security=lambda *a,**k: None,
- is_hook_activated=lambda x, cls: cls.category not in dis)
+ cw = fake.FakeSession()
+ cw.is_hook_activated = lambda cls: cls.category not in dis
self.assertRaises(HookCalled,
self.o.call_hooks, 'before_add_entity', cw)
dis.add('cat1')
@@ -203,10 +201,10 @@
# self.assertEqual(self.called, [(1, 'concerne', 2), (3, 'concerne', 4)])
-# def _before_relation_hook(self, pool, subject, r_type, object):
+# def _before_relation_hook(self, cnxset, subject, r_type, object):
# self.called.append((subject, r_type, object))
-# def _after_relation_hook(self, pool, subject, r_type, object):
+# def _after_relation_hook(self, cnxset, subject, r_type, object):
# self.called.append((subject, r_type, object))
diff -r d8bb8f631d41 -r a4e667270dd4 server/test/unittest_ldapuser.py
--- a/server/test/unittest_ldapuser.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/test/unittest_ldapuser.py Fri Dec 09 12:08:27 2011 +0100
@@ -61,7 +61,7 @@
# no such user
raise AuthenticationError()
# don't check upassword !
- return self.extid2eid(user['dn'], 'CWUser', session)
+ return self.repo.extid2eid(self, user['dn'], 'CWUser', session)
def setUpModule(*args):
create_slapd_configuration(LDAPUserSourceTC.config)
@@ -137,7 +137,7 @@
def test_authenticate(self):
source = self.repo.sources_by_uri['ldapuser']
- self.session.set_pool()
+ self.session.set_cnxset()
self.assertRaises(AuthenticationError,
source.authenticate, self.session, 'toto', 'toto')
@@ -239,7 +239,7 @@
iworkflowable.fire_transition('deactivate')
try:
cnx.commit()
- adim.clear_all_caches()
+ adim.cw_clear_all_caches()
self.assertEqual(adim.in_state[0].name, 'deactivated')
trinfo = iworkflowable.latest_trinfo()
self.assertEqual(trinfo.owned_by[0].login, SYT)
@@ -265,7 +265,7 @@
self.failUnless(self.sexecute('Any X,Y WHERE X login %(syt)s, Y login "cochon"', {'syt': SYT}))
def test_exists1(self):
- self.session.set_pool()
+ self.session.set_cnxset()
self.session.create_entity('CWGroup', name=u'bougloup1')
self.session.create_entity('CWGroup', name=u'bougloup2')
self.sexecute('SET U in_group G WHERE G name ~= "bougloup%", U login "admin"')
@@ -378,6 +378,27 @@
rset = cu.execute('Any F WHERE X has_text "iaminguestsgrouponly", X firstname F')
self.assertEqual(rset.rows, [[None]])
+ def test_copy_to_system_source(self):
+ source = self.repo.sources_by_uri['ldapuser']
+ eid = self.sexecute('CWUser X WHERE X login %(login)s', {'login': SYT})[0][0]
+ self.sexecute('SET X cw_source S WHERE X eid %(x)s, S name "system"', {'x': eid})
+ self.commit()
+ source.reset_caches()
+ rset = self.sexecute('CWUser X WHERE X login %(login)s', {'login': SYT})
+ self.assertEqual(len(rset), 1)
+ e = rset.get_entity(0, 0)
+ self.assertEqual(e.eid, eid)
+ self.assertEqual(e.cw_metainformation(), {'source': {'type': u'native', 'uri': u'system', 'use-cwuri-as-url': False},
+ 'type': 'CWUser',
+ 'extid': None})
+ self.assertEqual(e.cw_source[0].name, 'system')
+ self.failUnless(e.creation_date)
+ self.failUnless(e.modification_date)
+        # XXX test that some password has been set
+ source.synchronize()
+ rset = self.sexecute('CWUser X WHERE X login %(login)s', {'login': SYT})
+ self.assertEqual(len(rset), 1)
+
def test_nonregr1(self):
self.sexecute('Any X,AA ORDERBY AA DESC WHERE E eid %(x)s, E owned_by X, '
'X modification_date AA',
@@ -465,8 +486,8 @@
self._schema = repo.schema
super(RQL2LDAPFilterTC, self).setUp()
ldapsource = repo.sources[-1]
- self.pool = repo._get_pool()
- session = mock_object(pool=self.pool)
+ self.cnxset = repo._get_cnxset()
+ session = mock_object(cnxset=self.cnxset)
self.o = RQL2LDAPFilter(ldapsource, session)
self.ldapclasses = ''.join(ldapsource.base_filters)
diff -r d8bb8f631d41 -r a4e667270dd4 server/test/unittest_migractions.py
--- a/server/test/unittest_migractions.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/test/unittest_migractions.py Fri Dec 09 12:08:27 2011 +0100
@@ -338,7 +338,7 @@
@tag('longrun')
def test_sync_schema_props_perms(self):
cursor = self.mh.session
- cursor.set_pool()
+ cursor.set_cnxset()
nbrqlexpr_start = cursor.execute('Any COUNT(X) WHERE X is RQLExpression')[0][0]
migrschema['titre'].rdefs[('Personne', 'String')].order = 7
migrschema['adel'].rdefs[('Personne', 'String')].order = 6
diff -r d8bb8f631d41 -r a4e667270dd4 server/test/unittest_msplanner.py
--- a/server/test/unittest_msplanner.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/test/unittest_msplanner.py Fri Dec 09 12:08:27 2011 +0100
@@ -296,7 +296,7 @@
True)
def test_not_relation_no_split_external(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999)
+ repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
# similar to the above test but with an eid coming from the external source.
# the same plan may be used, since we won't find any record in the system source
# linking 999999 to a state
@@ -313,13 +313,15 @@
True)
def test_simplified_var(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999)
+ repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
+        # need access to the source since X's table has to be accessed because of the outer join
self._test('Any U WHERE U in_group G, (G name IN ("managers", "logilab") OR (X require_permission P?, P name "bla", P require_group G)), X eid %(x)s, U eid %(u)s',
{'x': 999999, 'u': self.session.user.eid},
- {self.system: {'P': s[0], 'G': s[0], 'X': s[0],
+ {self.system: {'P': s[0], 'G': s[0],
'require_permission': s[0], 'in_group': s[0], 'P': s[0], 'require_group': s[0],
- 'u': s[0]}},
- False)
+ 'u': s[0]},
+ self.cards: {'X': s[0]}},
+ True)
def test_delete_relation1(self):
ueid = self.session.user.eid
@@ -329,7 +331,7 @@
False)
def test_crossed_relation_eid_1_needattr(self):
- repo._type_source_cache[999999] = ('Note', 'system', 999999)
+ repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
ueid = self.session.user.eid
self._test('Any Y,T WHERE X eid %(x)s, X multisource_crossed_rel Y, Y type T',
{'x': 999999,},
@@ -337,14 +339,14 @@
True)
def test_crossed_relation_eid_1_invariant(self):
- repo._type_source_cache[999999] = ('Note', 'system', 999999)
+ repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
self._test('Any Y WHERE X eid %(x)s, X multisource_crossed_rel Y',
{'x': 999999},
{self.system: {'Y': s[0], 'x': s[0]}},
False)
def test_crossed_relation_eid_2_invariant(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999)
+ repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
self._test('Any Y WHERE X eid %(x)s, X multisource_crossed_rel Y',
{'x': 999999,},
{self.cards: {'Y': s[0], 'multisource_crossed_rel': s[0], 'x': s[0]},
@@ -352,7 +354,7 @@
False)
def test_version_crossed_depends_on_1(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999)
+ repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
self._test('Any X,AD,AE WHERE E eid %(x)s, E multisource_crossed_rel X, X in_state AD, AD name AE',
{'x': 999999},
{self.cards: {'X': s[0], 'AD': s[0], 'multisource_crossed_rel': s[0], 'x': s[0]},
@@ -360,7 +362,7 @@
True)
def test_version_crossed_depends_on_2(self):
- repo._type_source_cache[999999] = ('Note', 'system', 999999)
+ repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
self._test('Any X,AD,AE WHERE E eid %(x)s, E multisource_crossed_rel X, X in_state AD, AD name AE',
{'x': 999999},
{self.cards: {'X': s[0], 'AD': s[0]},
@@ -368,8 +370,8 @@
True)
def test_simplified_var_3(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999)
- repo._type_source_cache[999998] = ('State', 'cards', 999998)
+ repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
+ repo._type_source_cache[999998] = ('State', 'cards', 999998, 'cards')
self._test('Any S,T WHERE S eid %(s)s, N eid %(n)s, N type T, N is Note, S is State',
{'n': 999999, 's': 999998},
{self.cards: {'s': s[0], 'N': s[0]}}, False)
@@ -1210,12 +1212,12 @@
[{'X': 'Note', 'S': 'State'}])],
[self.cards, self.system], None, {'X': 'table0.C0', 'S': 'table0.C1'}, []),
('UnionStep', None, None,
- [('OneFetchStep', [('Any X,S,U WHERE X in_state S, X todo_by U, S is State, U is CWUser, X is Note',
+ [('OneFetchStep', [('Any X,S,U WHERE X in_state S, X todo_by U, S is State, U is Personne, X is Affaire',
+ [{'X': 'Affaire', 'S': 'State', 'U': 'Personne'}])],
+ None, None, [self.system], {}, []),
+ ('OneFetchStep', [('Any X,S,U WHERE X todo_by U, S is State, U is CWUser, X is Note',
[{'X': 'Note', 'S': 'State', 'U': 'CWUser'}])],
None, None, [self.system], {'X': 'table0.C0', 'S': 'table0.C1'}, []),
- ('OneFetchStep', [('Any X,S,U WHERE X in_state S, X todo_by U, S is State, U is Personne, X is Affaire',
- [{'X': 'Affaire', 'S': 'State', 'U': 'Personne'}])],
- None, None, [self.system], {}, []),
])
])
@@ -1266,7 +1268,7 @@
{'x': ueid})
def test_not_relation_no_split_external(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999)
+ repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
# similar to the above test but with an eid coming from the external source.
# the same plan may be used, since we won't find any record in the system source
# linking 999999 to a state
@@ -1297,7 +1299,7 @@
)])
def test_external_attributes_and_relation(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999)
+ repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
self._test('Any A,B,C,D WHERE A eid %(x)s,A creation_date B,A modification_date C, A todo_by D?',
[('FetchStep', [('Any A,B,C WHERE A eid 999999, A creation_date B, A modification_date C, A is Note',
[{'A': 'Note', 'C': 'Datetime', 'B': 'Datetime'}])],
@@ -1312,12 +1314,31 @@
{'x': 999999})
- def test_simplified_var(self):
+ def test_simplified_var_1(self):
ueid = self.session.user.eid
- repo._type_source_cache[999999] = ('Note', 'cards', 999999)
- self._test('Any U WHERE U in_group G, (G name IN ("managers", "logilab") OR (X require_permission P?, P name "bla", P require_group G)), X eid %(x)s, U eid %(u)s',
- [('OneFetchStep', [('Any %s WHERE %s in_group G, (G name IN("managers", "logilab")) OR (X require_permission P?, P name "bla", P require_group G), X eid 999999' % (ueid, ueid),
- [{'X': 'Note', 'G': 'CWGroup', 'P': 'CWPermission'}])],
+ repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
+        # need access to the cards source since X's table has to be accessed because of the outer join
+ self._test('Any U WHERE U in_group G, (G name IN ("managers", "logilab") OR '
+ '(X require_permission P?, P name "bla", P require_group G)), X eid %(x)s, U eid %(u)s',
+ [('FetchStep',
+ [('Any 999999', [{}])], [self.cards],
+ None, {u'%(x)s': 'table0.C0'}, []),
+ ('OneFetchStep',
+ [(u'Any 6 WHERE 6 in_group G, (G name IN("managers", "logilab")) OR '
+ '(X require_permission P?, P name "bla", P require_group G), '
+ 'G is CWGroup, P is CWPermission, X is Note',
+ [{'G': 'CWGroup', 'P': 'CWPermission', 'X': 'Note'}])],
+ None, None, [self.system], {u'%(x)s': 'table0.C0'}, [])],
+ {'x': 999999, 'u': ueid})
+
+ def test_simplified_var_2(self):
+ ueid = self.session.user.eid
+ repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
+        # no need to access the source since X is invariant
+ self._test('Any U WHERE U in_group G, (G name IN ("managers", "logilab") OR '
+ '(X require_permission P, P name "bla", P require_group G)), X eid %(x)s, U eid %(u)s',
+ [('OneFetchStep', [('Any %s WHERE %s in_group G, (G name IN("managers", "logilab")) OR (999999 require_permission P, P name "bla", P require_group G)' % (ueid, ueid),
+ [{'G': 'CWGroup', 'P': 'CWPermission'}])],
None, None, [self.system], {}, [])],
{'x': 999999, 'u': ueid})
@@ -1529,7 +1550,7 @@
{'E': ueid})
def test_eid_dont_cross_relation_1(self):
- repo._type_source_cache[999999] = ('Personne', 'system', 999999)
+ repo._type_source_cache[999999] = ('Personne', 'system', 999999, 'system')
self._test('Any Y,YT WHERE X eid %(x)s, X fiche Y, Y title YT',
[('OneFetchStep', [('Any Y,YT WHERE X eid 999999, X fiche Y, Y title YT',
[{'X': 'Personne', 'Y': 'Card', 'YT': 'String'}])],
@@ -1537,7 +1558,7 @@
{'x': 999999})
def test_eid_dont_cross_relation_2(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999)
+ repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
self.cards.dont_cross_relations.add('concerne')
try:
self._test('Any Y,S,YT,X WHERE Y concerne X, Y in_state S, X eid 999999, Y ref YT',
@@ -1552,7 +1573,7 @@
# external source w/ .cross_relations == ['multisource_crossed_rel'] ######
def test_crossed_relation_eid_1_invariant(self):
- repo._type_source_cache[999999] = ('Note', 'system', 999999)
+ repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
self._test('Any Y WHERE X eid %(x)s, X multisource_crossed_rel Y',
[('OneFetchStep', [('Any Y WHERE 999999 multisource_crossed_rel Y', [{u'Y': 'Note'}])],
None, None, [self.system], {}, [])
@@ -1560,7 +1581,7 @@
{'x': 999999,})
def test_crossed_relation_eid_1_needattr(self):
- repo._type_source_cache[999999] = ('Note', 'system', 999999)
+ repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
self._test('Any Y,T WHERE X eid %(x)s, X multisource_crossed_rel Y, Y type T',
[('FetchStep', [('Any Y,T WHERE Y type T, Y is Note', [{'T': 'String', 'Y': 'Note'}])],
[self.cards, self.system], None,
@@ -1573,7 +1594,7 @@
{'x': 999999,})
def test_crossed_relation_eid_2_invariant(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999)
+ repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
self._test('Any Y WHERE X eid %(x)s, X multisource_crossed_rel Y',
[('OneFetchStep', [('Any Y WHERE 999999 multisource_crossed_rel Y, Y is Note', [{'Y': 'Note'}])],
None, None, [self.cards, self.system], {}, [])
@@ -1581,7 +1602,7 @@
{'x': 999999,})
def test_crossed_relation_eid_2_needattr(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999)
+ repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
self._test('Any Y,T WHERE X eid %(x)s, X multisource_crossed_rel Y, Y type T',
[('OneFetchStep', [('Any Y,T WHERE 999999 multisource_crossed_rel Y, Y type T, Y is Note',
[{'T': 'String', 'Y': 'Note'}])],
@@ -1591,7 +1612,7 @@
{'x': 999999,})
def test_crossed_relation_eid_not_1(self):
- repo._type_source_cache[999999] = ('Note', 'system', 999999)
+ repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
self._test('Any Y WHERE X eid %(x)s, NOT X multisource_crossed_rel Y',
[('FetchStep', [('Any Y WHERE Y is Note', [{'Y': 'Note'}])],
[self.cards, self.system], None, {'Y': 'table0.C0'}, []),
@@ -1608,7 +1629,7 @@
# {'x': 999999,})
def test_crossed_relation_base_XXXFIXME(self):
- repo._type_source_cache[999999] = ('Note', 'system', 999999)
+ repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
self._test('Any X,Y,T WHERE X multisource_crossed_rel Y, Y type T, X type T',
[('FetchStep', [('Any X,T WHERE X type T, X is Note', [{'T': 'String', 'X': 'Note'}])],
[self.cards, self.system], None,
@@ -1697,8 +1718,8 @@
# edition queries tests ###################################################
def test_insert_simplified_var_1(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999)
- repo._type_source_cache[999998] = ('State', 'system', None)
+ repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
+ repo._type_source_cache[999998] = ('State', 'system', None, 'system')
self._test('INSERT Note X: X in_state S, X type T WHERE S eid %(s)s, N eid %(n)s, N type T',
[('InsertStep',
[('InsertRelationsStep',
@@ -1710,8 +1731,8 @@
{'n': 999999, 's': 999998})
def test_insert_simplified_var_2(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999)
- repo._type_source_cache[999998] = ('State', 'system', None)
+ repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
+ repo._type_source_cache[999998] = ('State', 'system', None, 'system')
self._test('INSERT Note X: X in_state S, X type T, X migrated_from N WHERE S eid %(s)s, N eid %(n)s, N type T',
[('InsertStep',
[('InsertRelationsStep',
@@ -1724,8 +1745,8 @@
{'n': 999999, 's': 999998})
def test_insert_simplified_var_3(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999)
- repo._type_source_cache[999998] = ('State', 'cards', 999998)
+ repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
+ repo._type_source_cache[999998] = ('State', 'cards', 999998, 'cards')
self._test('INSERT Note X: X in_state S, X type T WHERE S eid %(s)s, N eid %(n)s, N type T',
[('InsertStep',
[('InsertRelationsStep',
@@ -1737,8 +1758,8 @@
{'n': 999999, 's': 999998})
def test_insert_simplified_var_4(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999)
- repo._type_source_cache[999998] = ('State', 'system', None)
+ repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
+ repo._type_source_cache[999998] = ('State', 'system', None, 'system')
self._test('INSERT Note X: X in_state S, X type "bla", X migrated_from N WHERE S eid %(s)s, N eid %(n)s',
[('InsertStep',
[('InsertRelationsStep', [])]
@@ -1746,8 +1767,8 @@
{'n': 999999, 's': 999998})
def test_insert_simplified_var_5(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999)
- repo._type_source_cache[999998] = ('State', 'system', None)
+ repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
+ repo._type_source_cache[999998] = ('State', 'system', None, 'system')
self._test('INSERT Note X: X in_state S, X type "bla", X migrated_from N WHERE S eid %(s)s, N eid %(n)s, A concerne N',
[('InsertStep',
[('InsertRelationsStep',
@@ -1784,19 +1805,23 @@
{'x': ueid, 'y': ueid})
def test_delete_relation3(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999)
- self._test('DELETE Y multisource_inlined_rel X WHERE X eid %(x)s, NOT (Y cw_source S, S name %(source)s)',
- [('DeleteRelationsStep',
- [('OneFetchStep',
- [('Any Y,999999 WHERE Y multisource_inlined_rel 999999, NOT EXISTS(Y cw_source S, S name "cards"), S is CWSource, Y is IN(Card, Note)',
- [{'S': 'CWSource', 'Y': 'Card'}, {'S': 'CWSource', 'Y': 'Note'}])],
- None, None, [self.system], {},
- [])]
- )],
- {'x': 999999, 'source': 'cards'})
+ repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
+ self.assertRaises(
+ BadRQLQuery, self._test,
+ 'DELETE Y multisource_inlined_rel X WHERE X eid %(x)s, '
+ 'NOT (Y cw_source S, S name %(source)s)', [],
+ {'x': 999999, 'source': 'cards'})
+
+ def test_delete_relation4(self):
+ repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
+ self.assertRaises(
+ BadRQLQuery, self._test,
+ 'DELETE X multisource_inlined_rel Y WHERE Y is Note, X eid %(x)s, '
+ 'NOT (Y cw_source S, S name %(source)s)', [],
+ {'x': 999999, 'source': 'cards'})
def test_delete_entity1(self):
- repo._type_source_cache[999999] = ('Note', 'system', 999999)
+ repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
self._test('DELETE Note X WHERE X eid %(x)s, NOT Y multisource_rel X',
[('DeleteEntitiesStep',
[('OneFetchStep', [('Any 999999 WHERE NOT EXISTS(Y multisource_rel 999999), Y is IN(Card, Note)',
@@ -1807,7 +1832,7 @@
{'x': 999999})
def test_delete_entity2(self):
- repo._type_source_cache[999999] = ('Note', 'system', 999999)
+ repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
self._test('DELETE Note X WHERE X eid %(x)s, NOT X multisource_inlined_rel Y',
[('DeleteEntitiesStep',
[('OneFetchStep', [('Any X WHERE X eid 999999, NOT X multisource_inlined_rel Y, X is Note, Y is IN(Affaire, Note)',
@@ -1872,7 +1897,7 @@
# ])
def test_ldap_user_related_to_invariant_and_dont_cross_rel(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999)
+ self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
self.cards.dont_cross_relations.add('created_by')
try:
self._test('Any X,XL WHERE E eid %(x)s, E created_by X, X login XL',
@@ -1893,7 +1918,7 @@
self.cards.dont_cross_relations.remove('created_by')
def test_ambigous_cross_relation(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999)
+ self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
self.cards.support_relations['see_also'] = True
self.cards.cross_relations.add('see_also')
try:
@@ -2044,7 +2069,7 @@
])
def test_source_conflict_1(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999)
+ self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
with self.assertRaises(BadRQLQuery) as cm:
self._test('Any X WHERE X cw_source S, S name "system", X eid %(x)s',
[], {'x': 999999})
@@ -2067,7 +2092,7 @@
def test_ambigous_cross_relation_source_specified(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999)
+ self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
self.cards.support_relations['see_also'] = True
self.cards.cross_relations.add('see_also')
try:
@@ -2198,7 +2223,7 @@
])
def test_nonregr7(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999)
+ repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
self._test('Any S,SUM(DUR),SUM(I),(SUM(I) - SUM(DUR)),MIN(DI),MAX(DI) GROUPBY S ORDERBY S WHERE A is Affaire, A duration DUR, A invoiced I, A modification_date DI, A in_state S, S name SN, (EXISTS(A concerne WP, W multisource_rel WP)) OR (EXISTS(A concerne W)), W eid %(n)s',
[('FetchStep', [('Any WP WHERE 999999 multisource_rel WP, WP is Note', [{'WP': 'Note'}])],
[self.cards], None, {'WP': u'table0.C0'}, []),
@@ -2208,7 +2233,7 @@
{'n': 999999})
def test_nonregr8(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999)
+ repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
self._test('Any X,Z WHERE X eid %(x)s, X multisource_rel Y, Z concerne X',
[('FetchStep', [('Any 999999 WHERE 999999 multisource_rel Y, Y is Note',
[{'Y': 'Note'}])],
@@ -2223,8 +2248,8 @@
{'x': 999999})
def test_nonregr9(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999)
- repo._type_source_cache[999998] = ('Note', 'cards', 999998)
+ repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
+ repo._type_source_cache[999998] = ('Note', 'cards', 999998, 'cards')
self._test('SET X migrated_from Y WHERE X eid %(x)s, Y multisource_rel Z, Z eid %(z)s, Y migrated_from Z',
[('FetchStep', [('Any Y WHERE Y multisource_rel 999998, Y is Note', [{'Y': 'Note'}])],
[self.cards], None, {'Y': u'table0.C0'}, []),
@@ -2236,7 +2261,7 @@
{'x': 999999, 'z': 999998})
def test_nonregr10(self):
- repo._type_source_cache[999999] = ('CWUser', 'ldap', 999999)
+ repo._type_source_cache[999999] = ('CWUser', 'ldap', 999999, 'ldap')
self._test('Any X,AA,AB ORDERBY AA WHERE E eid %(x)s, E owned_by X, X login AA, X modification_date AB',
[('FetchStep',
[('Any X,AA,AB WHERE X login AA, X modification_date AB, X is CWUser',
@@ -2254,7 +2279,7 @@
{'x': 999999})
def test_nonregr11(self):
- repo._type_source_cache[999999] = ('Bookmark', 'system', 999999)
+ repo._type_source_cache[999999] = ('Bookmark', 'system', 999999, 'system')
self._test('SET X bookmarked_by Y WHERE X eid %(x)s, Y login "hop"',
[('UpdateStep',
[('OneFetchStep', [('DISTINCT Any Y WHERE Y login "hop", Y is CWUser', [{'Y': 'CWUser'}])],
@@ -2263,7 +2288,7 @@
{'x': 999999})
def test_nonregr12(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999)
+ repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
self._test('Any X ORDERBY Z DESC WHERE X modification_date Z, E eid %(x)s, E see_also X',
[('FetchStep', [('Any X,Z WHERE X modification_date Z, X is Note',
[{'X': 'Note', 'Z': 'Datetime'}])],
@@ -2347,38 +2372,38 @@
{'x': self.session.user.eid})
def test_nonregr14_1(self):
- repo._type_source_cache[999999] = ('CWUser', 'ldap', 999999)
+ repo._type_source_cache[999999] = ('CWUser', 'ldap', 999999, 'ldap')
self._test('Any X WHERE X eid %(x)s, X owned_by U, U eid %(u)s',
[('OneFetchStep', [('Any 999999 WHERE 999999 owned_by 999999', [{}])],
None, None, [self.system], {}, [])],
{'x': 999999, 'u': 999999})
def test_nonregr14_2(self):
- repo._type_source_cache[999999] = ('CWUser', 'ldap', 999999)
- repo._type_source_cache[999998] = ('Note', 'system', 999998)
+ repo._type_source_cache[999999] = ('CWUser', 'ldap', 999999, 'ldap')
+ repo._type_source_cache[999998] = ('Note', 'system', 999998, 'system')
self._test('Any X WHERE X eid %(x)s, X owned_by U, U eid %(u)s',
[('OneFetchStep', [('Any 999998 WHERE 999998 owned_by 999999', [{}])],
None, None, [self.system], {}, [])],
{'x': 999998, 'u': 999999})
def test_nonregr14_3(self):
- repo._type_source_cache[999999] = ('CWUser', 'system', 999999)
- repo._type_source_cache[999998] = ('CWUser', 'ldap', 999998)
+ repo._type_source_cache[999999] = ('CWUser', 'system', 999999, 'system')
+ repo._type_source_cache[999998] = ('CWUser', 'ldap', 999998, 'ldap')
self._test('Any X WHERE X eid %(x)s, X owned_by U, U eid %(u)s',
[('OneFetchStep', [('Any 999998 WHERE 999998 owned_by 999999', [{}])],
None, None, [self.system], {}, [])],
{'x': 999998, 'u': 999999})
def test_nonregr_identity_no_source_access_1(self):
- repo._type_source_cache[999999] = ('CWUser', 'ldap', 999998)
+ repo._type_source_cache[999999] = ('CWUser', 'ldap', 999998, 'ldap')
self._test('Any S WHERE S identity U, S eid %(s)s, U eid %(u)s',
[('OneFetchStep', [('Any 999999 WHERE 999999 identity 999999', [{}])],
None, None, [self.system], {}, [])],
{'s': 999999, 'u': 999999})
def test_nonregr_identity_no_source_access_2(self):
- repo._type_source_cache[999999] = ('EmailAddress', 'system', 999999)
- repo._type_source_cache[999998] = ('CWUser', 'ldap', 999998)
+ repo._type_source_cache[999999] = ('EmailAddress', 'system', 999999, 'system')
+ repo._type_source_cache[999998] = ('CWUser', 'ldap', 999998, 'ldap')
self._test('Any X WHERE O use_email X, ((EXISTS(O identity U)) OR (EXISTS(O in_group G, G name IN("managers", "staff")))) OR (EXISTS(O in_group G2, U in_group G2, NOT G2 name "users")), X eid %(x)s, U eid %(u)s',
[('OneFetchStep', [('Any 999999 WHERE O use_email 999999, ((EXISTS(O identity 999998)) OR (EXISTS(O in_group G, G name IN("managers", "staff")))) OR (EXISTS(O in_group G2, 999998 in_group G2, NOT G2 name "users"))',
[{'G': 'CWGroup', 'G2': 'CWGroup', 'O': 'CWUser'}])],
@@ -2386,7 +2411,7 @@
{'x': 999999, 'u': 999998})
def test_nonregr_similar_subquery(self):
- repo._type_source_cache[999999] = ('Personne', 'system', 999999)
+ repo._type_source_cache[999999] = ('Personne', 'system', 999999, 'system')
self._test('Any T,TD,U,T,UL WITH T,TD,U,UL BEING ('
'(Any T,TD,U,UL WHERE X eid %(x)s, T comments X, T content TD, T created_by U?, U login UL)'
' UNION '
@@ -2435,6 +2460,21 @@
[])],
{'x': 999999})
+ def test_nonregr_dont_readd_already_processed_relation(self):
+ self._test('Any WO,D,SO WHERE WO is Note, D tags WO, WO in_state SO',
+ [('FetchStep',
+ [('Any WO,SO WHERE WO in_state SO, SO is State, WO is Note',
+ [{'SO': 'State', 'WO': 'Note'}])],
+ [self.cards, self.system], None,
+ {'SO': 'table0.C1', 'WO': 'table0.C0'},
+ []),
+ ('OneFetchStep',
+ [('Any WO,D,SO WHERE D tags WO, D is Tag, SO is State, WO is Note',
+ [{'D': 'Tag', 'SO': 'State', 'WO': 'Note'}])],
+ None, None, [self.system],
+ {'SO': 'table0.C1', 'WO': 'table0.C0'},
+ [])
+ ])
class MSPlannerTwoSameExternalSourcesTC(BasePlannerTC):
"""test planner related feature on a 3-sources repository:
@@ -2456,7 +2496,7 @@
def test_linked_external_entities(self):
- repo._type_source_cache[999999] = ('Tag', 'system', 999999)
+ repo._type_source_cache[999999] = ('Tag', 'system', 999999, 'system')
self._test('Any X,XT WHERE X is Card, X title XT, T tags X, T eid %(t)s',
[('FetchStep',
[('Any X,XT WHERE X title XT, X is Card', [{'X': 'Card', 'XT': 'String'}])],
@@ -2472,7 +2512,7 @@
{'t': 999999})
def test_version_depends_on(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999)
+ self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
self._test('Any X,AD,AE WHERE E eid %(x)s, E migrated_from X, X in_state AD, AD name AE',
[('FetchStep', [('Any X,AD,AE WHERE X in_state AD, AD name AE, AD is State, X is Note',
[{'AD': 'State', 'AE': 'String', 'X': 'Note'}])],
@@ -2488,7 +2528,7 @@
{'x': 999999})
def test_version_crossed_depends_on_1(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999)
+ self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
self._test('Any X,AD,AE WHERE E eid %(x)s, E multisource_crossed_rel X, X in_state AD, AD name AE',
[('FetchStep', [('Any X,AD,AE WHERE X in_state AD, AD name AE, AD is State, X is Note',
[{'AD': 'State', 'AE': 'String', 'X': 'Note'}])],
@@ -2511,7 +2551,7 @@
{'x': 999999})
def test_version_crossed_depends_on_2(self):
- self.repo._type_source_cache[999999] = ('Note', 'system', 999999)
+ self.repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
self._test('Any X,AD,AE WHERE E eid %(x)s, E multisource_crossed_rel X, X in_state AD, AD name AE',
[('FetchStep', [('Any X,AD,AE WHERE X in_state AD, AD name AE, AD is State, X is Note',
[{'AD': 'State', 'AE': 'String', 'X': 'Note'}])],
@@ -2587,7 +2627,7 @@
)
def test_nonregr_dont_cross_rel_source_filtering_1(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999)
+ self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
self._test('Any S WHERE E eid %(x)s, E in_state S, NOT S name "moved"',
[('OneFetchStep', [('Any S WHERE 999999 in_state S, NOT S name "moved", S is State',
[{'S': 'State'}])],
@@ -2596,7 +2636,7 @@
{'x': 999999})
def test_nonregr_dont_cross_rel_source_filtering_2(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999)
+ self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
self._test('Any X,AA,AB WHERE E eid %(x)s, E in_state X, X name AA, X modification_date AB',
[('OneFetchStep', [('Any X,AA,AB WHERE 999999 in_state X, X name AA, X modification_date AB, X is State',
[{'AA': 'String', 'AB': 'Datetime', 'X': 'State'}])],
@@ -2605,7 +2645,7 @@
{'x': 999999})
def test_nonregr_eid_query(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999)
+ self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
self._test('Any X WHERE X eid 999999',
[('OneFetchStep', [('Any 999999', [{}])],
None, None, [self.system], {}, []
@@ -2671,6 +2711,29 @@
])
])
+ def test_remove_from_deleted_source_1(self):
+ self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
+ self._test('Note X WHERE X eid 999999, NOT X cw_source Y',
+ [('OneFetchStep',
+ [('Any 999999 WHERE NOT EXISTS(999999 cw_source Y)',
+ [{'Y': 'CWSource'}])],
+ None, None, [self.system], {}, [])
+ ])
+
+ def test_remove_from_deleted_source_2(self):
+ self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
+ self.repo._type_source_cache[999998] = ('Note', 'cards', 999998, 'cards')
+ self._test('Note X WHERE X eid IN (999998, 999999), NOT X cw_source Y',
+ [('FetchStep',
+ [('Any X WHERE X eid IN(999998, 999999), X is Note',
+ [{'X': 'Note'}])],
+ [self.cards], None, {'X': 'table0.C0'}, []),
+ ('OneFetchStep',
+ [('Any X WHERE NOT EXISTS(X cw_source Y, Y is CWSource), X is Note',
+ [{'X': 'Note', 'Y': 'CWSource'}])],
+ None, None, [self.system],{'X': 'table0.C0'}, [])
+ ])
+
class FakeVCSSource(AbstractSource):
uri = 'ccc'
@@ -2707,17 +2770,17 @@
])
def test_fully_simplified_extsource(self):
- self.repo._type_source_cache[999998] = ('Note', 'vcs', 999998)
- self.repo._type_source_cache[999999] = ('Note', 'vcs', 999999)
+ self.repo._type_source_cache[999998] = ('Note', 'vcs', 999998, 'vcs')
+ self.repo._type_source_cache[999999] = ('Note', 'vcs', 999999, 'vcs')
self._test('Any X, Y WHERE NOT X multisource_rel Y, X eid 999998, Y eid 999999',
[('OneFetchStep', [('Any 999998,999999 WHERE NOT EXISTS(999998 multisource_rel 999999)', [{}])],
None, None, [self.vcs], {}, [])
])
def test_nonregr_fully_simplified_extsource(self):
- self.repo._type_source_cache[999998] = ('Note', 'vcs', 999998)
- self.repo._type_source_cache[999999] = ('Note', 'vcs', 999999)
- self.repo._type_source_cache[1000000] = ('Note', 'system', 1000000)
+ self.repo._type_source_cache[999998] = ('Note', 'vcs', 999998, 'vcs')
+ self.repo._type_source_cache[999999] = ('Note', 'vcs', 999999, 'vcs')
+ self.repo._type_source_cache[1000000] = ('Note', 'system', 1000000, 'system')
self._test('DISTINCT Any T,FALSE,L,M WHERE L eid 1000000, M eid 999999, T eid 999998',
[('OneFetchStep', [('DISTINCT Any 999998,FALSE,1000000,999999', [{}])],
None, None, [self.system], {}, [])
diff -r d8bb8f631d41 -r a4e667270dd4 server/test/unittest_multisources.py
--- a/server/test/unittest_multisources.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/test/unittest_multisources.py Fri Dec 09 12:08:27 2011 +0100
@@ -30,7 +30,7 @@
class ExternalSource2Configuration(TestServerConfiguration):
sourcefile = 'sources_multi'
-MTIME = datetime.now() - timedelta(0, 10)
+MTIME = datetime.utcnow() - timedelta(0, 10)
EXTERN_SOURCE_CFG = u'''
pyro-ns-id = extern
@@ -160,11 +160,11 @@
# since they are ordered by eid, we know the first 3 come from the system source
# and the others from the external source
self.assertEqual(rset.get_entity(0, 0).cw_metainformation(),
- {'source': {'type': 'native', 'uri': 'system'},
+ {'source': {'type': 'native', 'uri': 'system', 'use-cwuri-as-url': False},
'type': u'Card', 'extid': None})
externent = rset.get_entity(3, 0)
metainf = externent.cw_metainformation()
- self.assertEqual(metainf['source'], {'type': 'pyrorql', 'base-url': 'http://extern.org/', 'uri': 'extern'})
+ self.assertEqual(metainf['source'], {'type': 'pyrorql', 'base-url': 'http://extern.org/', 'uri': 'extern', 'use-cwuri-as-url': False})
self.assertEqual(metainf['type'], 'Card')
self.assert_(metainf['extid'])
etype = self.sexecute('Any ETN WHERE X is ET, ET name ETN, X eid %(x)s',
@@ -381,6 +381,16 @@
def test_nonregr3(self):
self.sexecute('DELETE Card X WHERE X eid %(x)s, NOT X multisource_inlined_rel Y', {'x': self.ic1})
+ def test_nonregr4(self):
+ self.sexecute('Any X,S,U WHERE X in_state S, X todo_by U')
+
+ def test_delete_source(self):
+ req = self.request()
+ req.execute('DELETE CWSource S WHERE S name "extern"')
+ self.commit()
+ cu = self.session.system_sql("SELECT * FROM entities WHERE source='extern'")
+ self.failIf(cu.fetchall())
+
if __name__ == '__main__':
from logilab.common.testlib import unittest_main
unittest_main()
diff -r d8bb8f631d41 -r a4e667270dd4 server/test/unittest_querier.py
--- a/server/test/unittest_querier.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/test/unittest_querier.py Fri Dec 09 12:08:27 2011 +0100
@@ -311,6 +311,14 @@
seid = self.execute('State X WHERE X name "deactivated"')[0][0]
rset = self.execute('Any U,L,S GROUPBY U,L,S WHERE X in_state S, U login L, S eid %s' % seid)
+ def test_select_groupby_funccall(self):
+ rset = self.execute('Any YEAR(CD), COUNT(X) GROUPBY YEAR(CD) WHERE X is CWUser, X creation_date CD')
+ self.assertListEqual(rset.rows, [[date.today().year, 2]])
+
+ def test_select_groupby_colnumber(self):
+ rset = self.execute('Any YEAR(CD), COUNT(X) GROUPBY 1 WHERE X is CWUser, X creation_date CD')
+ self.assertListEqual(rset.rows, [[date.today().year, 2]])
+
def test_select_complex_orderby(self):
rset1 = self.execute('Any N ORDERBY N WHERE X name N')
self.assertEqual(sorted(rset1.rows), rset1.rows)
@@ -435,7 +443,7 @@
self.execute("INSERT Personne X: X nom 'foo', X datenaiss %(d)s",
{'d': datetime(2001, 2,3, 12,13)})
test_data = [('YEAR', 2001), ('MONTH', 2), ('DAY', 3),
- ('HOUR', 12), ('MINUTE', 13)]
+ ('HOUR', 12), ('MINUTE', 13), ('WEEKDAY', 6)]
for funcname, result in test_data:
rset = self.execute('Any %s(D) WHERE X is Personne, X datenaiss D'
% funcname)
@@ -443,6 +451,15 @@
self.assertEqual(rset.rows[0][0], result)
self.assertEqual(rset.description, [('Int',)])
+ def test_regexp_based_pattern_matching(self):
+ peid1 = self.execute("INSERT Personne X: X nom 'bidule'")[0][0]
+ peid2 = self.execute("INSERT Personne X: X nom 'cidule'")[0][0]
+ rset = self.execute('Any X WHERE X is Personne, X nom REGEXP "^b"')
+ self.assertEqual(len(rset.rows), 1, rset.rows)
+ self.assertEqual(rset.rows[0][0], peid1)
+ rset = self.execute('Any X WHERE X is Personne, X nom REGEXP "idu"')
+ self.assertEqual(len(rset.rows), 2, rset.rows)
+
def test_select_aggregat_count(self):
rset = self.execute('Any COUNT(X)')
self.assertEqual(len(rset.rows), 1)
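
REGEXP is translated per backend (~ on PostgreSQL, a python callback on
sqlite, skipped on sqlserver, as the rql2sql tests further down show); a
sketch of the sqlite side using the stdlib sqlite3 module, independent of the
logilab.database registration actually used:

    import re
    import sqlite3

    def py_regexp(pattern, value):
        # sqlite evaluates `value REGEXP pattern` as REGEXP(pattern, value)
        return value is not None and re.search(pattern, value) is not None

    cnx = sqlite3.connect(':memory:')
    cnx.create_function('REGEXP', 2, py_regexp)
    cnx.execute('CREATE TABLE personne (nom TEXT)')
    cnx.executemany('INSERT INTO personne VALUES (?)',
                    [('bidule',), ('cidule',)])
    print(cnx.execute("SELECT nom FROM personne "
                      "WHERE nom REGEXP '^b'").fetchall())  # [('bidule',)]
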
@@ -768,7 +785,7 @@
def test_select_boolean(self):
rset = self.execute('Any N WHERE X is CWEType, X name N, X final %(val)s',
{'val': True})
- self.assertEqual(sorted(r[0] for r in rset.rows), ['Boolean', 'Bytes',
+ self.assertEqual(sorted(r[0] for r in rset.rows), ['BigInt', 'Boolean', 'Bytes',
'Date', 'Datetime',
'Decimal', 'Float',
'Int', 'Interval',
@@ -776,13 +793,19 @@
'TZDatetime', 'TZTime',
'Time'])
rset = self.execute('Any N WHERE X is CWEType, X name N, X final TRUE')
- self.assertEqual(sorted(r[0] for r in rset.rows), ['Boolean', 'Bytes',
+ self.assertEqual(sorted(r[0] for r in rset.rows), ['BigInt', 'Boolean', 'Bytes',
'Date', 'Datetime',
'Decimal', 'Float',
'Int', 'Interval',
'Password', 'String',
'TZDatetime', 'TZTime',
'Time'])
+ req = self.session
+ req.create_entity('Personne', nom=u'louis', test=True)
+ self.assertEqual(len(req.execute('Any X WHERE X test %(val)s', {'val': True})), 1)
+ self.assertEqual(len(req.execute('Any X WHERE X test TRUE')), 1)
+ self.assertEqual(len(req.execute('Any X WHERE X test %(val)s', {'val': False})), 0)
+ self.assertEqual(len(req.execute('Any X WHERE X test FALSE')), 0)
def test_select_constant(self):
rset = self.execute('Any X, "toto" ORDERBY X WHERE X is CWGroup')
@@ -1099,7 +1122,7 @@
#'INSERT Email X: X messageid "<1234>", X subject "test", X sender Y, X recipients Y'
eeid, = self.o.execute(s, 'INSERT Email X: X messageid "<1234>", X subject "test", X sender Y, X recipients Y WHERE Y is EmailAddress')[0]
self.o.execute(s, "DELETE Email X")
- sqlc = s.pool['system']
+ sqlc = s.cnxset['system']
sqlc.execute('SELECT * FROM recipients_relation')
self.assertEqual(len(sqlc.fetchall()), 0)
sqlc.execute('SELECT * FROM owned_by_relation WHERE eid_from=%s'%eeid)
@@ -1212,7 +1235,7 @@
self.assertEqual(rset.description, [('CWUser',)])
self.assertRaises(Unauthorized,
self.execute, "Any P WHERE X is CWUser, X login 'bob', X upassword P")
- cursor = self.pool['system']
+ cursor = self.cnxset['system']
cursor.execute("SELECT %supassword from %sCWUser WHERE %slogin='bob'"
% (SQL_PREFIX, SQL_PREFIX, SQL_PREFIX))
passwd = str(cursor.fetchone()[0])
@@ -1227,7 +1250,7 @@
self.assertEqual(rset.description[0][0], 'CWUser')
rset = self.execute("SET X upassword %(pwd)s WHERE X is CWUser, X login 'bob'",
{'pwd': 'tutu'})
- cursor = self.pool['system']
+ cursor = self.cnxset['system']
cursor.execute("SELECT %supassword from %sCWUser WHERE %slogin='bob'"
% (SQL_PREFIX, SQL_PREFIX, SQL_PREFIX))
passwd = str(cursor.fetchone()[0])
diff -r d8bb8f631d41 -r a4e667270dd4 server/test/unittest_repository.py
--- a/server/test/unittest_repository.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/test/unittest_repository.py Fri Dec 09 12:08:27 2011 +0100
@@ -24,6 +24,7 @@
import sys
import threading
import time
+import logging
from copy import deepcopy
from datetime import datetime
@@ -62,13 +63,13 @@
table = SQL_PREFIX + 'CWEType'
namecol = SQL_PREFIX + 'name'
finalcol = SQL_PREFIX + 'final'
- self.session.set_pool()
+ self.session.set_cnxset()
cu = self.session.system_sql('SELECT %s FROM %s WHERE %s is NULL' % (
namecol, table, finalcol))
self.assertEqual(cu.fetchall(), [])
cu = self.session.system_sql('SELECT %s FROM %s WHERE %s=%%(final)s ORDER BY %s'
- % (namecol, table, finalcol, namecol), {'final': 'TRUE'})
- self.assertEqual(cu.fetchall(), [(u'Boolean',), (u'Bytes',),
+ % (namecol, table, finalcol, namecol), {'final': True})
+ self.assertEqual(cu.fetchall(), [(u'BigInt',), (u'Boolean',), (u'Bytes',),
(u'Date',), (u'Datetime',),
(u'Decimal',),(u'Float',),
(u'Int',),
@@ -259,7 +260,7 @@
cnxid = repo.connect(self.admlogin, password=self.admpassword)
# rollback state change which trigger TrInfo insertion
session = repo._get_session(cnxid)
- session.set_pool()
+ session.set_cnxset()
user = session.user
user.cw_adapt_to('IWorkflowable').fire_transition('deactivate')
rset = repo.execute(cnxid, 'TrInfo T WHERE T wf_info_for X, X eid %(x)s', {'x': user.eid})
@@ -292,7 +293,7 @@
try:
with self.assertRaises(Exception) as cm:
run_transaction()
- self.assertEqual(str(cm.exception), 'try to access pool on a closed session')
+ self.assertEqual(str(cm.exception), 'try to access connections set on a closed session %s' % cnxid)
finally:
t.join()
@@ -365,7 +366,6 @@
schema = cnx.get_schema()
self.failUnless(cnx.vreg)
self.failUnless('etypes' in cnx.vreg)
- self.assertEqual(schema.__hashmode__, None)
cu = cnx.cursor()
rset = cu.execute('Any U,G WHERE U in_group G')
user = iter(rset.entities()).next()
@@ -382,9 +382,9 @@
def test_internal_api(self):
repo = self.repo
cnxid = repo.connect(self.admlogin, password=self.admpassword)
- session = repo._get_session(cnxid, setpool=True)
+ session = repo._get_session(cnxid, setcnxset=True)
self.assertEqual(repo.type_and_source_from_eid(2, session),
- ('CWGroup', 'system', None))
+ ('CWGroup', 'system', None, 'system'))
self.assertEqual(repo.type_from_eid(2, session), 'CWGroup')
self.assertEqual(repo.source_from_eid(2, session).uri, 'system')
self.assertEqual(repo.eid2extid(repo.system_source, 2, session), None)
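
The eid description tuple grows from 3 to 4 elements throughout this
changeset: (etype, uri of the source actually storing the entity, extid, name
of the entity's cw_source). The fourth element is what lets the rename-source
test earlier keep working. A hypothetical, self-contained model of the cache
update:

    from collections import namedtuple

    # illustrative model of repo._type_source_cache entries after this change
    EidInfo = namedtuple('EidInfo', 'etype storage_uri extid source_name')

    cache = {2: EidInfo('CWGroup', 'system', None, 'system'),
             42: EidInfo('Card', 'system', 'http://www.cubicweb.org/',
                         'myfeed')}

    def rename_source(cache, old, new):
        # renaming a CWSource only has to touch the fourth element
        for eid, info in cache.items():
            if info.source_name == old:
                cache[eid] = info._replace(source_name=new)

    rename_source(cache, 'myfeed', 'myrenamedfeed')
    assert cache[42].source_name == 'myrenamedfeed'
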
@@ -394,7 +394,10 @@
def test_public_api(self):
self.assertEqual(self.repo.get_schema(), self.repo.schema)
- self.assertEqual(self.repo.source_defs(), {'system': {'type': 'native', 'uri': 'system'}})
+ self.assertEqual(self.repo.source_defs(), {'system': {'type': 'native',
+ 'uri': 'system',
+ 'use-cwuri-as-url': False}
+ })
# .properties() return a result set
self.assertEqual(self.repo.properties().rql, 'Any K,V WHERE P is CWProperty,P pkey K, P value V, NOT P for_user U')
@@ -402,7 +405,7 @@
repo = self.repo
cnxid = repo.connect(self.admlogin, password=self.admpassword)
self.assertEqual(repo.user_info(cnxid), (6, 'admin', set([u'managers']), {}))
- self.assertEqual(repo.describe(cnxid, 2), (u'CWGroup', u'system', None))
+ self.assertEqual(repo.describe(cnxid, 2), (u'CWGroup', u'system', None, 'system'))
repo.close(cnxid)
self.assertRaises(BadConnectionId, repo.user_info, cnxid)
self.assertRaises(BadConnectionId, repo.describe, cnxid, 1)
@@ -519,38 +522,39 @@
class DataHelpersTC(CubicWebTC):
def test_create_eid(self):
- self.session.set_pool()
+ self.session.set_cnxset()
self.assert_(self.repo.system_source.create_eid(self.session))
def test_source_from_eid(self):
- self.session.set_pool()
+ self.session.set_cnxset()
self.assertEqual(self.repo.source_from_eid(1, self.session),
self.repo.sources_by_uri['system'])
def test_source_from_eid_raise(self):
- self.session.set_pool()
+ self.session.set_cnxset()
self.assertRaises(UnknownEid, self.repo.source_from_eid, -2, self.session)
def test_type_from_eid(self):
- self.session.set_pool()
+ self.session.set_cnxset()
self.assertEqual(self.repo.type_from_eid(2, self.session), 'CWGroup')
def test_type_from_eid_raise(self):
- self.session.set_pool()
+ self.session.set_cnxset()
self.assertRaises(UnknownEid, self.repo.type_from_eid, -2, self.session)
def test_add_delete_info(self):
entity = self.repo.vreg['etypes'].etype_class('Personne')(self.session)
entity.eid = -1
entity.complete = lambda x: None
- self.session.set_pool()
+ self.session.set_cnxset()
self.repo.add_info(self.session, entity, self.repo.system_source)
cu = self.session.system_sql('SELECT * FROM entities WHERE eid = -1')
data = cu.fetchall()
- self.assertIsInstance(data[0][3], datetime)
+ self.assertIsInstance(data[0][4], datetime)
data[0] = list(data[0])
- data[0][3] = None
- self.assertEqual(tuplify(data), [(-1, 'Personne', 'system', None, None)])
+ data[0][4] = None
+ self.assertEqual(tuplify(data), [(-1, 'Personne', 'system', 'system',
+ None, None)])
self.repo.delete_info(self.session, entity, 'system', None)
#self.repo.commit()
cu = self.session.system_sql('SELECT * FROM entities WHERE eid = -1')
@@ -566,7 +570,7 @@
self.commit()
ts = datetime.now()
self.assertEqual(len(self.execute('Personne X WHERE X has_text "tutu"')), 1)
- self.session.set_pool()
+ self.session.set_cnxset()
cu = self.session.system_sql('SELECT mtime, eid FROM entities WHERE eid = %s' % eidp)
omtime = cu.fetchone()[0]
# our sqlite datetime adapter ignores the seconds fraction, so we have to
@@ -575,7 +579,7 @@
self.execute('SET X nom "tata" WHERE X eid %(x)s', {'x': eidp})
self.commit()
self.assertEqual(len(self.execute('Personne X WHERE X has_text "tutu"')), 1)
- self.session.set_pool()
+ self.session.set_cnxset()
cu = self.session.system_sql('SELECT mtime FROM entities WHERE eid = %s' % eidp)
mtime = cu.fetchone()[0]
self.failUnless(omtime < mtime)
@@ -646,7 +650,7 @@
CubicWebTC.setUp(self)
CALLED[:] = ()
- def _after_relation_hook(self, pool, fromeid, rtype, toeid):
+ def _after_relation_hook(self, cnxset, fromeid, rtype, toeid):
self.called.append((fromeid, rtype, toeid))
def test_inline_relation(self):
@@ -704,13 +708,18 @@
class PerformanceTest(CubicWebTC):
- def setup_database(self):
- import logging
+ def setUp(self):
+ super(PerformanceTest, self).setUp()
logger = logging.getLogger('cubicweb.session')
#logger.handlers = [logging.StreamHandler(sys.stdout)]
logger.setLevel(logging.INFO)
self.info = logger.info
+ def tearDown(self):
+ super(PerformanceTest, self).tearDown()
+ logger = logging.getLogger('cubicweb.session')
+ logger.setLevel(logging.CRITICAL)
+
def test_composite_deletion(self):
req = self.request()
personnes = []
@@ -807,6 +816,7 @@
req.cnx.commit()
t1 = time.time()
self.info('add relations: %.2gs', t1-t0)
+
def test_session_add_relation_inlined(self):
""" to be compared with test_session_add_relations"""
req = self.request()
@@ -847,7 +857,7 @@
p2 = req.create_entity('Personne', nom=u'Florent')
w = req.create_entity('Affaire', ref=u'wc')
w.set_relations(todo_by=[p1,p2])
- w.clear_all_caches()
+ w.cw_clear_all_caches()
self.commit()
self.assertEqual(len(w.todo_by), 1)
self.assertEqual(w.todo_by[0].eid, p2.eid)
@@ -860,7 +870,7 @@
w.set_relations(todo_by=p1)
self.commit()
w.set_relations(todo_by=p2)
- w.clear_all_caches()
+ w.cw_clear_all_caches()
self.commit()
self.assertEqual(len(w.todo_by), 1)
self.assertEqual(w.todo_by[0].eid, p2.eid)
diff -r d8bb8f631d41 -r a4e667270dd4 server/test/unittest_rql2sql.py
--- a/server/test/unittest_rql2sql.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/test/unittest_rql2sql.py Fri Dec 09 12:08:27 2011 +0100
@@ -88,7 +88,6 @@
]
BASIC = [
-
("Any AS WHERE AS is Affaire",
'''SELECT _AS.cw_eid
FROM cw_Affaire AS _AS'''),
@@ -201,7 +200,12 @@
('Any X WHERE X title V, NOT X wikiid V, NOT X title "parent", X is Card',
'''SELECT _X.cw_eid
FROM cw_Card AS _X
-WHERE NOT (_X.cw_wikiid=_X.cw_title) AND NOT (_X.cw_title=parent)''')
+WHERE NOT (_X.cw_wikiid=_X.cw_title) AND NOT (_X.cw_title=parent)'''),
+
+ ("Any -AS WHERE AS is Affaire",
+ '''SELECT -_AS.cw_eid
+FROM cw_Affaire AS _AS'''),
+
]
BASIC_WITH_LIMIT = [
@@ -550,6 +554,15 @@
GROUP BY rel_todo_by0.eid_to
ORDER BY 2 DESC'''),
+ ('Any R2 WHERE R2 concerne R, R eid RE, R2 eid > RE',
+ '''SELECT _R2.eid
+FROM concerne_relation AS rel_concerne0, entities AS _R2
+WHERE _R2.eid=rel_concerne0.eid_from AND _R2.eid>rel_concerne0.eid_to'''),
+
+ ('Note X WHERE X eid IN (999998, 999999), NOT X cw_source Y',
+ '''SELECT _X.cw_eid
+FROM cw_Note AS _X
+WHERE _X.cw_eid IN(999998, 999999) AND NOT (EXISTS(SELECT 1 FROM cw_source_relation AS rel_cw_source0 WHERE rel_cw_source0.eid_from=_X.cw_eid))'''),
]
ADVANCED_WITH_GROUP_CONCAT = [
@@ -798,6 +811,11 @@
OUTER_JOIN = [
+
+ ('Any U,G WHERE U login L, G name L?, G is CWGroup',
+ '''SELECT _U.cw_eid, _G.cw_eid
+FROM cw_CWUser AS _U LEFT OUTER JOIN cw_CWGroup AS _G ON (_G.cw_name=_U.cw_login)'''),
+
('Any X,S WHERE X travaille S?',
'''SELECT _X.cw_eid, rel_travaille0.eid_to
FROM cw_Personne AS _X LEFT OUTER JOIN travaille_relation AS rel_travaille0 ON (rel_travaille0.eid_from=_X.cw_eid)'''
@@ -958,8 +976,31 @@
('Any CASE, CALIBCFG, CFG '
'WHERE CASE eid 1, CFG ecrit_par CASE, CALIBCFG? ecrit_par CASE',
'''SELECT _CFG.cw_ecrit_par, _CALIBCFG.cw_eid, _CFG.cw_eid
-FROM cw_Note AS _CFG LEFT OUTER JOIN cw_Note AS _CALIBCFG ON (_CALIBCFG.cw_ecrit_par=_CFG.cw_ecrit_par)
+FROM cw_Note AS _CFG LEFT OUTER JOIN cw_Note AS _CALIBCFG ON (_CALIBCFG.cw_ecrit_par=1)
WHERE _CFG.cw_ecrit_par=1'''),
+
+ ('Any U,G WHERE U login UL, G name GL, G is CWGroup HAVING UPPER(UL)=UPPER(GL)?',
+ '''SELECT _U.cw_eid, _G.cw_eid
+FROM cw_CWUser AS _U LEFT OUTER JOIN cw_CWGroup AS _G ON (UPPER(_U.cw_login)=UPPER(_G.cw_name))'''),
+
+ ('Any U,G WHERE U login UL, G name GL, G is CWGroup HAVING UPPER(UL)?=UPPER(GL)',
+ '''SELECT _U.cw_eid, _G.cw_eid
+FROM cw_CWGroup AS _G LEFT OUTER JOIN cw_CWUser AS _U ON (UPPER(_U.cw_login)=UPPER(_G.cw_name))'''),
+
+ ('Any U,G WHERE U login UL, G name GL, G is CWGroup HAVING UPPER(UL)?=UPPER(GL)?',
+ '''SELECT _U.cw_eid, _G.cw_eid
+FROM cw_CWUser AS _U FULL OUTER JOIN cw_CWGroup AS _G ON (UPPER(_U.cw_login)=UPPER(_G.cw_name))'''),
+
+ ('Any H, COUNT(X), SUM(XCE)/1000 '
+ 'WHERE X type "0", X date XSCT, X para XCE, X? ecrit_par F, F eid 999999, F is Personne, '
+ 'DH is Affaire, DH ref H '
+ 'HAVING XSCT?=H',
+ '''SELECT _DH.cw_ref, COUNT(_X.cw_eid), (SUM(_X.cw_para) / 1000)
+FROM cw_Affaire AS _DH LEFT OUTER JOIN cw_Note AS _X ON (_X.cw_date=_DH.cw_ref AND _X.cw_type=0 AND _X.cw_ecrit_par=999999)'''),
+
+ ('Any C WHERE X ecrit_par C?, X? inline1 F, F eid 1, X type XT, Z is Personne, Z nom ZN HAVING ZN=XT?',
+ '''SELECT _X.cw_ecrit_par
+FROM cw_Personne AS _Z LEFT OUTER JOIN cw_Note AS _X ON (_Z.cw_nom=_X.cw_type AND _X.cw_inline1=1)'''),
]
VIRTUAL_VARS = [
@@ -1355,11 +1396,28 @@
'''SELECT CAST(EXTRACT(MONTH from _P.cw_creation_date) AS INTEGER)
FROM cw_Personne AS _P''')
+ def test_weekday_extraction(self):
+ self._check("Any WEEKDAY(D) WHERE P is Personne, P creation_date D",
+ '''SELECT (CAST(EXTRACT(DOW from _P.cw_creation_date) AS INTEGER) + 1)
+FROM cw_Personne AS _P''')
+
def test_substring(self):
self._check("Any SUBSTRING(N, 1, 1) WHERE P nom N, P is Personne",
'''SELECT SUBSTR(_P.cw_nom, 1, 1)
FROM cw_Personne AS _P''')
+ def test_cast(self):
+ self._check("Any CAST(String, P) WHERE P is Personne",
+ '''SELECT CAST(_P.cw_eid AS text)
+FROM cw_Personne AS _P''')
+
+ def test_regexp(self):
+ self._check("Any X WHERE X login REGEXP '[0-9].*'",
+ '''SELECT _X.cw_eid
+FROM cw_CWUser AS _X
+WHERE _X.cw_login ~ [0-9].*
+''')
+
def test_parser_parse(self):
for t in self._parse(PARSER):
yield t
@@ -1471,6 +1529,12 @@
FROM (SELECT MAX(_A.cw_ordernum) AS C0
FROM cw_CWAttribute AS _A) AS _T0, cw_CWAttribute AS _A
WHERE _A.cw_ordernum=_T0.C0'''),
+
+ ('Any O1 HAVING O1=O2? WITH O1 BEING (Any MAX(O) WHERE A ordernum O, A is CWAttribute), O2 BEING (Any MAX(O) WHERE A ordernum O, A is CWRelation)',
+ '''SELECT _T0.C0
+FROM (SELECT MAX(_A.cw_ordernum) AS C0
+FROM cw_CWAttribute AS _A) AS _T0 LEFT OUTER JOIN (SELECT MAX(_A.cw_ordernum) AS C0
+FROM cw_CWRelation AS _A) AS _T1 ON (_T0.C0=_T1.C0)'''),
)):
yield t
@@ -1622,12 +1686,26 @@
'''SELECT (A || _X.cw_ref)
FROM cw_Affaire AS _X''')
- def test_or_having_fake_terms(self):
+ def test_or_having_fake_terms_base(self):
self._check('Any X WHERE X is CWUser, X creation_date D HAVING YEAR(D) = "2010" OR D = NULL',
'''SELECT _X.cw_eid
FROM cw_CWUser AS _X
WHERE ((CAST(EXTRACT(YEAR from _X.cw_creation_date) AS INTEGER)=2010) OR (_X.cw_creation_date IS NULL))''')
+ def test_or_having_fake_terms_exists(self):
+ # crash with rql <= 0.29.0
+ self._check('Any X WHERE X is CWUser, EXISTS(B bookmarked_by X, B creation_date D) HAVING D=2010 OR D=NULL, D=1 OR D=NULL',
+ '''SELECT _X.cw_eid
+FROM cw_CWUser AS _X
+WHERE EXISTS(SELECT 1 FROM bookmarked_by_relation AS rel_bookmarked_by0, cw_Bookmark AS _B WHERE rel_bookmarked_by0.eid_from=_B.cw_eid AND rel_bookmarked_by0.eid_to=_X.cw_eid AND ((_B.cw_creation_date=1) OR (_B.cw_creation_date IS NULL)) AND ((_B.cw_creation_date=2010) OR (_B.cw_creation_date IS NULL)))''')
+
+ def test_or_having_fake_terms_nocrash(self):
+ # crash with rql <= 0.29.0
+ self._check('Any X WHERE X is CWUser, X creation_date D HAVING D=2010 OR D=NULL, D=1 OR D=NULL',
+ '''SELECT _X.cw_eid
+FROM cw_CWUser AS _X
+WHERE ((_X.cw_creation_date=1) OR (_X.cw_creation_date IS NULL)) AND ((_X.cw_creation_date=2010) OR (_X.cw_creation_date IS NULL))''')
+
def test_not_no_where(self):
# XXX will check if some in_group relation exists, that's it.
# We can't actually know if we want to check if there are some
@@ -1669,21 +1747,29 @@
class SqlServer2005SQLGeneratorTC(PostgresSQLGeneratorTC):
backend = 'sqlserver2005'
def _norm_sql(self, sql):
- return sql.strip().replace(' SUBSTR', ' SUBSTRING').replace(' || ', ' + ').replace(' ILIKE ', ' LIKE ')
+ return sql.strip().replace(' SUBSTR', ' SUBSTRING').replace(' || ', ' + ').replace(' ILIKE ', ' LIKE ').replace('TRUE', '1').replace('FALSE', '0')
def test_has_text(self):
for t in self._parse(HAS_TEXT_LG_INDEXER):
yield t
- def test_or_having_fake_terms(self):
+ def test_regexp(self):
+ self.skipTest('regexp-based pattern matching not implemented in sqlserver')
+
+ def test_or_having_fake_terms_base(self):
self._check('Any X WHERE X is CWUser, X creation_date D HAVING YEAR(D) = "2010" OR D = NULL',
'''SELECT _X.cw_eid
FROM cw_CWUser AS _X
-WHERE ((YEAR(_X.cw_creation_date)=2010) OR (_X.cw_creation_date IS NULL))''')
+WHERE ((DATEPART(YEAR, _X.cw_creation_date)=2010) OR (_X.cw_creation_date IS NULL))''')
def test_date_extraction(self):
self._check("Any MONTH(D) WHERE P is Personne, P creation_date D",
- '''SELECT MONTH(_P.cw_creation_date)
+ '''SELECT DATEPART(MONTH, _P.cw_creation_date)
+FROM cw_Personne AS _P''')
+
+ def test_weekday_extraction(self):
+ self._check("Any WEEKDAY(D) WHERE P is Personne, P creation_date D",
+ '''SELECT DATEPART(WEEKDAY, _P.cw_creation_date)
FROM cw_Personne AS _P''')
def test_symmetric(self):
@@ -1813,14 +1899,19 @@
for t in self._parse(WITH_LIMIT):# + ADVANCED_WITH_LIMIT_OR_ORDERBY):
yield t
+ def test_cast(self):
+ self._check("Any CAST(String, P) WHERE P is Personne",
+ '''SELECT CAST(_P.cw_eid AS nvarchar(max))
+FROM cw_Personne AS _P''')
+
def test_groupby_orderby_insertion_dont_modify_intention(self):
self._check('Any YEAR(XECT)*100+MONTH(XECT), COUNT(X),SUM(XCE),AVG(XSCT-XECT) '
'GROUPBY YEAR(XECT),MONTH(XECT) ORDERBY 1 '
'WHERE X creation_date XSCT, X modification_date XECT, '
'X ordernum XCE, X is CWAttribute',
- '''SELECT ((YEAR(_X.cw_modification_date) * 100) + MONTH(_X.cw_modification_date)), COUNT(_X.cw_eid), SUM(_X.cw_ordernum), AVG((_X.cw_creation_date - _X.cw_modification_date))
+ '''SELECT ((DATEPART(YEAR, _X.cw_modification_date) * 100) + DATEPART(MONTH, _X.cw_modification_date)), COUNT(_X.cw_eid), SUM(_X.cw_ordernum), AVG((_X.cw_creation_date - _X.cw_modification_date))
FROM cw_CWAttribute AS _X
-GROUP BY YEAR(_X.cw_modification_date),MONTH(_X.cw_modification_date)
+GROUP BY DATEPART(YEAR, _X.cw_modification_date),DATEPART(MONTH, _X.cw_modification_date)
ORDER BY 1'''),
@@ -1828,13 +1919,27 @@
backend = 'sqlite'
def _norm_sql(self, sql):
- return sql.strip().replace(' ILIKE ', ' LIKE ')
+ return sql.strip().replace(' ILIKE ', ' LIKE ').replace('TRUE', '1').replace('FALSE', '0')
def test_date_extraction(self):
self._check("Any MONTH(D) WHERE P is Personne, P creation_date D",
'''SELECT MONTH(_P.cw_creation_date)
FROM cw_Personne AS _P''')
+ def test_weekday_extraction(self):
+ # custom impl. in cw.server.sqlutils
+ self._check("Any WEEKDAY(D) WHERE P is Personne, P creation_date D",
+ '''SELECT WEEKDAY(_P.cw_creation_date)
+FROM cw_Personne AS _P''')
+
+ def test_regexp(self):
+ self._check("Any X WHERE X login REGEXP '[0-9].*'",
+ '''SELECT _X.cw_eid
+FROM cw_CWUser AS _X
+WHERE _X.cw_login REGEXP [0-9].*
+''')
+
+
def test_union(self):
for t in self._parse((
('(Any N ORDERBY 1 WHERE X name N, X is State)'
@@ -1947,7 +2052,7 @@
yield t
- def test_or_having_fake_terms(self):
+ def test_or_having_fake_terms_base(self):
self._check('Any X WHERE X is CWUser, X creation_date D HAVING YEAR(D) = "2010" OR D = NULL',
'''SELECT _X.cw_eid
FROM cw_CWUser AS _X
@@ -1985,6 +2090,23 @@
'''SELECT EXTRACT(MONTH from _P.cw_creation_date)
FROM cw_Personne AS _P''')
+ def test_weekday_extraction(self):
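+ # mysql provides the extraction directly through its DAYOFWEEK() function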
+ self._check("Any WEEKDAY(D) WHERE P is Personne, P creation_date D",
+ '''SELECT DAYOFWEEK(_P.cw_creation_date)
+FROM cw_Personne AS _P''')
+
+ def test_cast(self):
+ self._check("Any CAST(String, P) WHERE P is Personne",
+ '''SELECT CAST(_P.cw_eid AS mediumtext)
+FROM cw_Personne AS _P''')
+
+ def test_regexp(self):
+ self._check("Any X WHERE X login REGEXP '[0-9].*'",
+ '''SELECT _X.cw_eid
+FROM cw_CWUser AS _X
+WHERE _X.cw_login REGEXP [0-9].*
+''')
+
def test_from_clause_needed(self):
queries = [("Any 1 WHERE EXISTS(T is CWGroup, T name 'managers')",
'''SELECT 1
@@ -2046,7 +2168,7 @@
FROM cw_Personne AS _P''')
- def test_or_having_fake_terms(self):
+ def test_or_having_fake_terms_base(self):
self._check('Any X WHERE X is CWUser, X creation_date D HAVING YEAR(D) = "2010" OR D = NULL',
'''SELECT _X.cw_eid
FROM cw_CWUser AS _X
diff -r d8bb8f631d41 -r a4e667270dd4 server/test/unittest_rqlannotation.py
--- a/server/test/unittest_rqlannotation.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/test/unittest_rqlannotation.py Fri Dec 09 12:08:27 2011 +0100
@@ -340,6 +340,16 @@
self.assertEqual(rqlst.defined_vars['X']._q_invariant, False)
self.assertEqual(rqlst.defined_vars['S']._q_invariant, False)
+ def test_remove_from_deleted_source_1(self):
+ rqlst = self._prepare('Note X WHERE X eid 999998, NOT X cw_source Y')
+ self.failIf('X' in rqlst.defined_vars) # simplified
+ self.assertEqual(rqlst.defined_vars['Y']._q_invariant, True)
+
+ def test_remove_from_deleted_source_2(self):
+ rqlst = self._prepare('Note X WHERE X eid IN (999998, 999999), NOT X cw_source Y')
+ self.assertEqual(rqlst.defined_vars['X']._q_invariant, False)
+ self.assertEqual(rqlst.defined_vars['Y']._q_invariant, True)
+
if __name__ == '__main__':
from logilab.common.testlib import unittest_main
unittest_main()
diff -r d8bb8f631d41 -r a4e667270dd4 server/test/unittest_security.py
--- a/server/test/unittest_security.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/test/unittest_security.py Fri Dec 09 12:08:27 2011 +0100
@@ -221,7 +221,7 @@
rset = cu.execute('Personne P')
self.assertEqual(len(rset), 1)
ent = rset.get_entity(0, 0)
- session.set_pool() # necessary
+ session.set_cnxset() # necessary
self.assertRaises(Unauthorized, ent.cw_check_perm, 'update')
self.assertRaises(Unauthorized,
cu.execute, "SET P travaille S WHERE P is Personne, S is Societe")
@@ -579,7 +579,7 @@
cnx = self.login('iaminusersgrouponly')
session = self.session
# needed to avoid check_perm error
- session.set_pool()
+ session.set_cnxset()
# needed to remove rql expr granting update perm to the user
affaire_perms = self.schema['Affaire'].permissions.copy()
self.schema['Affaire'].set_action_permissions('update', self.schema['Affaire'].get_groups('update'))
diff -r d8bb8f631d41 -r a4e667270dd4 server/test/unittest_session.py
--- a/server/test/unittest_session.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/test/unittest_session.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -15,13 +15,12 @@
#
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""
+from __future__ import with_statement
-"""
from logilab.common.testlib import TestCase, unittest_main, mock_object
from cubicweb.devtools.testlib import CubicWebTC
-from cubicweb.server.session import _make_description
+from cubicweb.server.session import _make_description, hooks_control
class Variable:
def __init__(self, name):
@@ -46,11 +45,70 @@
self.assertEqual(_make_description((Function('max', 'A'), Variable('B')), {}, solution),
['Int','CWUser'])
+
class InternalSessionTC(CubicWebTC):
def test_dbapi_query(self):
session = self.repo.internal_session()
self.assertFalse(session.running_dbapi_query)
session.close()
+
+class SessionTC(CubicWebTC):
+
+ def test_hooks_control(self):
+ session = self.session
+ self.assertEqual(session.hooks_mode, session.HOOKS_ALLOW_ALL)
+ self.assertEqual(session.disabled_hook_categories, set())
+ self.assertEqual(session.enabled_hook_categories, set())
+ self.assertEqual(len(session._tx_data), 1)
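+ # hooks_control is a context manager temporarily switching the session's
+ # hooks mode and the enabled/disabled hook categories, as checked below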
+ with hooks_control(session, session.HOOKS_DENY_ALL, 'metadata'):
+ self.assertEqual(session.hooks_mode, session.HOOKS_DENY_ALL)
+ self.assertEqual(session.disabled_hook_categories, set())
+ self.assertEqual(session.enabled_hook_categories, set(('metadata',)))
+ session.commit()
+ self.assertEqual(session.hooks_mode, session.HOOKS_DENY_ALL)
+ self.assertEqual(session.disabled_hook_categories, set())
+ self.assertEqual(session.enabled_hook_categories, set(('metadata',)))
+ session.rollback()
+ self.assertEqual(session.hooks_mode, session.HOOKS_DENY_ALL)
+ self.assertEqual(session.disabled_hook_categories, set())
+ self.assertEqual(session.enabled_hook_categories, set(('metadata',)))
+ with hooks_control(session, session.HOOKS_ALLOW_ALL, 'integrity'):
+ self.assertEqual(session.hooks_mode, session.HOOKS_ALLOW_ALL)
+ self.assertEqual(session.disabled_hook_categories, set(('integrity',)))
+ self.assertEqual(session.enabled_hook_categories, set(('metadata',))) # not changed in such case
+ self.assertEqual(session.hooks_mode, session.HOOKS_DENY_ALL)
+ self.assertEqual(session.disabled_hook_categories, set())
+ self.assertEqual(session.enabled_hook_categories, set(('metadata',)))
+ # leaving context manager with no transaction running should reset the
+ # transaction local storage (and associated cnxset)
+ self.assertEqual(session._tx_data, {})
+ self.assertEqual(session.cnxset, None)
+ self.assertEqual(session.hooks_mode, session.HOOKS_ALLOW_ALL)
+ self.assertEqual(session.disabled_hook_categories, set())
+ self.assertEqual(session.enabled_hook_categories, set())
+
+ def test_build_descr1(self):
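+ # forge an unknown eid in the first row: build_description is expected
+ # to drop that row from both rows and description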
+ rset = self.execute('(Any U,L WHERE U login L) UNION (Any G,N WHERE G name N, G is CWGroup)')
+ orig_length = len(rset)
+ rset.rows[0][0] = 9999999
+ description = self.session.build_description(rset.syntax_tree(), None, rset.rows)
+ self.assertEqual(len(description), orig_length - 1)
+ self.assertEqual(len(rset.rows), orig_length - 1)
+ self.failIf(rset.rows[0][0] == 9999999)
+
+ def test_build_descr2(self):
+ rset = self.execute('Any X,Y WITH X,Y BEING ((Any G,NULL WHERE G is CWGroup) UNION (Any U,G WHERE U in_group G))')
+ for x, y in rset.description:
+ if y is not None:
+ self.assertEqual(y, 'CWGroup')
+
+ def test_build_descr3(self):
+ rset = self.execute('(Any G,NULL WHERE G is CWGroup) UNION (Any U,G WHERE U in_group G)')
+ for x, y in rset.description:
+ if y is not None:
+ self.assertEqual(y, 'CWGroup')
+
+
if __name__ == '__main__':
unittest_main()
diff -r d8bb8f631d41 -r a4e667270dd4 server/test/unittest_ssplanner.py
--- a/server/test/unittest_ssplanner.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/test/unittest_ssplanner.py Fri Dec 09 12:08:27 2011 +0100
@@ -16,14 +16,18 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-from cubicweb.devtools import init_test_database
+from cubicweb.devtools import TestServerConfiguration, get_test_db_handler
from cubicweb.devtools.repotest import BasePlannerTC, test_plan
from cubicweb.server.ssplanner import SSPlanner
# keep cnx so it's not garbage collected and the associated session closed
def setUpModule(*args):
global repo, cnx
- repo, cnx = init_test_database(apphome=SSPlannerTC.datadir)
+ handler = get_test_db_handler(TestServerConfiguration(
+ 'data', apphome=SSPlannerTC.datadir))
+ handler.build_db_cache()
+ global repo, cnx
+ repo, cnx = handler.get_repo_and_cnx()
def tearDownModule(*args):
global repo, cnx
diff -r d8bb8f631d41 -r a4e667270dd4 server/test/unittest_storage.py
--- a/server/test/unittest_storage.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/test/unittest_storage.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -261,7 +261,7 @@
def test_bfss_update_to_None(self):
f = self.session.create_entity('Affaire', opt_attr=Binary('toto'))
self.session.commit()
- self.session.set_pool()
+ self.session.set_cnxset()
f.set_attributes(opt_attr=None)
self.session.commit()
diff -r d8bb8f631d41 -r a4e667270dd4 server/test/unittest_undo.py
--- a/server/test/unittest_undo.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/test/unittest_undo.py Fri Dec 09 12:08:27 2011 +0100
@@ -153,8 +153,8 @@
txuuid = self.commit()
actions = self.cnx.transaction_info(txuuid).actions_list()
self.assertEqual(len(actions), 1)
- toto.clear_all_caches()
- e.clear_all_caches()
+ toto.cw_clear_all_caches()
+ e.cw_clear_all_caches()
errors = self.cnx.undo_transaction(txuuid)
undotxuuid = self.commit()
self.assertEqual(undotxuuid, None) # undo not undoable
@@ -195,7 +195,7 @@
self.commit()
errors = self.cnx.undo_transaction(txuuid)
self.commit()
- p.clear_all_caches()
+ p.cw_clear_all_caches()
self.assertEqual(p.fiche[0].eid, c2.eid)
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0],
@@ -235,7 +235,7 @@
self.failIf(self.execute('Any X WHERE X eid %(x)s', {'x': c.eid}))
self.failIf(self.execute('Any X WHERE X eid %(x)s', {'x': p.eid}))
self.failIf(self.execute('Any X,Y WHERE X fiche Y'))
- self.session.set_pool()
+ self.session.set_cnxset()
for eid in (p.eid, c.eid):
self.failIf(session.system_sql(
'SELECT * FROM entities WHERE eid=%s' % eid).fetchall())
diff -r d8bb8f631d41 -r a4e667270dd4 server/utils.py
--- a/server/utils.py Mon Sep 26 18:37:23 2011 +0200
+++ b/server/utils.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -16,6 +16,7 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""Some utilities for the CubicWeb server."""
+
__docformat__ = "restructuredtext en"
import sys
@@ -121,11 +122,12 @@
class LoopTask(object):
"""threaded task restarting itself once executed"""
- def __init__(self, interval, func, args):
+ def __init__(self, repo, interval, func, args):
if interval <= 0:
raise ValueError('Loop task interval must be > 0 '
'(current value: %f for %s)' % \
(interval, func_name(func)))
+ self.repo = repo
self.interval = interval
def auto_restart_func(self=self, func=func, args=args):
restart = True
@@ -138,7 +140,7 @@
except BaseException:
restart = False
finally:
- if restart:
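+ # don't reschedule the task once the repository is shutting down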
+ if restart and not self.repo.shutting_down:
self.start()
self.func = auto_restart_func
self.name = func_name(func)
@@ -167,7 +169,7 @@
def auto_remove_func(self=self, func=target):
try:
func()
- except:
+ except Exception:
logger = logging.getLogger('cubicweb.repository')
logger.exception('Unhandled exception in RepoThread %s', self._name)
raise
diff -r d8bb8f631d41 -r a4e667270dd4 setup.py
--- a/setup.py Mon Sep 26 18:37:23 2011 +0200
+++ b/setup.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,7 +1,7 @@
#!/usr/bin/env python
# pylint: disable=W0142,W0403,W0404,W0613,W0622,W0622,W0704,R0904,C0103,E0611
#
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -179,7 +179,7 @@
if USE_SETUPTOOLS:
# overwrite MyInstallData to use sys.prefix instead of the egg directory
MyInstallMoreData = MyInstallData
- class MyInstallData(MyInstallMoreData):
+ class MyInstallData(MyInstallMoreData): # pylint: disable=E0102
"""A class that manages data files installation"""
def run(self):
_old_install_dir = self.install_dir
diff -r d8bb8f631d41 -r a4e667270dd4 skeleton/__pkginfo__.py.tmpl
--- a/skeleton/__pkginfo__.py.tmpl Mon Sep 26 18:37:23 2011 +0200
+++ b/skeleton/__pkginfo__.py.tmpl Fri Dec 09 12:08:27 2011 +0100
@@ -18,7 +18,7 @@
from os import listdir as _listdir
-from os.path import join, isdir, exists
+from os.path import join, isdir
from glob import glob
THIS_CUBE_DIR = join('share', 'cubicweb', 'cubes', modname)
diff -r d8bb8f631d41 -r a4e667270dd4 skeleton/debian/control.tmpl
--- a/skeleton/debian/control.tmpl Mon Sep 26 18:37:23 2011 +0200
+++ b/skeleton/debian/control.tmpl Fri Dec 09 12:08:27 2011 +0100
@@ -2,7 +2,7 @@
Section: web
Priority: optional
Maintainer: %(author)s <%(author-email)s>
-Build-Depends: debhelper (>= 5.0.37.1), python (>=2.4), python-dev (>=2.4)
+Build-Depends: debhelper (>= 5.0.37.1), python (>=2.4), python-support
Standards-Version: 3.8.0
diff -r d8bb8f631d41 -r a4e667270dd4 skeleton/debian/rules.tmpl
--- a/skeleton/debian/rules.tmpl Mon Sep 26 18:37:23 2011 +0200
+++ b/skeleton/debian/rules.tmpl Fri Dec 09 12:08:27 2011 +0100
@@ -37,6 +37,7 @@
dh_installexamples -i
dh_installdocs -i
dh_installman -i
+ dh_pysupport -i /usr/share/cubicweb
dh_link -i
dh_compress -i -X.py -X.ini -X.xml -Xtest
dh_fixperms -i
diff -r d8bb8f631d41 -r a4e667270dd4 skeleton/test/realdb_test_CUBENAME.py
--- a/skeleton/test/realdb_test_CUBENAME.py Mon Sep 26 18:37:23 2011 +0200
+++ b/skeleton/test/realdb_test_CUBENAME.py Fri Dec 09 12:08:27 2011 +0100
@@ -18,8 +18,8 @@
"""
"""
-from cubicweb.devtools import buildconfig, loadconfig
-from cubicweb.devtools.testlib import RealDBTest
+from cubicweb.devtools.testlib import CubicWebTC
+from cubicweb.devtools.realdbtest import buildconfig, loadconfig
def setUpModule(options):
if options.source:
@@ -32,7 +32,8 @@
options.epassword)
RealDatabaseTC.configcls = configcls
-class RealDatabaseTC(RealDBTest):
+
+class RealDatabaseTC(CubicWebTC):
configcls = None # set by setUpModule()
def test_all_primaries(self):
diff -r d8bb8f631d41 -r a4e667270dd4 sobjects/notification.py
--- a/sobjects/notification.py Mon Sep 26 18:37:23 2011 +0200
+++ b/sobjects/notification.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -109,6 +109,8 @@
url: %(url)s
"""
+ # to be defined on concrete sub-classes
+ content_attr = None
def context(self, **kwargs):
entity = self.cw_rset.get_entity(self.cw_row or 0, self.cw_col or 0)
diff -r d8bb8f631d41 -r a4e667270dd4 sobjects/parsers.py
--- a/sobjects/parsers.py Mon Sep 26 18:37:23 2011 +0200
+++ b/sobjects/parsers.py Fri Dec 09 12:08:27 2011 +0100
@@ -31,26 +31,22 @@
"""
-import urllib2
-import StringIO
import os.path as osp
-from cookielib import CookieJar
-from datetime import datetime, timedelta
-
-from lxml import etree
+from datetime import datetime, timedelta, time
+from urllib import urlencode
+from cgi import parse_qs # in urlparse with python >= 2.6
from logilab.common.date import todate, totime
from logilab.common.textutils import splitstrip, text_to_dict
+from logilab.common.decorators import classproperty
from yams.constraints import BASE_CONVERTERS
from yams.schema import role_name as rn
-from cubicweb import ValidationError, typed_eid
+from cubicweb import ValidationError, RegistryException, typed_eid
+from cubicweb.view import Component
from cubicweb.server.sources import datafeed
-
-def ensure_str_keys(dic):
- for key in dic:
- dic[str(key)] = dic.pop(key)
+from cubicweb.server.hook import match_rtype
# XXX see cubicweb.cwvreg.YAMS_TO_PY
# XXX see cubicweb.web.views.xmlrss.SERIALIZERS
@@ -65,61 +61,31 @@
ustr = ustr.split('.',1)[0]
return datetime.strptime(ustr, '%Y-%m-%d %H:%M:%S')
DEFAULT_CONVERTERS['Datetime'] = convert_datetime
+# XXX handle timezone, though this will be enough as TZDatetime are
+# serialized without time zone by default (UTC time). See
+# cw.web.views.xmlrss.SERIALIZERS.
+DEFAULT_CONVERTERS['TZDatetime'] = convert_datetime
def convert_time(ustr):
return totime(datetime.strptime(ustr, '%H:%M:%S'))
DEFAULT_CONVERTERS['Time'] = convert_time
+DEFAULT_CONVERTERS['TZTime'] = convert_time
def convert_interval(ustr):
# an Interval is a duration: use timedelta here (datetime.time has no
# 'seconds' argument, so time(seconds=...) would raise a TypeError)
return timedelta(seconds=int(ustr))
DEFAULT_CONVERTERS['Interval'] = convert_interval
-# use a cookie enabled opener to use session cookie if any
-_OPENER = urllib2.build_opener()
-try:
- from logilab.common import urllib2ext
- _OPENER.add_handler(urllib2ext.HTTPGssapiAuthHandler())
-except ImportError: # python-kerberos not available
- pass
-_OPENER.add_handler(urllib2.HTTPCookieProcessor(CookieJar()))
-
def extract_typed_attrs(eschema, stringdict, converters=DEFAULT_CONVERTERS):
typeddict = {}
for rschema in eschema.subject_relations():
if rschema.final and rschema in stringdict:
- if rschema == 'eid':
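+ # skip metadata pseudo-attributes, they are handled by the parser itself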
+ if rschema in ('eid', 'cwuri', 'cwtype', 'cwsource'):
continue
attrtype = eschema.destination(rschema)
- typeddict[rschema.type] = converters[attrtype](stringdict[rschema])
+ value = stringdict[rschema]
+ if value is not None:
+ value = converters[attrtype](value)
+ typeddict[rschema.type] = value
return typeddict
-def _parse_entity_etree(parent):
- for node in list(parent):
- try:
- item = {'cwtype': unicode(node.tag),
- 'cwuri': node.attrib['cwuri'],
- 'eid': typed_eid(node.attrib['eid']),
- }
- except KeyError:
- # cw < 3.11 compat mode XXX
- item = {'cwtype': unicode(node.tag),
- 'cwuri': node.find('cwuri').text,
- 'eid': typed_eid(node.find('eid').text),
- }
- rels = {}
- for child in node:
- role = child.get('role')
- if role:
- # relation
- related = rels.setdefault(role, {}).setdefault(child.tag, [])
- related += [ritem for ritem, _ in _parse_entity_etree(child)]
- else:
- # attribute
- item[child.tag] = unicode(child.text)
- yield item, rels
-
-def build_search_rql(etype, attrs):
- restrictions = ['X %(attr)s %%(%(attr)s)s'%{'attr': attr} for attr in attrs]
- return 'Any X WHERE X is %s, %s' % (etype, ', '.join(restrictions))
-
def rtype_role_rql(rtype, role):
if role == 'object':
return 'Y %s X WHERE X eid %%(x)s' % rtype
@@ -127,34 +93,40 @@
return 'X %s Y WHERE X eid %%(x)s' % rtype
-def _check_no_option(action, options, eid, _):
- if options:
- msg = _("'%s' action doesn't take any options") % action
- raise ValidationError(eid, {rn('options', 'subject'): msg})
+class CWEntityXMLParser(datafeed.DataFeedXMLParser):
+ """datafeed parser for the 'xml' entity view
-def _check_linkattr_option(action, options, eid, _):
- if not 'linkattr' in options:
- msg = _("'%s' action requires 'linkattr' option") % action
- raise ValidationError(eid, {rn('options', 'subject'): msg})
+ Most of the logic is delegated to the following components:
+
+ * an "item builder" component, turning an etree xml node into a specific
+ python dictionary representing an entity
-
-class CWEntityXMLParser(datafeed.DataFeedParser):
- """datafeed parser for the 'xml' entity view"""
- __regid__ = 'cw.entityxml'
+ * "action" components, selected given an entity, a relation and its role in
+ the relation, and responsible for linking the entity to the given related
+ items (e.g. dictionaries)
- action_options = {
- 'copy': _check_no_option,
- 'link-or-create': _check_linkattr_option,
- 'link': _check_linkattr_option,
- }
+ So the parser itself only provides the glue between those components and
+ the connection to the source.
+ """
+ __regid__ = 'cw.entityxml'
def __init__(self, *args, **kwargs):
super(CWEntityXMLParser, self).__init__(*args, **kwargs)
- self.action_methods = {
- 'copy': self.related_copy,
- 'link-or-create': self.related_link_or_create,
- 'link': self.related_link,
- }
+ self._parsed_urls = {}
+ self._processed_entities = set()
+
+ def select_linker(self, action, rtype, role, entity=None):
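+ # action components are registered under 'cw.entityxml.action.<action>'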
+ try:
+ return self._cw.vreg['components'].select(
+ 'cw.entityxml.action.%s' % action, self._cw, entity=entity,
+ rtype=rtype, role=role, parser=self)
+ except RegistryException:
+ raise RegistryException('Unknown action %s' % action)
+
+ def list_actions(self):
+ reg = self._cw.vreg['components']
+ return sorted(clss[0].action for rid, clss in reg.iteritems()
+ if rid.startswith('cw.entityxml.action.'))
# mapping handling #########################################################
@@ -180,10 +152,14 @@
raise ValidationError(schemacfg.eid, {rn('options', 'subject'): msg})
try:
action = options.pop('action')
- self.action_options[action](action, options, schemacfg.eid, _)
+ linker = self.select_linker(action, rtype, role)
+ linker.check_options(options, schemacfg.eid)
except KeyError:
msg = _('"action" must be specified in options; allowed values are '
- '%s') % ', '.join(self.action_methods)
+ '%s') % ', '.join(self.list_actions())
+ raise ValidationError(schemacfg.eid, {rn('options', 'subject'): msg})
+ except RegistryException:
+ msg = _('allowed values for "action" are %s') % ', '.join(self.list_actions())
raise ValidationError(schemacfg.eid, {rn('options', 'subject'): msg})
if not checkonly:
if role == 'subject':
@@ -208,184 +184,325 @@
# import handling ##########################################################
- def process(self, url, partialcommit=True):
+ def process(self, url, raise_on_error=False, partialcommit=True):
"""IDataFeedParser main entry point"""
- # XXX suppression support according to source configuration. If set, get
- # all cwuri of entities from this source, and compare with newly
- # imported ones
- error = False
- for item, rels in self.parse(url):
- cwuri = item['cwuri']
- try:
- self.process_item(item, rels)
- if partialcommit:
- # commit+set_pool instead of commit(reset_pool=False) to let
- # other a chance to get our pool
- self._cw.commit()
- self._cw.set_pool()
- except ValidationError, exc:
- if partialcommit:
- self.source.error('Skipping %s because of validation error %s' % (cwuri, exc))
- self._cw.rollback()
- self._cw.set_pool()
- error = True
- else:
- raise
- return error
+ if url.startswith('http'): # XXX similar loose test as in parse of sources.datafeed
+ url = self.complete_url(url)
+ super(CWEntityXMLParser, self).process(url, raise_on_error, partialcommit)
- def parse(self, url):
- if not url.startswith('http'):
- stream = StringIO.StringIO(url)
- else:
- for mappedurl in HOST_MAPPING:
- if url.startswith(mappedurl):
- url = url.replace(mappedurl, HOST_MAPPING[mappedurl], 1)
- break
- self.source.info('GET %s', url)
- stream = _OPENER.open(url)
- return _parse_entity_etree(etree.parse(stream).getroot())
+ def parse_etree(self, parent):
+ for node in list(parent):
+ builder = self._cw.vreg['components'].select(
+ 'cw.entityxml.item-builder', self._cw, node=node,
+ parser=self)
+ yield builder.build_item()
def process_item(self, item, rels):
- entity = self.extid2entity(str(item.pop('cwuri')), item.pop('cwtype'),
- item=item)
+ """
+ item and rels are what's returned by the item builder `build_item` method:
+
+ * `item` is an {attribute: value} dictionary
+ * `rels` is for relations and structured as
+ {role: {relation: [(related item, related rels)...]}}
+ """
+ entity = self.extid2entity(str(item['cwuri']), item['cwtype'],
+ cwsource=item['cwsource'], item=item)
+ if entity is None:
+ return None
+ if entity.eid in self._processed_entities:
+ return entity
+ self._processed_entities.add(entity.eid)
if not (self.created_during_pull(entity) or self.updated_during_pull(entity)):
self.notify_updated(entity)
- item.pop('eid')
- # XXX check modification date
attrs = extract_typed_attrs(entity.e_schema, item)
- entity.set_attributes(**attrs)
- for (rtype, role, action), rules in self.source.mapping.get(entity.__regid__, {}).iteritems():
+ # check modification date and compare attribute values to only
+ # update what's actually needed
+ entity.complete(tuple(attrs))
+ mdate = attrs.get('modification_date')
+ if not mdate or mdate > entity.modification_date:
+ attrs = dict( (k, v) for k, v in attrs.iteritems()
+ if v != getattr(entity, k))
+ if attrs:
+ entity.set_attributes(**attrs)
+ self.process_relations(entity, rels)
+ return entity
+
+ def process_relations(self, entity, rels):
+ etype = entity.__regid__
+ for (rtype, role, action), rules in self.source.mapping.get(etype, {}).iteritems():
try:
related_items = rels[role][rtype]
except KeyError:
self.source.error('relation %s-%s not found in xml export of %s',
- rtype, role, entity.__regid__)
+ rtype, role, etype)
continue
try:
- actionmethod = self.action_methods[action]
- except KeyError:
- raise Exception('Unknown action %s' % action)
- actionmethod(entity, rtype, role, related_items, rules)
- return entity
+ linker = self.select_linker(action, rtype, role, entity)
+ except RegistryException:
+ self.source.error('no linker for action %s', action)
+ else:
+ linker.link_items(related_items, rules)
def before_entity_copy(self, entity, sourceparams):
"""IDataFeedParser callback"""
attrs = extract_typed_attrs(entity.e_schema, sourceparams['item'])
entity.cw_edited.update(attrs)
- def related_copy(self, entity, rtype, role, others, rules):
- """implementation of 'copy' action
+ def complete_url(self, url, etype=None, known_relations=None):
+ """append to the url's query string information about relation that should
+ be included in the resulting xml, according to source mapping.
- Takes no option.
- """
- assert not any(x[1] for x in rules), "'copy' action takes no option"
- ttypes = set([x[0] for x in rules])
- others = [item for item in others if item['cwtype'] in ttypes]
- eids = [] # local eids
- if not others:
- self._clear_relation(entity, rtype, role, ttypes)
- return
- for item in others:
- item, _rels = self._complete_item(item)
- other_entity = self.process_item(item, [])
- eids.append(other_entity.eid)
- self._set_relation(entity, rtype, role, eids)
-
- def related_link(self, entity, rtype, role, others, rules):
- """implementation of 'link' action
+ If etype is not specified, try to guess it using the last path part of
+ the url, i.e. the format used by default in cubicweb to map all entities
+ of a given type as in 'http://mysite.org/EntityType'.
- requires an options to control search of the linked entity.
+ If `known_relations` is given, it should be a dictionary of already
+ known relations, so they don't get queried again.
"""
- for ttype, options in rules:
- assert 'linkattr' in options, (
- "'link' action requires a list of attributes used to "
- "search if the entity already exists")
- self._related_link(entity, rtype, role, ttype, others, [options['linkattr']],
- create_when_not_found=False)
+ try:
+ url, qs = url.split('?', 1)
+ except ValueError:
+ qs = ''
+ params = parse_qs(qs)
+ if not 'vid' in params:
+ params['vid'] = ['xml']
+ if etype is None:
+ try:
+ etype = url.rsplit('/', 1)[1]
+ except ValueError:
+ return url + '?' + self._cw.build_url_params(**params)
+ try:
+ etype = self._cw.vreg.case_insensitive_etypes[etype.lower()]
+ except KeyError:
+ return url + '?' + self._cw.build_url_params(**params)
+ relations = params.setdefault('relation', [])
+ for rtype, role, _ in self.source.mapping.get(etype, ()):
+ if known_relations and rtype in known_relations.get('role', ()):
+ continue
+ reldef = '%s-%s' % (rtype, role)
+ if not reldef in relations:
+ relations.append(reldef)
+ return url + '?' + self._cw.build_url_params(**params)
- def related_link_or_create(self, entity, rtype, role, others, rules):
- """implementation of 'link-or-create' action
+ def complete_item(self, item, rels):
+ try:
+ return self._parsed_urls[item['cwuri']]
+ except KeyError:
+ itemurl = self.complete_url(item['cwuri'], item['cwtype'], rels)
+ item_rels = list(self.parse(itemurl))
+ assert len(item_rels) == 1, 'url %s expected to bring back one '\
+ 'and only one entity, got %s' % (itemurl, len(item_rels))
+ self._parsed_urls[item['cwuri']] = item_rels[0]
+ if rels:
+ # XXX (do it better) merge relations
+ new_rels = item_rels[0][1]
+ new_rels.get('subject', {}).update(rels.get('subject', {}))
+ new_rels.get('object', {}).update(rels.get('object', {}))
+ return item_rels[0]
- requires an options to control search of the linked entity.
- """
- for ttype, options in rules:
- assert 'linkattr' in options, (
- "'link-or-create' action requires a list of attributes used to "
- "search if the entity already exists")
- self._related_link(entity, rtype, role, ttype, others, [options['linkattr']],
- create_when_not_found=True)
+
+class CWEntityXMLItemBuilder(Component):
+ __regid__ = 'cw.entityxml.item-builder'
+
+ def __init__(self, _cw, parser, node, **kwargs):
+ super(CWEntityXMLItemBuilder, self).__init__(_cw, **kwargs)
+ self.parser = parser
+ self.node = node
+
+ def build_item(self):
+ """parse a XML document node and return two dictionaries defining (part
+ of) an entity:
- def _related_link(self, entity, rtype, role, ttype, others, searchattrs,
- create_when_not_found):
- def issubset(x,y):
- return all(z in y for z in x)
+ - {attribute: value}
+ - {role: {relation: [(related item, related rels)...]}}
+ """
+ node = self.node
+ item = dict(node.attrib.items())
+ item['cwtype'] = unicode(node.tag)
+ item.setdefault('cwsource', None)
+ try:
+ item['eid'] = typed_eid(item['eid'])
+ except KeyError:
+ # cw < 3.11 compat mode XXX
+ item['eid'] = typed_eid(node.find('eid').text)
+ item['cwuri'] = node.find('cwuri').text
+ rels = {}
+ for child in node:
+ role = child.get('role')
+ if role:
+ # relation
+ related = rels.setdefault(role, {}).setdefault(child.tag, [])
+ related += self.parser.parse_etree(child)
+ elif child.text:
+ # attribute
+ item[child.tag] = unicode(child.text)
+ else:
+ # None attribute (empty tag)
+ item[child.tag] = None
+ return item, rels
+
+
+class CWEntityXMLActionCopy(Component):
+ """implementation of cubicweb entity xml parser's'copy' action
+
+ Takes no option.
+ """
+ __regid__ = 'cw.entityxml.action.copy'
+
+ def __init__(self, _cw, parser, rtype, role, entity=None, **kwargs):
+ super(CWEntityXMLActionCopy, self).__init__(_cw, **kwargs)
+ self.parser = parser
+ self.rtype = rtype
+ self.role = role
+ self.entity = entity
+
+ @classproperty
+ def action(cls):
+ return cls.__regid__.rsplit('.', 1)[-1]
+
+ def check_options(self, options, eid):
+ self._check_no_options(options, eid)
+
+ def _check_no_options(self, options, eid, msg=None):
+ if options:
+ if msg is None:
+ msg = self._cw._("'%s' action doesn't take any options") % self.action
+ raise ValidationError(eid, {rn('options', 'subject'): msg})
+
+ def link_items(self, others, rules):
+ assert not any(x[1] for x in rules), "'copy' action takes no option"
+ ttypes = frozenset([x[0] for x in rules])
eids = [] # local eids
- for item in others:
- if item['cwtype'] != ttype:
- continue
- if not issubset(searchattrs, item):
- item, _rels = self._complete_item(item, False)
- if not issubset(searchattrs, item):
- self.source.error('missing attribute, got %s expected keys %s'
- % item, searchattrs)
- continue
- kwargs = dict((attr, item[attr]) for attr in searchattrs)
- rql = build_search_rql(item['cwtype'], kwargs)
- rset = self._cw.execute(rql, kwargs)
- if len(rset) > 1:
- self.source.error('ambiguous link: found %s entity %s with attributes %s',
- len(rset), item['cwtype'], kwargs)
- elif len(rset) == 1:
- eids.append(rset[0][0])
- elif create_when_not_found:
- ensure_str_keys(kwargs) # XXX necessary with python < 2.6
- eids.append(self._cw.create_entity(item['cwtype'], **kwargs).eid)
- else:
- self.source.error('can not find %s entity with attributes %s',
- item['cwtype'], kwargs)
- if not eids:
- self._clear_relation(entity, rtype, role, (ttype,))
+ for item, rels in others:
+ if item['cwtype'] in ttypes:
+ item, rels = self.parser.complete_item(item, rels)
+ other_entity = self.parser.process_item(item, rels)
+ if other_entity is not None:
+ eids.append(other_entity.eid)
+ if eids:
+ self._set_relation(eids)
else:
- self._set_relation(entity, rtype, role, eids)
+ self._clear_relation(ttypes)
- def _complete_item(self, item, add_relations=True):
- itemurl = item['cwuri'] + '?vid=xml'
- if add_relations:
- for rtype, role, _ in self.source.mapping.get(item['cwtype'], ()):
- itemurl += '&relation=%s-%s' % (rtype, role)
- item_rels = list(self.parse(itemurl))
- assert len(item_rels) == 1
- return item_rels[0]
-
- def _clear_relation(self, entity, rtype, role, ttypes):
- if entity.eid not in self.stats['created']:
+ def _clear_relation(self, ttypes):
+ if not self.parser.created_during_pull(self.entity):
if len(ttypes) > 1:
typerestr = ', Y is IN(%s)' % ','.join(ttypes)
else:
typerestr = ', Y is %s' % ','.join(ttypes)
- self._cw.execute('DELETE ' + rtype_role_rql(rtype, role) + typerestr,
- {'x': entity.eid})
+ self._cw.execute('DELETE ' + rtype_role_rql(self.rtype, self.role) + typerestr,
+ {'x': self.entity.eid})
+
+ def _set_relation(self, eids):
+ assert eids
+ rtype = self.rtype
+ rqlbase = rtype_role_rql(rtype, self.role)
+ eidstr = ','.join(str(eid) for eid in eids)
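+ # drop links to entities not in the new target set, then add missing links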
+ self._cw.execute('DELETE %s, NOT Y eid IN (%s)' % (rqlbase, eidstr),
+ {'x': self.entity.eid})
+ if self.role == 'object':
+ rql = 'SET %s, Y eid IN (%s), NOT Y %s X' % (rqlbase, eidstr, rtype)
+ else:
+ rql = 'SET %s, Y eid IN (%s), NOT X %s Y' % (rqlbase, eidstr, rtype)
+ self._cw.execute(rql, {'x': self.entity.eid})
+
+
+class CWEntityXMLActionLink(CWEntityXMLActionCopy):
+ """implementation of cubicweb entity xml parser's'link' action
+
+ requires a 'linkattr' option to control search of the linked entity.
+ """
+ __regid__ = 'cw.entityxml.action.link'
+
+ def check_options(self, options, eid):
+ if not 'linkattr' in options:
+ msg = self._cw._("'%s' action requires 'linkattr' option") % self.action
+ raise ValidationError(eid, {rn('options', 'subject'): msg})
+
+ create_when_not_found = False
- def _set_relation(self, entity, rtype, role, eids):
- rqlbase = rtype_role_rql(rtype, role)
- rql = 'DELETE %s' % rqlbase
+ def link_items(self, others, rules):
+ for ttype, options in rules:
+ searchattrs = splitstrip(options.get('linkattr', ''))
+ self._related_link(ttype, others, searchattrs)
+
+ def _related_link(self, ttype, others, searchattrs):
+ def issubset(x,y):
+ return all(z in y for z in x)
+ eids = [] # local eids
+ source = self.parser.source
+ for item, rels in others:
+ if item['cwtype'] != ttype:
+ continue
+ if not issubset(searchattrs, item):
+ item, rels = self.parser.complete_item(item, rels)
+ if not issubset(searchattrs, item):
+ source.error('missing attribute, got %s expected keys %s',
+ item, searchattrs)
+ continue
+ # XXX str() needed with python < 2.6
+ kwargs = dict((str(attr), item[attr]) for attr in searchattrs)
+ targets = self._find_entities(item, kwargs)
+ if len(targets) == 1:
+ entity = targets[0]
+ elif not targets and self.create_when_not_found:
+ entity = self._cw.create_entity(item['cwtype'], **kwargs)
+ else:
+ if len(targets) > 1:
+ source.error('ambiguous link: found %s entity %s with attributes %s',
+ len(targets), item['cwtype'], kwargs)
+ else:
+ source.error('can not find %s entity with attributes %s',
+ item['cwtype'], kwargs)
+ continue
+ eids.append(entity.eid)
+ self.parser.process_relations(entity, rels)
if eids:
- eidstr = ','.join(str(eid) for eid in eids)
- rql += ', NOT Y eid IN (%s)' % eidstr
- self._cw.execute(rql, {'x': entity.eid})
- if eids:
- if role == 'object':
- rql = 'SET %s, Y eid IN (%s), NOT Y %s X' % (rqlbase, eidstr, rtype)
- else:
- rql = 'SET %s, Y eid IN (%s), NOT X %s Y' % (rqlbase, eidstr, rtype)
- self._cw.execute(rql, {'x': entity.eid})
+ self._set_relation(eids)
+ else:
+ self._clear_relation((ttype,))
+
+ def _find_entities(self, item, kwargs):
+ return tuple(self._cw.find_entities(item['cwtype'], **kwargs))
+
+
+class CWEntityXMLActionLinkInState(CWEntityXMLActionLink):
+ """custom implementation of cubicweb entity xml parser's'link' action for
+ in_state relation
+ """
+ __select__ = match_rtype('in_state')
+
+ def check_options(self, options, eid):
+ super(CWEntityXMLActionLinkInState, self).check_options(options, eid)
+ if not 'name' in options['linkattr']:
+ msg = self._cw._("'%s' action for in_state relation should at least have 'linkattr=name' option") % self.action
+ raise ValidationError(eid, {rn('options', 'subject'): msg})
+
+ def _find_entities(self, item, kwargs):
+ assert 'name' in item # XXX else, complete_item
+ state_name = item['name']
+ wf = self.entity.cw_adapt_to('IWorkflowable').current_workflow
+ state = wf.state_by_name(state_name)
+ if state is None:
+ return ()
+ return (state,)
+
+
+class CWEntityXMLActionLinkOrCreate(CWEntityXMLActionLink):
+ """implementation of cubicweb entity xml parser's'link-or-create' action
+
+ requires a 'linkattr' option to control search of the linked entity.
+ """
+ __regid__ = 'cw.entityxml.action.link-or-create'
+ create_when_not_found = True
+
def registration_callback(vreg):
vreg.register_all(globals().values(), __name__)
- global HOST_MAPPING
- HOST_MAPPING = {}
+ global URL_MAPPING
+ URL_MAPPING = {}
if vreg.config.apphome:
- host_mapping_file = osp.join(vreg.config.apphome, 'hostmapping.py')
- if osp.exists(host_mapping_file):
- HOST_MAPPING = eval(file(host_mapping_file).read())
- vreg.info('using host mapping %s from %s', HOST_MAPPING, host_mapping_file)
+ url_mapping_file = osp.join(vreg.config.apphome, 'urlmapping.py')
+ if osp.exists(url_mapping_file):
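+ # urlmapping.py presumably holds a python dict literal, used like the
+ # former hostmapping.py to rewrite url prefixes before fetching them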
+ URL_MAPPING = eval(file(url_mapping_file).read())
+ vreg.info('using url mapping %s from %s', URL_MAPPING, url_mapping_file)
diff -r d8bb8f631d41 -r a4e667270dd4 sobjects/test/data/schema.py
--- a/sobjects/test/data/schema.py Mon Sep 26 18:37:23 2011 +0200
+++ b/sobjects/test/data/schema.py Fri Dec 09 12:08:27 2011 +0100
@@ -25,4 +25,4 @@
class Tag(EntityType):
name = String(unique=True)
- tags = SubjectRelation('CWUser')
+ tags = SubjectRelation(('CWUser', 'CWGroup', 'EmailAddress'))
diff -r d8bb8f631d41 -r a4e667270dd4 sobjects/test/unittest_parsers.py
--- a/sobjects/test/unittest_parsers.py Mon Sep 26 18:37:23 2011 +0200
+++ b/sobjects/test/unittest_parsers.py Fri Dec 09 12:08:27 2011 +0100
@@ -16,6 +16,8 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import with_statement
+
from datetime import datetime
from cubicweb.devtools.testlib import CubicWebTC
@@ -40,7 +42,7 @@
BASEXML = ''.join(u'''
-
+ sthenaulttoto2011-01-25 14:14:06
@@ -57,17 +59,23 @@
+
+
+
'''.splitlines())
-RELATEDXML ={
+RELATEDXML = {
'http://pouet.org/6': u'''
syt@logilab.fr
2010-04-13 14:35:562010-04-13 14:35:56
+
+
+
''',
@@ -75,6 +83,9 @@
users
+
+
+
''',
@@ -101,20 +112,66 @@
''',
}
+
+OTHERXML = ''.join(u'''
+
+
+ sthenault
+ toto
+ 2011-01-25 14:14:06
+ 2010-01-22 10:27:59
+ 2011-01-25 14:14:06
+
+
+
+
+
+'''.splitlines()
+)
+
+
class CWEntityXMLParserTC(CubicWebTC):
- def setup_database(self):
- req = self.request()
- source = req.create_entity('CWSource', name=u'myfeed', type=u'datafeed',
+ """/!\ this test use a pre-setup database /!\, if you modify above xml,
+ REMOVE THE DATABASE TEMPLATE else it won't be considered
+ """
+ test_db_id = 'xmlparser'
+ @classmethod
+ def pre_setup_database(cls, session, config):
+ myfeed = session.create_entity('CWSource', name=u'myfeed', type=u'datafeed',
parser=u'cw.entityxml', url=BASEXML)
- self.commit()
- source.init_mapping([(('CWUser', 'use_email', '*'),
+ myotherfeed = session.create_entity('CWSource', name=u'myotherfeed', type=u'datafeed',
+ parser=u'cw.entityxml', url=OTHERXML)
+ session.commit()
+ myfeed.init_mapping([(('CWUser', 'use_email', '*'),
u'role=subject\naction=copy'),
(('CWUser', 'in_group', '*'),
u'role=subject\naction=link\nlinkattr=name'),
- (('*', 'tags', 'CWUser'),
+ (('CWUser', 'in_state', '*'),
+ u'role=subject\naction=link\nlinkattr=name'),
+ (('*', 'tags', '*'),
u'role=object\naction=link-or-create\nlinkattr=name'),
])
- req.create_entity('Tag', name=u'hop')
+ myotherfeed.init_mapping([(('CWUser', 'in_group', '*'),
+ u'role=subject\naction=link\nlinkattr=name'),
+ (('CWUser', 'in_state', '*'),
+ u'role=subject\naction=link\nlinkattr=name'),
+ ])
+ session.create_entity('Tag', name=u'hop')
+
+ def test_complete_url(self):
+ dfsource = self.repo.sources_by_uri['myfeed']
+ parser = dfsource._get_parser(self.session)
+ self.assertEqual(parser.complete_url('http://www.cubicweb.org/CWUser'),
+ 'http://www.cubicweb.org/CWUser?relation=tags-object&relation=in_group-subject&relation=in_state-subject&relation=use_email-subject&vid=xml')
+ self.assertEqual(parser.complete_url('http://www.cubicweb.org/cwuser'),
+ 'http://www.cubicweb.org/cwuser?relation=tags-object&relation=in_group-subject&relation=in_state-subject&relation=use_email-subject&vid=xml')
+ self.assertEqual(parser.complete_url('http://www.cubicweb.org/cwuser?vid=rdf&relation=hop'),
+ 'http://www.cubicweb.org/cwuser?relation=hop&relation=tags-object&relation=in_group-subject&relation=in_state-subject&relation=use_email-subject&vid=rdf')
+ self.assertEqual(parser.complete_url('http://www.cubicweb.org/?rql=cwuser&vid=rdf&relation=hop'),
+ 'http://www.cubicweb.org/?rql=cwuser&relation=hop&vid=rdf')
+ self.assertEqual(parser.complete_url('http://www.cubicweb.org/?rql=cwuser&relation=hop'),
+ 'http://www.cubicweb.org/?rql=cwuser&relation=hop&vid=xml')
+
def test_actions(self):
dfsource = self.repo.sources_by_uri['myfeed']
@@ -122,13 +179,23 @@
{u'CWUser': {
(u'in_group', u'subject', u'link'): [
(u'CWGroup', {u'linkattr': u'name'})],
+ (u'in_state', u'subject', u'link'): [
+ (u'State', {u'linkattr': u'name'})],
(u'tags', u'object', u'link-or-create'): [
(u'Tag', {u'linkattr': u'name'})],
(u'use_email', u'subject', u'copy'): [
(u'EmailAddress', {})]
- }
+ },
+ u'CWGroup': {
+ (u'tags', u'object', u'link-or-create'): [
+ (u'Tag', {u'linkattr': u'name'})],
+ },
+ u'EmailAddress': {
+ (u'tags', u'object', u'link-or-create'): [
+ (u'Tag', {u'linkattr': u'name'})],
+ },
})
- session = self.repo.internal_session()
+ session = self.repo.internal_session(safe=True)
stats = dfsource.pull_data(session, force=True, raise_on_error=True)
self.assertEqual(sorted(stats.keys()), ['created', 'updated'])
self.assertEqual(len(stats['created']), 2)
@@ -139,31 +206,121 @@
self.assertEqual(user.modification_date, datetime(2011, 01, 25, 14, 14, 06))
self.assertEqual(user.cwuri, 'http://pouet.org/5')
self.assertEqual(user.cw_source[0].name, 'myfeed')
+ self.assertEqual(user.absolute_url(), 'http://pouet.org/5')
self.assertEqual(len(user.use_email), 1)
# copy action
email = user.use_email[0]
self.assertEqual(email.address, 'syt@logilab.fr')
self.assertEqual(email.cwuri, 'http://pouet.org/6')
+ self.assertEqual(email.absolute_url(), 'http://pouet.org/6')
self.assertEqual(email.cw_source[0].name, 'myfeed')
+ self.assertEqual(len(email.reverse_tags), 1)
+ self.assertEqual(email.reverse_tags[0].name, 'hop')
# link action
self.assertFalse(self.execute('CWGroup X WHERE X name "unknown"'))
groups = sorted([g.name for g in user.in_group])
self.assertEqual(groups, ['users'])
+ group = user.in_group[0]
+ self.assertEqual(len(group.reverse_tags), 1)
+ self.assertEqual(group.reverse_tags[0].name, 'hop')
# link or create action
- tags = sorted([t.name for t in user.reverse_tags])
- self.assertEqual(tags, ['hop', 'unknown'])
- tag = self.execute('Tag X WHERE X name "unknown"').get_entity(0, 0)
- self.assertEqual(tag.cwuri, 'http://testing.fr/cubicweb/%s' % tag.eid)
- self.assertEqual(tag.cw_source[0].name, 'system')
-
- stats = dfsource.pull_data(session, force=True, raise_on_error=True)
+ tags = set([(t.name, t.cwuri.replace(str(t.eid), ''), t.cw_source[0].name)
+ for t in user.reverse_tags])
+ self.assertEqual(tags, set((('hop', 'http://testing.fr/cubicweb/', 'system'),
+ ('unknown', 'http://testing.fr/cubicweb/', 'system')))
+ )
+ session.set_cnxset()
+ with session.security_enabled(read=False): # avoid Unauthorized due to password selection
+ stats = dfsource.pull_data(session, force=True, raise_on_error=True)
self.assertEqual(stats['created'], set())
self.assertEqual(len(stats['updated']), 2)
self.repo._type_source_cache.clear()
self.repo._extid_cache.clear()
- stats = dfsource.pull_data(session, force=True, raise_on_error=True)
+ session.set_cnxset()
+ with session.security_enabled(read=False): # avoid Unauthorized due to password selection
+ stats = dfsource.pull_data(session, force=True, raise_on_error=True)
self.assertEqual(stats['created'], set())
self.assertEqual(len(stats['updated']), 2)
+ session.commit()
+
+ # test move to system source
+ self.sexecute('SET X cw_source S WHERE X eid %(x)s, S name "system"', {'x': email.eid})
+ self.commit()
+ rset = self.sexecute('EmailAddress X WHERE X address "syt@logilab.fr"')
+ self.assertEqual(len(rset), 1)
+ e = rset.get_entity(0, 0)
+ self.assertEqual(e.eid, email.eid)
+ self.assertEqual(e.cw_metainformation(), {'source': {'type': u'native', 'uri': u'system',
+ 'use-cwuri-as-url': False},
+ 'type': 'EmailAddress',
+ 'extid': None})
+ self.assertEqual(e.cw_source[0].name, 'system')
+ self.assertEqual(e.reverse_use_email[0].login, 'sthenault')
+ self.commit()
+ # test everything is still fine after source synchronization
+ session.set_cnxset()
+ with session.security_enabled(read=False): # avoid Unauthorized due to password selection
+ stats = dfsource.pull_data(session, force=True, raise_on_error=True)
+ rset = self.sexecute('EmailAddress X WHERE X address "syt@logilab.fr"')
+ self.assertEqual(len(rset), 1)
+ e = rset.get_entity(0, 0)
+ self.assertEqual(e.eid, email.eid)
+ self.assertEqual(e.cw_metainformation(), {'source': {'type': u'native', 'uri': u'system',
+ 'use-cwuri-as-url': False},
+ 'type': 'EmailAddress',
+ 'extid': None})
+ self.assertEqual(e.cw_source[0].name, 'system')
+ self.assertEqual(e.reverse_use_email[0].login, 'sthenault')
+ session.commit()
+
+ # test delete entity
+ e.cw_delete()
+ self.commit()
+ # test everything is still fine after source synchronization
+ session.set_cnxset()
+ with session.security_enabled(read=False): # avoid Unauthorized due to password selection
+ stats = dfsource.pull_data(session, force=True, raise_on_error=True)
+ rset = self.sexecute('EmailAddress X WHERE X address "syt@logilab.fr"')
+ self.assertEqual(len(rset), 0)
+ rset = self.sexecute('Any X WHERE X use_email E, X login "sthenault"')
+ self.assertEqual(len(rset), 0)
+
+ def test_external_entity(self):
+ dfsource = self.repo.sources_by_uri['myotherfeed']
+ session = self.repo.internal_session(safe=True)
+ stats = dfsource.pull_data(session, force=True, raise_on_error=True)
+ user = self.execute('CWUser X WHERE X login "sthenault"').get_entity(0, 0)
+ self.assertEqual(user.creation_date, datetime(2010, 01, 22, 10, 27, 59))
+ self.assertEqual(user.modification_date, datetime(2011, 01, 25, 14, 14, 06))
+ self.assertEqual(user.cwuri, 'http://pouet.org/5')
+ self.assertEqual(user.cw_source[0].name, 'myfeed')
+
+ def test_noerror_missing_fti_attribute(self):
+ dfsource = self.repo.sources_by_uri['myfeed']
+ session = self.repo.internal_session(safe=True)
+ parser = dfsource._get_parser(session)
+ dfsource.process_urls(parser, ['''
+
+
+ how-to
+
+
+'''], raise_on_error=True)
+
+ def test_noerror_unspecified_date(self):
+ dfsource = self.repo.sources_by_uri['myfeed']
+ session = self.repo.internal_session(safe=True)
+ parser = dfsource._get_parser(session)
+ dfsource.process_urls(parser, ['''
+
+
+ how-to
+ how-to
+ how-to
+
+
+
+'''], raise_on_error=True)
if __name__ == '__main__':
from logilab.common.testlib import unittest_main
diff -r d8bb8f631d41 -r a4e667270dd4 sobjects/textparsers.py
--- a/sobjects/textparsers.py Mon Sep 26 18:37:23 2011 +0200
+++ b/sobjects/textparsers.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -83,7 +83,7 @@
trinfo = iworkflowable.fire_transition(tr)
caller.fire_event('state-changed', {'trinfo': trinfo,
'entity': entity})
- except:
+ except Exception:
self.exception('while changing state of %s', entity)
else:
self.error("can't pass transition %s on entity %s",
diff -r d8bb8f631d41 -r a4e667270dd4 test/data/bootstrap_cubes
diff -r d8bb8f631d41 -r a4e667270dd4 test/data/rewrite/schema.py
--- a/test/data/rewrite/schema.py Mon Sep 26 18:37:23 2011 +0200
+++ b/test/data/rewrite/schema.py Fri Dec 09 12:08:27 2011 +0100
@@ -63,3 +63,15 @@
object = 'Card'
inlined = True
cardinality = '?*'
+
+class inlined_note(RelationDefinition):
+ subject = 'Card'
+ object = 'Note'
+ inlined = True
+ cardinality = '?*'
+
+class inlined_affaire(RelationDefinition):
+ subject = 'Note'
+ object = 'Affaire'
+ inlined = True
+ cardinality = '?*'
diff -r d8bb8f631d41 -r a4e667270dd4 test/unittest_dbapi.py
--- a/test/unittest_dbapi.py Mon Sep 26 18:37:23 2011 +0200
+++ b/test/unittest_dbapi.py Fri Dec 09 12:08:27 2011 +0100
@@ -32,7 +32,8 @@
def test_public_repo_api(self):
cnx = self.login('anon')
self.assertEqual(cnx.get_schema(), self.repo.schema)
- self.assertEqual(cnx.source_defs(), {'system': {'type': 'native', 'uri': 'system'}})
+ self.assertEqual(cnx.source_defs(), {'system': {'type': 'native', 'uri': 'system',
+ 'use-cwuri-as-url': False}})
self.restore_connection() # proper way to close cnx
self.assertRaises(ProgrammingError, cnx.get_schema)
self.assertRaises(ProgrammingError, cnx.source_defs)
diff -r d8bb8f631d41 -r a4e667270dd4 test/unittest_entity.py
--- a/test/unittest_entity.py Mon Sep 26 18:37:23 2011 +0200
+++ b/test/unittest_entity.py Fri Dec 09 12:08:27 2011 +0100
@@ -572,7 +572,7 @@
self.assertEqual(person.rest_path(), 'personne/doe')
# ambiguity test
person2 = req.create_entity('Personne', prenom=u'remi', nom=u'doe')
- person.clear_all_caches()
+ person.cw_clear_all_caches()
self.assertEqual(person.rest_path(), 'personne/eid/%s' % person.eid)
self.assertEqual(person2.rest_path(), 'personne/eid/%s' % person2.eid)
# unique attr with None value (wikiid in this case)
@@ -610,7 +610,9 @@
req = self.request()
note = req.create_entity('Note', type=u'z')
metainf = note.cw_metainformation()
- self.assertEqual(metainf, {'source': {'type': 'native', 'uri': 'system'}, 'type': u'Note', 'extid': None})
+ self.assertEqual(metainf, {'source': {'type': 'native', 'uri': 'system',
+ 'use-cwuri-as-url': False},
+ 'type': u'Note', 'extid': None})
self.assertEqual(note.absolute_url(), 'http://testing.fr/cubicweb/note/%s' % note.eid)
metainf['source'] = metainf['source'].copy()
metainf['source']['base-url'] = 'http://cubicweb2.com/'
diff -r d8bb8f631d41 -r a4e667270dd4 test/unittest_req.py
--- a/test/unittest_req.py Mon Sep 26 18:37:23 2011 +0200
+++ b/test/unittest_req.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -15,12 +15,14 @@
#
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+
from logilab.common.testlib import TestCase, unittest_main
+from cubicweb import ObjectNotFound
from cubicweb.req import RequestSessionBase
from cubicweb.devtools.testlib import CubicWebTC
from cubicweb import Unauthorized
-class RebuildURLTC(TestCase):
+class RequestTC(TestCase):
def test_rebuild_url(self):
rebuild_url = RequestSessionBase(None).rebuild_url
self.assertEqual(rebuild_url('http://logilab.fr?__message=pouet', __message='hop'),
@@ -49,5 +51,13 @@
self.assertRaises(Unauthorized, req.ensure_ro_rql, 'SET X login "toto" WHERE X is CWUser')
self.assertRaises(Unauthorized, req.ensure_ro_rql, ' SET X login "toto" WHERE X is CWUser ')
+
+class RequestCWTC(CubicWebTC):
+ def test_view_catch_ex(self):
+ req = self.request()
+ rset = self.execute('CWUser X WHERE X login "hop"')
+ self.assertEqual(req.view('oneline', rset, 'null'), '')
+ self.assertRaises(ObjectNotFound, req.view, 'onelinee', rset, 'null')
+
if __name__ == '__main__':
unittest_main()
diff -r d8bb8f631d41 -r a4e667270dd4 test/unittest_rqlrewrite.py
--- a/test/unittest_rqlrewrite.py Mon Sep 26 18:37:23 2011 +0200
+++ b/test/unittest_rqlrewrite.py Fri Dec 09 12:08:27 2011 +0100
@@ -21,9 +21,8 @@
from yams import BadSchemaDefinition
from rql import parse, nodes, RQLHelper
-from cubicweb import Unauthorized
+from cubicweb import Unauthorized, rqlrewrite
from cubicweb.schema import RRQLExpression, ERQLExpression
-from cubicweb.rqlrewrite import RQLRewriter
from cubicweb.devtools import repotest, TestServerConfiguration
@@ -62,9 +61,10 @@
@staticmethod
def simplify(mainrqlst, needcopy=False):
rqlhelper.simplify(rqlst, needcopy)
- rewriter = RQLRewriter(mock_object(vreg=FakeVReg, user=(mock_object(eid=1))))
+ rewriter = rqlrewrite.RQLRewriter(
+ mock_object(vreg=FakeVReg, user=(mock_object(eid=1))))
snippets = []
- for v, exprs in snippets_map.items():
+ for v, exprs in sorted(snippets_map.items()):
rqlexprs = [isinstance(snippet, basestring)
and mock_object(snippet_rqlst=parse('Any X WHERE '+snippet).children[0],
expression='Any X WHERE '+snippet)
@@ -210,8 +210,8 @@
}, {})
# XXX suboptimal
self.failUnlessEqual(rqlst.as_string(),
- "Any C,A,R WITH A,R,C BEING "
- "(Any A,R,C WHERE A ref R, A? inlined_card C, "
+ "Any C,A,R WITH A,C,R BEING "
+ "(Any A,C,R WHERE A? inlined_card C, A ref R, "
"(A is NULL) OR (EXISTS(A inlined_card B, B require_permission D, "
"B is Card, D is CWPermission)), "
"A is Affaire, C is Card, EXISTS(C require_permission E, E is CWPermission))")
@@ -236,6 +236,18 @@
('A2', 'X'): (c2,),
}, {})
+ def test_optional_var_inlined_linked(self):
+ c1 = ('X require_permission P')
+ c2 = ('X inlined_card O, O require_permission P')
+ rqlst = parse('Any A,W WHERE A inlined_card C?, C inlined_note N, '
+ 'N inlined_affaire W')
+ rewrite(rqlst, {('C', 'X'): (c1,)}, {})
+ self.failUnlessEqual(rqlst.as_string(),
+ 'Any A,W WHERE A inlined_card C?, A is Affaire '
+ 'WITH C,N,W BEING (Any C,N,W WHERE C inlined_note N, '
+ 'N inlined_affaire W, EXISTS(C require_permission B), '
+ 'C is Card, N is Note, W is Affaire)')
+
def test_relation_optimization_1_lhs(self):
# since Card in_state State has monovalued cardinality, the in_state
# relation used in the rql expression can be ignored and S replaced by
@@ -246,6 +258,7 @@
self.failUnlessEqual(rqlst.as_string(),
"Any C WHERE C in_state STATE, C is Card, "
"EXISTS(STATE name 'hop'), STATE is State")
+
def test_relation_optimization_1_rhs(self):
snippet = ('TW subworkflow_exit X, TW name "hop"')
rqlst = parse('WorkflowTransition C WHERE C subworkflow_exit EXIT')
diff -r d8bb8f631d41 -r a4e667270dd4 test/unittest_rset.py
--- a/test/unittest_rset.py Mon Sep 26 18:37:23 2011 +0200
+++ b/test/unittest_rset.py Fri Dec 09 12:08:27 2011 +0100
@@ -71,6 +71,13 @@
result = list(attr_desc_iterator(select, col, 2))
self.assertEqual(result, [])
+ def test_subquery_callfunc_2(self):
+ rql = ('Any X,S,L WHERE X in_state S WITH X, L BEING (Any X,MAX(L) GROUPBY X WHERE X is CWUser, T wf_info_for X, T creation_date L)')
+ rqlst = parse(rql)
+ select, col = rqlst.locate_subquery(0, 'CWUser', None)
+ result = list(attr_desc_iterator(select, col, 0))
+ self.assertEqual(result, [(1, 'in_state', 'subject')])
+
class ResultSetTC(CubicWebTC):
@@ -107,7 +114,7 @@
self.compare_urls(req.build_url('view', _restpath=''), baseurl)
- def test_resultset_build(self):
+ def test_build(self):
"""test basic build of a ResultSet"""
rs = ResultSet([1,2,3], 'CWGroup X', description=['CWGroup', 'CWGroup', 'CWGroup'])
self.assertEqual(rs.rowcount, 3)
@@ -115,7 +122,7 @@
self.assertEqual(rs.description, ['CWGroup', 'CWGroup', 'CWGroup'])
- def test_resultset_limit(self):
+ def test_limit(self):
rs = ResultSet([[12000, 'adim'], [13000, 'syt'], [14000, 'nico']],
'Any U,L where U is CWUser, U login L',
description=[['CWUser', 'String']] * 3)
@@ -128,8 +135,30 @@
self.assertEqual(rs.limit(2, offset=2).rows, [[14000, 'nico']])
self.assertEqual(rs.limit(2, offset=3).rows, [])
+ def test_limit_2(self):
+ req = self.request()
+ # drop user from cache for the sake of this test
+ req.drop_entity_cache(req.user.eid)
+ rs = req.execute('Any E,U WHERE E is CWEType, E created_by U')
+ # get entity on row 9. This will fill its created_by relation cache,
+ # with cwuser on row 9 as well
+ e1 = rs.get_entity(9, 0)
+ # get entity on row 10. This will fill its created_by relation cache
+ # with the cwuser entity already built for row 9
+ e2 = rs.get_entity(10, 0)
+ # limit result set from row 10
+ rs.limit(1, 10, inplace=True)
+ # get back the entity now on row 0
+ e = rs.get_entity(0, 0)
+ self.assertTrue(e2 is e)
+ # rs.limit has properly removed the cwuser from the request cache, but
+ # it's still referenced by the e/e2 relation cache
+ u = e.created_by[0]
+ # now ensure this doesn't trigger IndexError because cwuser.cw_row is 9
+ # while the rset now has only one row
+ u.cw_rset[u.cw_row]
- def test_resultset_filter(self):
+ def test_filter(self):
rs = ResultSet([[12000, 'adim'], [13000, 'syt'], [14000, 'nico']],
'Any U,L where U is CWUser, U login L',
description=[['CWUser', 'String']] * 3)
@@ -142,7 +171,7 @@
self.assertEqual(len(rs2), 2)
self.assertEqual([login for _, login in rs2], ['adim', 'syt'])
- def test_resultset_transform(self):
+ def test_transform(self):
rs = ResultSet([[12, 'adim'], [13, 'syt'], [14, 'nico']],
'Any U,L where U is CWUser, U login L',
description=[['CWUser', 'String']] * 3)
@@ -154,7 +183,7 @@
self.assertEqual(len(rs2), 3)
self.assertEqual(list(rs2), [['adim'],['syt'],['nico']])
- def test_resultset_sort(self):
+ def test_sort(self):
rs = ResultSet([[12000, 'adim'], [13000, 'syt'], [14000, 'nico']],
'Any U,L where U is CWUser, U login L',
description=[['CWUser', 'String']] * 3)
@@ -179,7 +208,7 @@
# make sure rs is unchanged
self.assertEqual([login for _, login in rs], ['adim', 'syt', 'nico'])
- def test_resultset_split(self):
+ def test_split(self):
rs = ResultSet([[12000, 'adim', u'Adim chez les pinguins'],
[12000, 'adim', u'Jardiner facile'],
[13000, 'syt', u'Le carrelage en 42 leçons'],
@@ -457,5 +486,6 @@
self.assertIsInstance(str(rset), basestring)
self.assertEqual(len(str(rset).splitlines()), 1)
+
if __name__ == '__main__':
unittest_main()
diff -r d8bb8f631d41 -r a4e667270dd4 test/unittest_schema.py
--- a/test/unittest_schema.py Mon Sep 26 18:37:23 2011 +0200
+++ b/test/unittest_schema.py Fri Dec 09 12:08:27 2011 +0100
@@ -29,7 +29,7 @@
from yams import ValidationError, BadSchemaDefinition
from yams.constraints import SizeConstraint, StaticVocabularyConstraint
from yams.buildobjs import RelationDefinition, EntityType, RelationType
-from yams.reader import PyFileReader
+from yams.reader import fill_schema
from cubicweb.schema import (
CubicWebSchema, CubicWebEntitySchema, CubicWebSchemaLoader,
@@ -159,7 +159,7 @@
self.assert_(isinstance(schema, CubicWebSchema))
self.assertEqual(schema.name, 'data')
entities = sorted([str(e) for e in schema.entities()])
- expected_entities = ['BaseTransition', 'Bookmark', 'Boolean', 'Bytes', 'Card',
+ expected_entities = ['BaseTransition', 'BigInt', 'Bookmark', 'Boolean', 'Bytes', 'Card',
'Date', 'Datetime', 'Decimal',
'CWCache', 'CWConstraint', 'CWConstraintType', 'CWEType',
'CWAttribute', 'CWGroup', 'EmailAddress', 'CWRelation',
@@ -194,7 +194,7 @@
'from_entity', 'from_state', 'fulltext_container', 'fulltextindexed',
'has_text',
- 'identity', 'in_group', 'in_state', 'indexed',
+ 'identity', 'in_group', 'in_state', 'in_synchronization', 'indexed',
'initial_state', 'inlined', 'internationalizable', 'is', 'is_instance_of',
'label', 'last_login_time', 'latest_retrieval', 'lieu', 'login',
@@ -260,18 +260,23 @@
self.assertEqual([x.expression for x in aschema.get_rqlexprs('update')],
['U has_update_permission X'])
+ def test_nonregr_allowed_type_names(self):
+ schema = CubicWebSchema('Test Schema')
+ schema.add_entity_type(EntityType('NaN'))
+
+
class BadSchemaTC(TestCase):
def setUp(self):
self.loader = CubicWebSchemaLoader()
self.loader.defined = {}
self.loader.loaded_files = []
self.loader.post_build_callbacks = []
- self.loader._pyreader = PyFileReader(self.loader)
def _test(self, schemafile, msg):
self.loader.handle_file(join(DATADIR, schemafile))
+ sch = self.loader.schemacls('toto')
with self.assertRaises(BadSchemaDefinition) as cm:
- self.loader._build_schema('toto', False)
+ fill_schema(sch, self.loader.defined, False)
self.assertEqual(str(cm.exception), msg)
def test_lowered_etype(self):
diff -r d8bb8f631d41 -r a4e667270dd4 test/unittest_selectors.py
--- a/test/unittest_selectors.py Mon Sep 26 18:37:23 2011 +0200
+++ b/test/unittest_selectors.py Fri Dec 09 12:08:27 2011 +0100
@@ -26,7 +26,7 @@
from cubicweb.appobject import Selector, AndSelector, OrSelector
from cubicweb.selectors import (is_instance, adaptable, match_user_groups,
multi_lines_rset, score_entity, is_in_state,
- on_transition, rql_condition)
+ on_transition, rql_condition, relation_possible)
from cubicweb.web import action
@@ -102,6 +102,10 @@
self.assertIs(csel.search_selector(is_instance), sel)
csel = AndSelector(Selector(), sel)
self.assertIs(csel.search_selector(is_instance), sel)
+ self.assertIs(csel.search_selector((AndSelector, OrSelector)), csel)
+ self.assertIs(csel.search_selector((OrSelector, AndSelector)), csel)
+ self.assertIs(csel.search_selector((is_instance, score_entity)), sel)
+ self.assertIs(csel.search_selector((score_entity, is_instance)), sel)
def test_inplace_and(self):
selector = _1_()
@@ -140,35 +144,6 @@
self.assertEqual(selector(None), 0)
-class IsInStateSelectorTC(CubicWebTC):
- def setup_database(self):
- wf = self.shell().add_workflow("testwf", 'StateFull', default=True)
- initial = wf.add_state(u'initial', initial=True)
- final = wf.add_state(u'final')
- wf.add_transition(u'forward', (initial,), final)
-
- def test_initial_state(self):
- req = self.request()
- entity = req.create_entity('StateFull')
- selector = is_in_state(u'initial')
- self.commit()
- score = selector(entity.__class__, None, entity=entity)
- self.assertEqual(score, 1)
-
- def test_final_state(self):
- req = self.request()
- entity = req.create_entity('StateFull')
- selector = is_in_state(u'initial')
- self.commit()
- entity.cw_adapt_to('IWorkflowable').fire_transition(u'forward')
- self.commit()
- score = selector(entity.__class__, None, entity=entity)
- self.assertEqual(score, 0)
- selector = is_in_state(u'final')
- score = selector(entity.__class__, None, entity=entity)
- self.assertEqual(score, 1)
-
-
class ImplementsSelectorTC(CubicWebTC):
def test_etype_priority(self):
req = self.request()
@@ -189,11 +164,17 @@
self.assertEqual(is_instance('BaseTransition').score_class(cls, self.request()),
3)
+ def test_outer_join(self):
+ req = self.request()
+ rset = req.execute('Any U,B WHERE B? bookmarked_by U, U login "anon"')
+ self.assertEqual(is_instance('Bookmark')(None, req, rset=rset, row=0, col=1),
+ 0)
+
class WorkflowSelectorTC(CubicWebTC):
def _commit(self):
self.commit()
- self.wf_entity.clear_all_caches()
+ self.wf_entity.cw_clear_all_caches()
def setup_database(self):
wf = self.shell().add_workflow("wf_test", 'StateFull', default=True)
@@ -315,6 +296,27 @@
self.assertEqual(selector(None, self.req, rset=self.rset), 0)
+class RelationPossibleTC(CubicWebTC):
+
+ def test_rqlst_1(self):
+ req = self.request()
+ selector = relation_possible('in_group')
+ select = self.vreg.parse(req, 'Any X WHERE X is CWUser').children[0]
+ score = selector(None, req, rset=1,
+ select=select, filtered_variable=select.defined_vars['X'])
+ self.assertEqual(score, 1)
+
+ def test_rqlst_2(self):
+ req = self.request()
+ selector = relation_possible('in_group')
+ select = self.vreg.parse(req, 'Any 1, COUNT(X) WHERE X is CWUser, X creation_date XD, '
+ 'Y creation_date YD, Y is CWGroup '
+ 'HAVING DAY(XD)=DAY(YD)').children[0]
+ score = selector(None, req, rset=1,
+ select=select, filtered_variable=select.defined_vars['X'])
+ self.assertEqual(score, 1)
+
+
class MatchUserGroupsTC(CubicWebTC):
def test_owners_group(self):
"""tests usage of 'owners' group with match_user_group"""
diff -r d8bb8f631d41 -r a4e667270dd4 test/unittest_utils.py
--- a/test/unittest_utils.py Mon Sep 26 18:37:23 2011 +0200
+++ b/test/unittest_utils.py Fri Dec 09 12:08:27 2011 +0100
@@ -21,9 +21,12 @@
import decimal
import datetime
+
from logilab.common.testlib import TestCase, DocTest, unittest_main
-from cubicweb.utils import make_uid, UStringIO, SizeConstrainedList, RepeatList
+from cubicweb.devtools.testlib import CubicWebTC
+from cubicweb.utils import (make_uid, UStringIO, SizeConstrainedList,
+ RepeatList, HTMLHead)
from cubicweb.entity import Entity
try:
@@ -155,6 +158,102 @@
def test_encoding_unknown_stuff(self):
self.assertEqual(self.encode(TestCase), 'null')
+class HTMLHeadTC(CubicWebTC):
+ def test_concat_urls(self):
+ base_url = u'http://test.fr/data/'
+ head = HTMLHead(base_url)
+ urls = [base_url + u'bob1.js',
+ base_url + u'bob2.js',
+ base_url + u'bob3.js']
+ result = head.concat_urls(urls)
+ expected = u'http://test.fr/data/??bob1.js,bob2.js,bob3.js'
+ self.assertEqual(result, expected)
+
+ def test_group_urls(self):
+ base_url = u'http://test.fr/data/'
+ head = HTMLHead(base_url)
+ urls_spec = [(base_url + u'bob0.js', None),
+ (base_url + u'bob1.js', None),
+ (u'http://ext.com/bob2.js', None),
+ (u'http://ext.com/bob3.js', None),
+ (base_url + u'bob4.css', 'all'),
+ (base_url + u'bob5.css', 'all'),
+ (base_url + u'bob6.css', 'print'),
+ (base_url + u'bob7.css', 'print'),
+ (base_url + u'bob8.css', ('all', u'[if IE 8]')),
+ (base_url + u'bob9.css', ('print', u'[if IE 8]'))
+ ]
+ result = head.group_urls(urls_spec)
+ expected = [(base_url + u'??bob0.js,bob1.js', None),
+ (u'http://ext.com/bob2.js', None),
+ (u'http://ext.com/bob3.js', None),
+ (base_url + u'??bob4.css,bob5.css', 'all'),
+ (base_url + u'??bob6.css,bob7.css', 'print'),
+ (base_url + u'bob8.css', ('all', u'[if IE 8]')),
+ (base_url + u'bob9.css', ('print', u'[if IE 8]'))
+ ]
+ self.assertEqual(list(result), expected)
+
+ def test_getvalue_with_concat(self):
+ base_url = u'http://test.fr/data/'
+ head = HTMLHead(base_url)
+ head.add_js(base_url + u'bob0.js')
+ head.add_js(base_url + u'bob1.js')
+ head.add_js(u'http://ext.com/bob2.js')
+ head.add_js(u'http://ext.com/bob3.js')
+ head.add_css(base_url + u'bob4.css')
+ head.add_css(base_url + u'bob5.css')
+ head.add_css(base_url + u'bob6.css', 'print')
+ head.add_css(base_url + u'bob7.css', 'print')
+ head.add_ie_css(base_url + u'bob8.css')
+ head.add_ie_css(base_url + u'bob9.css', 'print', u'[if lt IE 7]')
+ result = head.getvalue()
+ expected = u"""
+
+
+
+
+
+
+
+"""
+ self.assertEqual(result, expected)
+
+ def test_getvalue_without_concat(self):
+ base_url = u'http://test.fr/data/'
+ head = HTMLHead()
+ head.add_js(base_url + u'bob0.js')
+ head.add_js(base_url + u'bob1.js')
+ head.add_js(u'http://ext.com/bob2.js')
+ head.add_js(u'http://ext.com/bob3.js')
+ head.add_css(base_url + u'bob4.css')
+ head.add_css(base_url + u'bob5.css')
+ head.add_css(base_url + u'bob6.css', 'print')
+ head.add_css(base_url + u'bob7.css', 'print')
+ head.add_ie_css(base_url + u'bob8.css')
+ head.add_ie_css(base_url + u'bob9.css', 'print', u'[if lt IE 7]')
+ result = head.getvalue()
+ expected = u"""
+
+
+
+
+
+
+
+
+
+
+"""
+ self.assertEqual(result, expected)
class DocTest(DocTest):
from cubicweb import utils as module
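
For illustration, a hedged sketch of the grouping rule the HTMLHead tests
above encode (the urls below are illustrative):

# sketch only: resources served from under the HTMLHead base url are merged
# into a single '??file1,file2' request; foreign urls keep their own entry
from cubicweb.utils import HTMLHead
head = HTMLHead(u'http://test.fr/data/')
grouped = list(head.group_urls([(u'http://test.fr/data/a.js', None),
                                (u'http://test.fr/data/b.js', None),
                                (u'http://ext.com/c.js', None)]))
assert grouped == [(u'http://test.fr/data/??a.js,b.js', None),
                   (u'http://ext.com/c.js', None)]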
diff -r d8bb8f631d41 -r a4e667270dd4 toolsutils.py
--- a/toolsutils.py Mon Sep 26 18:37:23 2011 +0200
+++ b/toolsutils.py Fri Dec 09 12:08:27 2011 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -36,7 +36,7 @@
from logilab.common.compat import any
from logilab.common.shellutils import ASK
-from cubicweb import warning
+from cubicweb import warning # pylint: disable=E0611
from cubicweb import ConfigurationError, ExecutionError
def underline_title(title, car='-'):
@@ -159,15 +159,11 @@
print '-> set permissions to 0600 for %s' % filepath
chmod(filepath, 0600)
-def read_config(config_file):
- """read the instance configuration from a file and return it as a
- dictionnary
-
- :type config_file: str
- :param config_file: path to the configuration file
-
- :rtype: dict
- :return: a dictionary with specified values associated to option names
+def read_config(config_file, raise_if_unreadable=False):
+ """read some simple configuration from `config_file` and return it as a
+ dictionary. If `raise_if_unreadable` is false (the default), an empty
+ dictionary will be returned if the file is nonexistent or unreadable, else
+ :exc:`ExecutionError` will be raised.
"""
from logilab.common.fileutils import lines
config = current = {}
@@ -190,8 +186,12 @@
value = value.strip()
current[option] = value or None
except IOError, ex:
- warning('missing or non readable configuration file %s (%s)',
- config_file, ex)
+ if raise_if_unreadable:
+ raise ExecutionError('%s. Are you logged in as the correct user '
+ 'to use this instance?' % ex)
+ else:
+ warning('missing or non readable configuration file %s (%s)',
+ config_file, ex)
return config
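
A hedged usage sketch of the new `raise_if_unreadable` keyword (the path
below is hypothetical):

# sketch only: with raise_if_unreadable=True, a missing or protected file
# surfaces as ExecutionError instead of silently yielding an empty dict
from cubicweb import ExecutionError
from cubicweb.toolsutils import read_config
try:
    config = read_config('/etc/cubicweb.d/myapp/sources',
                         raise_if_unreadable=True)
except ExecutionError, ex:
    print 'bad instance configuration: %s' % ex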
diff -r d8bb8f631d41 -r a4e667270dd4 uilib.py
--- a/uilib.py Mon Sep 26 18:37:23 2011 +0200
+++ b/uilib.py Fri Dec 09 12:08:27 2011 +0100
@@ -31,7 +31,7 @@
from logilab.mtconverter import xml_escape, html_unescape
from logilab.common.date import ustrftime
-from cubicweb.utils import json_dumps
+from cubicweb.utils import JSString, json_dumps
def rql_for_eid(eid):
@@ -51,31 +51,65 @@
assert eid is not None
return '%s:%s' % (name, eid)
+def print_bytes(value, req, props, displaytime=True):
+ return u''
+
+def print_string(value, req, props, displaytime=True):
+ # don't translate empty value if you don't want strange results
+ if props is not None and value and props.get('internationalizable'):
+ return req._(value)
+ return value
+
+def print_date(value, req, props, displaytime=True):
+ return ustrftime(value, req.property_value('ui.date-format'))
+
+def print_time(value, req, props, displaytime=True):
+ return ustrftime(value, req.property_value('ui.time-format'))
+
+def print_tztime(value, req, props, displaytime=True):
+ return ustrftime(value, req.property_value('ui.time-format')) + u' UTC'
+
+def print_datetime(value, req, props, displaytime=True):
+ if displaytime:
+ return ustrftime(value, req.property_value('ui.datetime-format'))
+ return ustrftime(value, req.property_value('ui.date-format'))
+
+def print_tzdatetime(value, req, props, displaytime=True):
+ if displaytime:
+ return ustrftime(value, req.property_value('ui.datetime-format')) + u' UTC'
+ return ustrftime(value, req.property_value('ui.date-format'))
+
+def print_boolean(value, req, props, displaytime=True):
+ if value:
+ return req._('yes')
+ return req._('no')
+
+def print_float(value, req, props, displaytime=True):
+ return unicode(req.property_value('ui.float-format') % value)
+
+PRINTERS = {
+ 'Bytes': print_bytes,
+ 'String': print_string,
+ 'Date': print_date,
+ 'Time': print_time,
+ 'TZTime': print_tztime,
+ 'Datetime': print_datetime,
+ 'TZDatetime': print_tzdatetime,
+ 'Boolean': print_boolean,
+ 'Float': print_float,
+ 'Decimal': print_float,
+ # XXX Interval
+ }
+
def printable_value(req, attrtype, value, props=None, displaytime=True):
"""return a displayable value (i.e. unicode string)"""
- if value is None or attrtype == 'Bytes':
+ if value is None:
return u''
- if attrtype == 'String':
- # don't translate empty value if you don't want strange results
- if props is not None and value and props.get('internationalizable'):
- return req._(value)
- return value
- if attrtype == 'Date':
- return ustrftime(value, req.property_value('ui.date-format'))
- if attrtype in ('Time', 'TZTime'):
- return ustrftime(value, req.property_value('ui.time-format'))
- if attrtype in ('Datetime', 'TZDatetime'):
- if displaytime:
- return ustrftime(value, req.property_value('ui.datetime-format'))
- return ustrftime(value, req.property_value('ui.date-format'))
- if attrtype == 'Boolean':
- if value:
- return req._('yes')
- return req._('no')
- if attrtype in ('Float', 'Decimal'):
- value = req.property_value('ui.float-format') % value
- # XXX Interval
- return unicode(value)
+ try:
+ printer = PRINTERS[attrtype]
+ except KeyError:
+ return unicode(value)
+ return printer(value, req, props, displaytime)
# text publishing #############################################################
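
Moving the per-type formatting into the PRINTERS table also makes it
overridable from outside; a hedged sketch (the Interval printer is
hypothetical, per the XXX note above):

# sketch only: a printer takes (value, req, props, displaytime) and returns
# a unicode string, like the print_* functions defined above
from cubicweb import uilib

def print_interval(value, req, props, displaytime=True):
    return unicode(value)  # hypothetical formatting, not part of the patch

uilib.PRINTERS['Interval'] = print_interval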
@@ -127,94 +161,84 @@
REM_ROOT_HTML_TAGS = re.compile('</(body|html)>', re.U)
-try:
- from lxml import etree, html
- from lxml.html import clean, defs
+from lxml import etree, html
+from lxml.html import clean, defs
- ALLOWED_TAGS = (defs.general_block_tags | defs.list_tags | defs.table_tags |
- defs.phrase_tags | defs.font_style_tags |
- set(('span', 'a', 'br', 'img', 'map', 'area', 'sub', 'sup'))
- )
+ALLOWED_TAGS = (defs.general_block_tags | defs.list_tags | defs.table_tags |
+ defs.phrase_tags | defs.font_style_tags |
+ set(('span', 'a', 'br', 'img', 'map', 'area', 'sub', 'sup'))
+ )
- CLEANER = clean.Cleaner(allow_tags=ALLOWED_TAGS, remove_unknown_tags=False,
- style=True, safe_attrs_only=True,
- add_nofollow=False,
- )
+CLEANER = clean.Cleaner(allow_tags=ALLOWED_TAGS, remove_unknown_tags=False,
+ style=True, safe_attrs_only=True,
+ add_nofollow=False,
+ )
- def soup2xhtml(data, encoding):
- """tidy html soup by allowing some element tags and return the result
- """
- # remove spurious