--- a/.hgignore Tue Jun 10 09:35:26 2014 +0200
+++ b/.hgignore Tue Jun 10 09:49:45 2014 +0200
@@ -18,3 +18,4 @@
^doc/html/
^doc/doctrees/
^doc/book/en/devweb/js_api/
+data/pgdb/
--- a/.hgtags Tue Jun 10 09:35:26 2014 +0200
+++ b/.hgtags Tue Jun 10 09:49:45 2014 +0200
@@ -350,3 +350,9 @@
5071b69b6b0b0de937bb231404cbf652a103dbe0 cubicweb-version-3.18.5
5071b69b6b0b0de937bb231404cbf652a103dbe0 cubicweb-debian-version-3.18.5-1
5071b69b6b0b0de937bb231404cbf652a103dbe0 cubicweb-centos-version-3.18.5-1
+1141927b8494aabd16e31b0d0d9a50fe1fed5f2f cubicweb-version-3.19.0
+1141927b8494aabd16e31b0d0d9a50fe1fed5f2f cubicweb-debian-version-3.19.0-1
+1141927b8494aabd16e31b0d0d9a50fe1fed5f2f cubicweb-centos-version-3.19.0-1
+1fe4bc4a8ac8831a379e9ebea08d75fbb6fc5c2a cubicweb-version-3.19.1
+1fe4bc4a8ac8831a379e9ebea08d75fbb6fc5c2a cubicweb-debian-version-3.19.1-1
+1fe4bc4a8ac8831a379e9ebea08d75fbb6fc5c2a cubicweb-centos-version-3.19.1-1
--- a/MANIFEST.in Tue Jun 10 09:35:26 2014 +0200
+++ b/MANIFEST.in Tue Jun 10 09:49:45 2014 +0200
@@ -10,7 +10,7 @@
recursive-include misc *.py *.png *.display
include web/views/*.pt
-recursive-include web/data external_resources *.js *.css *.py *.png *.gif *.ico *.ttf
+recursive-include web/data external_resources *.js *.css *.py *.png *.gif *.ico *.ttf *.svg *.woff *.eot
recursive-include web/wdoc *.rst *.png *.xml ChangeLog*
recursive-include devtools/data *.js *.css *.sh
--- a/__init__.py Tue Jun 10 09:35:26 2014 +0200
+++ b/__init__.py Tue Jun 10 09:49:45 2014 +0200
@@ -242,3 +242,15 @@
errors[rname(*key)] = errors.pop(key)
return ValidationError(getattr(entity, 'eid', entity), errors,
substitutions, i18nvalues)
+
+
+# exceptions ##################################################################
+
+class ProgrammingError(Exception): #DatabaseError):
+ """Exception raised for errors that are related to the database's operation
+ and not necessarily under the control of the programmer, e.g. an unexpected
+ disconnect occurs, the data source name is not found, a transaction could
+ not be processed, a memory allocation error occurred during processing,
+ etc.
+ """
+
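+# A usage sketch, mirroring this patch's own test helpers (see the rollback
+# helper in devtools/testlib.py below), where the exception is taken to mean
+# that the connection was already closed:
+#
+#     try:
+#         cnx.rollback()
+#     except ProgrammingError:
+#         pass  # connection closed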
--- a/__pkginfo__.py Tue Jun 10 09:35:26 2014 +0200
+++ b/__pkginfo__.py Tue Jun 10 09:49:45 2014 +0200
@@ -22,7 +22,7 @@
modname = distname = "cubicweb"
-numversion = (3, 18, 5)
+numversion = (3, 19, 1)
version = '.'.join(str(num) for num in numversion)
description = "a repository of entities / relations for knowledge management"
@@ -50,7 +50,7 @@
'Twisted': '',
# XXX graphviz
# server dependencies
- 'logilab-database': '>= 1.11',
+ 'logilab-database': '>= 1.12.1',
'passlib': '',
}
--- a/_exceptions.py Tue Jun 10 09:35:26 2014 +0200
+++ b/_exceptions.py Tue Jun 10 09:49:45 2014 +0200
@@ -76,13 +76,6 @@
"""the eid is not defined in the system tables"""
msg = 'No entity with eid %s in the repository'
-class ETypeNotSupportedBySources(RepositoryError, InternalError):
- """no source support an entity type"""
- msg = 'No source supports %r entity\'s type'
-
-class MultiSourcesError(RepositoryError, InternalError):
- """usually due to bad multisources configuration or rql query"""
-
class UniqueTogetherError(RepositoryError):
"""raised when a unique_together constraint caused an IntegrityError"""
def __init__(self, session, **kwargs):
--- a/cubicweb.spec Tue Jun 10 09:35:26 2014 +0200
+++ b/cubicweb.spec Tue Jun 10 09:49:45 2014 +0200
@@ -7,7 +7,7 @@
%endif
Name: cubicweb
-Version: 3.18.5
+Version: 3.19.1
Release: logilab.1%{?dist}
Summary: CubicWeb is a semantic web application framework
Source0: http://download.logilab.org/pub/cubicweb/cubicweb-%{version}.tar.gz
@@ -24,7 +24,7 @@
Requires: %{python}-logilab-mtconverter >= 0.8.0
Requires: %{python}-rql >= 0.31.2
Requires: %{python}-yams >= 0.39.1
-Requires: %{python}-logilab-database >= 1.11.0
+Requires: %{python}-logilab-database >= 1.12.1
Requires: %{python}-passlib
Requires: %{python}-lxml
Requires: %{python}-twisted-web
--- a/cwconfig.py Tue Jun 10 09:35:26 2014 +0200
+++ b/cwconfig.py Tue Jun 10 09:49:45 2014 +0200
@@ -554,27 +554,19 @@
todo.append(depcube)
return cubes
- def reorder_cubes(self, cubes):
+ @classmethod
+ def reorder_cubes(cls, cubes):
"""reorder cubes from the top level cubes to inner dependencies
cubes
"""
from logilab.common.graph import ordered_nodes, UnorderableGraph
- # See help string for 'ui-cube' in web/webconfig.py for the reasons
- # behind this hack.
- uicube = self.get('ui-cube', None)
graph = {}
- if uicube:
- graph[uicube] = set()
for cube in cubes:
cube = CW_MIGRATION_MAP.get(cube, cube)
- graph[cube] = set(dep for dep in self.cube_dependencies(cube)
+ graph[cube] = set(dep for dep in cls.cube_dependencies(cube)
if dep in cubes)
- graph[cube] |= set(dep for dep in self.cube_recommends(cube)
+ graph[cube] |= set(dep for dep in cls.cube_recommends(cube)
if dep in cubes)
- if uicube and cube != uicube \
- and cube not in self.cube_dependencies(uicube) \
- and cube not in self.cube_recommends(uicube):
- graph[cube].add(uicube)
try:
return ordered_nodes(graph)
except UnorderableGraph as ex:
@@ -999,7 +991,7 @@
super(CubicWebConfiguration, self).adjust_sys_path()
# adding apphome to python path is not usually necessary in production
# environments, but necessary for tests
- if self.apphome and not self.apphome in sys.path:
+ if self.apphome and self.apphome not in sys.path:
sys.path.insert(0, self.apphome)
@property
--- a/cwctl.py Tue Jun 10 09:35:26 2014 +0200
+++ b/cwctl.py Tue Jun 10 09:49:45 2014 +0200
@@ -781,7 +781,9 @@
if self.config.fs_only or toupgrade:
for cube, fromversion, toversion in toupgrade:
print '-> migration needed from %s to %s for %s' % (fromversion, toversion, cube)
- mih.migrate(vcconf, reversed(toupgrade), self.config)
+ with mih.cnx:
+ with mih.cnx.security_enabled(False, False):
+ mih.migrate(vcconf, reversed(toupgrade), self.config)
else:
print '-> no data migration needed for instance %s.' % appid
# rewrite main configuration file
@@ -912,13 +914,14 @@
def _handle_networked(self, appuri):
""" returns migration context handler & shutdown function """
from cubicweb import AuthenticationError
- from cubicweb.dbapi import connect
+ from cubicweb.repoapi import connect, get_repository
from cubicweb.server.utils import manager_userpasswd
from cubicweb.server.migractions import ServerMigrationHelper
while True:
try:
login, pwd = manager_userpasswd(msg=None)
- cnx = connect(appuri, login=login, password=pwd, mulcnx=False)
+ repo = get_repository(appuri)
+ cnx = connect(repo, login=login, password=pwd, mulcnx=False)
except AuthenticationError as ex:
print ex
except (KeyboardInterrupt, EOFError):
@@ -948,15 +951,17 @@
else:
mih, shutdown_callback = self._handle_networked(appuri)
try:
- if args:
- # use cmdline parser to access left/right attributes only
- # remember that usage requires instance appid as first argument
- scripts, args = self.cmdline_parser.largs[1:], self.cmdline_parser.rargs
- for script in scripts:
- mih.cmd_process_script(script, scriptargs=args)
- mih.commit()
- else:
- mih.interactive_shell()
+ with mih.cnx:
+ with mih.cnx.security_enabled(False, False):
+ if args:
+ # use cmdline parser to access left/right attributes only
+ # remember that usage requires instance appid as first argument
+ scripts, args = self.cmdline_parser.largs[1:], self.cmdline_parser.rargs
+ for script in scripts:
+ mih.cmd_process_script(script, scriptargs=args)
+ mih.commit()
+ else:
+ mih.interactive_shell()
finally:
shutdown_callback()
--- a/cwvreg.py Tue Jun 10 09:35:26 2014 +0200
+++ b/cwvreg.py Tue Jun 10 09:49:45 2014 +0200
@@ -701,7 +701,7 @@
def solutions(self, req, rqlst, args):
def type_from_eid(eid, req=req):
- return req.describe(eid)[0]
+ return req.entity_metas(eid)['type']
return self.rqlhelper.compute_solutions(rqlst, {'eid': type_from_eid}, args)
def parse(self, req, rql, args=None):
--- a/dataimport.py Tue Jun 10 09:35:26 2014 +0200
+++ b/dataimport.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -622,11 +622,13 @@
self.rql('SET X %s Y WHERE X eid %%(x)s, Y eid %%(y)s' % rtype,
{'x': int(eid_from), 'y': int(eid_to)})
+ @deprecated("[3.19] use session.find(*args, **kwargs).entities() instead")
def find_entities(self, *args, **kwargs):
- return self.session.find_entities(*args, **kwargs)
+ return self.session.find(*args, **kwargs).entities()
+ @deprecated("[3.19] use session.find(*args, **kwargs).one() instead")
def find_one_entity(self, *args, **kwargs):
- return self.session.find_one_entity(*args, **kwargs)
+ return self.session.find(*args, **kwargs).one()
# the import controller ########################################################
@@ -948,7 +950,7 @@
def drop_indexes(self, etype):
"""Drop indexes for a given entity type"""
if etype not in self.indexes_etypes:
- cu = self.session.cnxset['system']
+ cu = self.session.cnxset.cu
def index_to_attr(index):
"""turn an index name to (database) attribute name"""
return index.replace(etype.lower(), '').replace('idx', '').strip('_')
@@ -981,7 +983,6 @@
self._storage_handler = self.system_source._storage_handler
self.preprocess_entity = self.system_source.preprocess_entity
self.sqlgen = self.system_source.sqlgen
- self.copy_based_source = self.system_source.copy_based_source
self.uri = self.system_source.uri
self.eid = self.system_source.eid
# Directory to write temporary files
@@ -1135,9 +1136,8 @@
if extid is not None:
assert isinstance(extid, str)
extid = b64encode(extid)
- uri = 'system' if source.copy_based_source else source.uri
attrs = {'type': entity.cw_etype, 'eid': entity.eid, 'extid': extid,
- 'source': uri, 'asource': source.uri, 'mtime': datetime.utcnow()}
+ 'source': 'system', 'asource': source.uri}
self._handle_insert_entity_sql(session, self.sqlgen.insert('entities', attrs), attrs)
# insert core relations: is, is_instance_of and cw_source
try:
--- a/dbapi.py Tue Jun 10 09:35:26 2014 +0200
+++ b/dbapi.py Tue Jun 10 09:49:45 2014 +0200
@@ -34,13 +34,13 @@
from urlparse import urlparse
from logilab.common.logging_ext import set_log_methods
-from logilab.common.decorators import monkeypatch
+from logilab.common.decorators import monkeypatch, cachedproperty
from logilab.common.deprecation import deprecated
-from cubicweb import ETYPE_NAME_MAP, ConnectionError, AuthenticationError,\
- cwvreg, cwconfig
+from cubicweb import (ETYPE_NAME_MAP, AuthenticationError, ProgrammingError,
+ cwvreg, cwconfig)
+from cubicweb.repoapi import get_repository
from cubicweb.req import RequestSessionBase
-from cubicweb.utils import parse_repo_uri
_MARKER = object()
@@ -91,53 +91,7 @@
self.close_on_del = close
-def _get_inmemory_repo(config, vreg=None):
- from cubicweb.server.repository import Repository
- from cubicweb.server.utils import TasksManager
- return Repository(config, TasksManager(), vreg=vreg)
-
-def get_repository(uri=None, config=None, vreg=None):
- """get a repository for the given URI or config/vregistry (in case we're
- loading the repository for a client, eg web server, configuration).
-
- The returned repository may be an in-memory repository or a proxy object
- using a specific RPC method, depending on the given URI (pyro or zmq).
- """
- if uri is None:
- return _get_inmemory_repo(config, vreg)
-
- protocol, hostport, appid = parse_repo_uri(uri)
-
- if protocol == 'inmemory':
- # me may have been called with a dummy 'inmemory://' uri ...
- return _get_inmemory_repo(config, vreg)
-
- if protocol == 'pyroloc': # direct connection to the instance
- from logilab.common.pyro_ext import get_proxy
- uri = uri.replace('pyroloc', 'PYRO')
- return get_proxy(uri)
-
- if protocol == 'pyro': # connection mediated through the pyro ns
- from logilab.common.pyro_ext import ns_get_proxy
- path = appid.strip('/')
- if not path:
- raise ConnectionError(
- "can't find instance name in %s (expected to be the path component)"
- % uri)
- if '.' in path:
- nsgroup, nsid = path.rsplit('.', 1)
- else:
- nsgroup = 'cubicweb'
- nsid = path
- return ns_get_proxy(nsid, defaultnsgroup=nsgroup, nshost=hostport)
-
- if protocol.startswith('zmqpickle-'):
- from cubicweb.zmqclient import ZMQRepositoryClient
- return ZMQRepositoryClient(uri)
- else:
- raise ConnectionError('unknown protocol: `%s`' % protocol)
-
-
+@deprecated('[3.19] the dbapi is deprecated. Have a look at the new repoapi.')
def _repo_connect(repo, login, **kwargs):
"""Constructor to create a new connection to the given CubicWeb repository.
@@ -291,7 +245,6 @@
self.cnx = cnx
self.data = {}
self.login = login
- self.mtime = time()
# dbapi session identifier is the same as the first connection
# identifier, but may later differ in case of auto-reconnection as done
# by the web authentication manager (in cw.web.views.authentication)
@@ -327,17 +280,17 @@
else:
# these args are initialized after a connection is
# established
- self.session = None
+ self.session = DBAPISession(None)
self.cnx = self.user = _NeedAuthAccessMock()
self.set_default_language(vreg)
- def from_controller(self):
- return 'view'
+ def get_option_value(self, option, foreid=None):
+ if foreid is not None:
+ warn('[3.19] foreid argument is deprecated', DeprecationWarning,
+ stacklevel=2)
+ return self.cnx.get_option_value(option)
- def get_option_value(self, option, foreid=None):
- return self.cnx.get_option_value(option, foreid)
-
- def set_session(self, session, user=None):
+ def set_session(self, session):
"""method called by the session handler when the user is authenticated
or an anonymous connection is open
"""
@@ -345,11 +298,8 @@
if session.cnx:
self.cnx = session.cnx
self.execute = session.cnx.cursor(self).execute
- if user is None:
- user = self.cnx.user(self)
- if user is not None:
- self.user = user
- self.set_entity_cache(user)
+ self.user = self.cnx.user(self)
+ self.set_entity_cache(self.user)
def execute(self, *args, **kwargs): # pylint: disable=E0202
"""overriden when session is set. By default raise authentication error
@@ -371,8 +321,8 @@
# server-side service call #################################################
- def call_service(self, regid, async=False, **kwargs):
- return self.cnx.call_service(regid, async, **kwargs)
+ def call_service(self, regid, **kwargs):
+ return self.cnx.call_service(regid, **kwargs)
# entities cache management ###############################################
@@ -407,20 +357,18 @@
# server session compat layer #############################################
- def describe(self, eid, asdict=False):
+ def entity_metas(self, eid):
"""return a tuple (type, sourceuri, extid) for the entity with id <eid>"""
- return self.cnx.describe(eid, asdict)
+ return self.cnx.entity_metas(eid)
def source_defs(self):
"""return the definition of sources used by the repository."""
return self.cnx.source_defs()
- @deprecated('[3.17] do not use hijack_user. create new Session object')
- def hijack_user(self, user):
- """return a fake request/session using specified user"""
- req = DBAPIRequest(self.vreg)
- req.set_session(self.session, user)
- return req
+ @deprecated('[3.19] use .entity_metas(eid) instead')
+ def describe(self, eid, asdict=False):
+ """return a tuple (type, sourceuri, extid) for the entity with id <eid>"""
+ return self.cnx.describe(eid, asdict)
# these are overridden by set_log_methods below
# only defining here to prevent pylint from complaining
@@ -429,16 +377,6 @@
set_log_methods(DBAPIRequest, getLogger('cubicweb.dbapi'))
-# exceptions ##################################################################
-
-class ProgrammingError(Exception): #DatabaseError):
- """Exception raised for errors that are related to the database's operation
- and not necessarily under the control of the programmer, e.g. an unexpected
- disconnect occurs, the data source name is not found, a transaction could
- not be processed, a memory allocation error occurred during processing,
- etc.
- """
-
# cursor / connection objects ##################################################
@@ -531,7 +469,6 @@
# make exceptions available through the connection object
ProgrammingError = ProgrammingError
# attributes that may be overriden per connection instance
- anonymous_connection = False
cursor_class = Cursor
vreg = None
_closed = None
@@ -557,6 +494,13 @@
return False
return isinstance(self._repo, Repository)
+ @property # could be a cached property but we want to prevent assignment to
+ # catch potential programming error.
+ def anonymous_connection(self):
+ login = self._repo.user_info(self.sessionid)[1]
+ anon_login = self.vreg.config.get('anonymous-user')
+ return login == anon_login
+
def __repr__(self):
if self.anonymous_connection:
return '<Connection %s (anonymous)>' % self.sessionid
@@ -583,8 +527,8 @@
# server-side service call #################################################
@check_not_closed
- def call_service(self, regid, async=False, **kwargs):
- return self._repo.call_service(self.sessionid, regid, async, **kwargs)
+ def call_service(self, regid, **kwargs):
+ return self._repo.call_service(self.sessionid, regid, **kwargs)
# connection initialization methods ########################################
@@ -641,11 +585,11 @@
def request(self):
if self._web_request:
- from cubicweb.web.request import CubicWebRequestBase
- req = CubicWebRequestBase(self.vreg, False)
+ from cubicweb.web.request import DBAPICubicWebRequestBase
+ req = DBAPICubicWebRequestBase(self.vreg, False)
req.get_header = lambda x, default=None: default
- req.set_session = lambda session, user=None: DBAPIRequest.set_session(
- req, session, user)
+ req.set_session = lambda session: DBAPIRequest.set_session(
+ req, session)
req.relative_path = lambda includeparams=True: ''
else:
req = DBAPIRequest(self.vreg)
@@ -720,22 +664,40 @@
@check_not_closed
def get_option_value(self, option, foreid=None):
- """Return the value for `option` in the configuration. If `foreid` is
- specified, the actual repository to which this entity belongs is
- dereferenced and the option value retrieved from it.
+ """Return the value for `option` in the configuration.
+
+ `foreid` argument is deprecated and now useless (as of 3.19).
"""
- return self._repo.get_option_value(option, foreid)
+ if foreid is not None:
+ warn('[3.19] foreid argument is deprecated', DeprecationWarning,
+ stacklevel=2)
+ return self._repo.get_option_value(option)
+
@check_not_closed
+ def entity_metas(self, eid):
+ """return a tuple (type, sourceuri, extid) for the entity with id <eid>"""
+ return self._repo.entity_metas(self.sessionid, eid, **self._txid())
+
+ @deprecated('[3.19] use .entity_metas(eid) instead')
+ @check_not_closed
def describe(self, eid, asdict=False):
- metas = self._repo.describe(self.sessionid, eid, **self._txid())
- if len(metas) == 3: # backward compat
- metas = list(metas)
- metas.append(metas[1])
+ try:
+ metas = self._repo.entity_metas(self.sessionid, eid, **self._txid())
+ except AttributeError:
+ metas = self._repo.describe(self.sessionid, eid, **self._txid())
+ # talking to pre 3.19 repository
+ if len(metas) == 3: # even older backward compat
+ metas = list(metas)
+ metas.append(metas[1])
+ if asdict:
+ return dict(zip(('type', 'source', 'extid', 'asource'), metas))
+ return metas[:-1]
if asdict:
- return dict(zip(('type', 'source', 'extid', 'asource'), metas))
- # XXX :-1 for cw compat, use asdict=True for full information
- return metas[:-1]
+ metas['asource'] = metas['source'] # XXX pre 3.19 client compat
+ return metas
+ return metas['type'], metas['source'], metas['extid']
+
# db-api like interface ####################################################
--- a/debian/changelog Tue Jun 10 09:35:26 2014 +0200
+++ b/debian/changelog Tue Jun 10 09:49:45 2014 +0200
@@ -1,3 +1,15 @@
+cubicweb (3.19.1-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Julien Cristau <julien.cristau@logilab.fr> Tue, 03 Jun 2014 12:16:00 +0200
+
+cubicweb (3.19.0-1) unstable; urgency=low
+
+ * new upstream release
+
+ -- Julien Cristau <julien.cristau@logilab.fr> Mon, 28 Apr 2014 18:35:27 +0200
+
cubicweb (3.18.5-1) unstable; urgency=low
* new upstream release
--- a/debian/control Tue Jun 10 09:35:26 2014 +0200
+++ b/debian/control Tue Jun 10 09:49:45 2014 +0200
@@ -52,7 +52,7 @@
${python:Depends},
cubicweb-common (= ${source:Version}),
cubicweb-ctl (= ${source:Version}),
- python-logilab-database (>= 1.11.0),
+ python-logilab-database (>= 1.12.1),
cubicweb-postgresql-support
| cubicweb-mysql-support
| python-pysqlite2,
@@ -166,6 +166,11 @@
cubicweb-person (<< 1.8.0),
cubicweb-geocoding (<< 0.2.0),
cubicweb-invoice (<< 0.6.1),
+ cubicweb-mercurial-server (<< 0.4.2),
+ cubicweb-forgotpwd (<< 0.4.3),
+ cubicweb-registration (<< 0.4.3),
+ cubicweb-vcsfile (<< 1.15.0),
+ cubicweb-bootstrap (<< 0.6),
Description: common library for the CubicWeb framework
CubicWeb is a semantic web application framework.
.
--- a/devtools/__init__.py Tue Jun 10 09:35:26 2014 +0200
+++ b/devtools/__init__.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -25,8 +25,11 @@
import shutil
import pickle
import glob
+import random
+import subprocess
import warnings
import tempfile
+import getpass
from hashlib import sha1 # pylint: disable=E0611
from datetime import timedelta
from os.path import (abspath, join, exists, split, isabs, isdir)
@@ -86,6 +89,13 @@
'password': u'gingkow',
},
}
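+# PostgreSQL flavour of the default test sources: same admin account, but the
+# system source targets a locally spawned cluster, reached through a unix
+# socket in /tmp on a randomly drawn port so that concurrent test runs do not
+# clash (see PostgresTestDataBaseHandler below)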
+DEFAULT_PSQL_SOURCES = DEFAULT_SOURCES.copy()
+DEFAULT_PSQL_SOURCES['system'] = DEFAULT_SOURCES['system'].copy()
+DEFAULT_PSQL_SOURCES['system']['db-driver'] = 'postgres'
+DEFAULT_PSQL_SOURCES['system']['db-host'] = '/tmp'
+DEFAULT_PSQL_SOURCES['system']['db-port'] = str(random.randrange(5432, 2**16))
+DEFAULT_PSQL_SOURCES['system']['db-user'] = unicode(getpass.getuser())
+DEFAULT_PSQL_SOURCES['system']['db-password'] = None
def turn_repo_off(repo):
""" Idea: this is less costly than a full re-creation of the repo object.
@@ -121,8 +131,7 @@
repo._type_source_cache = {}
repo._extid_cache = {}
repo.querier._rql_cache = {}
- for source in repo.sources:
- source.reset_caches()
+ repo.system_source.reset_caches()
repo._needs_refresh = False
@@ -131,6 +140,8 @@
read_instance_schema = False
init_repository = True
skip_db_create_and_restore = False
+ default_sources = DEFAULT_SOURCES
+
def __init__(self, appid='data', apphome=None, log_threshold=logging.CRITICAL+10):
# must be set before calling parent __init__
if apphome is None:
@@ -192,20 +203,20 @@
sourcefile = super(TestServerConfiguration, self).sources_file()
return sourcefile
- def sources(self):
+ def read_sources_file(self):
"""By default, we run tests with the sqlite DB backend. One may use its
own configuration by just creating a 'sources' file in the test
- directory from wich tests are launched or by specifying an alternative
+ directory from which tests are launched or by specifying an alternative
sources file using self.sourcefile.
"""
try:
- sources = super(TestServerConfiguration, self).sources()
+ sources = super(TestServerConfiguration, self).read_sources_file()
except ExecutionError:
sources = {}
if not sources:
- sources = DEFAULT_SOURCES
+ sources = self.default_sources
if 'admin' not in sources:
- sources['admin'] = DEFAULT_SOURCES['admin']
+ sources['admin'] = self.default_sources['admin']
return sources
# web config methods needed here for cases when we use this config as a web
@@ -246,6 +257,10 @@
self.sourcefile = sourcefile
+class PostgresApptestConfiguration(ApptestConfiguration):
+ default_sources = DEFAULT_PSQL_SOURCES
+
+
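+# A usage sketch: a test case opts into the PostgreSQL backend by pointing
+# its `configcls` class attribute at this configuration, e.g.
+#
+#     class MyPostgresTC(CubicWebTC):
+#         configcls = PostgresApptestConfiguration
+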
class RealDatabaseConfiguration(ApptestConfiguration):
"""configuration class for tests to run on a real database.
@@ -269,7 +284,6 @@
skip_db_create_and_restore = True
read_instance_schema = True # read schema from database
-
# test database handling #######################################################
DEFAULT_EMPTY_DB_ID = '__default_empty_db__'
@@ -389,12 +403,12 @@
def get_cnx(self):
"""return Connection object on the current repository"""
- from cubicweb.dbapi import _repo_connect
+ from cubicweb.repoapi import connect
repo = self.get_repo()
- sources = self.config.sources()
+ sources = self.config.read_sources_file()
login = unicode(sources['admin']['login'])
password = sources['admin']['password'] or 'xxx'
- cnx = _repo_connect(repo, login, password=password)
+ cnx = connect(repo, login, password=password)
return cnx
def get_repo_and_cnx(self, db_id=DEFAULT_EMPTY_DB_ID):
@@ -412,8 +426,7 @@
@property
def system_source(self):
- sources = self.config.sources()
- return sources['system']
+ return self.config.system_source_config
@property
def dbname(self):
@@ -522,6 +535,27 @@
class PostgresTestDataBaseHandler(TestDataBaseHandler):
DRIVER = 'postgres'
+ __CTL = set()
+
+ @classmethod
+ def killall(cls):
+ for datadir in cls.__CTL:
+ subprocess.call(['pg_ctl', 'stop', '-D', datadir, '-m', 'fast'])
+
+ def __init__(self, config):
+ super(PostgresTestDataBaseHandler, self).__init__(config)
+ datadir = join(self.config.apphome, 'pgdb')
+ if not exists(datadir):
+ subprocess.check_call(['initdb', '-D', datadir, '-E', 'utf-8', '--locale=C'])
+ port = self.system_source['db-port']
+ directory = self.system_source['db-host']
+ env = os.environ.copy()
+ env['PGPORT'] = str(port)
+ env['PGHOST'] = str(directory)
+ subprocess.check_call(['pg_ctl', 'start', '-w', '-D', datadir, '-o', '-h "" -k %s -p %s' % (directory, port)],
+ env=env)
+ self.__CTL.add(datadir)
+
@property
@cached
def helper(self):
@@ -694,8 +728,8 @@
def absolute_dbfile(self):
"""absolute path of current database file"""
dbfile = join(self._ensure_test_backup_db_dir(),
- self.config.sources()['system']['db-name'])
- self.config.sources()['system']['db-name'] = dbfile
+ self.system_source['db-name'])
+ self.system_source['db-name'] = dbfile
return dbfile
def process_cache_entry(self, directory, dbname, db_id, entry):
@@ -734,6 +768,7 @@
import atexit
atexit.register(SQLiteTestDataBaseHandler._cleanup_all_tmpdb)
+atexit.register(PostgresTestDataBaseHandler.killall)
def install_sqlite_patch(querier):
@@ -840,8 +875,7 @@
handler = HCACHE.get(config)
if handler is not None:
return handler
- sources = config.sources()
- driver = sources['system']['db-driver']
+ driver = config.system_source_config['db-driver']
key = (driver, config)
handlerkls = HANDLERS.get(driver, None)
if handlerkls is not None:
--- a/devtools/devctl.py Tue Jun 10 09:35:26 2014 +0200
+++ b/devtools/devctl.py Tue Jun 10 09:49:45 2014 +0200
@@ -776,13 +776,19 @@
'short': "i", 'metavar': "<types>",
'help':'coma separated list of entity types to include in view',
}),
+ ('show-etype',
+ {'type':'string', 'default':'',
+ 'metavar': '<etype>',
+ 'help':'show graph of this etype and its neighbours'
+ }),
]
def run(self, args):
from subprocess import Popen
from tempfile import NamedTemporaryFile
from logilab.common.textutils import splitstrip
- from yams import schema2dot, BASE_TYPES
+ from logilab.common.graph import GraphGenerator, DotBackend
+ from yams import schema2dot as s2d, BASE_TYPES
from cubicweb.schema import (META_RTYPES, SCHEMA_TYPES, SYSTEM_RTYPES,
WORKFLOW_TYPES, INTERNAL_TYPES)
cubes = splitstrip(args[0])
@@ -801,7 +807,22 @@
skiptypes |= set(('CWUser', 'CWGroup', 'EmailAddress'))
skiptypes |= set(self['exclude-type'].split(','))
skiptypes -= set(self['include-type'].split(','))
- schema2dot.schema2dot(schema, out, skiptypes=skiptypes)
+
+ if not self['show-etype']:
+ s2d.schema2dot(schema, out, skiptypes=skiptypes)
+ else:
+ etype = self['show-etype']
+ visitor = s2d.OneHopESchemaVisitor(schema[etype], skiptypes=skiptypes)
+ propshdlr = s2d.SchemaDotPropsHandler(visitor)
+ backend = DotBackend('schema', 'BT',
+ ratio='compress',size=None,
+ renderer='dot',
+ additionnal_param={'overlap' : 'false',
+ 'splines' : 'true',
+ 'sep' : '0.2'})
+ generator = GraphGenerator(backend)
+ generator.generate(visitor, propshdlr, out)
+
if viewer:
p = Popen((viewer, out))
p.wait()
--- a/devtools/fake.py Tue Jun 10 09:35:26 2014 +0200
+++ b/devtools/fake.py Tue Jun 10 09:49:45 2014 +0200
@@ -24,7 +24,7 @@
from cubicweb.req import RequestSessionBase
from cubicweb.cwvreg import CWRegistryStore
-from cubicweb.web.request import CubicWebRequestBase
+from cubicweb.web.request import ConnectionCubicWebRequestBase
from cubicweb.devtools import BASE_URL, BaseApptestConfiguration
@@ -53,7 +53,7 @@
return {'system': {'db-driver': 'sqlite'}}
-class FakeRequest(CubicWebRequestBase):
+class FakeRequest(ConnectionCubicWebRequestBase):
"""test implementation of an cubicweb request object"""
def __init__(self, *args, **kwargs):
@@ -88,20 +88,20 @@
return url.split('?', 1)[0]
def set_request_header(self, header, value, raw=False):
- """set an incoming HTTP header (For test purpose only)"""
+ """set an incoming HTTP header (for test purpose only)"""
if isinstance(value, basestring):
value = [value]
- if raw: #
+ if raw:
# adding encoded header is important, else page content
# will be reconverted back to unicode and apart unefficiency, this
# may cause decoding problem (e.g. when downloading a file)
self._headers_in.setRawHeaders(header, value)
- else: #
+ else:
+ self._headers_in.setHeader(header, value)
def get_response_header(self, header, default=None, raw=False):
- """return output header (For test purpose only"""
- if raw: #
+ """return output header (for test purpose only)"""
+ if raw:
return self.headers_out.getRawHeaders(header, [default])[0]
return self.headers_out.getHeader(header, default)
@@ -169,7 +169,6 @@
self.config = config or FakeConfig()
self.vreg = vreg or CWRegistryStore(self.config, initlog=False)
self.vreg.schema = schema
- self.sources = []
def internal_session(self):
return FakeSession(self)
@@ -188,9 +187,6 @@
source.after_entity_insertion(session, extid, entity)
return eid
- def eid2extid(self, source, eid, session=None):
- return self.eids[eid]
-
class FakeSource(object):
dbhelper = get_db_helper('sqlite')
--- a/devtools/httptest.py Tue Jun 10 09:35:26 2014 +0200
+++ b/devtools/httptest.py Tue Jun 10 09:49:45 2014 +0200
@@ -104,7 +104,7 @@
reactor.addSystemEventTrigger('after', 'startup', semaphore.release)
t = threading.Thread(target=safe_run, name='cubicweb_test_web_server',
- args=(self.config, self.vreg, True))
+ args=(self.config, True), kwargs={'repo': self.repo})
self.web_thread = t
t.start()
semaphore.acquire()
--- a/devtools/repotest.py Tue Jun 10 09:35:26 2014 +0200
+++ b/devtools/repotest.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -141,7 +141,7 @@
from rql import RQLHelper
-from cubicweb.devtools.fake import FakeRepo, FakeSession
+from cubicweb.devtools.fake import FakeRepo, FakeConfig, FakeSession
from cubicweb.server import set_debug, debugged
from cubicweb.server.querier import QuerierHelper
from cubicweb.server.session import Session
@@ -159,7 +159,7 @@
raise SkipTest(str(ex))
def setUp(self):
- self.repo = FakeRepo(self.schema)
+ self.repo = FakeRepo(self.schema, config=FakeConfig(apphome=self.datadir))
self.repo.system_source = mock_object(dbdriver=self.backend)
self.rqlhelper = RQLHelper(self.schema, special_relations={'eid': 'uid',
'has_text': 'fti'},
@@ -262,8 +262,8 @@
u = self.repo._build_user(self.session, self.session.user.eid)
u._groups = set(groups)
s = Session(u, self.repo)
- s._tx.cnxset = self.cnxset
- s._tx.ctx_count = 1
+ s._cnx.cnxset = self.cnxset
+ s._cnx.ctx_count = 1
# register session to ensure it gets closed
self._dumb_sessions.append(s)
return s
@@ -277,42 +277,23 @@
class BasePlannerTC(BaseQuerierTC):
- newsources = ()
def setup(self):
- clear_cache(self.repo, 'rel_type_sources')
- clear_cache(self.repo, 'rel_type_sources')
- clear_cache(self.repo, 'can_cross_relation')
- clear_cache(self.repo, 'is_multi_sources_relation')
# XXX source_defs
self.o = self.repo.querier
self.session = self.repo._sessions.values()[0]
self.cnxset = self.session.set_cnxset()
self.schema = self.o.schema
- self.sources = self.o._repo.sources
- self.system = self.sources[-1]
+ self.system = self.repo.system_source
do_monkey_patch()
self._dumb_sessions = [] # by hi-jacked parent setup
self.repo.vreg.rqlhelper.backend = 'postgres' # so FTIRANK is considered
- self.newsources = []
-
- def add_source(self, sourcecls, uri):
- source = sourcecls(self.repo, {'uri': uri, 'type': 'whatever'})
- if not source.copy_based_source:
- self.sources.append(source)
- self.newsources.append(source)
- self.repo.sources_by_uri[uri] = source
- setattr(self, uri, source)
def tearDown(self):
- for source in self.newsources:
- if not source.copy_based_source:
- self.sources.remove(source)
- del self.repo.sources_by_uri[source.uri]
undo_monkey_patch()
for session in self._dumb_sessions:
- if session._tx.cnxset is not None:
- session._tx.cnxset = None
+ if session._cnx.cnxset is not None:
+ session._cnx.cnxset = None
session.close()
def _prepare_plan(self, rql, kwargs=None):
@@ -324,7 +305,8 @@
select.solutions.sort()
else:
rqlst.solutions.sort()
- return self.o.plan_factory(rqlst, kwargs, self.session)
+ with self.session.ensure_cnx_set:
+ return self.o.plan_factory(rqlst, kwargs, self.session)
# monkey patch some methods to get predicatable results #######################
@@ -350,7 +332,6 @@
from cubicweb.server.querier import ExecutionPlan
_orig_check_permissions = ExecutionPlan._check_permissions
-_orig_init_temp_table = ExecutionPlan.init_temp_table
def _check_permissions(*args, **kwargs):
res, restricted = _orig_check_permissions(*args, **kwargs)
@@ -360,15 +341,6 @@
def _dummy_check_permissions(self, rqlst):
return {(): rqlst.solutions}, set()
-def _init_temp_table(self, table, selection, solution):
- if self.tablesinorder is None:
- tablesinorder = self.tablesinorder = {}
- else:
- tablesinorder = self.tablesinorder
- if not table in tablesinorder:
- tablesinorder[table] = 'table%s' % len(tablesinorder)
- return _orig_init_temp_table(self, table, selection, solution)
-
from cubicweb.server import rqlannotation
_orig_select_principal = rqlannotation._select_principal
@@ -381,16 +353,6 @@
return _orig_select_principal(scope, relations,
_sort=lambda rels: sorted(rels, key=sort_key))
-try:
- from cubicweb.server.msplanner import PartPlanInformation
-except ImportError:
- class PartPlanInformation(object):
- def merge_input_maps(self, *args, **kwargs):
- pass
- def _choose_term(self, sourceterms):
- pass
-_orig_merge_input_maps = PartPlanInformation.merge_input_maps
-_orig_choose_term = PartPlanInformation._choose_term
def _merge_input_maps(*args, **kwargs):
return sorted(_orig_merge_input_maps(*args, **kwargs))
@@ -410,12 +372,6 @@
return x.value
return _orig_choose_term(self, source, DumbOrderedDict2(sourceterms, get_key))
-from cubicweb.server.sources.pyrorql import PyroRQLSource
-_orig_syntax_tree_search = PyroRQLSource.syntax_tree_search
-
-def _syntax_tree_search(*args, **kwargs):
- return deepcopy(_orig_syntax_tree_search(*args, **kwargs))
-
def _ordered_iter_relations(stinfo):
return sorted(_orig_iter_relations(stinfo), key=lambda x:x.r_type)
@@ -425,17 +381,9 @@
rqlrewrite.RQLRewriter.build_variantes = _build_variantes
ExecutionPlan._check_permissions = _check_permissions
ExecutionPlan.tablesinorder = None
- ExecutionPlan.init_temp_table = _init_temp_table
- PartPlanInformation.merge_input_maps = _merge_input_maps
- PartPlanInformation._choose_term = _choose_term
- PyroRQLSource.syntax_tree_search = _syntax_tree_search
def undo_monkey_patch():
rqlrewrite.iter_relations = _orig_iter_relations
rqlrewrite.RQLRewriter.insert_snippets = _orig_insert_snippets
rqlrewrite.RQLRewriter.build_variantes = _orig_build_variantes
ExecutionPlan._check_permissions = _orig_check_permissions
- ExecutionPlan.init_temp_table = _orig_init_temp_table
- PartPlanInformation.merge_input_maps = _orig_merge_input_maps
- PartPlanInformation._choose_term = _orig_choose_term
- PyroRQLSource.syntax_tree_search = _orig_syntax_tree_search
--- a/devtools/test/unittest_dbfill.py Tue Jun 10 09:35:26 2014 +0200
+++ b/devtools/test/unittest_dbfill.py Tue Jun 10 09:49:45 2014 +0200
@@ -86,7 +86,7 @@
# Test for random index
for index in range(5):
cost_value = self.bug_valgen.generate_attribute_value({}, 'cost', index)
- self.assertTrue(cost_value in range(index+1))
+ self.assertIn(cost_value, range(index+1))
def test_date(self):
"""test date generation"""
--- a/devtools/test/unittest_fill.py Tue Jun 10 09:35:26 2014 +0200
+++ b/devtools/test/unittest_fill.py Tue Jun 10 09:49:45 2014 +0200
@@ -39,31 +39,31 @@
def test_autoextend(self):
- self.assertFalse('generate_server' in dir(ValueGenerator))
+ self.assertNotIn('generate_server', dir(ValueGenerator))
class MyValueGenerator(ValueGenerator):
def generate_server(self, index):
return attrname
- self.assertTrue('generate_server' in dir(ValueGenerator))
+ self.assertIn('generate_server', dir(ValueGenerator))
def test_bad_signature_detection(self):
- self.assertFalse('generate_server' in dir(ValueGenerator))
+ self.assertNotIn('generate_server', dir(ValueGenerator))
try:
class MyValueGenerator(ValueGenerator):
def generate_server(self):
pass
except TypeError:
- self.assertFalse('generate_server' in dir(ValueGenerator))
+ self.assertNotIn('generate_server', dir(ValueGenerator))
else:
self.fail('TypeError not raised')
def test_signature_extension(self):
- self.assertFalse('generate_server' in dir(ValueGenerator))
+ self.assertNotIn('generate_server', dir(ValueGenerator))
class MyValueGenerator(ValueGenerator):
def generate_server(self, index, foo):
pass
- self.assertTrue('generate_server' in dir(ValueGenerator))
+ self.assertIn('generate_server', dir(ValueGenerator))
if __name__ == '__main__':
--- a/devtools/test/unittest_testlib.py Tue Jun 10 09:35:26 2014 +0200
+++ b/devtools/test/unittest_testlib.py Tue Jun 10 09:49:45 2014 +0200
@@ -189,5 +189,62 @@
self.assertIn(AnAppobject, self.vreg['hip']['hop'])
self.assertNotIn(AnAppobject, self.vreg['hip']['hop'])
+ def test_login(self):
+ """Calling login should not break self.session hook control"""
+ self.hook_executed = False
+ babar = self.create_user(self.request(), 'babar')
+ self.commit()
+
+ from cubicweb.server import hook
+ from cubicweb.predicates import is_instance
+
+ class MyHook(hook.Hook):
+ __regid__ = 'whatever'
+ __select__ = hook.Hook.__select__ & is_instance('CWProperty')
+ category = 'test-hook'
+ events = ('after_add_entity',)
+ test = self
+
+ def __call__(self):
+ self.test.hook_executed = True
+
+ self.login('babar')
+ with self.temporary_appobjects(MyHook):
+ with self.session.allow_all_hooks_but('test-hook'):
+ req = self.request()
+ prop = req.create_entity('CWProperty', pkey=u'ui.language', value=u'en')
+ self.commit()
+ self.assertFalse(self.hook_executed)
+
+
+class RepoAccessTC(CubicWebTC):
+ def test_repo_connection(self):
+ acc = self.new_access('admin')
+ with acc.repo_cnx() as cnx:
+ rset = cnx.execute('Any X WHERE X is CWUser')
+ self.assertTrue(rset)
+
+ def test_client_connection(self):
+ acc = self.new_access('admin')
+ with acc.client_cnx() as cnx:
+ rset = cnx.execute('Any X WHERE X is CWUser')
+ self.assertTrue(rset)
+
+ def test_web_request(self):
+ acc = self.new_access('admin')
+ with acc.web_request(elephant='babar') as req:
+ rset = req.execute('Any X WHERE X is CWUser')
+ self.assertTrue(rset)
+ self.assertEqual('babar', req.form['elephant'])
+
+ def test_close(self):
+ acc = self.new_access('admin')
+ acc.close()
+
+ def test_admin_access(self):
+ with self.admin_access.client_cnx() as cnx:
+ self.assertEqual('admin', cnx.user.login)
+
+
if __name__ == '__main__':
unittest_main()
--- a/devtools/testlib.py Tue Jun 10 09:35:26 2014 +0200
+++ b/devtools/testlib.py Tue Jun 10 09:49:45 2014 +0200
@@ -39,12 +39,14 @@
from logilab.common.deprecation import deprecated, class_deprecated
from logilab.common.shellutils import getlogin
-from cubicweb import ValidationError, NoSelectableObject
-from cubicweb import cwconfig, dbapi, devtools, web, server
+from cubicweb import (ValidationError, NoSelectableObject, AuthenticationError,
+ ProgrammingError, BadConnectionId)
+from cubicweb import cwconfig, devtools, web, server, repoapi
from cubicweb.utils import json
from cubicweb.sobjects import notification
from cubicweb.web import Redirect, application
from cubicweb.server.hook import SendMailOp
+from cubicweb.server.session import Session
from cubicweb.devtools import SYSTEM_ENTITIES, SYSTEM_RELATIONS, VIEW_VALIDATORS
from cubicweb.devtools import fake, htmlparser, DEFAULT_EMPTY_DB_ID
from cubicweb.utils import json
@@ -155,7 +157,7 @@
class TestCaseConnectionProxy(object):
- """thin wrapper around `cubicweb.dbapi.Connection` context-manager
+ """thin wrapper around `cubicweb.repoapi.ClientConnection` context-manager
used in CubicWebTC (cf. `cubicweb.devtools.testlib.CubicWebTC.login` method)
It just proxies to the default connection context manager but
@@ -169,15 +171,106 @@
return getattr(self.cnx, attrname)
def __enter__(self):
- return self.cnx.__enter__()
+ # already open
+ return self.cnx
def __exit__(self, exctype, exc, tb):
try:
return self.cnx.__exit__(exctype, exc, tb)
finally:
- self.cnx.close()
self.testcase.restore_connection()
+# RepoAccess utility ##########################################################
+
+class RepoAccess(object):
+ """An helper to easily create object to access the repo as a specific user
+
+ Each RepoAccess have it own session.
+
+ A repo access can create three type of object:
+
+ .. automethod:: cubicweb.testlib.RepoAccess.repo_cnx
+ .. automethod:: cubicweb.testlib.RepoAccess.client_cnx
+ .. automethod:: cubicweb.testlib.RepoAccess.web_request
+
+ The RepoAccess need to be closed to destroy the associated Session.
+ TestCase usually take care of this aspect for the user.
+
+ .. automethod:: cubicweb.testlib.RepoAccess.close
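+
+ A minimal usage sketch, mirroring the tests added in this very patch
+ (``CubicWebTC.new_access`` returns such an object)::
+
+ access = self.new_access('admin')
+ with access.repo_cnx() as cnx:
+ rset = cnx.execute('Any X WHERE X is CWUser')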
+ """
+
+ def __init__(self, repo, login, requestcls):
+ self._repo = repo
+ self._login = login
+ self.requestcls = requestcls
+ self._session = self._unsafe_connect(login)
+
+ def _unsafe_connect(self, login, **kwargs):
+ """ a completely unsafe connect method for the tests """
+ # use an internal connection
+ with self._repo.internal_cnx() as cnx:
+ # try to get a user object
+ user = cnx.find('CWUser', login=login).one()
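+ # eagerly load the attributes we need (groups, properties, login)
+ # while the internal connection is still usable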
+ user.groups
+ user.properties
+ user.login
+ session = Session(user, self._repo)
+ self._repo._sessions[session.sessionid] = session
+ user._cw = user.cw_rset.req = session
+ with session.new_cnx() as cnx:
+ self._repo.hm.call_hooks('session_open', cnx)
+ # commit connection at this point in case write operation has been
+ # done during `session_open` hooks
+ cnx.commit()
+ return session
+
+ @contextmanager
+ def repo_cnx(self):
+ """Context manager returning a server side connection for the user"""
+ with self._session.new_cnx() as cnx:
+ yield cnx
+
+ @contextmanager
+ def client_cnx(self):
+ """Context manager returning a client side connection for the user"""
+ with repoapi.ClientConnection(self._session) as cnx:
+ yield cnx
+
+ @contextmanager
+ def web_request(self, url=None, headers={}, method='GET', **kwargs):
+ """Context manager returning a web request pre-linked to a client cnx
+
+ To commit and rollback use::
+
+ req.cnx.commit()
+ req.cnx.rollback()
+ """
+ req = self.requestcls(self._repo.vreg, url=url, headers=headers,
+ method=method, form=kwargs)
+ clt_cnx = repoapi.ClientConnection(self._session)
+ req.set_cnx(clt_cnx)
+ with clt_cnx:
+ yield req
+
+ def close(self):
+ """Close the session associated to the RepoAccess"""
+ if self._session is not None:
+ self._repo.close(self._session.sessionid)
+ self._session = None
+
+ @contextmanager
+ def shell(self):
+ from cubicweb.server.migractions import ServerMigrationHelper
+ with repoapi.ClientConnection(self._session) as cnx:
+ mih = ServerMigrationHelper(None, repo=self._repo, cnx=cnx,
+ interactive=False,
+ # hack so it doesn't try to load the fs schema
+ schema=1)
+ yield mih
+ cnx.commit()
+
+
+
# base class for cubicweb tests requiring a full cw environments ###############
class CubicWebTC(TestCase):
@@ -188,7 +281,7 @@
* `vreg`, the vregistry
* `schema`, self.vreg.schema
* `config`, cubicweb configuration
- * `cnx`, dbapi connection to the repository using an admin user
+ * `cnx`, repoapi connection to the repository using an admin user
* `session`, server side session associated to `cnx`
* `app`, the cubicweb publisher (for web testing)
* `repo`, the repository object
@@ -198,21 +291,200 @@
"""
appid = 'data'
configcls = devtools.ApptestConfiguration
- reset_schema = reset_vreg = False # reset schema / vreg between tests
+ requestcls = fake.FakeRequest
tags = TestCase.tags | Tags('cubicweb', 'cw_repo')
test_db_id = DEFAULT_EMPTY_DB_ID
_cnxs = set() # establised connection
- _cnx = None # current connection
+ # _cnxs stays around for connection leak detection purposes
+
+ def __init__(self, *args, **kwargs):
+ self._admin_session = None
+ self._admin_clt_cnx = None
+ self._current_session = None
+ self._current_clt_cnx = None
+ self.repo = None
+ self._open_access = set()
+ super(CubicWebTC, self).__init__(*args, **kwargs)
+
+ # repository connection handling ###########################################
+
+ def new_access(self, login):
+ """provide a new RepoAccess object for a given user
+
+ The access is automatically closed at the end of the test."""
+ access = RepoAccess(self.repo, login, self.requestcls)
+ self._open_access.add(access)
+ return access
+
+ def _close_access(self):
+ while self._open_access:
+ try:
+ self._open_access.pop().close()
+ except BadConnectionId:
+ continue # already closed
+
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
+ def set_cnx(self, cnx):
+ assert getattr(cnx, '_session', None) is not None
+ if cnx is self._admin_clt_cnx:
+ self._pop_custom_cnx()
+ else:
+ self._cnxs.add(cnx) # register the cnx to make sure it is removed
+ self._current_session = cnx._session
+ self._current_clt_cnx = cnx
- # Too much complicated stuff. the class doesn't need to bear the repo anymore
- @classmethod
- def set_cnx(cls, cnx):
- cls._cnxs.add(cnx)
- cls._cnx = cnx
+ @property
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
+ def cnx(self):
+ # XXX we want to deprecate this
+ clt_cnx = self._current_clt_cnx
+ if clt_cnx is None:
+ clt_cnx = self._admin_clt_cnx
+ return clt_cnx
+
+ def _close_cnx(self):
+ """ensure that all cnx used by a test have been closed"""
+ for cnx in list(self._cnxs):
+ if cnx._open and not cnx._session.closed:
+ cnx.rollback()
+ cnx.close()
+ self._cnxs.remove(cnx)
+
+ @property
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
+ def session(self):
+ """return current server side session"""
+ # XXX We want to use a srv_connection instead and deprecate this
+ # property
+ session = self._current_session
+ if session is None:
+ session = self._admin_session
+ # bypassing all sanity to use the same repo cnx in the session
+ #
+ # we can't call set_cnx as the Connection is not managed by the
+ # session.
+ session._Session__threaddata.cnx = self._admin_clt_cnx._cnx
+ else:
+ session._Session__threaddata.cnx = self.cnx._cnx
+ session.set_cnxset()
+ return session
+
+ @property
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
+ def websession(self):
+ return self.session
@property
- def cnx(self):
- return self.__class__._cnx
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
+ def adminsession(self):
+ """return current server side session (using default manager account)"""
+ return self._admin_session
+
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
+ def login(self, login, **kwargs):
+ """return a connection for the given login/password"""
+ __ = kwargs.pop('autoclose', True) # not used anymore
+ if login == self.admlogin:
+ # undo any previous login, if we're not used as a context manager
+ self.restore_connection()
+ return self.cnx
+ else:
+ if not kwargs:
+ kwargs['password'] = str(login)
+ clt_cnx = repoapi.connect(self.repo, login, **kwargs)
+ self.set_cnx(clt_cnx)
+ clt_cnx.__enter__()
+ return TestCaseConnectionProxy(self, clt_cnx)
+
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
+ def restore_connection(self):
+ self._pop_custom_cnx()
+
+ def _pop_custom_cnx(self):
+ if self._current_clt_cnx is not None:
+ if self._current_clt_cnx._open:
+ self._current_clt_cnx.close()
+ if not self._current_session.closed:
+ self.repo.close(self._current_session.sessionid)
+ self._current_clt_cnx = None
+ self._current_session = None
+
+ #XXX this doesn't need to be a classmethod anymore
+ def _init_repo(self):
+ """init the repository and connection to it.
+ """
+ # setup configuration for test
+ self.init_config(self.config)
+ # get or restore and working db.
+ db_handler = devtools.get_test_db_handler(self.config)
+ db_handler.build_db_cache(self.test_db_id, self.pre_setup_database)
+
+ db_handler.restore_database(self.test_db_id)
+ self.repo = db_handler.get_repo(startup=True)
+ # get an admin session (without actual login)
+ login = unicode(db_handler.config.default_admin_config['login'])
+ self.admin_access = self.new_access(login)
+ self._admin_session = self.admin_access._session
+ self._admin_clt_cnx = repoapi.ClientConnection(self._admin_session)
+ self._cnxs.add(self._admin_clt_cnx)
+ self._admin_clt_cnx.__enter__()
+ self.config.repository = lambda x=None: self.repo
+
+ # db api ##################################################################
+
+ @nocoverage
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
+ def cursor(self, req=None):
+ if req is not None:
+ return req.cnx
+ else:
+ return self.cnx
+
+ @nocoverage
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
+ def execute(self, rql, args=None, req=None):
+ """executes <rql>, builds a resultset, and returns a couple (rset, req)
+ where req is a FakeRequest
+ """
+ req = req or self.request(rql=rql)
+ return req.execute(unicode(rql), args)
+
+ @nocoverage
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
+ def commit(self):
+ try:
+ return self.cnx.commit()
+ finally:
+ self.session.set_cnxset() # ensure cnxset still set after commit
+
+ @nocoverage
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
+ def rollback(self):
+ try:
+ self.cnx.rollback()
+ except ProgrammingError:
+ pass # connection closed
+ finally:
+ self.session.set_cnxset() # ensure cnxset still set after rollback
+
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
+ def request(self, rollbackfirst=False, url=None, headers={}, **kwargs):
+ """return a web ui request"""
+ if rollbackfirst:
+ self.cnx.rollback()
+ req = self.requestcls(self.vreg, url=url, headers=headers, form=kwargs)
+ req.set_cnx(self.cnx)
+ return req
+
+ # server side db api #######################################################
+
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
+ def sexecute(self, rql, args=None):
+ self.session.set_cnxset()
+ return self.session.execute(rql, args)
+
+
+ # config management ########################################################
@classproperty
def config(cls):
@@ -237,7 +509,7 @@
Otherwise, consider to use a different :class:`ApptestConfiguration`
defined in the `configcls` class attribute"""
- source = config.sources()['system']
+ source = config.system_source_config
cls.admlogin = unicode(source['db-user'])
cls.admpassword = source['db-password']
# uncomment the line below if you want rql queries to be logged
@@ -260,32 +532,10 @@
except Exception: # not in server only configuration
pass
- #XXX this doesn't need to a be classmethod anymore
- @classmethod
- def _init_repo(cls):
- """init the repository and connection to it.
- """
- # setup configuration for test
- cls.init_config(cls.config)
- # get or restore and working db.
- db_handler = devtools.get_test_db_handler(cls.config)
- db_handler.build_db_cache(cls.test_db_id, cls.pre_setup_database)
+ @property
+ def vreg(self):
+ return self.repo.vreg
- cls.repo, cnx = db_handler.get_repo_and_cnx(cls.test_db_id)
- # no direct assignation to cls.cnx anymore.
- # cnx is now an instance property that use a class protected attributes.
- cls.set_cnx(cnx)
- cls.vreg = cls.repo.vreg
- cls.websession = dbapi.DBAPISession(cnx, cls.admlogin)
- cls._orig_cnx = (cnx, cls.websession)
- cls.config.repository = lambda x=None: cls.repo
-
- def _close_cnx(self):
- for cnx in list(self._cnxs):
- if not cnx._closed:
- cnx.rollback()
- cnx.close()
- self._cnxs.remove(cnx)
# global resources accessors ###############################################
@@ -294,18 +544,7 @@
"""return the application schema"""
return self.vreg.schema
- @property
- def session(self):
- """return current server side session (using default manager account)"""
- session = self.repo._sessions[self.cnx.sessionid]
- session.set_cnxset()
- return session
-
- @property
- def adminsession(self):
- """return current server side session (using default manager account)"""
- return self.repo._sessions[self._orig_cnx[0].sessionid]
-
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
def shell(self):
"""return a shell session object"""
from cubicweb.server.migractions import ServerMigrationHelper
@@ -338,13 +577,22 @@
except Exception as ex:
self.__class__._repo_init_failed = ex
raise
+ self.addCleanup(self._close_access)
resume_tracing()
self.setup_database()
- self.commit()
+ self._admin_clt_cnx.commit()
MAILBOX[:] = [] # reset mailbox
def tearDown(self):
# XXX hack until logilab.common.testlib is fixed
+ if self._admin_clt_cnx is not None:
+ if self._admin_clt_cnx._open:
+ self._admin_clt_cnx.close()
+ self._admin_clt_cnx = None
+ if self._admin_session is not None:
+ if not self._admin_session.closed:
+ self.repo.close(self._admin_session.sessionid)
+ self._admin_session = None
while self._cleanups:
cleanup, args, kwargs = self._cleanups.pop(-1)
cleanup(*args, **kwargs)
@@ -370,11 +618,11 @@
# user / session management ###############################################
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
def user(self, req=None):
"""return the application schema"""
if req is None:
- req = self.request()
- return self.cnx.user(req)
+ return self.request().user
else:
return req.user
@@ -392,7 +640,7 @@
groups = login
login = req
assert not isinstance(self, type)
- req = self._orig_cnx[0].request()
+ req = self._admin_clt_cnx
if password is None:
password = login.encode('utf8')
user = req.create_entity('CWUser', login=unicode(login),
@@ -411,65 +659,6 @@
req.cnx.commit()
return user
- def login(self, login, **kwargs):
- """return a connection for the given login/password"""
- if login == self.admlogin:
- self.restore_connection()
- # definitly don't want autoclose when used as a context manager
- return self.cnx
- autoclose = kwargs.pop('autoclose', True)
- if not kwargs:
- kwargs['password'] = str(login)
- self.set_cnx(dbapi._repo_connect(self.repo, unicode(login), **kwargs))
- self.websession = dbapi.DBAPISession(self.cnx)
- if login == self.vreg.config.anonymous_user()[0]:
- self.cnx.anonymous_connection = True
- if autoclose:
- return TestCaseConnectionProxy(self, self.cnx)
- return self.cnx
-
- def restore_connection(self):
- if not self.cnx is self._orig_cnx[0]:
- if not self.cnx._closed:
- self.cnx.close()
- cnx, self.websession = self._orig_cnx
- self.set_cnx(cnx)
-
- # db api ##################################################################
-
- @nocoverage
- def cursor(self, req=None):
- return self.cnx.cursor(req or self.request())
-
- @nocoverage
- def execute(self, rql, args=None, req=None):
- """executes <rql>, builds a resultset, and returns a couple (rset, req)
- where req is a FakeRequest
- """
- req = req or self.request(rql=rql)
- return req.execute(unicode(rql), args)
-
- @nocoverage
- def commit(self):
- try:
- return self.cnx.commit()
- finally:
- self.session.set_cnxset() # ensure cnxset still set after commit
-
- @nocoverage
- def rollback(self):
- try:
- self.cnx.rollback()
- except dbapi.ProgrammingError:
- pass # connection closed
- finally:
- self.session.set_cnxset() # ensure cnxset still set after commit
-
- # server side db api #######################################################
-
- def sexecute(self, rql, args=None):
- self.session.set_cnxset()
- return self.session.execute(rql, args)
# other utilities #########################################################
@@ -507,14 +696,13 @@
.. sourcecode:: python
- rdef = self.schema['CWUser'].rdef('login')
with self.temporary_permissions(CWUser={'read': ()}):
...
- Usually the former will be prefered to override permissions on a
+ Usually the former will be preferred to override permissions on a
relation definition, while the latter is well suited for entity types.
- The allowed keys in the permission dictionary depends on the schema type
+ The allowed keys in the permission dictionary depend on the schema type
(entity type / relation definition). Resulting permissions will be
similar to `orig_permissions.update(partial_perms)`.
"""
@@ -647,21 +835,12 @@
@cached
def app(self):
"""return a cubicweb publisher"""
- publisher = application.CubicWebPublisher(self.config, vreg=self.vreg)
+ publisher = application.CubicWebPublisher(self.repo, self.config)
def raise_error_handler(*args, **kwargs):
raise
publisher.error_handler = raise_error_handler
return publisher
- requestcls = fake.FakeRequest
- def request(self, rollbackfirst=False, url=None, headers={}, **kwargs):
- """return a web ui request"""
- req = self.requestcls(self.vreg, url=url, headers=headers, form=kwargs)
- if rollbackfirst:
- self.websession.cnx.rollback()
- req.set_session(self.websession)
- return req
-
def remote_call(self, fname, *args):
"""remote json call simulation"""
dump = json.dumps
@@ -779,33 +958,29 @@
def init_authentication(self, authmode, anonuser=None):
self.set_auth_mode(authmode, anonuser)
- req = self.request(url='login')
- origsession = req.session
- req.session = req.cnx = None
- del req.execute # get back to class implementation
+ req = self.requestcls(self.vreg, url='login')
sh = self.app.session_handler
authm = sh.session_manager.authmanager
authm.anoninfo = self.vreg.config.anonymous_user()
authm.anoninfo = authm.anoninfo[0], {'password': authm.anoninfo[1]}
# not properly cleaned between tests
self.open_sessions = sh.session_manager._sessions = {}
- return req, origsession
+ return req, self.session
def assertAuthSuccess(self, req, origsession, nbsessions=1):
sh = self.app.session_handler
- self.app.connect(req)
- session = req.session
+ session = self.app.get_session(req)
+ clt_cnx = repoapi.ClientConnection(session)
+ req.set_cnx(clt_cnx)
self.assertEqual(len(self.open_sessions), nbsessions, self.open_sessions)
self.assertEqual(session.login, origsession.login)
self.assertEqual(session.anonymous_session, False)
def assertAuthFailure(self, req, nbsessions=0):
- self.app.connect(req)
- self.assertIsInstance(req.session, dbapi.DBAPISession)
- self.assertEqual(req.session.cnx, None)
- self.assertIsInstance(req.cnx, (dbapi._NeedAuthAccessMock, NoneType))
- # + 1 since we should still have session without connection set
- self.assertEqual(len(self.open_sessions), nbsessions + 1)
+ with self.assertRaises(AuthenticationError):
+ self.app.get_session(req)
+ # +0 since we do not track the opened session
+ self.assertEqual(len(self.open_sessions), nbsessions)
clear_cache(req, 'get_authorization')
# content validation #######################################################
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc/3.19.rst Tue Jun 10 09:49:45 2014 +0200
@@ -0,0 +1,177 @@
+What's new in CubicWeb 3.19?
+============================
+
+New functionalities
+--------------------
+
+* implement Cross-Origin Resource Sharing (CORS)
+ (see `#2491768 <http://www.cubicweb.org/2491768>`_)
+
+* ``system_source.create_eid`` can now return a range of IDs, to reduce the
+ overhead of batch entity creation
+
+Behaviour Changes
+-----------------
+
+* The anonymous property of Session and Connection is now computed from the
+ related user login. If it matches the ``anonymous-user`` in the config, the
+ connection is anonymous. Beware that the ``anonymous-user`` config is web
+ specific. Therefore, no session may be anonymous in a repository-only setup.
+
+
+New Repository Access API
+-------------------------
+
+Connection replaces Session
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A new explicit Connection object replaces Session as the main repository entry
+point. Connection holds all the necessary methods to be used server-side
+(``execute``, ``commit``, ``rollback``, ``call_service``, ``entity_from_eid``,
+etc.). One obtains a new Connection object using ``session.new_cnx()``.
+Connection objects need to have an explicit begin and end. Use them as a context
+manager to never miss an end::
+
+ with session.new_cnx() as cnx:
+ cnx.execute('INSERT Elephant E, E name "Babar"')
+ cnx.commit()
+ cnx.execute('INSERT Elephant E, E name "Celeste"')
+ cnx.commit()
+ # Once you get out of the "with" clause, the connection is closed.
+
+Using the same Connection object in multiple threads will give you access to the
+same Transaction. However, Connection objects are not thread-safe; using one
+from several threads is at your own risk.
+
+``repository.internal_session`` is deprecated in favor of
+``repository.internal_cnx``. Note that internal connections are now `safe` by default,
+i.e. the integrity hooks are enabled.
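+
+For instance, a minimal sketch (assuming ``repo`` is an already created
+Repository object)::
+
+    # 'repo' is assumed to be an existing Repository object
+    with repo.internal_cnx() as cnx:
+        cnx.execute('INSERT Elephant E, E name "Cornelius"')
+        cnx.commit()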
+
+Backward compatibility is preserved on Session.
+
+
+dbapi vs repoapi
+~~~~~~~~~~~~~~~~
+
+A new API has been introduced to replace the dbapi. It is called `repoapi`.
+
+There are three relevant functions for now:
+
+* ``repoapi.get_repository`` returns a Repository object either from a
+ URI when used as ``repoapi.get_repository(uri)`` or from a config
+ when used as ``repoapi.get_repository(config=config)``.
+
+* ``repoapi.connect(repo, login, **credentials)`` returns a ClientConnection
+ associated with the user identified by the credentials. The
+ ClientConnection is associated with its own Session that is closed
+ when the ClientConnection is closed. A ClientConnection is a
+ Connection-like object to be used client side.
+
+* ``repoapi.anonymous_cnx(repo)`` returns a ClientConnection associated
+ with the anonymous user if described in the config.
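+
+A minimal sketch combining these functions (``config`` is assumed to be an
+existing instance configuration; the credentials are illustrative)::
+
+    from cubicweb import repoapi
+
+    # 'config' is assumed to be a CubicWeb instance configuration object
+    repo = repoapi.get_repository(config=config)
+    with repoapi.connect(repo, login='admin', password='admin') as clt_cnx:
+        rset = clt_cnx.execute('Any X WHERE X is CWUser')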
+
+
+repoapi.ClientConnection replaces dbapi.Connection and company
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+On the client/web side, the Request now uses a ``repoapi.ClientConnection``
+instead of a ``dbapi.Connection``. The ``ClientConnection`` has multiple
+backward-compatible methods to make it look like a ``dbapi.Cursor`` and
+``dbapi.Connection``.
+
+Sessions used on the web side are now the same as the ones used on the server
+side. Some backward-compatibility methods have been installed on the server-side
+Session to ease the transition.
+
+The authentication stack has been altered to use the ``repoapi`` instead of
+the ``dbapi``. Cubes adding new elements to this stack are likely to break.
+
+
+New API in tests
+~~~~~~~~~~~~~~~~
+
+All current methods and attributes used to access the repo on ``CubicWebTC`` are
+deprecated. You may now use a ``RepoAccess`` object. A ``RepoAccess`` object is
+linked to a new ``Session`` for a specified user. It is able to create
+``Connection``, ``ClientConnection`` and web side requests linked to this
+session::
+
+ access = self.new_access('babar') # create a new RepoAccess for user babar
+ with access.repo_cnx() as cnx:
+ # some work with server side cnx
+ cnx.execute(...)
+ cnx.commit()
+ cnx.execute(...)
+ cnx.commit()
+
+ with access.client_cnx() as cnx:
+ # some work with client side cnx
+ cnx.execute(...)
+ cnx.commit()
+
+ with access.web_request(elephant='babar') as req:
+ # some work with the web request
+ elephant_name = req.form['elephant']
+ req.execute(...)
+ req.cnx.commit()
+
+By default ``testcase.admin_access`` contains a ``RepoAccess`` object for the
+default admin session.
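+
+For instance, in a test method::
+
+    with self.admin_access.repo_cnx() as cnx:
+        self.assertTrue(cnx.execute('Any X WHERE X is CWUser'))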
+
+
+API changes
+-----------
+
+* ``RepositorySessionManager.postlogin`` is now called with two arguments,
+ request and session, and this now happens before the session is linked to
+ the request.
+
+* ``SessionManager`` and ``AuthenticationManager`` now take a repo object at
+ initialization time instead of a vreg.
+
+* The ``async`` argument of ``_cw.call_service`` has been dropped. All calls are
+ now synchronous. The zmq notification bus looks like a good replacement for
+ most async use cases.
+
+* ``repo.stats()`` is now deprecated. The same information is available through
+ a service (``_cw.call_service('repo_stats')``).
+
+* ``repo.gc_stats()`` is now deprecated. The same information is available through
+ a service (``_cw.call_service('repo_gc_stats')``).
+
+* ``repo.register_user()`` is now deprecated. The functionality is now
+ available through a service (``_cw.call_service('register_user')``).
+
+* ``request.set_session`` no longer takes an optional ``user`` argument.
+
+* CubicWebTC does not have repo and cnx as class attributes anymore. They are
+ standard instance attributes. The ``set_cnx`` and ``_init_repo`` class
+ methods have become instance methods.
+
+* ``set_cnxset`` and ``free_cnxset`` are deprecated. Connection sets (cnxsets)
+ are now automatically managed.
+
+* The implementation of cascading deletion when deleting `composite`
+ entities has changed. This comes with a semantic change: merely deleting
+ a composite relation no longer entails the deletion of the component side
+ of the relation.
+
+* ``_cw.user_callback`` and ``_cw.user_rql_callback`` are deprecated. Users
+ are encouraged to write an actual controller (e.g. using ``ajaxfunc``)
+ instead of storing a closure in the session data (see the sketch after
+ this list).
+
+* A new ``entity.cw_linkable_rql`` method provides the RQL to fetch all entities
+ that are already or may be related to the current entity using the given
+ relation.
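+
+As an illustration of the ``ajaxfunc``-based replacement for user callbacks
+mentioned above, a minimal sketch (the function name and output type are
+illustrative)::
+
+    from cubicweb.web.views.ajaxcontroller import ajaxfunc
+
+    # 'mark_as_done' is an illustrative name; the web publisher handles
+    # commit / rollback automatically
+    @ajaxfunc(output_type='json')
+    def mark_as_done(self, eid):
+        entity = self._cw.entity_from_eid(eid)
+        entity.cw_adapt_to('IWorkflowable').fire_transition('done')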
+
+
+Deprecated Code Drops
+----------------------
+
+* The ``session.hijack_user`` mechanism has been dropped.
+
+* ``EtypeRestrictionComponent`` has been removed; its functionality was
+ replaced by facets a while ago.
+
+* The old multi-source support has been removed. Only copy-based sources
+ remain, such as datafeed or ldapfeed.
+
--- a/doc/book/en/admin/instance-config.rst Tue Jun 10 09:35:26 2014 +0200
+++ b/doc/book/en/admin/instance-config.rst Tue Jun 10 09:49:45 2014 +0200
@@ -189,3 +189,38 @@
:`navigation.combobox-limit`:
number of entities unrelated to show up on the drop-down lists of
the sight on an editing entity view
+
+Cross-Origin Resource Sharing
+-----------------------------
+
+CubicWeb provides some support for the CORS_ protocol. For now, the
+provided implementation only deals with access to a CubicWeb instance
+as a whole. Support for a finer granularity may be considered in the
+future.
+
+Specifics of the provided implementation:
+
+- ``Access-Control-Allow-Credentials`` is always true
+- ``Access-Control-Allow-Origin`` header in response will never be
+ ``*``
+- ``Access-Control-Expose-Headers`` can be configured globally (see below)
+- ``Access-Control-Max-Age`` can be configured globally (see below)
+- ``Access-Control-Allow-Methods`` can be configured globally (see below)
+- ``Access-Control-Allow-Headers`` can be configured globally (see below)
+
+
+A few parameters can be set to configure the CORS_ capabilities of CubicWeb.
+
+.. _CORS: http://www.w3.org/TR/cors/
+
+:`access-control-allow-origin`:
+ comma-separated list of allowed origin domains or "*" for any domain
+:`access-control-allow-methods`:
+ comma-separated list of allowed HTTP methods
+:`access-control-max-age`:
+ maximum age of cross-origin resource sharing (in seconds)
+:`access-control-allow-headers`:
+ comma-separated list of allowed HTTP custom headers (used in preflight requests)
+:`access-control-expose-headers`:
+ comma-separated list of HTTP custom headers exposed to the client (used in simple requests)
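+
+For instance, in the instance's ``all-in-one.conf`` (values are illustrative)::
+
+    # illustrative values
+    access-control-allow-origin=http://example.org, http://example.com
+    access-control-allow-methods=GET, POST
+    access-control-max-age=3600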
+
--- a/doc/book/en/admin/ldap.rst Tue Jun 10 09:35:26 2014 +0200
+++ b/doc/book/en/admin/ldap.rst Tue Jun 10 09:49:45 2014 +0200
@@ -85,7 +85,9 @@
If the LDAP server accepts anonymous binds, then it is possible to
leave data-cnx-dn and data-cnx-password empty. This is, however, quite
-unlikely in practice.
+unlikely in practice. Beware that the LDAP server might hide attributes
+such as "userPassword" while the rest of the attributes remain visible
+through an anonymous binding.
LDAP schema mapping options:
--- a/doc/book/en/devrepo/profiling.rst Tue Jun 10 09:35:26 2014 +0200
+++ b/doc/book/en/devrepo/profiling.rst Tue Jun 10 09:49:45 2014 +0200
@@ -10,7 +10,7 @@
queries. In your ``all-in-one.conf`` file, set the **query-log-file** option::
# web application query log file
- query-log-file=~/myapp-rql.log
+ query-log-file=/home/user/myapp-rql.log
Then restart your application, reload your page and stop your application.
The file ``myapp-rql.log`` now contains the list of RQL queries that were
@@ -28,7 +28,7 @@
.. sourcecode:: sh
- $ cubicweb-ctl exlog ~/myapp-rql.log
+ $ cubicweb-ctl exlog /home/user/myapp-rql.log
0.07 50 Any A WHERE X eid %(x)s, X firstname A {}
0.05 50 Any A WHERE X eid %(x)s, X lastname A {}
0.01 1 Any X,AA ORDERBY AA DESC WHERE E eid %(x)s, E employees X, X modification_date AA {}
--- a/doc/book/en/devrepo/repo/sessions.rst Tue Jun 10 09:35:26 2014 +0200
+++ b/doc/book/en/devrepo/repo/sessions.rst Tue Jun 10 09:49:45 2014 +0200
@@ -3,50 +3,47 @@
Sessions
========
-Sessions are object carrying the `.execute` method to query the data
-sources.
+Sessions are objects linked to an authenticated user. The `Session.new_cnx`
+method returns a new Connection linked to that session.
+
+Connections
+===========
-Kinds of sessions
------------------
+Connections provide the `.execute` method to query the data sources.
-There are two kinds of sessions.
+Kinds of connections
+--------------------
-* `normal sessions` are the most common: they are related to users and
+There are two kinds of connections.
+
+* `normal connections` are the most common: they are related to users and
carry security checks coming with user credentials
-* `internal sessions` have all the powers; they are also used in only a
+* `internal connections` have all the powers; they are also used in only a
few situations where you don't already have an adequate session at
hand, like: user authentication, data synchronisation in
multi-source contexts
-.. note::
- Do not confuse the session type with their connection mode, for
- instance : `in memory` or `pyro`.
-
-Normal sessions are typically named `_cw` in most appobjects or
+Normal connections are typically named `_cw` in most appobjects or
sometimes just `session`.
-Internal sessions are available from the `Repository` object and are
+Internal connections are available from the `Repository` object and are
to be used like this:
.. sourcecode:: python
- session = self.repo.internal_session()
- try:
- do_stuff_with(session)
- finally:
- session.close()
+ with self.repo.internal_cnx() as cnx:
+ do_stuff_with(cnx)
+ cnx.commit()
-.. warning::
- Do not forget to close such a session after use for a session leak
- will quickly lead to an application crash.
+Connections should always be used as context managers, to avoid leaks.
Authentication and management of sessions
-----------------------------------------
The authentication process is a ballet involving a few dancers:
-* through its `connect` method the top-level application object (the
+* through its `get_session` method the top-level application object (the
`CubicWebPublisher`) will open a session whenever a web request
comes in; it asks the `session manager` to open a session (giving
the web request object as context) using `open_session`
@@ -88,7 +85,7 @@
------------------------------
Sometimes CubicWeb's out-of-the-box authentication schemes (cookie and
-http) are not sufficient. Nowadays there is a plethore of such schemes
+http) are not sufficient. Nowadays there is a plethora of such schemes
and the framework cannot provide them all, but as the sequence above
shows, it is extensible.
@@ -154,7 +151,7 @@
.. sourcecode:: python
- class XFooUserRetriever(authentication.LoginPasswordRetreiver):
+ class XFooUserRetriever(authentication.LoginPasswordRetriever):
""" authenticate by the x-foo-user http header
or just do normal login/password authentication
"""
@@ -200,7 +197,8 @@
return 1
return 0
-Full API Session
-~~~~~~~~~~~~~~~~~~~~~~~~~~
+Full Session and Connection API
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: cubicweb.server.session.Session
+.. autoclass:: cubicweb.server.session.Connection
--- a/doc/book/en/devrepo/testing.rst Tue Jun 10 09:35:26 2014 +0200
+++ b/doc/book/en/devrepo/testing.rst Tue Jun 10 09:49:45 2014 +0200
@@ -466,29 +466,26 @@
First, remember to think that some code run on a client side, some
other on the repository side. More precisely:
-* client side: web interface, raw db-api connection (cubicweb-ctl shell for
+* client side: web interface, raw repoapi connection (cubicweb-ctl shell for
instance);
* repository side: RQL query execution, that may trigger hooks and operation.
-The client interact with the repository through a db-api connection.
+The client interacts with the repository through a repoapi connection.
-A db-api connection is tied to a session in the repository. The connection and
+A repoapi connection is tied to a session in the repository. The connection and
request objects are unaccessible from repository code / the session object is
-unaccessible from client code (theorically at least).
+unaccessible from client code (theoretically at least).
-The :mod:`cubicweb.dbapi` module provides a base request class. The web interface
-provides an extended request class.
-
-
-The `request` object provides access to all cubicweb resources, eg:
+The web interface provides a request class. That `request` object provides
+access to all cubicweb resources, e.g.:
* the registry (which itself provides access to the schema and the
configuration);
-* an underlying db-api connection (when using req.execute, you actually call the
- db-api);
+* an underlying repoapi connection (when using req.execute, you actually call the
+ repoapi);
* other specific resources depending on the client type (url generation according
to base url, form parameters, etc.).
@@ -510,37 +507,32 @@
The `_cw` attribute
```````````````````
The `_cw` attribute available on every application object provides access to all
-cubicweb resources, eg:
+cubicweb resources, i.e.:
-For code running on the client side (eg web interface view), `_cw` is a request
-instance.
+- For code running on the client side (e.g. web interface views), `_cw` is a
+ request instance.
-For code running on the repository side (hooks and operation), `_cw` is a session
-instance.
+- For code running on the repository side (hooks and operations), `_cw` is a
+ Connection or Session instance.
-Beware some views may be called with a session (eg notifications) or with a
-DB-API request. In the later case, see :meth:`use_web_compatible_requests` on
-:class:`Connection` instances.
+Beware that some views may be called with a session (e.g. notifications) or
+with a request.
Request, session and transaction
````````````````````````````````
-In the web interface, an HTTP request is handle by a single request, which will
-be thrown way once the response send.
+In the web interface, an HTTP request is handled by a single request object,
+which is thrown away once the response is sent.
-The web publisher handle the transaction:
+The web publisher handles the transaction:
* commit / rollback is done automatically
* you should not commit / rollback explicitly
-When using a raw db-api, you're on your own regarding transaction.
-
-On the other hand, db-api connection and session live from a user login to its logout.
-
-Because session lives for a long time, and database connections is a limited
-resource, we can't bound a session to its own database connection for all its
+Because a session lives for a long time, and database connections are a limited
+resource, we can't bind a session to its own database connection for all its
lifetime. The repository handles a pool of connections (4 by default), and it's
responsible to attribute them as needed.
@@ -550,13 +542,13 @@
2. the repository attributes a database connection to the session
-3. the repository's querier execute the query
+3. the repository's querier executes the query
4. this query may trigger hooks. Hooks and operation may execute some rql queries
through `_cw.execute`. Those queries go directly to the querier, hence don't
touch the database connection, they use the one attributed in 2.
-5. the repository's get the result of the query in 1. If it was a RQL read query,
+5. the repository gets the result of the query in 1. If it was a RQL read query,
the database connection is released. If it was a write query, the connection
is then tied to the session until the transaction is commited or rollbacked.
@@ -567,11 +559,11 @@
* when using a request, or code executed in hooks, this database connection
handling is totally transparent
-* however, take care when writing test: you are usually faking / testing both the
- server and the client side, so you have to decide when to use self.request() /
- self.session. Ask yourself "where the code I want to test will be running,
- client or repository side ?". The response is usually : use a request :)
- However, if you really need using a session:
+* however, take care when writing tests: you are usually faking / testing both the
+ server and the client side, so you have to decide when to use RepoAccess.client_cnx /
+ RepoAccess.repo_cnx. Ask yourself "where will the code I want to test be running,
+ client or repository side?". The answer is usually: use a client connection :)
+ However, if you really need to use a server-side object:
- commit / rollback will free the database connection (unless explicitly told
not to do so).
--- a/doc/book/en/devweb/js.rst Tue Jun 10 09:35:26 2014 +0200
+++ b/doc/book/en/devweb/js.rst Tue Jun 10 09:49:45 2014 +0200
@@ -317,67 +317,6 @@
}
-python/ajax dynamic callbacks
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-CubicWeb provides a way to dynamically register a function and make it
-callable from the javascript side. The typical use case for this is a
-situation where you have everything at hand to implement an action
-(whether it be performing a RQL query or executing a few python
-statements) that you'd like to defer to a user click in the web
-interface. In other words, generate an HTML ``<a href=...`` link that
-would execute your few lines of code.
-
-The trick is to create a python function and store this function in
-the user's session data. You will then be able to access it later.
-While this might sound hard to implement, it's actually quite easy
-thanks to the ``_cw.user_callback()``. This method takes a function,
-registers it and returns a javascript instruction suitable for
-``href`` or ``onclick`` usage. The call is then performed
-asynchronously.
-
-Here's a simplified example taken from the vcreview_ cube that will
-generate a link to change an entity state directly without the
-standard intermediate *comment / validate* step:
-
-.. sourcecode:: python
-
- def entity_call(self, entity):
- # [...]
- def change_state(req, eid):
- entity = req.entity_from_eid(eid)
- entity.cw_adapt_to('IWorkflowable').fire_transition('done')
- url = self._cw.user_callback(change_state, (entity.eid,))
- self.w(tags.input(type='button', onclick=url, value=self._cw._('mark as done')))
-
-
-The ``change_state`` callback function is registered with
-``self._cw.user_callback()`` which returns the ``url`` value directly
-used for the ``onclick`` attribute of the button. On the javascript
-side, the ``userCallback()`` function is used but you most probably
-won't have to bother with it.
-
-Of course, when dealing with session data, the question of session
-cleaning pops up immediately. If you use ``user_callback()``, the
-registered function will be deleted automatically at some point
-as any other session data. If you want your function to be deleted once
-the web page is unloaded or when the user has clicked once on your link, then
-``_cw.register_onetime_callback()`` is what you need. It behaves as
-``_cw.user_callback()`` but stores the function in page data instead
-of global session data.
-
-
-.. Warning::
-
- Be careful when registering functions with closures, keep in mind that
- enclosed data will be kept in memory until the session gets cleared. Also,
- if you keep entities or any object referecing the current ``req`` object, you
- might have problems reusing them later because the underlying session
- might have been closed at the time the callback gets executed.
-
-
-.. _vcreview: http://www.cubicweb.org/project/cubicweb-vcreview
-
Javascript library: overview
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
--- a/doc/book/en/devweb/request.rst Tue Jun 10 09:35:26 2014 +0200
+++ b/doc/book/en/devweb/request.rst Tue Jun 10 09:49:45 2014 +0200
@@ -20,8 +20,6 @@
* `ie_browser`: tells if the browser belong to the Internet Explorer
family
- * `xhtml_browser`: tells if the browser is able to properly handle
- XHTML (at the HTTP content_type level)
* `User and identification`:
@@ -30,7 +28,8 @@
* `Session data handling`
- * `session.data` is the dictionnary of the session data; it can be manipulated like an ordinary Python dictionnary
+ * `session.data` is the dictionary of the session data; it can be
+ manipulated like an ordinary Python dictionary
* `Edition` (utilities for edition control):
@@ -104,8 +103,7 @@
* `get_header(header)`, returns the value associated to an arbitrary header
of the HTTP request
* `set_header(header, value)`, adds an arbitrary header in the response
- * `cursor()` returns a RQL cursor on the session
- * `execute(*args, **kwargs)`, shortcut to ``.cursor().execute()``
+ * `execute(*args, **kwargs)`, executes an RQL query and returns the result set
* `property_value(key)`, properties management (`CWProperty`)
* dictionary `data` to store data to share informations between components
*while a request is executed*
@@ -120,14 +118,14 @@
```
The elements we gave in overview for above are built in three layers,
-from ``cubicweb.req.RequestSessionBase``, ``cubicweb.dbapi.DBAPIRequest`` and
-``cubicweb.web.CubicWebRequestBase``.
+from ``cubicweb.req.RequestSessionBase``, ``cubicweb.repoapi.ClientConnection`` and
+``cubicweb.web.ConnectionCubicWebRequestBase``.
.. autoclass:: cubicweb.req.RequestSessionBase
:members:
-.. autoclass:: cubicweb.dbapi.DBAPIRequest
+.. autoclass:: cubicweb.repoapi.ClientConnection
:members:
-.. autoclass:: cubicweb.web.request.CubicWebRequestBase
+.. autoclass:: cubicweb.web.request.ConnectionCubicWebRequestBase
:members:
--- a/doc/book/en/devweb/rtags.rst Tue Jun 10 09:35:26 2014 +0200
+++ b/doc/book/en/devweb/rtags.rst Tue Jun 10 09:49:45 2014 +0200
@@ -17,7 +17,7 @@
The part of uicfg that deals with primary views is in the
:ref:`primary_view_configuration` chapter.
-.. automodule:: cubicweb.web.uicfg
+.. automodule:: cubicweb.web.views.uicfg
The uihelper module
Binary file doc/book/en/images/request_session.png has changed
--- a/doc/book/en/images/request_session.svg Tue Jun 10 09:35:26 2014 +0200
+++ b/doc/book/en/images/request_session.svg Tue Jun 10 09:49:45 2014 +0200
@@ -13,7 +13,7 @@
height="12.382812"
id="svg2"
version="1.1"
- inkscape:version="0.48.1 r9760"
+ inkscape:version="0.48.3.1 r9886"
sodipodi:docname="request_session.svg">
<defs
id="defs4">
@@ -48,10 +48,10 @@
fit-margin-left="0"
fit-margin-right="0"
fit-margin-bottom="0"
- inkscape:window-width="766"
- inkscape:window-height="1151"
- inkscape:window-x="1152"
- inkscape:window-y="24"
+ inkscape:window-width="958"
+ inkscape:window-height="1160"
+ inkscape:window-x="0"
+ inkscape:window-y="38"
inkscape:window-maximized="0"
inkscape:snap-global="true" />
<metadata
@@ -62,7 +62,7 @@
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title></dc:title>
+ <dc:title />
</cc:Work>
</rdf:RDF>
</metadata>
@@ -113,7 +113,7 @@
sodipodi:role="line"
id="tspan3763"
x="262.63968"
- y="470.51431">DB API</tspan></text>
+ y="470.51431">REPOAPI</tspan></text>
<text
xml:space="preserve"
style="font-size:16px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
--- a/doc/tools/pyjsrest.py Tue Jun 10 09:35:26 2014 +0200
+++ b/doc/tools/pyjsrest.py Tue Jun 10 09:49:45 2014 +0200
@@ -142,7 +142,6 @@
FILES_TO_IGNORE = set([
'jquery.js',
'jquery.treeview.js',
- 'jquery.json.js',
'jquery.tablesorter.js',
'jquery.timePicker.js',
'jquery.flot.js',
--- a/entities/authobjs.py Tue Jun 10 09:35:26 2014 +0200
+++ b/entities/authobjs.py Tue Jun 10 09:49:45 2014 +0200
@@ -166,6 +166,17 @@
dc_long_title = name
+ def __call__(self, *args, **kwargs):
+ """ugly hack for compatibility betweeb dbapi and repo api
+
+ In the dbapi, Connection and Session have a ``user`` method to
+ generated a user for a request In the repo api, Connection and Session
+ have a user attribute inherited from SessionRequestBase prototype. This
+ ugly hack allows to not break user of the user method.
+
+ XXX Deprecate me ASAP"""
+ return self
+
from logilab.common.deprecation import class_renamed
EUser = class_renamed('EUser', CWUser)
EGroup = class_renamed('EGroup', CWGroup)
--- a/entities/lib.py Tue Jun 10 09:35:26 2014 +0200
+++ b/entities/lib.py Tue Jun 10 09:49:45 2014 +0200
@@ -18,6 +18,7 @@
"""entity classes for optional library entities"""
__docformat__ = "restructuredtext en"
+from warnings import warn
from urlparse import urlsplit, urlunsplit
from datetime import datetime
@@ -130,6 +131,13 @@
__regid__ = 'CWCache'
fetch_attrs, cw_fetch_order = fetch_config(['name'])
+ def __init__(self, *args, **kwargs):
+ warn('[3.19] CWCache entity type is going away soon. '
+ 'Other caching mechanisms can be used more reliably '
+ 'to the same effect.',
+ DeprecationWarning)
+ super(CWCache, self).__init__(*args, **kwargs)
+
def touch(self):
self._cw.execute('SET X timestamp %(t)s WHERE X eid %(x)s',
{'t': datetime.now(), 'x': self.eid})
--- a/entities/test/data/schema.py Tue Jun 10 09:35:26 2014 +0200
+++ b/entities/test/data/schema.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -15,11 +15,9 @@
#
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""entities tests schema
+"""entities tests schema"""
-"""
-
-from yams.buildobjs import EntityType, String
+from yams.buildobjs import EntityType, String, RichString
from cubicweb.schema import make_workflowable
class Company(EntityType):
--- a/entities/test/unittest_wfobjs.py Tue Jun 10 09:35:26 2014 +0200
+++ b/entities/test/unittest_wfobjs.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -19,12 +19,11 @@
from cubicweb import ValidationError
from cubicweb.devtools.testlib import CubicWebTC
-
-def add_wf(self, etype, name=None, default=False):
+def add_wf(shell, etype, name=None, default=False):
if name is None:
name = etype
- return self.shell().add_workflow(name, etype, default=default,
- ensure_workflowable=False)
+ return shell.add_workflow(name, etype, default=default,
+ ensure_workflowable=False)
def parse_hist(wfhist):
return [(ti.previous_state.name, ti.new_state.name,
@@ -35,101 +34,104 @@
class WorkflowBuildingTC(CubicWebTC):
def test_wf_construction(self):
- wf = add_wf(self, 'Company')
- foo = wf.add_state(u'foo', initial=True)
- bar = wf.add_state(u'bar')
- self.assertEqual(wf.state_by_name('bar').eid, bar.eid)
- self.assertEqual(wf.state_by_name('barrr'), None)
- baz = wf.add_transition(u'baz', (foo,), bar, ('managers',))
- self.assertEqual(wf.transition_by_name('baz').eid, baz.eid)
- self.assertEqual(len(baz.require_group), 1)
- self.assertEqual(baz.require_group[0].name, 'managers')
+ with self.admin_access.shell() as shell:
+ wf = add_wf(shell, 'Company')
+ foo = wf.add_state(u'foo', initial=True)
+ bar = wf.add_state(u'bar')
+ self.assertEqual(wf.state_by_name('bar').eid, bar.eid)
+ self.assertEqual(wf.state_by_name('barrr'), None)
+ baz = wf.add_transition(u'baz', (foo,), bar, ('managers',))
+ self.assertEqual(wf.transition_by_name('baz').eid, baz.eid)
+ self.assertEqual(len(baz.require_group), 1)
+ self.assertEqual(baz.require_group[0].name, 'managers')
def test_duplicated_state(self):
- wf = add_wf(self, 'Company')
- wf.add_state(u'foo', initial=True)
- self.commit()
- wf.add_state(u'foo')
- with self.assertRaises(ValidationError) as cm:
- self.commit()
- self.assertEqual({'name-subject': 'workflow already has a state of that name'},
- cm.exception.errors)
- # no pb if not in the same workflow
- wf2 = add_wf(self, 'Company')
- foo = wf2.add_state(u'foo', initial=True)
- self.commit()
- # gnark gnark
- bar = wf.add_state(u'bar')
- self.commit()
- bar.cw_set(name=u'foo')
- with self.assertRaises(ValidationError) as cm:
- self.commit()
- self.assertEqual({'name-subject': 'workflow already has a state of that name'},
- cm.exception.errors)
+ with self.admin_access.shell() as shell:
+ wf = add_wf(shell, 'Company')
+ wf.add_state(u'foo', initial=True)
+ shell.commit()
+ wf.add_state(u'foo')
+ with self.assertRaises(ValidationError) as cm:
+ shell.commit()
+ self.assertEqual({'name-subject': 'workflow already has a state of that name'},
+ cm.exception.errors)
+ # no pb if not in the same workflow
+ wf2 = add_wf(shell, 'Company')
+ foo = wf2.add_state(u'foo', initial=True)
+ shell.commit()
+ # gnark gnark
+ bar = wf.add_state(u'bar')
+ shell.commit()
+ bar.cw_set(name=u'foo')
+ with self.assertRaises(ValidationError) as cm:
+ shell.commit()
+ self.assertEqual({'name-subject': 'workflow already has a state of that name'},
+ cm.exception.errors)
def test_duplicated_transition(self):
- wf = add_wf(self, 'Company')
- foo = wf.add_state(u'foo', initial=True)
- bar = wf.add_state(u'bar')
- wf.add_transition(u'baz', (foo,), bar, ('managers',))
- wf.add_transition(u'baz', (bar,), foo)
- with self.assertRaises(ValidationError) as cm:
- self.commit()
- self.assertEqual(cm.exception.errors, {'name-subject': 'workflow already has a transition of that name'})
- # no pb if not in the same workflow
- wf2 = add_wf(self, 'Company')
- foo = wf.add_state(u'foo', initial=True)
- bar = wf.add_state(u'bar')
- wf.add_transition(u'baz', (foo,), bar, ('managers',))
- self.commit()
- # gnark gnark
- biz = wf.add_transition(u'biz', (bar,), foo)
- self.commit()
- biz.cw_set(name=u'baz')
- with self.assertRaises(ValidationError) as cm:
- self.commit()
- self.assertEqual(cm.exception.errors, {'name-subject': 'workflow already has a transition of that name'})
+ with self.admin_access.shell() as shell:
+ wf = add_wf(shell, 'Company')
+ foo = wf.add_state(u'foo', initial=True)
+ bar = wf.add_state(u'bar')
+ wf.add_transition(u'baz', (foo,), bar, ('managers',))
+ wf.add_transition(u'baz', (bar,), foo)
+ with self.assertRaises(ValidationError) as cm:
+ shell.commit()
+ self.assertEqual(cm.exception.errors, {'name-subject': 'workflow already has a transition of that name'})
+ # no pb if not in the same workflow
+ wf2 = add_wf(shell, 'Company')
+ foo = wf.add_state(u'foo', initial=True)
+ bar = wf.add_state(u'bar')
+ wf.add_transition(u'baz', (foo,), bar, ('managers',))
+ shell.commit()
+ # gnark gnark
+ biz = wf.add_transition(u'biz', (bar,), foo)
+ shell.commit()
+ biz.cw_set(name=u'baz')
+ with self.assertRaises(ValidationError) as cm:
+ shell.commit()
+ self.assertEqual(cm.exception.errors, {'name-subject': 'workflow already has a transition of that name'})
class WorkflowTC(CubicWebTC):
def setup_database(self):
- req = self.request()
rschema = self.schema['in_state']
for rdef in rschema.rdefs.itervalues():
self.assertEqual(rdef.cardinality, '1*')
- self.member = self.create_user(req, 'member')
+ with self.admin_access.client_cnx() as cnx:
+ self.member_eid = self.create_user(cnx, 'member').eid
+ cnx.commit()
def test_workflow_base(self):
- req = self.request()
- e = self.create_user(req, 'toto')
- iworkflowable = e.cw_adapt_to('IWorkflowable')
- self.assertEqual(iworkflowable.state, 'activated')
- iworkflowable.change_state('deactivated', u'deactivate 1')
- self.commit()
- iworkflowable.change_state('activated', u'activate 1')
- self.commit()
- iworkflowable.change_state('deactivated', u'deactivate 2')
- self.commit()
- e.cw_clear_relation_cache('wf_info_for', 'object')
- self.assertEqual([tr.comment for tr in e.reverse_wf_info_for],
- ['deactivate 1', 'activate 1', 'deactivate 2'])
- self.assertEqual(iworkflowable.latest_trinfo().comment, 'deactivate 2')
+ with self.admin_access.web_request() as req:
+ e = self.create_user(req, 'toto')
+ iworkflowable = e.cw_adapt_to('IWorkflowable')
+ self.assertEqual(iworkflowable.state, 'activated')
+ iworkflowable.change_state('deactivated', u'deactivate 1')
+ req.cnx.commit()
+ iworkflowable.change_state('activated', u'activate 1')
+ req.cnx.commit()
+ iworkflowable.change_state('deactivated', u'deactivate 2')
+ req.cnx.commit()
+ e.cw_clear_relation_cache('wf_info_for', 'object')
+ self.assertEqual([tr.comment for tr in e.reverse_wf_info_for],
+ ['deactivate 1', 'activate 1', 'deactivate 2'])
+ self.assertEqual(iworkflowable.latest_trinfo().comment, 'deactivate 2')
def test_possible_transitions(self):
- user = self.execute('CWUser X').get_entity(0, 0)
- iworkflowable = user.cw_adapt_to('IWorkflowable')
- trs = list(iworkflowable.possible_transitions())
- self.assertEqual(len(trs), 1)
- self.assertEqual(trs[0].name, u'deactivate')
- self.assertEqual(trs[0].destination(None).name, u'deactivated')
+ with self.admin_access.web_request() as req:
+ user = req.execute('CWUser X').get_entity(0, 0)
+ iworkflowable = user.cw_adapt_to('IWorkflowable')
+ trs = list(iworkflowable.possible_transitions())
+ self.assertEqual(len(trs), 1)
+ self.assertEqual(trs[0].name, u'deactivate')
+ self.assertEqual(trs[0].destination(None).name, u'deactivated')
# test a std user get no possible transition
- cnx = self.login('member')
- req = self.request()
- # fetch the entity using the new session
- trs = list(req.user.cw_adapt_to('IWorkflowable').possible_transitions())
- self.assertEqual(len(trs), 0)
- cnx.close()
+ with self.new_access('member').web_request() as req:
+ # fetch the entity using the new session
+ trs = list(req.user.cw_adapt_to('IWorkflowable').possible_transitions())
+ self.assertEqual(len(trs), 0)
def _test_manager_deactivate(self, user):
iworkflowable = user.cw_adapt_to('IWorkflowable')
@@ -144,90 +146,93 @@
return trinfo
def test_change_state(self):
- user = self.user()
- iworkflowable = user.cw_adapt_to('IWorkflowable')
- iworkflowable.change_state('deactivated', comment=u'deactivate user')
- trinfo = self._test_manager_deactivate(user)
- self.assertEqual(trinfo.transition, None)
+ with self.admin_access.client_cnx() as cnx:
+ user = cnx.user
+ iworkflowable = user.cw_adapt_to('IWorkflowable')
+ iworkflowable.change_state('deactivated', comment=u'deactivate user')
+ trinfo = self._test_manager_deactivate(user)
+ self.assertEqual(trinfo.transition, None)
def test_set_in_state_bad_wf(self):
- wf = add_wf(self, 'CWUser')
- s = wf.add_state(u'foo', initial=True)
- self.commit()
- with self.session.security_enabled(write=False):
- with self.assertRaises(ValidationError) as cm:
- self.session.execute('SET X in_state S WHERE X eid %(x)s, S eid %(s)s',
- {'x': self.user().eid, 's': s.eid})
- self.assertEqual(cm.exception.errors, {'in_state-subject': "state doesn't belong to entity's workflow. "
- "You may want to set a custom workflow for this entity first."})
+ with self.admin_access.shell() as shell:
+ wf = add_wf(shell, 'CWUser')
+ s = wf.add_state(u'foo', initial=True)
+ shell.commit()
+ with self.admin_access.repo_cnx() as cnx:
+ with cnx.security_enabled(write=False):
+ with self.assertRaises(ValidationError) as cm:
+ cnx.execute('SET X in_state S WHERE X eid %(x)s, S eid %(s)s',
+ {'x': cnx.user.eid, 's': s.eid})
+ self.assertEqual(cm.exception.errors, {'in_state-subject': "state doesn't belong to entity's workflow. "
+ "You may want to set a custom workflow for this entity first."})
def test_fire_transition(self):
- user = self.user()
- iworkflowable = user.cw_adapt_to('IWorkflowable')
- iworkflowable.fire_transition('deactivate', comment=u'deactivate user')
- user.cw_clear_all_caches()
- self.assertEqual(iworkflowable.state, 'deactivated')
- self._test_manager_deactivate(user)
- trinfo = self._test_manager_deactivate(user)
- self.assertEqual(trinfo.transition.name, 'deactivate')
+ with self.admin_access.client_cnx() as cnx:
+ user = cnx.user
+ iworkflowable = user.cw_adapt_to('IWorkflowable')
+ iworkflowable.fire_transition('deactivate', comment=u'deactivate user')
+ user.cw_clear_all_caches()
+ self.assertEqual(iworkflowable.state, 'deactivated')
+ self._test_manager_deactivate(user)
+ trinfo = self._test_manager_deactivate(user)
+ self.assertEqual(trinfo.transition.name, 'deactivate')
def test_goback_transition(self):
- req = self.request()
- wf = req.user.cw_adapt_to('IWorkflowable').current_workflow
- asleep = wf.add_state('asleep')
- wf.add_transition('rest', (wf.state_by_name('activated'),
- wf.state_by_name('deactivated')),
- asleep)
- wf.add_transition('wake up', asleep)
- user = self.create_user(req, 'stduser')
- iworkflowable = user.cw_adapt_to('IWorkflowable')
- iworkflowable.fire_transition('rest')
- self.commit()
- iworkflowable.fire_transition('wake up')
- self.commit()
- self.assertEqual(iworkflowable.state, 'activated')
- iworkflowable.fire_transition('deactivate')
- self.commit()
- iworkflowable.fire_transition('rest')
- self.commit()
- iworkflowable.fire_transition('wake up')
- self.commit()
- user.cw_clear_all_caches()
- self.assertEqual(iworkflowable.state, 'deactivated')
+ with self.admin_access.web_request() as req:
+ wf = req.user.cw_adapt_to('IWorkflowable').current_workflow
+ asleep = wf.add_state('asleep')
+ wf.add_transition('rest', (wf.state_by_name('activated'),
+ wf.state_by_name('deactivated')),
+ asleep)
+ wf.add_transition('wake up', asleep)
+ user = self.create_user(req, 'stduser')
+ iworkflowable = user.cw_adapt_to('IWorkflowable')
+ iworkflowable.fire_transition('rest')
+ req.cnx.commit()
+ iworkflowable.fire_transition('wake up')
+ req.cnx.commit()
+ self.assertEqual(iworkflowable.state, 'activated')
+ iworkflowable.fire_transition('deactivate')
+ req.cnx.commit()
+ iworkflowable.fire_transition('rest')
+ req.cnx.commit()
+ iworkflowable.fire_transition('wake up')
+ req.cnx.commit()
+ user.cw_clear_all_caches()
+ self.assertEqual(iworkflowable.state, 'deactivated')
# XXX test managers can change state without matching transition
def _test_stduser_deactivate(self):
- ueid = self.member.eid
- req = self.request()
- self.create_user(req, 'tutu')
- cnx = self.login('tutu')
- req = self.request()
- iworkflowable = req.entity_from_eid(self.member.eid).cw_adapt_to('IWorkflowable')
- with self.assertRaises(ValidationError) as cm:
+ with self.admin_access.repo_cnx() as cnx:
+ self.create_user(cnx, 'tutu')
+ with self.new_access('tutu').web_request() as req:
+ iworkflowable = req.entity_from_eid(self.member_eid).cw_adapt_to('IWorkflowable')
+ with self.assertRaises(ValidationError) as cm:
+ iworkflowable.fire_transition('deactivate')
+ self.assertEqual(cm.exception.errors, {'by_transition-subject': "transition may not be fired"})
+ with self.new_access('member').web_request() as req:
+ iworkflowable = req.entity_from_eid(self.member_eid).cw_adapt_to('IWorkflowable')
iworkflowable.fire_transition('deactivate')
- self.assertEqual(cm.exception.errors, {'by_transition-subject': "transition may not be fired"})
- cnx.close()
- cnx = self.login('member')
- req = self.request()
- iworkflowable = req.entity_from_eid(self.member.eid).cw_adapt_to('IWorkflowable')
- iworkflowable.fire_transition('deactivate')
- cnx.commit()
- with self.assertRaises(ValidationError) as cm:
- iworkflowable.fire_transition('activate')
- self.assertEqual(cm.exception.errors, {'by_transition-subject': "transition may not be fired"})
- cnx.close()
+ req.cnx.commit()
+ with self.assertRaises(ValidationError) as cm:
+ iworkflowable.fire_transition('activate')
+ self.assertEqual(cm.exception.errors, {'by_transition-subject': "transition may not be fired"})
def test_fire_transition_owned_by(self):
- self.execute('INSERT RQLExpression X: X exprtype "ERQLExpression", '
- 'X expression "X owned_by U", T condition X '
- 'WHERE T name "deactivate"')
+ with self.admin_access.repo_cnx() as cnx:
+ cnx.execute('INSERT RQLExpression X: X exprtype "ERQLExpression", '
+ 'X expression "X owned_by U", T condition X '
+ 'WHERE T name "deactivate"')
+ cnx.commit()
self._test_stduser_deactivate()
def test_fire_transition_has_update_perm(self):
- self.execute('INSERT RQLExpression X: X exprtype "ERQLExpression", '
- 'X expression "U has_update_permission X", T condition X '
- 'WHERE T name "deactivate"')
+ with self.admin_access.repo_cnx() as cnx:
+ cnx.execute('INSERT RQLExpression X: X exprtype "ERQLExpression", '
+ 'X expression "U has_update_permission X", T condition X '
+ 'WHERE T name "deactivate"')
+ cnx.commit()
self._test_stduser_deactivate()
def test_swf_base(self):
@@ -250,334 +255,357 @@
+--------+
"""
# sub-workflow
- swf = add_wf(self, 'CWGroup', name='subworkflow')
- swfstate1 = swf.add_state(u'swfstate1', initial=True)
- swfstate2 = swf.add_state(u'swfstate2')
- swfstate3 = swf.add_state(u'swfstate3')
- tr1 = swf.add_transition(u'tr1', (swfstate1,), swfstate2)
- tr2 = swf.add_transition(u'tr2', (swfstate1,), swfstate3)
- # main workflow
- mwf = add_wf(self, 'CWGroup', name='main workflow', default=True)
- state1 = mwf.add_state(u'state1', initial=True)
- state2 = mwf.add_state(u'state2')
- state3 = mwf.add_state(u'state3')
- swftr1 = mwf.add_wftransition(u'swftr1', swf, state1,
- [(swfstate2, state2), (swfstate3, state3)])
- self.assertEqual(swftr1.destination(None).eid, swfstate1.eid)
+ with self.admin_access.shell() as shell:
+ swf = add_wf(shell, 'CWGroup', name='subworkflow')
+ swfstate1 = swf.add_state(u'swfstate1', initial=True)
+ swfstate2 = swf.add_state(u'swfstate2')
+ swfstate3 = swf.add_state(u'swfstate3')
+ tr1 = swf.add_transition(u'tr1', (swfstate1,), swfstate2)
+ tr2 = swf.add_transition(u'tr2', (swfstate1,), swfstate3)
+ # main workflow
+ mwf = add_wf(shell, 'CWGroup', name='main workflow', default=True)
+ state1 = mwf.add_state(u'state1', initial=True)
+ state2 = mwf.add_state(u'state2')
+ state3 = mwf.add_state(u'state3')
+ swftr1 = mwf.add_wftransition(u'swftr1', swf, state1,
+ [(swfstate2, state2), (swfstate3, state3)])
+ swf.cw_clear_all_caches()
+ self.assertEqual(swftr1.destination(None).eid, swfstate1.eid)
# workflows built, begin test
- group = self.request().create_entity('CWGroup', name=u'grp1')
- self.commit()
- iworkflowable = group.cw_adapt_to('IWorkflowable')
- self.assertEqual(iworkflowable.current_state.eid, state1.eid)
- self.assertEqual(iworkflowable.current_workflow.eid, mwf.eid)
- self.assertEqual(iworkflowable.main_workflow.eid, mwf.eid)
- self.assertEqual(iworkflowable.subworkflow_input_transition(), None)
- iworkflowable.fire_transition('swftr1', u'go')
- self.commit()
- group.cw_clear_all_caches()
- self.assertEqual(iworkflowable.current_state.eid, swfstate1.eid)
- self.assertEqual(iworkflowable.current_workflow.eid, swf.eid)
- self.assertEqual(iworkflowable.main_workflow.eid, mwf.eid)
- self.assertEqual(iworkflowable.subworkflow_input_transition().eid, swftr1.eid)
- iworkflowable.fire_transition('tr1', u'go')
- self.commit()
- group.cw_clear_all_caches()
- self.assertEqual(iworkflowable.current_state.eid, state2.eid)
- self.assertEqual(iworkflowable.current_workflow.eid, mwf.eid)
- self.assertEqual(iworkflowable.main_workflow.eid, mwf.eid)
- self.assertEqual(iworkflowable.subworkflow_input_transition(), None)
- # force back to swfstate1 is impossible since we can't any more find
- # subworkflow input transition
- with self.assertRaises(ValidationError) as cm:
- iworkflowable.change_state(swfstate1, u'gadget')
- self.assertEqual(cm.exception.errors, {'to_state-subject': "state doesn't belong to entity's workflow"})
- self.rollback()
- # force back to state1
- iworkflowable.change_state('state1', u'gadget')
- iworkflowable.fire_transition('swftr1', u'au')
- group.cw_clear_all_caches()
- iworkflowable.fire_transition('tr2', u'chapeau')
- self.commit()
- group.cw_clear_all_caches()
- self.assertEqual(iworkflowable.current_state.eid, state3.eid)
- self.assertEqual(iworkflowable.current_workflow.eid, mwf.eid)
- self.assertEqual(iworkflowable.main_workflow.eid, mwf.eid)
- self.assertListEqual(parse_hist(iworkflowable.workflow_history),
- [('state1', 'swfstate1', 'swftr1', 'go'),
- ('swfstate1', 'swfstate2', 'tr1', 'go'),
- ('swfstate2', 'state2', 'swftr1', 'exiting from subworkflow subworkflow'),
- ('state2', 'state1', None, 'gadget'),
- ('state1', 'swfstate1', 'swftr1', 'au'),
- ('swfstate1', 'swfstate3', 'tr2', 'chapeau'),
- ('swfstate3', 'state3', 'swftr1', 'exiting from subworkflow subworkflow'),
- ])
+ with self.admin_access.web_request() as req:
+ group = req.create_entity('CWGroup', name=u'grp1')
+ req.cnx.commit()
+ iworkflowable = group.cw_adapt_to('IWorkflowable')
+ self.assertEqual(iworkflowable.current_state.eid, state1.eid)
+ self.assertEqual(iworkflowable.current_workflow.eid, mwf.eid)
+ self.assertEqual(iworkflowable.main_workflow.eid, mwf.eid)
+ self.assertEqual(iworkflowable.subworkflow_input_transition(), None)
+ iworkflowable.fire_transition('swftr1', u'go')
+ req.cnx.commit()
+ group.cw_clear_all_caches()
+ self.assertEqual(iworkflowable.current_state.eid, swfstate1.eid)
+ self.assertEqual(iworkflowable.current_workflow.eid, swf.eid)
+ self.assertEqual(iworkflowable.main_workflow.eid, mwf.eid)
+ self.assertEqual(iworkflowable.subworkflow_input_transition().eid, swftr1.eid)
+ iworkflowable.fire_transition('tr1', u'go')
+ req.cnx.commit()
+ group.cw_clear_all_caches()
+ self.assertEqual(iworkflowable.current_state.eid, state2.eid)
+ self.assertEqual(iworkflowable.current_workflow.eid, mwf.eid)
+ self.assertEqual(iworkflowable.main_workflow.eid, mwf.eid)
+ self.assertEqual(iworkflowable.subworkflow_input_transition(), None)
+ # force back to swfstate1 is impossible since we can no longer find
+ # the subworkflow input transition
+ with self.assertRaises(ValidationError) as cm:
+ iworkflowable.change_state(swfstate1, u'gadget')
+ self.assertEqual(cm.exception.errors, {'to_state-subject': "state doesn't belong to entity's workflow"})
+ req.cnx.rollback()
+ # force back to state1
+ iworkflowable.change_state('state1', u'gadget')
+ iworkflowable.fire_transition('swftr1', u'au')
+ group.cw_clear_all_caches()
+ iworkflowable.fire_transition('tr2', u'chapeau')
+ req.cnx.commit()
+ group.cw_clear_all_caches()
+ self.assertEqual(iworkflowable.current_state.eid, state3.eid)
+ self.assertEqual(iworkflowable.current_workflow.eid, mwf.eid)
+ self.assertEqual(iworkflowable.main_workflow.eid, mwf.eid)
+ self.assertListEqual(parse_hist(iworkflowable.workflow_history),
+ [('state1', 'swfstate1', 'swftr1', 'go'),
+ ('swfstate1', 'swfstate2', 'tr1', 'go'),
+ ('swfstate2', 'state2', 'swftr1', 'exiting from subworkflow subworkflow'),
+ ('state2', 'state1', None, 'gadget'),
+ ('state1', 'swfstate1', 'swftr1', 'au'),
+ ('swfstate1', 'swfstate3', 'tr2', 'chapeau'),
+ ('swfstate3', 'state3', 'swftr1', 'exiting from subworkflow subworkflow'),
+ ])
def test_swf_exit_consistency(self):
- # sub-workflow
- swf = add_wf(self, 'CWGroup', name='subworkflow')
- swfstate1 = swf.add_state(u'swfstate1', initial=True)
- swfstate2 = swf.add_state(u'swfstate2')
- tr1 = swf.add_transition(u'tr1', (swfstate1,), swfstate2)
- # main workflow
- mwf = add_wf(self, 'CWGroup', name='main workflow', default=True)
- state1 = mwf.add_state(u'state1', initial=True)
- state2 = mwf.add_state(u'state2')
- state3 = mwf.add_state(u'state3')
- mwf.add_wftransition(u'swftr1', swf, state1,
- [(swfstate2, state2), (swfstate2, state3)])
- with self.assertRaises(ValidationError) as cm:
- self.commit()
- self.assertEqual(cm.exception.errors, {'subworkflow_exit-subject': u"can't have multiple exits on the same state"})
+ with self.admin_access.shell() as shell:
+ # sub-workflow
+ swf = add_wf(shell, 'CWGroup', name='subworkflow')
+ swfstate1 = swf.add_state(u'swfstate1', initial=True)
+ swfstate2 = swf.add_state(u'swfstate2')
+ tr1 = swf.add_transition(u'tr1', (swfstate1,), swfstate2)
+ # main workflow
+ mwf = add_wf(shell, 'CWGroup', name='main workflow', default=True)
+ state1 = mwf.add_state(u'state1', initial=True)
+ state2 = mwf.add_state(u'state2')
+ state3 = mwf.add_state(u'state3')
+ mwf.add_wftransition(u'swftr1', swf, state1,
+ [(swfstate2, state2), (swfstate2, state3)])
+ with self.assertRaises(ValidationError) as cm:
+ shell.commit()
+ self.assertEqual(cm.exception.errors, {'subworkflow_exit-subject': u"can't have multiple exits on the same state"})
def test_swf_fire_in_a_row(self):
- # sub-workflow
- subwf = add_wf(self, 'CWGroup', name='subworkflow')
- xsigning = subwf.add_state('xsigning', initial=True)
- xaborted = subwf.add_state('xaborted')
- xsigned = subwf.add_state('xsigned')
- xabort = subwf.add_transition('xabort', (xsigning,), xaborted)
- xsign = subwf.add_transition('xsign', (xsigning,), xsigning)
- xcomplete = subwf.add_transition('xcomplete', (xsigning,), xsigned,
- type=u'auto')
- # main workflow
- twf = add_wf(self, 'CWGroup', name='mainwf', default=True)
- created = twf.add_state(_('created'), initial=True)
- identified = twf.add_state(_('identified'))
- released = twf.add_state(_('released'))
- closed = twf.add_state(_('closed'))
- twf.add_wftransition(_('identify'), subwf, (created,),
- [(xsigned, identified), (xaborted, created)])
- twf.add_wftransition(_('release'), subwf, (identified,),
- [(xsigned, released), (xaborted, identified)])
- twf.add_wftransition(_('close'), subwf, (released,),
- [(xsigned, closed), (xaborted, released)])
- self.commit()
- group = self.request().create_entity('CWGroup', name=u'grp1')
- self.commit()
- iworkflowable = group.cw_adapt_to('IWorkflowable')
- for trans in ('identify', 'release', 'close'):
- iworkflowable.fire_transition(trans)
- self.commit()
+ with self.admin_access.shell() as shell:
+ # sub-workflow
+ subwf = add_wf(shell, 'CWGroup', name='subworkflow')
+ xsigning = subwf.add_state('xsigning', initial=True)
+ xaborted = subwf.add_state('xaborted')
+ xsigned = subwf.add_state('xsigned')
+ xabort = subwf.add_transition('xabort', (xsigning,), xaborted)
+ xsign = subwf.add_transition('xsign', (xsigning,), xsigning)
+ xcomplete = subwf.add_transition('xcomplete', (xsigning,), xsigned,
+ type=u'auto')
+ # main workflow
+ twf = add_wf(shell, 'CWGroup', name='mainwf', default=True)
+ created = twf.add_state(_('created'), initial=True)
+ identified = twf.add_state(_('identified'))
+ released = twf.add_state(_('released'))
+ closed = twf.add_state(_('closed'))
+ twf.add_wftransition(_('identify'), subwf, (created,),
+ [(xsigned, identified), (xaborted, created)])
+ twf.add_wftransition(_('release'), subwf, (identified,),
+ [(xsigned, released), (xaborted, identified)])
+ twf.add_wftransition(_('close'), subwf, (released,),
+ [(xsigned, closed), (xaborted, released)])
+ shell.commit()
+ with self.admin_access.repo_cnx() as cnx:
+ group = cnx.create_entity('CWGroup', name=u'grp1')
+ cnx.commit()
+ iworkflowable = group.cw_adapt_to('IWorkflowable')
+ for trans in ('identify', 'release', 'close'):
+ iworkflowable.fire_transition(trans)
+ cnx.commit()
def test_swf_magic_tr(self):
- # sub-workflow
- subwf = add_wf(self, 'CWGroup', name='subworkflow')
- xsigning = subwf.add_state('xsigning', initial=True)
- xaborted = subwf.add_state('xaborted')
- xsigned = subwf.add_state('xsigned')
- xabort = subwf.add_transition('xabort', (xsigning,), xaborted)
- xsign = subwf.add_transition('xsign', (xsigning,), xsigned)
- # main workflow
- twf = add_wf(self, 'CWGroup', name='mainwf', default=True)
- created = twf.add_state(_('created'), initial=True)
- identified = twf.add_state(_('identified'))
- released = twf.add_state(_('released'))
- twf.add_wftransition(_('identify'), subwf, created,
- [(xaborted, None), (xsigned, identified)])
- twf.add_wftransition(_('release'), subwf, identified,
- [(xaborted, None)])
- self.commit()
- group = self.request().create_entity('CWGroup', name=u'grp1')
- self.commit()
- iworkflowable = group.cw_adapt_to('IWorkflowable')
- for trans, nextstate in (('identify', 'xsigning'),
- ('xabort', 'created'),
- ('identify', 'xsigning'),
- ('xsign', 'identified'),
- ('release', 'xsigning'),
- ('xabort', 'identified')
- ):
- iworkflowable.fire_transition(trans)
- self.commit()
- group.cw_clear_all_caches()
- self.assertEqual(iworkflowable.state, nextstate)
+ with self.admin_access.shell() as shell:
+ # sub-workflow
+ subwf = add_wf(shell, 'CWGroup', name='subworkflow')
+ xsigning = subwf.add_state('xsigning', initial=True)
+ xaborted = subwf.add_state('xaborted')
+ xsigned = subwf.add_state('xsigned')
+ xabort = subwf.add_transition('xabort', (xsigning,), xaborted)
+ xsign = subwf.add_transition('xsign', (xsigning,), xsigned)
+ # main workflow
+ twf = add_wf(shell, 'CWGroup', name='mainwf', default=True)
+ created = twf.add_state(_('created'), initial=True)
+ identified = twf.add_state(_('identified'))
+ released = twf.add_state(_('released'))
+ twf.add_wftransition(_('identify'), subwf, created,
+ [(xaborted, None), (xsigned, identified)])
+ twf.add_wftransition(_('release'), subwf, identified,
+ [(xaborted, None)])
+ shell.commit()
+ with self.admin_access.web_request() as req:
+ group = req.create_entity('CWGroup', name=u'grp1')
+ req.cnx.commit()
+ iworkflowable = group.cw_adapt_to('IWorkflowable')
+ for trans, nextstate in (('identify', 'xsigning'),
+ ('xabort', 'created'),
+ ('identify', 'xsigning'),
+ ('xsign', 'identified'),
+ ('release', 'xsigning'),
+ ('xabort', 'identified')
+ ):
+ iworkflowable.fire_transition(trans)
+ req.cnx.commit()
+ group.cw_clear_all_caches()
+ self.assertEqual(iworkflowable.state, nextstate)
class CustomWorkflowTC(CubicWebTC):
def setup_database(self):
- req = self.request()
- self.member = self.create_user(req, 'member')
+ with self.admin_access.repo_cnx() as cnx:
+ self.member_eid = self.create_user(cnx, 'member').eid
def test_custom_wf_replace_state_no_history(self):
"""member in inital state with no previous history, state is simply
redirected when changing workflow
"""
- wf = add_wf(self, 'CWUser')
- wf.add_state('asleep', initial=True)
- self.execute('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
- {'wf': wf.eid, 'x': self.member.eid})
- self.member.cw_clear_all_caches()
- iworkflowable = self.member.cw_adapt_to('IWorkflowable')
- self.assertEqual(iworkflowable.state, 'activated')# no change before commit
- self.commit()
- self.member.cw_clear_all_caches()
- self.assertEqual(iworkflowable.current_workflow.eid, wf.eid)
- self.assertEqual(iworkflowable.state, 'asleep')
- self.assertEqual(iworkflowable.workflow_history, ())
+ with self.admin_access.shell() as shell:
+ wf = add_wf(shell, 'CWUser')
+ wf.add_state('asleep', initial=True)
+ with self.admin_access.web_request() as req:
+ req.execute('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
+ {'wf': wf.eid, 'x': self.member_eid})
+ member = req.entity_from_eid(self.member_eid)
+ iworkflowable = member.cw_adapt_to('IWorkflowable')
+ self.assertEqual(iworkflowable.state, 'activated') # no change before commit
+ req.cnx.commit()
+ member.cw_clear_all_caches()
+ self.assertEqual(iworkflowable.current_workflow.eid, wf.eid)
+ self.assertEqual(iworkflowable.state, 'asleep')
+ self.assertEqual(iworkflowable.workflow_history, ())
def test_custom_wf_replace_state_keep_history(self):
"""member in inital state with some history, state is redirected and
state change is recorded to history
"""
- iworkflowable = self.member.cw_adapt_to('IWorkflowable')
- iworkflowable.fire_transition('deactivate')
- iworkflowable.fire_transition('activate')
- wf = add_wf(self, 'CWUser')
- wf.add_state('asleep', initial=True)
- self.execute('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
- {'wf': wf.eid, 'x': self.member.eid})
- self.commit()
- self.member.cw_clear_all_caches()
- self.assertEqual(iworkflowable.current_workflow.eid, wf.eid)
- self.assertEqual(iworkflowable.state, 'asleep')
- self.assertEqual(parse_hist(iworkflowable.workflow_history),
- [('activated', 'deactivated', 'deactivate', None),
- ('deactivated', 'activated', 'activate', None),
- ('activated', 'asleep', None, 'workflow changed to "CWUser"')])
+ with self.admin_access.web_request() as req:
+ member = req.entity_from_eid(self.member_eid)
+ iworkflowable = member.cw_adapt_to('IWorkflowable')
+ iworkflowable.fire_transition('deactivate')
+ iworkflowable.fire_transition('activate')
+ req.cnx.commit()
+ with self.admin_access.shell() as shell:
+ wf = add_wf(shell, 'CWUser')
+ wf.add_state('asleep', initial=True)
+ shell.rqlexec('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
+ {'wf': wf.eid, 'x': self.member_eid})
+ with self.admin_access.web_request() as req:
+ member = req.entity_from_eid(self.member_eid)
+ iworkflowable = member.cw_adapt_to('IWorkflowable')
+ self.assertEqual(iworkflowable.current_workflow.eid, wf.eid)
+ self.assertEqual(iworkflowable.state, 'asleep')
+ self.assertEqual(parse_hist(iworkflowable.workflow_history),
+ [('activated', 'deactivated', 'deactivate', None),
+ ('deactivated', 'activated', 'activate', None),
+ ('activated', 'asleep', None, 'workflow changed to "CWUser"')])
def test_custom_wf_no_initial_state(self):
"""try to set a custom workflow which has no initial state"""
- iworkflowable = self.member.cw_adapt_to('IWorkflowable')
- iworkflowable.fire_transition('deactivate')
- wf = add_wf(self, 'CWUser')
- wf.add_state('asleep')
- self.execute('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
- {'wf': wf.eid, 'x': self.member.eid})
- with self.assertRaises(ValidationError) as cm:
- self.commit()
- self.assertEqual(cm.exception.errors, {'custom_workflow-subject': u'workflow has no initial state'})
+ with self.admin_access.shell() as shell:
+ wf = add_wf(shell, 'CWUser')
+ wf.add_state('asleep')
+ shell.rqlexec('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
+ {'wf': wf.eid, 'x': self.member_eid})
+ with self.assertRaises(ValidationError) as cm:
+ shell.commit()
+ self.assertEqual(cm.exception.errors, {'custom_workflow-subject': u'workflow has no initial state'})
def test_custom_wf_bad_etype(self):
"""try to set a custom workflow which doesn't apply to entity type"""
- wf = add_wf(self, 'Company')
- wf.add_state('asleep', initial=True)
- self.execute('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
- {'wf': wf.eid, 'x': self.member.eid})
- with self.assertRaises(ValidationError) as cm:
- self.commit()
- self.assertEqual(cm.exception.errors, {'custom_workflow-subject': u"workflow isn't a workflow for this type"})
+ with self.admin_access.shell() as shell:
+ wf = add_wf(shell, 'Company')
+ wf.add_state('asleep', initial=True)
+ shell.rqlexec('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
+ {'wf': wf.eid, 'x': self.member_eid})
+ with self.assertRaises(ValidationError) as cm:
+ shell.commit()
+ self.assertEqual(cm.exception.errors, {'custom_workflow-subject': u"workflow isn't a workflow for this type"})
def test_del_custom_wf(self):
"""member in some state shared by the new workflow, nothing has to be
done
"""
- iworkflowable = self.member.cw_adapt_to('IWorkflowable')
- iworkflowable.fire_transition('deactivate')
- wf = add_wf(self, 'CWUser')
- wf.add_state('asleep', initial=True)
- self.execute('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
- {'wf': wf.eid, 'x': self.member.eid})
- self.commit()
- self.execute('DELETE X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
- {'wf': wf.eid, 'x': self.member.eid})
- self.member.cw_clear_all_caches()
- self.assertEqual(iworkflowable.state, 'asleep')# no change before commit
- self.commit()
- self.member.cw_clear_all_caches()
- self.assertEqual(iworkflowable.current_workflow.name, "default user workflow")
- self.assertEqual(iworkflowable.state, 'activated')
- self.assertEqual(parse_hist(iworkflowable.workflow_history),
- [('activated', 'deactivated', 'deactivate', None),
- ('deactivated', 'asleep', None, 'workflow changed to "CWUser"'),
- ('asleep', 'activated', None, 'workflow changed to "default user workflow"'),])
+ with self.admin_access.web_request() as req:
+ member = req.entity_from_eid(self.member_eid)
+ iworkflowable = member.cw_adapt_to('IWorkflowable')
+ iworkflowable.fire_transition('deactivate')
+ req.cnx.commit()
+ with self.admin_access.shell() as shell:
+ wf = add_wf(shell, 'CWUser')
+ wf.add_state('asleep', initial=True)
+ shell.rqlexec('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
+ {'wf': wf.eid, 'x': self.member_eid})
+ shell.commit()
+ with self.admin_access.web_request() as req:
+ req.execute('DELETE X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
+ {'wf': wf.eid, 'x': self.member_eid})
+ member = req.entity_from_eid(self.member_eid)
+ iworkflowable = member.cw_adapt_to('IWorkflowable')
+ self.assertEqual(iworkflowable.state, 'asleep') # no change before commit
+ req.cnx.commit()
+ member.cw_clear_all_caches()
+ self.assertEqual(iworkflowable.current_workflow.name, "default user workflow")
+ self.assertEqual(iworkflowable.state, 'activated')
+ self.assertEqual(parse_hist(iworkflowable.workflow_history),
+ [('activated', 'deactivated', 'deactivate', None),
+ ('deactivated', 'asleep', None, 'workflow changed to "CWUser"'),
+ ('asleep', 'activated', None, 'workflow changed to "default user workflow"'),])
class AutoTransitionTC(CubicWebTC):
def setup_custom_wf(self):
- wf = add_wf(self, 'CWUser')
- asleep = wf.add_state('asleep', initial=True)
- dead = wf.add_state('dead')
- wf.add_transition('rest', asleep, asleep)
- wf.add_transition('sick', asleep, dead, type=u'auto',
- conditions=({'expr': u'X surname "toto"',
- 'mainvars': u'X'},))
+ with self.admin_access.shell() as shell:
+ wf = add_wf(shell, 'CWUser')
+ asleep = wf.add_state('asleep', initial=True)
+ dead = wf.add_state('dead')
+ wf.add_transition('rest', asleep, asleep)
+ wf.add_transition('sick', asleep, dead, type=u'auto',
+ conditions=({'expr': u'X surname "toto"',
+ 'mainvars': u'X'},))
return wf
def test_auto_transition_fired(self):
wf = self.setup_custom_wf()
- req = self.request()
- user = self.create_user(req, 'member')
- iworkflowable = user.cw_adapt_to('IWorkflowable')
- self.execute('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
- {'wf': wf.eid, 'x': user.eid})
- self.commit()
- user.cw_clear_all_caches()
- self.assertEqual(iworkflowable.state, 'asleep')
- self.assertEqual([t.name for t in iworkflowable.possible_transitions()],
- ['rest'])
- iworkflowable.fire_transition('rest')
- self.commit()
- user.cw_clear_all_caches()
- self.assertEqual(iworkflowable.state, 'asleep')
- self.assertEqual([t.name for t in iworkflowable.possible_transitions()],
- ['rest'])
- self.assertEqual(parse_hist(iworkflowable.workflow_history),
- [('asleep', 'asleep', 'rest', None)])
- user.cw_set(surname=u'toto') # fulfill condition
- self.commit()
- iworkflowable.fire_transition('rest')
- self.commit()
- user.cw_clear_all_caches()
- self.assertEqual(iworkflowable.state, 'dead')
- self.assertEqual(parse_hist(iworkflowable.workflow_history),
- [('asleep', 'asleep', 'rest', None),
- ('asleep', 'asleep', 'rest', None),
- ('asleep', 'dead', 'sick', None),])
+ with self.admin_access.web_request() as req:
+ user = self.create_user(req, 'member')
+ iworkflowable = user.cw_adapt_to('IWorkflowable')
+ req.execute('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
+ {'wf': wf.eid, 'x': user.eid})
+ req.cnx.commit()
+ user.cw_clear_all_caches()
+ self.assertEqual(iworkflowable.state, 'asleep')
+ self.assertEqual([t.name for t in iworkflowable.possible_transitions()],
+ ['rest'])
+ iworkflowable.fire_transition('rest')
+ req.cnx.commit()
+ user.cw_clear_all_caches()
+ self.assertEqual(iworkflowable.state, 'asleep')
+ self.assertEqual([t.name for t in iworkflowable.possible_transitions()],
+ ['rest'])
+ self.assertEqual(parse_hist(iworkflowable.workflow_history),
+ [('asleep', 'asleep', 'rest', None)])
+ user.cw_set(surname=u'toto') # fulfill condition
+ req.cnx.commit()
+ iworkflowable.fire_transition('rest')
+ req.cnx.commit()
+ user.cw_clear_all_caches()
+ self.assertEqual(iworkflowable.state, 'dead')
+ self.assertEqual(parse_hist(iworkflowable.workflow_history),
+ [('asleep', 'asleep', 'rest', None),
+ ('asleep', 'asleep', 'rest', None),
+ ('asleep', 'dead', 'sick', None),])
def test_auto_transition_custom_initial_state_fired(self):
wf = self.setup_custom_wf()
- req = self.request()
- user = self.create_user(req, 'member', surname=u'toto')
- self.execute('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
+ with self.admin_access.web_request() as req:
+ user = self.create_user(req, 'member', surname=u'toto')
+ req.execute('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
{'wf': wf.eid, 'x': user.eid})
- self.commit()
- iworkflowable = user.cw_adapt_to('IWorkflowable')
- self.assertEqual(iworkflowable.state, 'dead')
+ req.cnx.commit()
+ iworkflowable = user.cw_adapt_to('IWorkflowable')
+ self.assertEqual(iworkflowable.state, 'dead')
def test_auto_transition_initial_state_fired(self):
- wf = self.execute('Any WF WHERE ET default_workflow WF, '
- 'ET name %(et)s', {'et': 'CWUser'}).get_entity(0, 0)
- dead = wf.add_state('dead')
- wf.add_transition('sick', wf.state_by_name('activated'), dead,
- type=u'auto', conditions=({'expr': u'X surname "toto"',
- 'mainvars': u'X'},))
- self.commit()
- req = self.request()
- user = self.create_user(req, 'member', surname=u'toto')
- self.commit()
- iworkflowable = user.cw_adapt_to('IWorkflowable')
- self.assertEqual(iworkflowable.state, 'dead')
+ with self.admin_access.web_request() as req:
+ wf = req.execute('Any WF WHERE ET default_workflow WF, '
+ 'ET name %(et)s', {'et': 'CWUser'}).get_entity(0, 0)
+ dead = wf.add_state('dead')
+ wf.add_transition('sick', wf.state_by_name('activated'), dead,
+ type=u'auto', conditions=({'expr': u'X surname "toto"',
+ 'mainvars': u'X'},))
+ req.cnx.commit()
+ with self.admin_access.web_request() as req:
+ user = self.create_user(req, 'member', surname=u'toto')
+ req.cnx.commit()
+ iworkflowable = user.cw_adapt_to('IWorkflowable')
+ self.assertEqual(iworkflowable.state, 'dead')
class WorkflowHooksTC(CubicWebTC):
def setUp(self):
CubicWebTC.setUp(self)
- req = self.request()
- self.wf = req.user.cw_adapt_to('IWorkflowable').current_workflow
- self.s_activated = self.wf.state_by_name('activated').eid
- self.s_deactivated = self.wf.state_by_name('deactivated').eid
- self.s_dummy = self.wf.add_state(u'dummy').eid
- self.wf.add_transition(u'dummy', (self.s_deactivated,), self.s_dummy)
- ueid = self.create_user(req, 'stduser', commit=False).eid
- # test initial state is set
- rset = self.execute('Any N WHERE S name N, X in_state S, X eid %(x)s',
- {'x' : ueid})
- self.assertFalse(rset, rset.rows)
- self.commit()
- initialstate = self.execute('Any N WHERE S name N, X in_state S, X eid %(x)s',
- {'x' : ueid})[0][0]
- self.assertEqual(initialstate, u'activated')
- # give access to users group on the user's wf transitions
- # so we can test wf enforcing on euser (managers don't have anymore this
- # enforcement
- self.execute('SET X require_group G '
- 'WHERE G name "users", X transition_of WF, WF eid %(wf)s',
- {'wf': self.wf.eid})
- self.commit()
+ with self.admin_access.web_request() as req:
+ self.wf = req.user.cw_adapt_to('IWorkflowable').current_workflow
+ self.s_activated = self.wf.state_by_name('activated').eid
+ self.s_deactivated = self.wf.state_by_name('deactivated').eid
+ self.s_dummy = self.wf.add_state(u'dummy').eid
+ self.wf.add_transition(u'dummy', (self.s_deactivated,), self.s_dummy)
+ ueid = self.create_user(req, 'stduser', commit=False).eid
+ # test initial state is set
+ rset = req.execute('Any N WHERE S name N, X in_state S, X eid %(x)s',
+ {'x' : ueid})
+ self.assertFalse(rset, rset.rows)
+ req.cnx.commit()
+ initialstate = req.execute('Any N WHERE S name N, X in_state S, X eid %(x)s',
+ {'x' : ueid})[0][0]
+ self.assertEqual(initialstate, u'activated')
+ # give access to the users group on the user's wf transitions
+ # so we can test wf enforcement on euser (managers no longer have this
+ # enforcement)
+ req.execute('SET X require_group G '
+ 'WHERE G name "users", X transition_of WF, WF eid %(wf)s',
+ {'wf': self.wf.eid})
+ req.cnx.commit()
# XXX currently, we have to rely on hooks to set the initial state, or to use execute
# def test_initial_state(self):
@@ -602,42 +630,37 @@
return ' '.join(lmsg)
def test_transition_checking1(self):
- cnx = self.login('stduser')
- user = cnx.user(self.session)
- iworkflowable = user.cw_adapt_to('IWorkflowable')
- with self.assertRaises(ValidationError) as cm:
- iworkflowable.fire_transition('activate')
- self.assertEqual(self._cleanup_msg(cm.exception.errors['by_transition-subject']),
- u"transition isn't allowed from")
- cnx.close()
+ with self.new_access('stduser').repo_cnx() as cnx:
+ user = cnx.user
+ iworkflowable = user.cw_adapt_to('IWorkflowable')
+ with self.assertRaises(ValidationError) as cm:
+ iworkflowable.fire_transition('activate')
+ self.assertEqual(self._cleanup_msg(cm.exception.errors['by_transition-subject']),
+ u"transition isn't allowed from")
def test_transition_checking2(self):
- cnx = self.login('stduser')
- user = cnx.user(self.session)
- iworkflowable = user.cw_adapt_to('IWorkflowable')
- with self.assertRaises(ValidationError) as cm:
- iworkflowable.fire_transition('dummy')
- self.assertEqual(self._cleanup_msg(cm.exception.errors['by_transition-subject']),
- u"transition isn't allowed from")
- cnx.close()
+ with self.new_access('stduser').repo_cnx() as cnx:
+ user = cnx.user
+ iworkflowable = user.cw_adapt_to('IWorkflowable')
+ with self.assertRaises(ValidationError) as cm:
+ iworkflowable.fire_transition('dummy')
+ self.assertEqual(self._cleanup_msg(cm.exception.errors['by_transition-subject']),
+ u"transition isn't allowed from")
def test_transition_checking3(self):
- with self.login('stduser') as cnx:
- session = self.session
- user = self.user()
+ with self.new_access('stduser').repo_cnx() as cnx:
+ user = cnx.user
iworkflowable = user.cw_adapt_to('IWorkflowable')
iworkflowable.fire_transition('deactivate')
- session.commit()
- session.set_cnxset()
+ cnx.commit()
with self.assertRaises(ValidationError) as cm:
iworkflowable.fire_transition('deactivate')
self.assertEqual(self._cleanup_msg(cm.exception.errors['by_transition-subject']),
u"transition isn't allowed from")
- session.rollback()
- session.set_cnxset()
+ cnx.rollback()
# get back now
iworkflowable.fire_transition('activate')
- session.commit()
+ cnx.commit()
if __name__ == '__main__':
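
# Editor's sketch (not part of the patch): the 3.19 test connection API the
# migrations above converge on. The test class, entity, and queries are
# illustrative assumptions; the access methods are the ones used in the hunks.
from cubicweb.devtools.testlib import CubicWebTC

class ConnectionAPIDemoTC(CubicWebTC):
    def test_accesses(self):
        # server-side connection: create/query entities, then commit
        with self.admin_access.repo_cnx() as cnx:
            group_eid = cnx.create_entity('CWGroup', name=u'demo').eid
            cnx.commit()
        # web request: the underlying connection is reachable as req.cnx
        with self.admin_access.web_request() as req:
            group = req.entity_from_eid(group_eid)
            self.assertEqual(group.name, u'demo')
            req.cnx.commit()
        # migration shell: handy for schema/workflow setup, as with add_wf above
        with self.admin_access.shell() as shell:
            shell.rqlexec('SET X name %(n)s WHERE X eid %(x)s',
                          {'n': u'demo2', 'x': group_eid})
            shell.commit()
        # a non-admin access is obtained with self.new_access('login')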
--- a/entity.py Tue Jun 10 09:35:26 2014 +0200
+++ b/entity.py Tue Jun 10 09:49:45 2014 +0200
@@ -633,11 +633,9 @@
@cached
def cw_metainformation(self):
- res = self._cw.describe(self.eid, asdict=True)
- # use 'asource' and not 'source' since this is the actual source,
- # while 'source' is the physical source (where it's stored)
- res['source'] = self._cw.source_defs()[res.pop('asource')]
- return res
+ metas = self._cw.entity_metas(self.eid)
+ metas['source'] = self._cw.source_defs()[metas['source']]
+ return metas
def cw_check_perm(self, action):
self.e_schema.check_perm(self._cw, action, eid=self.eid)
@@ -1076,6 +1074,25 @@
# generic vocabulary methods ##############################################
+ def cw_linkable_rql(self, rtype, targettype, role, ordermethod=None,
+ vocabconstraints=True, lt_infos={}, limit=None):
+ """build a rql to fetch targettype entities either related or unrelated
+ to this entity using (rtype, role) relation.
+
+ Consider relation permissions so that returned entities may be actually
+ linked by `rtype`.
+
+ `lt_infos` are supplementary informations, usually coming from __linkto
+ parameter, that can help further restricting the results in case current
+ entity is not yet created. It is a dict describing entities the current
+ entity will be linked to, which keys are (rtype, role) tuples and values
+ are a list of eids.
+ """
+ return self._cw_compute_linkable_rql(rtype, targettype, role, ordermethod=ordermethod,
+ vocabconstraints=vocabconstraints,
+ lt_infos=lt_infos, limit=limit,
+ unrelated_only=False)
+
def cw_unrelated_rql(self, rtype, targettype, role, ordermethod=None,
vocabconstraints=True, lt_infos={}, limit=None):
"""build a rql to fetch `targettype` entities unrelated to this entity
@@ -1090,6 +1107,21 @@
entity will be linked to, whose keys are (rtype, role) tuples and whose
values are lists of eids.
"""
+ return self._cw_compute_linkable_rql(rtype, targettype, role, ordermethod=ordermethod,
+ vocabconstraints=vocabconstraints,
+ lt_infos=lt_infos, limit=limit,
+ unrelated_only=True)
+
+ def _cw_compute_linkable_rql(self, rtype, targettype, role, ordermethod=None,
+ vocabconstraints=True, lt_infos={}, limit=None,
+ unrelated_only=False):
+ """build a rql to fetch `targettype` entities that may be related to
+ this entity using the (rtype, role) relation.
+
+ By default (unrelated_only=False), this includes the already linked
+ entities as well as the unrelated ones. If `unrelated_only` is True, the
+ rql filters out the already related entities.
+ """
ordermethod = ordermethod or 'fetch_unrelated_order'
rschema = self._cw.vreg.schema.rschema(rtype)
rdef = rschema.role_rdef(self.e_schema, targettype, role)
@@ -1118,7 +1150,7 @@
else:
rel = make_relation(searchedvar, rtype, (variable,), VariableRef)
select.add_restriction(Not(rel))
- elif self.has_eid():
+ elif self.has_eid() and unrelated_only:
# elif we have an eid, we don't want a target entity which is
# already linked to ourself through this relation
rel = make_relation(subjvar, rtype, (objvar,), VariableRef)
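
# Editor's sketch of the refactoring above: cw_linkable_rql and
# cw_unrelated_rql now share _cw_compute_linkable_rql and differ only in
# unrelated_only. 'ticket', 'concerns' and 'Project' are illustrative
# assumptions, not names from the patch.
linkable_rql = ticket.cw_linkable_rql('concerns', 'Project', 'subject')
# -> RQL selecting every Project that may be linked to the ticket through
#    'concerns', already-linked projects included (unrelated_only=False)
unrelated_rql = ticket.cw_unrelated_rql('concerns', 'Project', 'subject')
# -> same RQL plus a restriction filtering out the projects already linked
#    to the ticket (unrelated_only=True)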
--- a/etwist/server.py Tue Jun 10 09:35:26 2014 +0200
+++ b/etwist/server.py Tue Jun 10 09:49:45 2014 +0200
@@ -57,12 +57,12 @@
class CubicWebRootResource(resource.Resource):
- def __init__(self, config, vreg=None):
+ def __init__(self, config, repo):
resource.Resource.__init__(self)
self.config = config
# instantiate publisher here and not in init_publisher to get some
# checks done before daemonization (eg versions consistency)
- self.appli = CubicWebPublisher(config, vreg=vreg)
+ self.appli = CubicWebPublisher(repo, config)
self.base_url = config['base-url']
self.https_url = config['https-url']
global MAX_POST_LENGTH
@@ -271,12 +271,20 @@
LOGGER = getLogger('cubicweb.twisted')
set_log_methods(CubicWebRootResource, LOGGER)
-def run(config, vreg=None, debug=None):
+def run(config, debug=None, repo=None):
+ # repo may be passed during tests.
+ #
+ # The test has already created a repo object, so we should not create a new
+ # one. Explicitly passing the repo object avoids relying on the fragile
+ # config.repository() cache. We could imagine making repo a mandatory
+ # argument and receiving it from the starting command directly.
if debug is not None:
config.debugmode = debug
config.check_writeable_uid_directory(config.appdatahome)
# create the site
- root_resource = CubicWebRootResource(config, vreg=vreg)
+ if repo is None:
+ repo = config.repository()
+ root_resource = CubicWebRootResource(config, repo)
website = server.Site(root_resource)
# serve it via standard HTTP on port set in the configuration
port = config['port'] or 8080
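
# Editor's sketch of the resulting calling convention ('config' and 'repo'
# are assumed to come from the caller, e.g. a test harness):
from cubicweb.etwist.server import run

run(config, debug=True, repo=repo)  # tests hand over their pre-built repository
# without the repo argument, run() falls back to config.repository()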
--- a/hooks/__init__.py Tue Jun 10 09:35:26 2014 +0200
+++ b/hooks/__init__.py Tue Jun 10 09:49:45 2014 +0200
@@ -39,10 +39,6 @@
session.system_sql(
'DELETE FROM transactions WHERE tx_time < %(time)s',
{'time': mindate})
- # cleanup deleted entities
- session.system_sql(
- 'DELETE FROM deleted_entities WHERE dtime < %(time)s',
- {'time': mindate})
session.commit()
finally:
session.close()
@@ -57,22 +53,18 @@
def __call__(self):
def update_feeds(repo):
- # don't iter on repo.sources which doesn't include copy based
- # sources (the one we're looking for)
- # take a list to avoid iterating on a dictionary which size may
+ # take a list to avoid iterating on a dictionary whose size may
# change
- for source in list(repo.sources_by_eid.values()):
- if (not source.copy_based_source
+ for uri, source in list(repo.sources_by_uri.iteritems()):
+ if (uri == 'system'
or not repo.config.source_enabled(source)
or not source.config['synchronize']):
continue
- session = repo.internal_session(safe=True)
- try:
- source.pull_data(session)
- except Exception as exc:
- session.exception('while trying to update feed %s', source)
- finally:
- session.close()
+ with repo.internal_connection() as cnx:
+ try:
+ source.pull_data(cnx)
+ except Exception as exc:
+ cnx.exception('while trying to update feed %s', source)
self.repo.looping_task(60, update_feeds, self.repo)
@@ -83,8 +75,8 @@
def __call__(self):
def expire_dataimports(repo=self.repo):
- for source in repo.sources_by_eid.itervalues():
- if (not source.copy_based_source
+ for uri, source in repo.sources_by_uri.iteritems():
+ if (uri == 'system'
or not repo.config.source_enabled(source)):
continue
session = repo.internal_session()
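
# Editor's sketch of the internal-connection pattern adopted above, reduced to
# its shape ('repo' is assumed to be a Repository; 'do_pull' is a hypothetical
# stand-in for source.pull_data):

# pre-3.19, as in the removed lines:
session = repo.internal_session(safe=True)
try:
    do_pull(session)
finally:
    session.close()

# 3.19, as in the added lines:
with repo.internal_connection() as cnx:
    do_pull(cnx)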
--- a/hooks/bookmark.py Tue Jun 10 09:35:26 2014 +0200
+++ b/hooks/bookmark.py Tue Jun 10 09:49:45 2014 +0200
@@ -25,7 +25,7 @@
class AutoDeleteBookmarkOp(hook.Operation):
bookmark = None # make pylint happy
def precommit_event(self):
- if not self.session.deleted_in_transaction(self.bookmark.eid):
+ if not self.cnx.deleted_in_transaction(self.bookmark.eid):
if not self.bookmark.bookmarked_by:
self.bookmark.cw_delete()
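
# Editor's sketch: throughout this changeset, hook operations read self.cnx
# where they previously read self.session. 'TouchDateOp' is illustrative;
# operations store their keyword arguments as attributes, hence self.entity.
from cubicweb.server import hook

class TouchDateOp(hook.Operation):
    entity = None # make pylint happy

    def precommit_event(self):
        # self.cnx is the connection the operation was created with
        if not self.cnx.deleted_in_transaction(self.entity.eid):
            self.cnx.execute('SET X modification_date NOW WHERE X eid %(x)s',
                             {'x': self.entity.eid})

# instantiated from a hook as: TouchDateOp(self._cw, entity=self.entity)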
--- a/hooks/email.py Tue Jun 10 09:35:26 2014 +0200
+++ b/hooks/email.py Tue Jun 10 09:49:45 2014 +0200
@@ -38,7 +38,7 @@
def precommit_event(self):
if self.condition():
- self.session.execute(
+ self.cnx.execute(
'SET X %s Y WHERE X eid %%(x)s, Y eid %%(y)s' % self.rtype,
{'x': self.entity.eid, 'y': self.email.eid})
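
# Editor's note on the format string above: since the rtype is spliced in with
# %-formatting, the RQL substitution markers are doubled (%%) so they survive
# the interpolation. Sketch ('use_email' is an illustrative rtype):
rtype = 'use_email'
rql = 'SET X %s Y WHERE X eid %%(x)s, Y eid %%(y)s' % rtype
assert rql == 'SET X use_email Y WHERE X eid %(x)s, Y eid %(y)s'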
--- a/hooks/integrity.py Tue Jun 10 09:35:26 2014 +0200
+++ b/hooks/integrity.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -24,10 +24,10 @@
from threading import Lock
-from cubicweb import validation_error
+from cubicweb import validation_error, neg_role
from cubicweb.schema import (META_RTYPES, WORKFLOW_RTYPES,
RQLConstraint, RQLUniqueConstraint)
-from cubicweb.predicates import is_instance
+from cubicweb.predicates import is_instance, composite_etype
from cubicweb.uilib import soup2xhtml
from cubicweb.server import hook
@@ -40,30 +40,30 @@
_UNIQUE_CONSTRAINTS_HOLDER = None
-def _acquire_unique_cstr_lock(session):
- """acquire the _UNIQUE_CONSTRAINTS_LOCK for the session.
+def _acquire_unique_cstr_lock(cnx):
+ """acquire the _UNIQUE_CONSTRAINTS_LOCK for the cnx.
This lock is used to avoid potential integrity problems when checking
RQLUniqueConstraint in two different transactions, as explained in
http://intranet.logilab.fr/jpl/ticket/36564
"""
- if 'uniquecstrholder' in session.transaction_data:
+ if 'uniquecstrholder' in cnx.transaction_data:
return
_UNIQUE_CONSTRAINTS_LOCK.acquire()
- session.transaction_data['uniquecstrholder'] = True
+ cnx.transaction_data['uniquecstrholder'] = True
# register operation responsible to release the lock on commit/rollback
- _ReleaseUniqueConstraintsOperation(session)
+ _ReleaseUniqueConstraintsOperation(cnx)
-def _release_unique_cstr_lock(session):
- if 'uniquecstrholder' in session.transaction_data:
- del session.transaction_data['uniquecstrholder']
+def _release_unique_cstr_lock(cnx):
+ if 'uniquecstrholder' in cnx.transaction_data:
+ del cnx.transaction_data['uniquecstrholder']
_UNIQUE_CONSTRAINTS_LOCK.release()
class _ReleaseUniqueConstraintsOperation(hook.Operation):
def postcommit_event(self):
- _release_unique_cstr_lock(self.session)
+ _release_unique_cstr_lock(self.cnx)
def rollback_event(self):
- _release_unique_cstr_lock(self.session)
+ _release_unique_cstr_lock(self.cnx)
class _CheckRequiredRelationOperation(hook.DataOperationMixIn,
@@ -75,17 +75,17 @@
role = key = base_rql = None
def precommit_event(self):
- session = self.session
- pendingeids = session.transaction_data.get('pendingeids', ())
- pendingrtypes = session.transaction_data.get('pendingrtypes', ())
+ cnx = self.cnx
+ pendingeids = cnx.transaction_data.get('pendingeids', ())
+ pendingrtypes = cnx.transaction_data.get('pendingrtypes', ())
for eid, rtype in self.get_data():
# recheck pending eids / relation types
if eid in pendingeids:
continue
if rtype in pendingrtypes:
continue
- if not session.execute(self.base_rql % rtype, {'x': eid}):
- etype = session.describe(eid)[0]
+ if not cnx.execute(self.base_rql % rtype, {'x': eid}):
+ etype = cnx.entity_metas(eid)['type']
msg = _('at least one relation %(rtype)s is required on '
'%(etype)s (%(eid)s)')
raise validation_error(eid, {(rtype, self.role): msg},
@@ -142,16 +142,16 @@
rtype = self.rtype
if rtype in DONT_CHECK_RTYPES_ON_DEL:
return
- session = self._cw
+ cnx = self._cw
eidfrom, eidto = self.eidfrom, self.eidto
- rdef = session.rtype_eids_rdef(rtype, eidfrom, eidto)
- if (rdef.subject, rtype, rdef.object) in session.transaction_data.get('pendingrdefs', ()):
+ rdef = cnx.rtype_eids_rdef(rtype, eidfrom, eidto)
+ if (rdef.subject, rtype, rdef.object) in cnx.transaction_data.get('pendingrdefs', ()):
return
card = rdef.cardinality
- if card[0] in '1+' and not session.deleted_in_transaction(eidfrom):
- _CheckSRelationOp.get_instance(session).add_data((eidfrom, rtype))
- if card[1] in '1+' and not session.deleted_in_transaction(eidto):
- _CheckORelationOp.get_instance(session).add_data((eidto, rtype))
+ if card[0] in '1+' and not cnx.deleted_in_transaction(eidfrom):
+ _CheckSRelationOp.get_instance(cnx).add_data((eidfrom, rtype))
+ if card[1] in '1+' and not cnx.deleted_in_transaction(eidto):
+ _CheckORelationOp.get_instance(cnx).add_data((eidto, rtype))
class CheckCardinalityHookAfterAddEntity(IntegrityHook):
@@ -179,14 +179,14 @@
""" check a new relation satisfy its constraints """
containercls = list
def precommit_event(self):
- session = self.session
+ cnx = self.cnx
for values in self.get_data():
eidfrom, rtype, eidto, constraints = values
# first check related entities have not been deleted in the same
# transaction
- if session.deleted_in_transaction(eidfrom):
+ if cnx.deleted_in_transaction(eidfrom):
continue
- if session.deleted_in_transaction(eidto):
+ if cnx.deleted_in_transaction(eidto):
continue
for constraint in constraints:
# XXX
@@ -194,9 +194,9 @@
# * use a constraint id to use per constraint lock and avoid
# unnecessary commit serialization ?
if isinstance(constraint, RQLUniqueConstraint):
- _acquire_unique_cstr_lock(session)
+ _acquire_unique_cstr_lock(cnx)
try:
- constraint.repo_check(session, eidfrom, rtype, eidto)
+ constraint.repo_check(cnx, eidfrom, rtype, eidto)
except NotImplementedError:
self.critical('can\'t check constraint %s, not supported',
constraint)
@@ -309,69 +309,27 @@
self.entity.cw_edited['login'] = login.strip()
-# 'active' integrity hooks: you usually don't want to deactivate them, they are
-# not really integrity check, they maintain consistency on changes
-
-class _DelayedDeleteOp(hook.DataOperationMixIn, hook.Operation):
- """delete the object of composite relation except if the relation has
- actually been redirected to another composite
- """
- base_rql = None
-
- def precommit_event(self):
- session = self.session
- pendingeids = session.transaction_data.get('pendingeids', ())
- eids_by_etype_rtype = {}
- for eid, rtype in self.get_data():
- # don't do anything if the entity is being deleted
- if eid not in pendingeids:
- etype = session.describe(eid)[0]
- key = (etype, rtype)
- if key not in eids_by_etype_rtype:
- eids_by_etype_rtype[key] = [str(eid)]
- else:
- eids_by_etype_rtype[key].append(str(eid))
- for (etype, rtype), eids in eids_by_etype_rtype.iteritems():
- # quite unexpectedly, not deleting too many entities at a time in
- # this operation benefits to the exec speed (possibly on the RQL
- # parsing side)
- start = 0
- incr = 500
- while start < len(eids):
- session.execute(self.base_rql % (etype, ','.join(eids[start:start+incr]), rtype))
- start += incr
-
-class _DelayedDeleteSEntityOp(_DelayedDeleteOp):
- """delete orphan subject entity of a composite relation"""
- base_rql = 'DELETE %s X WHERE X eid IN (%s), NOT X %s Y'
-
-class _DelayedDeleteOEntityOp(_DelayedDeleteOp):
- """check required object relation"""
- base_rql = 'DELETE %s X WHERE X eid IN (%s), NOT Y %s X'
-
-
class DeleteCompositeOrphanHook(hook.Hook):
- """delete the composed of a composite relation when this relation is deleted
+ """Delete the composed of a composite relation when the composite is
+ deleted (this is similar to the cascading ON DELETE CASCADE
+ semantics of sql).
"""
__regid__ = 'deletecomposite'
- events = ('before_delete_relation',)
+ __select__ = hook.Hook.__select__ & composite_etype()
+ events = ('before_delete_entity',)
category = 'activeintegrity'
+ # give the application's before_delete_entity hooks a chance to run before we cascade
+ order = 99
def __call__(self):
- # if the relation is being delete, don't delete composite's components
- # automatically
- session = self._cw
- rtype = self.rtype
- rdef = session.rtype_eids_rdef(rtype, self.eidfrom, self.eidto)
- if (rdef.subject, rtype, rdef.object) in session.transaction_data.get('pendingrdefs', ()):
- return
- composite = rdef.composite
- if composite == 'subject':
- _DelayedDeleteOEntityOp.get_instance(self._cw).add_data(
- (self.eidto, rtype))
- elif composite == 'object':
- _DelayedDeleteSEntityOp.get_instance(self._cw).add_data(
- (self.eidfrom, rtype))
+ eid = self.entity.eid
+ for rdef, role in self.entity.e_schema.composite_rdef_roles:
+ rtype = rdef.rtype.type
+ target = getattr(rdef, neg_role(role))
+ expr = ('C %s X' % rtype) if role == 'subject' else ('X %s C' % rtype)
+ self._cw.execute('DELETE %s X WHERE C eid %%(c)s, %s' % (target, expr),
+ {'c': eid})
+
def registration_callback(vreg):
vreg.register_all(globals().values(), __name__)
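
# Editor's sketch: the rewritten hook derives the cascade from the schema's
# composite relation definitions instead of reacting to relation deletions.
# A hedged schema example of what now triggers it ('Folder' and 'File' are
# illustrative entity types, not part of the patch):
from yams.buildobjs import EntityType, String, SubjectRelation

class Folder(EntityType):
    name = String()
    # composite='subject': the Folder is the composite, so deleting it now
    # deletes its files too, akin to SQL's ON DELETE CASCADE
    content = SubjectRelation('File', composite='subject')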
--- a/hooks/metadata.py Tue Jun 10 09:35:26 2014 +0200
+++ b/hooks/metadata.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -69,14 +69,14 @@
class SetCreatorOp(hook.DataOperationMixIn, hook.Operation):
def precommit_event(self):
- session = self.session
- relations = [(eid, session.user.eid) for eid in self.get_data()
+ cnx = self.cnx
+ relations = [(eid, cnx.user.eid) for eid in self.get_data()
# don't consider entities that have been created and deleted in
# the same transaction, nor ones where created_by has been
# explicitly set
- if not session.deleted_in_transaction(eid) and \
- not session.entity_from_eid(eid).created_by]
- session.add_relations([('created_by', relations)])
+ if not cnx.deleted_in_transaction(eid) and \
+ not cnx.entity_from_eid(eid).created_by]
+ cnx.add_relations([('created_by', relations)])
class SetOwnershipHook(MetaDataHook):
@@ -93,7 +93,7 @@
class SyncOwnersOp(hook.DataOperationMixIn, hook.Operation):
def precommit_event(self):
for compositeeid, composedeid in self.get_data():
- self.session.execute('SET X owned_by U WHERE C owned_by U, C eid %(c)s,'
+ self.cnx.execute('SET X owned_by U WHERE C owned_by U, C eid %(c)s,'
'NOT EXISTS(X owned_by U, X eid %(x)s)',
{'c': compositeeid, 'x': composedeid})
@@ -136,14 +136,14 @@
def __call__(self):
rtype = self.rtype
- session = self._cw
- ftcontainer = session.vreg.schema.rschema(rtype).fulltext_container
+ cnx = self._cw
+ ftcontainer = cnx.vreg.schema.rschema(rtype).fulltext_container
if ftcontainer == 'subject':
- session.repo.system_source.index_entity(
- session, session.entity_from_eid(self.eidfrom))
+ cnx.repo.system_source.index_entity(
+ cnx, cnx.entity_from_eid(self.eidfrom))
elif ftcontainer == 'object':
- session.repo.system_source.index_entity(
- session, session.entity_from_eid(self.eidto))
+ cnx.repo.system_source.index_entity(
+ cnx, cnx.entity_from_eid(self.eidto))
@@ -154,16 +154,13 @@
def postcommit_event(self):
self.oldsource.reset_caches()
- repo = self.session.repo
+ repo = self.cnx.repo
entity = self.entity
extid = entity.cw_metainformation()['extid']
repo._type_source_cache[entity.eid] = (
- entity.cw_etype, self.newsource.uri, None, self.newsource.uri)
- if self.oldsource.copy_based_source:
- uri = 'system'
- else:
- uri = self.oldsource.uri
- repo._extid_cache[(extid, uri)] = -entity.eid
+ entity.cw_etype, None, self.newsource.uri)
+ repo._extid_cache[extid] = -entity.eid
+
class ChangeEntitySourceDeleteHook(MetaDataHook):
"""support for moving an entity from an external source by watching 'Any
@@ -197,16 +194,6 @@
syssource = newsource.repo_source
oldsource = self._cw.entity_from_eid(schange[self.eidfrom])
entity = self._cw.entity_from_eid(self.eidfrom)
- # copy entity if necessary
- if not oldsource.repo_source.copy_based_source:
- entity.complete(skip_bytes=False, skip_pwd=False)
- if not entity.creation_date:
- entity.cw_attr_cache['creation_date'] = datetime.now()
- if not entity.modification_date:
- entity.cw_attr_cache['modification_date'] = datetime.now()
- entity.cw_attr_cache['cwuri'] = u'%s%s' % (self._cw.base_url(), entity.eid)
- entity.cw_edited = EditedEntity(entity, **entity.cw_attr_cache)
- syssource.add_entity(self._cw, entity)
# we don't want the moved entity to be reimported later. To
# distinguish this state, the trick is to change the associated
# record in the 'entities' system table with eid=-eid while leaving
@@ -217,8 +204,7 @@
self._cw.system_sql('UPDATE entities SET eid=-eid WHERE eid=%(eid)s',
{'eid': self.eidfrom})
attrs = {'type': entity.cw_etype, 'eid': entity.eid, 'extid': None,
- 'source': 'system', 'asource': 'system',
- 'mtime': datetime.now()}
+ 'asource': 'system'}
self._cw.system_sql(syssource.sqlgen.insert('entities', attrs), attrs)
# register an operation to update repository/sources caches
ChangeEntitySourceUpdateCaches(self._cw, entity=entity,
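
# Editor's sketch of the cache-shape change visible above and in syncschema.py
# below: the physical source uri is dropped, so _type_source_cache entries
# shrink from (type, source, extid, asource) to (type, extid, asource), and
# _extid_cache is keyed by extid alone instead of (extid, source uri).
# Values are illustrative:
repo._type_source_cache[eid] = ('CWUser', None, 'system')  # new 3-tuple
repo._extid_cache[extid] = -eid                            # key: extid only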
--- a/hooks/notification.py Tue Jun 10 09:35:26 2014 +0200
+++ b/hooks/notification.py Tue Jun 10 09:49:45 2014 +0200
@@ -29,11 +29,11 @@
@deprecated('[3.17] use notify_on_commit instead')
-def RenderAndSendNotificationView(session, view, viewargs=None):
- notify_on_commit(session, view, viewargs)
+def RenderAndSendNotificationView(cnx, view, viewargs=None):
+ notify_on_commit(cnx, view, viewargs)
-def notify_on_commit(session, view, viewargs=None):
+def notify_on_commit(cnx, view, viewargs=None):
"""register a notification view (see
:class:`~cubicweb.sobjects.notification.NotificationView`) to be sent at
post-commit time, ie only if the transaction has succeeded.
@@ -43,7 +43,7 @@
"""
if viewargs is None:
viewargs = {}
- notif_op = _RenderAndSendNotificationOp.get_instance(session)
+ notif_op = _RenderAndSendNotificationOp.get_instance(cnx)
notif_op.add_data((view, viewargs))
@@ -58,7 +58,7 @@
containercls = list
def postcommit_event(self):
- deleted = self.session.deleted_in_transaction
+ deleted = self.cnx.deleted_in_transaction
for view, viewargs in self.get_data():
if view.cw_rset is not None:
if not view.cw_rset:
@@ -153,13 +153,13 @@
def precommit_event(self):
# precommit event that creates postcommit operation
- session = self.session
- for eid in session.transaction_data['changes']:
- view = session.vreg['views'].select('notif_entity_updated', session,
- rset=session.eid_rset(eid),
- row=0)
- notify_on_commit(self.session, view,
- viewargs={'changes': session.transaction_data['changes'][eid]})
+ cnx = self.cnx
+ for eid in cnx.transaction_data['changes']:
+ view = cnx.vreg['views'].select('notif_entity_updated', cnx,
+ rset=cnx.eid_rset(eid),
+ row=0)
+ notify_on_commit(self.cnx, view,
+ viewargs={'changes': cnx.transaction_data['changes'][eid]})
class EntityUpdateHook(NotificationHook):
@@ -170,15 +170,15 @@
skip_attrs = set()
def __call__(self):
- session = self._cw
- if session.added_in_transaction(self.entity.eid):
+ cnx = self._cw
+ if cnx.added_in_transaction(self.entity.eid):
return # entity is being created
# then compute changes
attrs = [k for k in self.entity.cw_edited
if not k in self.skip_attrs]
if not attrs:
return
- changes = session.transaction_data.setdefault('changes', {})
+ changes = cnx.transaction_data.setdefault('changes', {})
thisentitychanges = changes.setdefault(self.entity.eid, set())
rqlsel, rqlrestr = [], ['X eid %(x)s']
for i, attr in enumerate(attrs):
@@ -186,14 +186,14 @@
rqlsel.append(var)
rqlrestr.append('X %s %s' % (attr, var))
rql = 'Any %s WHERE %s' % (','.join(rqlsel), ','.join(rqlrestr))
- rset = session.execute(rql, {'x': self.entity.eid})
+ rset = cnx.execute(rql, {'x': self.entity.eid})
for i, attr in enumerate(attrs):
oldvalue = rset[0][i]
newvalue = self.entity.cw_edited[attr]
if oldvalue != newvalue:
thisentitychanges.add((attr, oldvalue, newvalue))
if thisentitychanges:
- EntityUpdatedNotificationOp(session)
+ EntityUpdatedNotificationOp(cnx)
# supervising ##################################################################
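
# Editor's sketch of notify_on_commit with its renamed first argument; the
# hook below is illustrative (the 'notif_entity_updated' view id is the one
# used by EntityUpdatedNotificationOp above):
from cubicweb.server import hook
from cubicweb.hooks.notification import notify_on_commit

class NotifyDemoHook(hook.Hook):
    __regid__ = 'demo.notify_update'
    events = ('after_update_entity',)

    def __call__(self):
        cnx = self._cw
        view = cnx.vreg['views'].select('notif_entity_updated', cnx,
                                        rset=cnx.eid_rset(self.entity.eid),
                                        row=0)
        # rendered and sent only if the transaction actually commits
        notify_on_commit(cnx, view)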
--- a/hooks/security.py Tue Jun 10 09:35:26 2014 +0200
+++ b/hooks/security.py Tue Jun 10 09:49:45 2014 +0200
@@ -16,7 +16,7 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""Security hooks: check permissions to add/delete/update entities according to
-the user connected to a session
+the connected user
"""
__docformat__ = "restructuredtext en"
@@ -31,7 +31,7 @@
-def check_entity_attributes(session, entity, action, editedattrs=None):
+def check_entity_attributes(cnx, entity, action, editedattrs=None):
eid = entity.eid
eschema = entity.e_schema
# ._cw_skip_security_attributes is there to bypass security for attributes
@@ -63,25 +63,25 @@
# That means an immutable attribute; as an optimization, avoid
# going through check_perm.
raise Unauthorized(action, str(rdef))
- rdef.check_perm(session, action, eid=eid)
+ rdef.check_perm(cnx, action, eid=eid)
class CheckEntityPermissionOp(hook.DataOperationMixIn, hook.LateOperation):
def precommit_event(self):
- session = self.session
+ cnx = self.cnx
for eid, action, edited in self.get_data():
- entity = session.entity_from_eid(eid)
+ entity = cnx.entity_from_eid(eid)
entity.cw_check_perm(action)
- check_entity_attributes(session, entity, action, edited)
+ check_entity_attributes(cnx, entity, action, edited)
class CheckRelationPermissionOp(hook.DataOperationMixIn, hook.LateOperation):
def precommit_event(self):
- session = self.session
+ cnx = self.cnx
for action, rschema, eidfrom, eidto in self.get_data():
- rdef = rschema.rdef(session.describe(eidfrom)[0],
- session.describe(eidto)[0])
- rdef.check_perm(session, action, fromeid=eidfrom, toeid=eidto)
+ rdef = rschema.rdef(cnx.entity_metas(eidfrom)['type'],
+ cnx.entity_metas(eidto)['type'])
+ rdef.check_perm(cnx, action, fromeid=eidfrom, toeid=eidto)
@objectify_predicate
@@ -135,8 +135,8 @@
if (self.eidfrom, self.rtype, self.eidto) in nocheck:
return
rschema = self._cw.repo.schema[self.rtype]
- rdef = rschema.rdef(self._cw.describe(self.eidfrom)[0],
- self._cw.describe(self.eidto)[0])
+ rdef = rschema.rdef(self._cw.entity_metas(self.eidfrom)['type'],
+ self._cw.entity_metas(self.eidto)['type'])
rdef.check_perm(self._cw, 'add', fromeid=self.eidfrom, toeid=self.eidto)
@@ -154,8 +154,8 @@
CheckRelationPermissionOp.get_instance(self._cw).add_data(
('add', rschema, self.eidfrom, self.eidto) )
else:
- rdef = rschema.rdef(self._cw.describe(self.eidfrom)[0],
- self._cw.describe(self.eidto)[0])
+ rdef = rschema.rdef(self._cw.entity_metas(self.eidfrom)['type'],
+ self._cw.entity_metas(self.eidto)['type'])
rdef.check_perm(self._cw, 'add', fromeid=self.eidfrom, toeid=self.eidto)
@@ -168,7 +168,7 @@
if (self.eidfrom, self.rtype, self.eidto) in nocheck:
return
rschema = self._cw.repo.schema[self.rtype]
- rdef = rschema.rdef(self._cw.describe(self.eidfrom)[0],
- self._cw.describe(self.eidto)[0])
+ rdef = rschema.rdef(self._cw.entity_metas(self.eidfrom)['type'],
+ self._cw.entity_metas(self.eidto)['type'])
rdef.check_perm(self._cw, 'delete', fromeid=self.eidfrom, toeid=self.eidto)
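
# Editor's sketch of the accessor change above: describe(eid)[0] becomes
# entity_metas(eid)['type']. Only keys exercised in this changeset are shown;
# 'cnx' and 'eid' are assumed to come from a hook context.
metas = cnx.entity_metas(eid)
etype = metas['type']      # entity type, formerly describe(eid)[0]
source = metas['source']   # source uri, cf. cw_metainformation earlier
extid = metas['extid']     # external id for entities from external sources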
--- a/hooks/syncschema.py Tue Jun 10 09:35:26 2014 +0200
+++ b/hooks/syncschema.py Tue Jun 10 09:49:45 2014 +0200
@@ -44,10 +44,10 @@
('CWUser', 'CWGroup','login', 'upassword', 'name', 'in_group'))
-def get_constraints(session, entity):
+def get_constraints(cnx, entity):
constraints = []
- for cstreid in session.transaction_data.get(entity.eid, ()):
- cstrent = session.entity_from_eid(cstreid)
+ for cstreid in cnx.transaction_data.get(entity.eid, ()):
+ cstrent = cnx.entity_from_eid(cstreid)
cstr = CONSTRAINTS[cstrent.type].deserialize(cstrent.value)
cstr.eid = cstreid
constraints.append(cstr)
@@ -60,32 +60,32 @@
cw.transaction_data['groupmap'] = gmap = ss.group_mapping(cw)
return gmap
-def add_inline_relation_column(session, etype, rtype):
+def add_inline_relation_column(cnx, etype, rtype):
"""add necessary column and index for an inlined relation"""
attrkey = '%s.%s' % (etype, rtype)
- createdattrs = session.transaction_data.setdefault('createdattrs', set())
+ createdattrs = cnx.transaction_data.setdefault('createdattrs', set())
if attrkey in createdattrs:
return
createdattrs.add(attrkey)
table = SQL_PREFIX + etype
column = SQL_PREFIX + rtype
try:
- session.system_sql(str('ALTER TABLE %s ADD %s integer'
+ cnx.system_sql(str('ALTER TABLE %s ADD %s integer'
% (table, column)), rollback_on_failure=False)
- session.info('added column %s to table %s', column, table)
+ cnx.info('added column %s to table %s', column, table)
except Exception:
# silent exception here, if this error has not been raised because the
# column already exists, index creation will fail anyway
- session.exception('error while adding column %s to table %s',
+ cnx.exception('error while adding column %s to table %s',
table, column)
# create the index before the alter table, which may be expected to fail
# during tests (sqlite), while index creation should never fail (the test
# for index existence is done by the dbhelper)
- session.cnxset.source('system').create_index(session, table, column)
- session.info('added index on %s(%s)', table, column)
+ cnx.repo.system_source.create_index(cnx, table, column)
+ cnx.info('added index on %s(%s)', table, column)
-def insert_rdef_on_subclasses(session, eschema, rschema, rdefdef, props):
+def insert_rdef_on_subclasses(cnx, eschema, rschema, rdefdef, props):
# XXX 'infered': True/False, not clear actually
props.update({'constraints': rdefdef.constraints,
'description': rdefdef.description,
@@ -94,19 +94,19 @@
'order': rdefdef.order,
'infered': False, 'eid': None
})
- cstrtypemap = ss.cstrtype_mapping(session)
- groupmap = group_mapping(session)
+ cstrtypemap = ss.cstrtype_mapping(cnx)
+ groupmap = group_mapping(cnx)
object = rschema.schema.eschema(rdefdef.object)
for specialization in eschema.specialized_by(False):
if (specialization, rdefdef.object) in rschema.rdefs:
continue
sperdef = RelationDefinitionSchema(specialization, rschema,
object, None, values=props)
- ss.execschemarql(session.execute, sperdef,
+ ss.execschemarql(cnx.execute, sperdef,
ss.rdef2rql(sperdef, cstrtypemap, groupmap))
-def check_valid_changes(session, entity, ro_attrs=('name', 'final')):
+def check_valid_changes(cnx, entity, ro_attrs=('name', 'final')):
errors = {}
# don't use getattr(entity, attr), we would get the modified value if any
for attr in entity.cw_edited:
@@ -137,22 +137,22 @@
"""actually remove a database from the instance's schema"""
table = None # make pylint happy
def precommit_event(self):
- dropped = self.session.transaction_data.setdefault('droppedtables',
+ dropped = self.cnx.transaction_data.setdefault('droppedtables',
set())
if self.table in dropped:
return # already processed
dropped.add(self.table)
- self.session.system_sql('DROP TABLE %s' % self.table)
+ self.cnx.system_sql('DROP TABLE %s' % self.table)
self.info('dropped table %s', self.table)
# XXX revertprecommit_event
class DropRelationTable(DropTable):
- def __init__(self, session, rtype):
+ def __init__(self, cnx, rtype):
super(DropRelationTable, self).__init__(
- session, table='%s_relation' % rtype)
- session.transaction_data.setdefault('pendingrtypes', set()).add(rtype)
+ cnx, table='%s_relation' % rtype)
+ cnx.transaction_data.setdefault('pendingrtypes', set()).add(rtype)
class DropColumn(hook.Operation):
@@ -161,12 +161,12 @@
"""
table = column = None # make pylint happy
def precommit_event(self):
- session, table, column = self.session, self.table, self.column
- source = session.repo.system_source
+ cnx, table, column = self.cnx, self.table, self.column
+ source = cnx.repo.system_source
# drop index if any
- source.drop_index(session, table, column)
+ source.drop_index(cnx, table, column)
if source.dbhelper.alter_column_support:
- session.system_sql('ALTER TABLE %s DROP COLUMN %s'
+ cnx.system_sql('ALTER TABLE %s DROP COLUMN %s'
% (table, column), rollback_on_failure=False)
self.info('dropped column %s from table %s', column, table)
else:
@@ -187,16 +187,16 @@
schema changes.
"""
- def __init__(self, session):
- hook.SingleLastOperation.__init__(self, session)
+ def __init__(self, cnx):
+ hook.SingleLastOperation.__init__(self, cnx)
def precommit_event(self):
- for eschema in self.session.repo.schema.entities():
+ for eschema in self.cnx.repo.schema.entities():
if not eschema.final:
clear_cache(eschema, 'ordered_relations')
def postcommit_event(self):
- repo = self.session.repo
+ repo = self.cnx.repo
# the commit event should not raise errors, while set_schema may well do so
# because it triggers a full vreg reloading
try:
@@ -204,7 +204,7 @@
# trigger vreg reload
repo.set_schema(repo.schema)
# CWUser class might have changed, update current session users
- cwuser_cls = self.session.vreg['etypes'].etype_class('CWUser')
+ cwuser_cls = self.cnx.vreg['etypes'].etype_class('CWUser')
for session in repo._sessions.itervalues():
session.user.__class__ = cwuser_cls
except Exception:
@@ -216,10 +216,10 @@
class MemSchemaOperation(hook.Operation):
"""base class for schema operations"""
- def __init__(self, session, **kwargs):
- hook.Operation.__init__(self, session, **kwargs)
+ def __init__(self, cnx, **kwargs):
+ hook.Operation.__init__(self, cnx, **kwargs)
# every schema operation is triggering a schema update
- MemSchemaNotifyChanges(session)
+ MemSchemaNotifyChanges(cnx)
# operations for high-level source database alteration ########################
@@ -235,21 +235,21 @@
entity = None # make pylint happy
def precommit_event(self):
- session = self.session
+ cnx = self.cnx
entity = self.entity
- schema = session.vreg.schema
+ schema = cnx.vreg.schema
etype = ybo.EntityType(eid=entity.eid, name=entity.name,
description=entity.description)
eschema = schema.add_entity_type(etype)
# create the necessary table
- tablesql = y2sql.eschema2sql(session.cnxset.source('system').dbhelper,
+ tablesql = y2sql.eschema2sql(cnx.repo.system_source.dbhelper,
eschema, prefix=SQL_PREFIX)
for sql in tablesql.split(';'):
if sql.strip():
- session.system_sql(sql)
+ cnx.system_sql(sql)
# add meta relations
- gmap = group_mapping(session)
- cmap = ss.cstrtype_mapping(session)
+ gmap = group_mapping(cnx)
+ cmap = ss.cstrtype_mapping(cnx)
for rtype in (META_RTYPES - VIRTUAL_RTYPES):
try:
rschema = schema[rtype]
@@ -268,13 +268,13 @@
continue
rdef.subject = _MockEntity(eid=entity.eid)
mock = _MockEntity(eid=None)
- ss.execschemarql(session.execute, mock, ss.rdef2rql(rdef, cmap, gmap))
+ ss.execschemarql(cnx.execute, mock, ss.rdef2rql(rdef, cmap, gmap))
def revertprecommit_event(self):
# revert changes on in memory schema
- self.session.vreg.schema.del_entity_type(self.entity.name)
+ self.cnx.vreg.schema.del_entity_type(self.entity.name)
# revert changes on database
- self.session.system_sql('DROP TABLE %s%s' % (SQL_PREFIX, self.entity.name))
+ self.cnx.system_sql('DROP TABLE %s%s' % (SQL_PREFIX, self.entity.name))
class CWETypeRenameOp(MemSchemaOperation):
@@ -282,21 +282,19 @@
oldname = newname = None # make pylint happy
def rename(self, oldname, newname):
- self.session.vreg.schema.rename_entity_type(oldname, newname)
+ self.cnx.vreg.schema.rename_entity_type(oldname, newname)
# we need sql to operate physical changes on the system database
- sqlexec = self.session.system_sql
- dbhelper= self.session.cnxset.source('system').dbhelper
+ sqlexec = self.cnx.system_sql
+ dbhelper = self.cnx.repo.system_source.dbhelper
sql = dbhelper.sql_rename_table(SQL_PREFIX+oldname,
SQL_PREFIX+newname)
sqlexec(sql)
self.info('renamed table %s to %s', oldname, newname)
sqlexec('UPDATE entities SET type=%(newname)s WHERE type=%(oldname)s',
{'newname': newname, 'oldname': oldname})
- for eid, (etype, uri, extid, auri) in self.session.repo._type_source_cache.items():
+ for eid, (etype, extid, auri) in self.cnx.repo._type_source_cache.items():
if etype == oldname:
- self.session.repo._type_source_cache[eid] = (newname, uri, extid, auri)
- sqlexec('UPDATE deleted_entities SET type=%(newname)s WHERE type=%(oldname)s',
- {'newname': newname, 'oldname': oldname})
+ self.cnx.repo._type_source_cache[eid] = (newname, extid, auri)
# XXX transaction records
def precommit_event(self):
@@ -315,9 +313,9 @@
rschema = self.rschema
if rschema.final:
return # watched changes to final relation type are unexpected
- session = self.session
+ cnx = self.cnx
if 'fulltext_container' in self.values:
- op = UpdateFTIndexOp.get_instance(session)
+ op = UpdateFTIndexOp.get_instance(cnx)
for subjtype, objtype in rschema.rdefs:
op.add_data(subjtype)
op.add_data(objtype)
@@ -332,19 +330,19 @@
if inlined:
self.entity.check_inlined_allowed()
# inlined changed, make necessary physical changes!
- sqlexec = self.session.system_sql
+ sqlexec = self.cnx.system_sql
rtype = rschema.type
eidcolumn = SQL_PREFIX + 'eid'
if not inlined:
# need to create the relation if it has not already been done by
# another event of the same transaction
- if not rschema.type in session.transaction_data.get('createdtables', ()):
+ if not rschema.type in cnx.transaction_data.get('createdtables', ()):
tablesql = y2sql.rschema2sql(rschema)
# create the necessary table
for sql in tablesql.split(';'):
if sql.strip():
sqlexec(sql)
- session.transaction_data.setdefault('createdtables', []).append(
+ cnx.transaction_data.setdefault('createdtables', []).append(
rschema.type)
# copy existing data
column = SQL_PREFIX + rtype
@@ -353,14 +351,14 @@
sqlexec('INSERT INTO %s_relation SELECT %s, %s FROM %s WHERE NOT %s IS NULL'
% (rtype, eidcolumn, column, table, column))
# drop existing columns
- #if session.repo.system_source.dbhelper.alter_column_support:
+ #if cnx.repo.system_source.dbhelper.alter_column_support:
for etype in rschema.subjects():
- DropColumn(session, table=SQL_PREFIX + str(etype),
+ DropColumn(cnx, table=SQL_PREFIX + str(etype),
column=SQL_PREFIX + rtype)
else:
for etype in rschema.subjects():
try:
- add_inline_relation_column(session, str(etype), rtype)
+ add_inline_relation_column(cnx, str(etype), rtype)
except Exception as ex:
# the column probably already exists. this occurs when the
# entity's type has just been added or if the column has not
@@ -382,7 +380,7 @@
cursor.executemany('UPDATE %s SET %s=%%(val)s WHERE %s=%%(x)s'
% (table, column, eidcolumn), args)
# drop existing table
- DropRelationTable(session, rtype)
+ DropRelationTable(cnx, rtype)
def revertprecommit_event(self):
# revert changes on in memory schema
@@ -407,10 +405,10 @@
rdefdef = self.rdefdef = ybo.RelationDefinition(
str(fromentity.name), entity.rtype.name, str(entity.otype.name),
description=entity.description, cardinality=entity.cardinality,
- constraints=get_constraints(self.session, entity),
+ constraints=get_constraints(self.cnx, entity),
order=entity.ordernum, eid=entity.eid, **kwargs)
- self.session.vreg.schema.add_relation_def(rdefdef)
- self.session.execute('SET X ordernum Y+1 '
+ self.cnx.vreg.schema.add_relation_def(rdefdef)
+ self.cnx.execute('SET X ordernum Y+1 '
'WHERE X from_entity SE, SE eid %(se)s, X ordernum Y, '
'X ordernum >= %(order)s, NOT X eid %(x)s',
{'x': entity.eid, 'se': fromentity.eid,
@@ -418,7 +416,7 @@
return rdefdef
def precommit_event(self):
- session = self.session
+ cnx = self.cnx
entity = self.entity
# entity.defaultval is a Binary or None, but we need a correctly typed
# value
@@ -432,7 +430,7 @@
# update the in-memory schema first
rdefdef = self.init_rdef(**props)
# then make necessary changes to the system source database
- syssource = session.cnxset.source('system')
+ syssource = cnx.repo.system_source
attrtype = y2sql.type_from_constraints(
syssource.dbhelper, rdefdef.object, rdefdef.constraints)
# XXX should be moved somehow into lgdb: sqlite doesn't support to
@@ -448,7 +446,7 @@
table = SQL_PREFIX + rdefdef.subject
column = SQL_PREFIX + rdefdef.name
try:
- session.system_sql(str('ALTER TABLE %s ADD %s %s'
+ cnx.system_sql(str('ALTER TABLE %s ADD %s %s'
% (table, column, attrtype)),
rollback_on_failure=False)
self.info('added column %s to table %s', table, column)
@@ -459,13 +457,13 @@
self.error('error while altering table %s: %s', table, ex)
if extra_unique_index or entity.indexed:
try:
- syssource.create_index(session, table, column,
+ syssource.create_index(cnx, table, column,
unique=extra_unique_index)
except Exception as ex:
self.error('error while creating index for %s.%s: %s',
table, column, ex)
        # final relations are not inferred, propagate
- schema = session.vreg.schema
+ schema = cnx.vreg.schema
try:
eschema = schema.eschema(rdefdef.subject)
except KeyError:
@@ -475,18 +473,18 @@
# if relation type has been inserted in the same transaction, its final
# attribute is still set to False, so we've to ensure it's False
rschema.final = True
- insert_rdef_on_subclasses(session, eschema, rschema, rdefdef, props)
+ insert_rdef_on_subclasses(cnx, eschema, rschema, rdefdef, props)
# update existing entities with the default value of newly added attribute
if default is not None:
default = convert_default_value(self.rdefdef, default)
- session.system_sql('UPDATE %s SET %s=%%(default)s' % (table, column),
+ cnx.system_sql('UPDATE %s SET %s=%%(default)s' % (table, column),
{'default': default})
def revertprecommit_event(self):
# revert changes on in memory schema
if getattr(self, 'rdefdef', None) is None:
return
- self.session.vreg.schema.del_relation_def(
+ self.cnx.vreg.schema.del_relation_def(
self.rdefdef.subject, self.rdefdef.name, self.rdefdef.object)
# XXX revert changes on database
@@ -505,12 +503,12 @@
entity = None # make pylint happy
def precommit_event(self):
- session = self.session
+ cnx = self.cnx
entity = self.entity
# update the in-memory schema first
rdefdef = self.init_rdef(composite=entity.composite)
# then make necessary changes to the system source database
- schema = session.vreg.schema
+ schema = cnx.vreg.schema
rtype = rdefdef.name
rschema = schema.rschema(rtype)
        # this has to be done before setting permissions
@@ -518,9 +516,9 @@
# need to add a column if the relation is inlined and if this is the
        # first occurrence of "Subject relation Something" whatever Something
if len(rschema.objects(rdefdef.subject)) == 1:
- add_inline_relation_column(session, rdefdef.subject, rtype)
+ add_inline_relation_column(cnx, rdefdef.subject, rtype)
eschema = schema[rdefdef.subject]
- insert_rdef_on_subclasses(session, eschema, rschema, rdefdef,
+ insert_rdef_on_subclasses(cnx, eschema, rschema, rdefdef,
{'composite': entity.composite})
else:
if rschema.symmetric:
@@ -533,13 +531,13 @@
        # schema and if it has not been added during another event of the same
# transaction
if not (relation_already_defined or
- rtype in session.transaction_data.get('createdtables', ())):
+ rtype in cnx.transaction_data.get('createdtables', ())):
rschema = schema.rschema(rtype)
# create the necessary table
for sql in y2sql.rschema2sql(rschema).split(';'):
if sql.strip():
- session.system_sql(sql)
- session.transaction_data.setdefault('createdtables', []).append(
+ cnx.system_sql(sql)
+ cnx.transaction_data.setdefault('createdtables', []).append(
rtype)
# XXX revertprecommit_event
@@ -550,12 +548,12 @@
rdef = None # make pylint happy
def precommit_event(self):
- session = self.session
+ cnx = self.cnx
rdef = self.rdef
rschema = rdef.rtype
# make necessary changes to the system source database first
rdeftype = rschema.final and 'CWAttribute' or 'CWRelation'
- execute = session.execute
+ execute = cnx.execute
rset = execute('Any COUNT(X) WHERE X is %s, X relation_type R,'
'R eid %%(x)s' % rdeftype, {'x': rschema.eid})
lastrel = rset[0][0] == 0
@@ -567,19 +565,19 @@
'R eid %%(r)s, X from_entity E, E eid %%(e)s'
% rdeftype,
{'r': rschema.eid, 'e': rdef.subject.eid})
- if rset[0][0] == 0 and not session.deleted_in_transaction(rdef.subject.eid):
- ptypes = session.transaction_data.setdefault('pendingrtypes', set())
+ if rset[0][0] == 0 and not cnx.deleted_in_transaction(rdef.subject.eid):
+ ptypes = cnx.transaction_data.setdefault('pendingrtypes', set())
ptypes.add(rschema.type)
- DropColumn(session, table=SQL_PREFIX + str(rdef.subject),
+ DropColumn(cnx, table=SQL_PREFIX + str(rdef.subject),
column=SQL_PREFIX + str(rschema))
elif lastrel:
- DropRelationTable(session, str(rschema))
+ DropRelationTable(cnx, str(rschema))
# then update the in-memory schema
if rdef.subject not in ETYPE_NAME_MAP and rdef.object not in ETYPE_NAME_MAP:
rschema.del_relation_def(rdef.subject, rdef.object)
# if this is the last relation definition of this type, drop associated
# relation type
- if lastrel and not session.deleted_in_transaction(rschema.eid):
+ if lastrel and not cnx.deleted_in_transaction(rschema.eid):
execute('DELETE CWRType X WHERE X eid %(x)s', {'x': rschema.eid})
def revertprecommit_event(self):
@@ -590,7 +588,7 @@
rdef = self.rdef
rdef.name = str(rdef.rtype)
if rdef.subject not in ETYPE_NAME_MAP and rdef.object not in ETYPE_NAME_MAP:
- self.session.vreg.schema.add_relation_def(rdef)
+ self.cnx.vreg.schema.add_relation_def(rdef)
@@ -601,23 +599,23 @@
indexed_changed = null_allowed_changed = False
def precommit_event(self):
- session = self.session
+ cnx = self.cnx
rdef = self.rdef = self.rschema.rdefs[self.rdefkey]
# update the in-memory schema first
self.oldvalues = dict( (attr, getattr(rdef, attr)) for attr in self.values)
rdef.update(self.values)
# then make necessary changes to the system source database
- syssource = session.cnxset.source('system')
+ syssource = cnx.repo.system_source
if 'indexed' in self.values:
- syssource.update_rdef_indexed(session, rdef)
+ syssource.update_rdef_indexed(cnx, rdef)
self.indexed_changed = True
if 'cardinality' in self.values and (rdef.rtype.final or
rdef.rtype.inlined) \
and self.values['cardinality'][0] != self.oldvalues['cardinality'][0]:
- syssource.update_rdef_null_allowed(self.session, rdef)
+ syssource.update_rdef_null_allowed(self.cnx, rdef)
self.null_allowed_changed = True
if 'fulltextindexed' in self.values:
- UpdateFTIndexOp.get_instance(session).add_data(rdef.subject)
+ UpdateFTIndexOp.get_instance(cnx).add_data(rdef.subject)
def revertprecommit_event(self):
if self.rdef is None:
@@ -625,17 +623,17 @@
# revert changes on in memory schema
self.rdef.update(self.oldvalues)
# revert changes on database
- syssource = self.session.cnxset.source('system')
+ syssource = self.cnx.repo.system_source
if self.indexed_changed:
- syssource.update_rdef_indexed(self.session, self.rdef)
+ syssource.update_rdef_indexed(self.cnx, self.rdef)
if self.null_allowed_changed:
- syssource.update_rdef_null_allowed(self.session, self.rdef)
+ syssource.update_rdef_null_allowed(self.cnx, self.rdef)
def _set_modifiable_constraints(rdef):
# for proper in-place modification of in-memory schema: if rdef.constraints
# is already a list, reuse it (we're updating multiple constraints of the
- # same rdef in the same transactions)
+ # same rdef in the same transaction)
if not isinstance(rdef.constraints, list):
rdef.constraints = list(rdef.constraints)
@@ -646,7 +644,7 @@
size_cstr_changed = unique_changed = False
def precommit_event(self):
- session = self.session
+ cnx = self.cnx
rdef = self.rdef
# in-place modification of in-memory schema first
_set_modifiable_constraints(rdef)
@@ -657,13 +655,13 @@
self.oldcstr, rdef)
# then update database: alter the physical schema on size/unique
# constraint changes
- syssource = session.cnxset.source('system')
+ syssource = cnx.repo.system_source
cstrtype = self.oldcstr.type()
if cstrtype == 'SizeConstraint':
- syssource.update_rdef_column(session, rdef)
+ syssource.update_rdef_column(cnx, rdef)
self.size_cstr_changed = True
elif cstrtype == 'UniqueConstraint':
- syssource.update_rdef_unique(session, rdef)
+ syssource.update_rdef_unique(cnx, rdef)
self.unique_changed = True
def revertprecommit_event(self):
@@ -673,11 +671,11 @@
if self.oldcstr is not None:
self.rdef.constraints.append(self.oldcstr)
# revert changes on database
- syssource = self.session.cnxset.source('system')
+ syssource = self.cnx.repo.system_source
if self.size_cstr_changed:
- syssource.update_rdef_column(self.session, self.rdef)
+ syssource.update_rdef_column(self.cnx, self.rdef)
if self.unique_changed:
- syssource.update_rdef_unique(self.session, self.rdef)
+ syssource.update_rdef_unique(self.cnx, self.rdef)
class CWConstraintAddOp(CWConstraintDelOp):
@@ -685,14 +683,14 @@
entity = None # make pylint happy
def precommit_event(self):
- session = self.session
+ cnx = self.cnx
rdefentity = self.entity.reverse_constrained_by[0]
# when the relation is added in the same transaction, the constraint
# object is created by the operation adding the attribute or relation,
# so there is nothing to do here
- if session.added_in_transaction(rdefentity.eid):
+ if cnx.added_in_transaction(rdefentity.eid):
return
- rdef = self.rdef = session.vreg.schema.schema_by_eid(rdefentity.eid)
+ rdef = self.rdef = cnx.vreg.schema.schema_by_eid(rdefentity.eid)
cstrtype = self.entity.type
oldcstr = self.oldcstr = rdef.constraint_by_type(cstrtype)
newcstr = self.newcstr = CONSTRAINTS[cstrtype].deserialize(self.entity.value)
@@ -704,13 +702,13 @@
rdef.constraints.append(newcstr)
# then update database: alter the physical schema on size/unique
# constraint changes
- syssource = session.cnxset.source('system')
+ syssource = cnx.repo.system_source
if cstrtype == 'SizeConstraint' and (oldcstr is None or
oldcstr.max != newcstr.max):
- syssource.update_rdef_column(session, rdef)
+ syssource.update_rdef_column(cnx, rdef)
self.size_cstr_changed = True
elif cstrtype == 'UniqueConstraint' and oldcstr is None:
- syssource.update_rdef_unique(session, rdef)
+ syssource.update_rdef_unique(cnx, rdef)
self.unique_changed = True
@@ -718,19 +716,19 @@
entity = None # make pylint happy
def precommit_event(self):
- session = self.session
+ cnx = self.cnx
prefix = SQL_PREFIX
entity = self.entity
table = '%s%s' % (prefix, entity.constraint_of[0].name)
cols = ['%s%s' % (prefix, r.name) for r in entity.relations]
- dbhelper = session.cnxset.source('system').dbhelper
+ dbhelper = cnx.repo.system_source.dbhelper
sqls = dbhelper.sqls_create_multicol_unique_index(table, cols, entity.name)
for sql in sqls:
- session.system_sql(sql)
+ cnx.system_sql(sql)
def postcommit_event(self):
entity = self.entity
- eschema = self.session.vreg.schema.schema_by_eid(entity.constraint_of[0].eid)
+ eschema = self.cnx.vreg.schema.schema_by_eid(entity.constraint_of[0].eid)
attrs = [r.name for r in entity.relations]
eschema._unique_together.append(attrs)
@@ -740,17 +738,17 @@
cols = () # for pylint
def precommit_event(self):
- session = self.session
+ cnx = self.cnx
prefix = SQL_PREFIX
table = '%s%s' % (prefix, self.entity.type)
- dbhelper = session.cnxset.source('system').dbhelper
+ dbhelper = cnx.repo.system_source.dbhelper
cols = ['%s%s' % (prefix, c) for c in self.cols]
sqls = dbhelper.sqls_drop_multicol_unique_index(table, cols, self.cstrname)
for sql in sqls:
- session.system_sql(sql)
+ cnx.system_sql(sql)
def postcommit_event(self):
- eschema = self.session.vreg.schema.schema_by_eid(self.entity.eid)
+ eschema = self.cnx.vreg.schema.schema_by_eid(self.entity.eid)
cols = set(self.cols)
unique_together = [ut for ut in eschema._unique_together
if set(ut) != cols]
@@ -765,7 +763,7 @@
def postcommit_event(self):
# del_entity_type also removes entity's relations
- self.session.vreg.schema.del_entity_type(self.etype)
+ self.cnx.vreg.schema.del_entity_type(self.etype)
class MemSchemaCWRTypeAdd(MemSchemaOperation):
@@ -773,10 +771,10 @@
rtypedef = None # make pylint happy
def precommit_event(self):
- self.session.vreg.schema.add_relation_type(self.rtypedef)
+ self.cnx.vreg.schema.add_relation_type(self.rtypedef)
def revertprecommit_event(self):
- self.session.vreg.schema.del_relation_type(self.rtypedef.name)
+ self.cnx.vreg.schema.del_relation_type(self.rtypedef.name)
class MemSchemaCWRTypeDel(MemSchemaOperation):
@@ -785,7 +783,7 @@
def postcommit_event(self):
try:
- self.session.vreg.schema.del_relation_type(self.rtype)
+ self.cnx.vreg.schema.del_relation_type(self.rtype)
except KeyError:
            # s/o entity type has already been deleted
pass
@@ -799,14 +797,14 @@
def precommit_event(self):
"""the observed connections.cnxset has been commited"""
try:
- erschema = self.session.vreg.schema.schema_by_eid(self.eid)
+ erschema = self.cnx.vreg.schema.schema_by_eid(self.eid)
except KeyError:
# duh, schema not found, log error and skip operation
self.warning('no schema for %s', self.eid)
return
perms = list(erschema.action_permissions(self.action))
if self.group_eid is not None:
- perm = self.session.entity_from_eid(self.group_eid).name
+ perm = self.cnx.entity_from_eid(self.group_eid).name
else:
perm = erschema.rql_expression(self.expr)
try:
@@ -828,7 +826,7 @@
def precommit_event(self):
"""the observed connections set has been commited"""
try:
- erschema = self.session.vreg.schema.schema_by_eid(self.eid)
+ erschema = self.cnx.vreg.schema.schema_by_eid(self.eid)
except KeyError:
# duh, schema not found, log error and skip operation
self.warning('no schema for %s', self.eid)
@@ -840,7 +838,7 @@
return
perms = list(erschema.action_permissions(self.action))
if self.group_eid is not None:
- perm = self.session.entity_from_eid(self.group_eid).name
+ perm = self.cnx.entity_from_eid(self.group_eid).name
else:
perm = erschema.rql_expression(self.expr)
try:
@@ -857,8 +855,8 @@
etypeeid = parentetypeeid = None # make pylint happy
def precommit_event(self):
- eschema = self.session.vreg.schema.schema_by_eid(self.etypeeid)
- parenteschema = self.session.vreg.schema.schema_by_eid(self.parentetypeeid)
+ eschema = self.cnx.vreg.schema.schema_by_eid(self.etypeeid)
+ parenteschema = self.cnx.vreg.schema.schema_by_eid(self.parentetypeeid)
eschema._specialized_type = parenteschema.type
parenteschema._specialized_by.append(eschema.type)
@@ -870,8 +868,8 @@
def precommit_event(self):
try:
- eschema = self.session.vreg.schema.schema_by_eid(self.etypeeid)
- parenteschema = self.session.vreg.schema.schema_by_eid(self.parentetypeeid)
+ eschema = self.cnx.vreg.schema.schema_by_eid(self.etypeeid)
+ parenteschema = self.cnx.vreg.schema.schema_by_eid(self.parentetypeeid)
except KeyError:
# etype removed, nothing to do
return
@@ -1030,14 +1028,14 @@
events = ('after_delete_relation',)
def __call__(self):
- session = self._cw
+ cnx = self._cw
try:
- rdef = session.vreg.schema.schema_by_eid(self.eidfrom)
+ rdef = cnx.vreg.schema.schema_by_eid(self.eidfrom)
except KeyError:
self.critical('cant get schema rdef associated to %s', self.eidfrom)
return
subjschema, rschema, objschema = rdef.as_triple()
- pendingrdefs = session.transaction_data.setdefault('pendingrdefs', set())
+ pendingrdefs = cnx.transaction_data.setdefault('pendingrdefs', set())
# first delete existing relation if necessary
if rschema.final:
rdeftype = 'CWAttribute'
@@ -1045,11 +1043,11 @@
else:
rdeftype = 'CWRelation'
pendingrdefs.add((subjschema, rschema, objschema))
- if not (session.deleted_in_transaction(subjschema.eid) or
- session.deleted_in_transaction(objschema.eid)):
- session.execute('DELETE X %s Y WHERE X is %s, Y is %s'
+ if not (cnx.deleted_in_transaction(subjschema.eid) or
+ cnx.deleted_in_transaction(objschema.eid)):
+ cnx.execute('DELETE X %s Y WHERE X is %s, Y is %s'
% (rschema, subjschema, objschema))
- RDefDelOp(session, rdef=rdef)
+ RDefDelOp(cnx, rdef=rdef)
# CWAttribute / CWRelation hooks ###############################################
@@ -1127,20 +1125,21 @@
self._cw.transaction_data.setdefault(self.eidfrom, []).append(self.eidto)
-class BeforeDeleteConstrainedByHook(SyncSchemaHook):
- __regid__ = 'syncdelconstrainedby'
- __select__ = SyncSchemaHook.__select__ & hook.match_rtype('constrained_by')
- events = ('before_delete_relation',)
+class BeforeDeleteCWConstraintHook(SyncSchemaHook):
+ __regid__ = 'syncdelcwconstraint'
+ __select__ = SyncSchemaHook.__select__ & is_instance('CWConstraint')
+ events = ('before_delete_entity',)
def __call__(self):
- if self._cw.deleted_in_transaction(self.eidfrom):
- return
+ entity = self.entity
schema = self._cw.vreg.schema
- rdef = schema.schema_by_eid(self.eidfrom)
try:
- cstr = rdef.constraint_by_eid(self.eidto)
- except ValueError:
- self._cw.critical('constraint no more accessible')
+ # KeyError, e.g. composite chain deletion
+ rdef = schema.schema_by_eid(entity.reverse_constrained_by[0].eid)
+ # IndexError
+ cstr = rdef.constraint_by_eid(entity.eid)
+ except (KeyError, IndexError):
+ self._cw.critical('constraint type no more accessible')
else:
CWConstraintDelOp(self._cw, rdef=rdef, oldcstr=cstr)
@@ -1183,7 +1182,7 @@
def __call__(self):
action = self.rtype.split('_', 1)[0]
- if self._cw.describe(self.eidto)[0] == 'CWGroup':
+ if self._cw.entity_metas(self.eidto)['type'] == 'CWGroup':
MemSchemaPermissionAdd(self._cw, action=action, eid=self.eidfrom,
group_eid=self.eidto)
else: # RQLExpression
@@ -1204,7 +1203,7 @@
if self._cw.deleted_in_transaction(self.eidfrom):
return
action = self.rtype.split('_', 1)[0]
- if self._cw.describe(self.eidto)[0] == 'CWGroup':
+ if self._cw.entity_metas(self.eidto)['type'] == 'CWGroup':
MemSchemaPermissionDel(self._cw, action=action, eid=self.eidfrom,
group_eid=self.eidto)
else: # RQLExpression
@@ -1222,26 +1221,26 @@
"""
def postcommit_event(self):
- session = self.session
- source = session.repo.system_source
- schema = session.repo.vreg.schema
+ cnx = self.cnx
+ source = cnx.repo.system_source
+ schema = cnx.repo.vreg.schema
to_reindex = self.get_data()
self.info('%i etypes need full text indexed reindexation',
len(to_reindex))
for etype in to_reindex:
- rset = session.execute('Any X WHERE X is %s' % etype)
+ rset = cnx.execute('Any X WHERE X is %s' % etype)
self.info('Reindexing full text index for %i entity of type %s',
len(rset), etype)
still_fti = list(schema[etype].indexable_attributes())
for entity in rset.entities():
- source.fti_unindex_entities(session, [entity])
+ source.fti_unindex_entities(cnx, [entity])
for container in entity.cw_adapt_to('IFTIndexable').fti_containers():
if still_fti or container is not entity:
- source.fti_unindex_entities(session, [container])
- source.fti_index_entities(session, [container])
+ source.fti_unindex_entities(cnx, [container])
+ source.fti_index_entities(cnx, [container])
if to_reindex:
# Transaction has already been committed
- session.cnxset.commit()
+ cnx.cnxset.commit()
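
The hunks above in hooks/syncschema.py all follow one mechanical migration:
operations now carry a repository connection (cnx) instead of a session, and
the system source is reached as cnx.repo.system_source rather than through
session.cnxset.source('system'); transaction_data keeps its role as
per-transaction scratch space. A minimal stand-in sketch of that idiom --
the classes below are illustrative only, not the real CubicWeb ones:

    # Illustrative stand-ins only -- not the real CubicWeb classes.
    class Repo(object):
        def __init__(self, system_source):
            self.system_source = system_source

    class Connection(object):
        def __init__(self, repo):
            self.repo = repo
            self.transaction_data = {}  # per-transaction scratch space

    def record_created_table(cnx, rtype):
        # same idiom as the hunks above: remember work already done in this
        # transaction so a later operation does not redo it
        created = cnx.transaction_data.setdefault('createdtables', [])
        if rtype not in created:
            created.append(rtype)

    cnx = Connection(Repo(system_source='<system source>'))
    record_created_table(cnx, 'concerne2')
    record_created_table(cnx, 'concerne2')  # second call is a no-op
    assert cnx.transaction_data['createdtables'] == ['concerne2']
    # old access path: session.cnxset.source('system')
    # new access path: cnx.repo.system_source
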
--- a/hooks/syncsession.py Tue Jun 10 09:35:26 2014 +0200
+++ b/hooks/syncsession.py Tue Jun 10 09:49:45 2014 +0200
@@ -229,7 +229,7 @@
def __call__(self):
session = self._cw
eidfrom = self.eidfrom
- if not session.describe(eidfrom)[0] == 'CWProperty':
+ if not session.entity_metas(eidfrom)['type'] == 'CWProperty':
return
key, value = session.execute('Any K,V WHERE P eid %(x)s,P pkey K,P value V',
{'x': eidfrom})[0]
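
hooks/syncsession.py picks up the same 3.19 API change seen in the permission
hooks above: describe(eid) returned a positional tuple, while entity_metas(eid)
returns a mapping, so call sites name the field they read. A small sketch; the
stand-in below only models the 'type' key these hooks actually use (any
further keys would be an assumption):

    def entity_metas(eid):
        # stand-in for cnx.entity_metas(eid); only 'type' is relied on here
        return {'type': 'CWProperty'}

    # before 3.19: etype = session.describe(eid)[0]
    etype = entity_metas(1234)['type']
    assert etype == 'CWProperty'
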
--- a/hooks/syncsources.py Tue Jun 10 09:35:26 2014 +0200
+++ b/hooks/syncsources.py Tue Jun 10 09:49:45 2014 +0200
@@ -37,7 +37,7 @@
class SourceAddedOp(hook.Operation):
entity = None # make pylint happy
def postcommit_event(self):
- self.session.repo.add_source(self.entity)
+ self.cnx.repo.add_source(self.entity)
class SourceAddedHook(SourceHook):
__regid__ = 'cw.sources.added'
@@ -61,7 +61,7 @@
class SourceRemovedOp(hook.Operation):
uri = None # make pylint happy
def postcommit_event(self):
- self.session.repo.remove_source(self.uri)
+ self.cnx.repo.remove_source(self.uri)
class SourceRemovedHook(SourceHook):
__regid__ = 'cw.sources.removed'
@@ -79,7 +79,7 @@
def precommit_event(self):
self.__processed = []
for source in self.get_data():
- if not self.session.deleted_in_transaction(source.eid):
+ if not self.cnx.deleted_in_transaction(source.eid):
conf = source.repo_source.check_config(source)
self.__processed.append( (source, conf) )
@@ -92,16 +92,13 @@
oldname = newname = None # make pylint happy
def precommit_event(self):
- source = self.session.repo.sources_by_uri[self.oldname]
- if source.copy_based_source:
- sql = 'UPDATE entities SET asource=%(newname)s WHERE asource=%(oldname)s'
- else:
- sql = 'UPDATE entities SET source=%(newname)s, asource=%(newname)s WHERE source=%(oldname)s'
- self.session.system_sql(sql, {'oldname': self.oldname,
+ source = self.cnx.repo.sources_by_uri[self.oldname]
+ sql = 'UPDATE entities SET asource=%(newname)s WHERE asource=%(oldname)s'
+ self.cnx.system_sql(sql, {'oldname': self.oldname,
'newname': self.newname})
def postcommit_event(self):
- repo = self.session.repo
+ repo = self.cnx.repo
# XXX race condition
source = repo.sources_by_uri.pop(self.oldname)
source.uri = self.newname
@@ -109,11 +106,6 @@
repo.sources_by_uri[self.newname] = source
repo._type_source_cache.clear()
clear_cache(repo, 'source_defs')
- if not source.copy_based_source:
- repo._extid_cache.clear()
- repo._clear_planning_caches()
- for cnxset in repo.cnxsets:
- cnxset.source_cnxs[self.oldname] = cnxset.source_cnxs.pop(self.oldname)
class SourceUpdatedHook(SourceHook):
@@ -172,7 +164,7 @@
class SourceMappingChangedOp(hook.DataOperationMixIn, hook.Operation):
def check_or_update(self, checkonly):
- session = self.session
+ cnx = self.cnx
# take care, can't call get_data() twice
try:
data = self.__data
@@ -181,10 +173,10 @@
for schemacfg, source in data:
if source is None:
source = schemacfg.cwsource.repo_source
- if session.added_in_transaction(schemacfg.eid):
- if not session.deleted_in_transaction(schemacfg.eid):
+ if cnx.added_in_transaction(schemacfg.eid):
+ if not cnx.deleted_in_transaction(schemacfg.eid):
source.add_schema_config(schemacfg, checkonly=checkonly)
- elif session.deleted_in_transaction(schemacfg.eid):
+ elif cnx.deleted_in_transaction(schemacfg.eid):
source.del_schema_config(schemacfg, checkonly=checkonly)
else:
source.update_schema_config(schemacfg, checkonly=checkonly)
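
With the multi-source machinery gone in 3.19, SourceRenamedOp above no longer
branches on copy_based_source: every entity row keeps only its asource label,
so a rename boils down to a single UPDATE. A runnable sketch using sqlite3 --
the two-column entities table is a deliberate simplification of the real
system table:

    import sqlite3

    db = sqlite3.connect(':memory:')
    db.execute('CREATE TABLE entities (eid INTEGER, asource TEXT)')
    db.executemany('INSERT INTO entities VALUES (?, ?)',
                   [(1, 'myfeed'), (2, 'system')])
    # the one statement left after the copy_based_source branch was removed
    db.execute('UPDATE entities SET asource=:newname WHERE asource=:oldname',
               {'oldname': 'myfeed', 'newname': 'renamedfeed'})
    row = db.execute('SELECT asource FROM entities WHERE eid=1').fetchone()
    assert row[0] == 'renamedfeed'
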
--- a/hooks/test/data/schema.py Tue Jun 10 09:35:26 2014 +0200
+++ b/hooks/test/data/schema.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -16,10 +16,23 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-from yams.buildobjs import RelationDefinition
+from yams.buildobjs import RelationDefinition, EntityType, String
class friend(RelationDefinition):
subject = ('CWUser', 'CWGroup')
object = ('CWUser', 'CWGroup')
symmetric = True
+class Folder(EntityType):
+ name = String()
+
+class parent(RelationDefinition):
+ subject = 'Folder'
+ object = 'Folder'
+ composite = 'object'
+ cardinality = '?*'
+
+class children(RelationDefinition):
+ subject = 'Folder'
+ object = 'Folder'
+ composite = 'subject'
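
This test schema gives the integrity tests below both composite orientations:
on parent, composite='object' makes the parent folder own its children, while
children is the mirror case with composite='subject'. A toy model of the
cascade rule those tests exercise (plain Python; CubicWeb's actual hook
machinery is what unittest_integrity.py verifies):

    def cascade_on_entity_delete(deleted, relations, composite):
        """Return the components to delete when a composite entity dies.

        relations -- set of (subject, object) pairs of one relation type
        composite='object': the object owns its subjects, so deleting an
        object entity deletes the subjects tied to it; 'subject' mirrors it.
        """
        if composite == 'object':
            return set(s for s, o in relations if o == deleted)
        return set(o for s, o in relations if s == deleted)

    # 'parent' relations as in the tests below: a->root, b->a, c->root
    rels = set([('a', 'root'), ('b', 'a'), ('c', 'root')])
    assert cascade_on_entity_delete('a', rels, composite='object') == set(['b'])

Only deleting the composite entity cascades; merely removing the relation
leaves the former component alive, which is why folder "c" survives in both
tests below.
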
--- a/hooks/test/unittest_integrity.py Tue Jun 10 09:35:26 2014 +0200
+++ b/hooks/test/unittest_integrity.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -61,7 +61,7 @@
self.commit()
self.execute('DELETE Email X')
rset = self.execute('Any X WHERE X is EmailPart')
- self.assertEqual(len(rset), 1)
+ self.assertEqual(len(rset), 0)
self.commit()
rset = self.execute('Any X WHERE X is EmailPart')
self.assertEqual(len(rset), 0)
@@ -93,6 +93,42 @@
self.assertEqual(len(rset), 1)
self.assertEqual(rset.get_entity(0, 0).reverse_parts[0].messageid, '<2345>')
+ def test_composite_object_relation_deletion(self):
+ req = self.request()
+ root = req.create_entity('Folder', name=u'root')
+ a = req.create_entity('Folder', name=u'a', parent=root)
+ b = req.create_entity('Folder', name=u'b', parent=a)
+ c = req.create_entity('Folder', name=u'c', parent=root)
+ self.commit()
+ req = self.request()
+ req.execute('DELETE Folder F WHERE F name "a"')
+ req.execute('DELETE F parent R WHERE R name "root"')
+ self.commit()
+ req = self.request()
+ self.assertEqual([['root'], ['c']],
+ req.execute('Any NF WHERE F is Folder, F name NF').rows)
+ self.assertEqual([],
+ req.execute('Any NF,NP WHERE F parent P, F name NF, P name NP').rows)
+
+ def test_composite_subject_relation_deletion(self):
+ req = self.request()
+ root = req.create_entity('Folder', name=u'root')
+ a = req.create_entity('Folder', name=u'a')
+ b = req.create_entity('Folder', name=u'b')
+ c = req.create_entity('Folder', name=u'c')
+ root.cw_set(children=(a, c))
+ a.cw_set(children=b)
+ self.commit()
+ req = self.request()
+ req.execute('DELETE Folder F WHERE F name "a"')
+ req.execute('DELETE R children F WHERE R name "root"')
+ self.commit()
+ req = self.request()
+ self.assertEqual([['root'], ['c']],
+ req.execute('Any NF WHERE F is Folder, F name NF').rows)
+ self.assertEqual([],
+ req.execute('Any NF,NP WHERE F parent P, F name NF, P name NP').rows)
+
def test_unsatisfied_constraints(self):
releid = self.execute('SET U in_group G WHERE G name "owners", U login "admin"')[0][0]
with self.assertRaises(ValidationError) as cm:
--- a/hooks/test/unittest_syncschema.py Tue Jun 10 09:35:26 2014 +0200
+++ b/hooks/test/unittest_syncschema.py Tue Jun 10 09:49:45 2014 +0200
@@ -30,7 +30,6 @@
del SchemaModificationHooksTC.schema_eids
class SchemaModificationHooksTC(CubicWebTC):
- reset_schema = True
def setUp(self):
super(SchemaModificationHooksTC, self).setUp()
@@ -39,8 +38,8 @@
def index_exists(self, etype, attr, unique=False):
self.session.set_cnxset()
- dbhelper = self.session.cnxset.source('system').dbhelper
- sqlcursor = self.session.cnxset['system']
+ dbhelper = self.repo.system_source.dbhelper
+ sqlcursor = self.session.cnxset.cu
return dbhelper.index_exists(sqlcursor, SQL_PREFIX + etype, SQL_PREFIX + attr, unique=unique)
def _set_perms(self, eid):
@@ -60,8 +59,8 @@
def test_base(self):
schema = self.repo.schema
self.session.set_cnxset()
- dbhelper = self.session.cnxset.source('system').dbhelper
- sqlcursor = self.session.cnxset['system']
+ dbhelper = self.repo.system_source.dbhelper
+ sqlcursor = self.session.cnxset.cu
self.assertFalse(schema.has_entity('Societe2'))
self.assertFalse(schema.has_entity('concerne2'))
        # schema should be updated on insertion (after commit)
@@ -83,12 +82,12 @@
'INSERT CWRelation X: X cardinality "**", X relation_type RT, X from_entity E, X to_entity E '
'WHERE RT name "concerne2", E name "Societe2"')[0][0]
self._set_perms(concerne2_rdef_eid)
- self.assertFalse('name' in schema['Societe2'].subject_relations())
- self.assertFalse('concerne2' in schema['Societe2'].subject_relations())
+ self.assertNotIn('name', schema['Societe2'].subject_relations())
+ self.assertNotIn('concerne2', schema['Societe2'].subject_relations())
self.assertFalse(self.index_exists('Societe2', 'name'))
self.commit()
- self.assertTrue('name' in schema['Societe2'].subject_relations())
- self.assertTrue('concerne2' in schema['Societe2'].subject_relations())
+ self.assertIn('name', schema['Societe2'].subject_relations())
+ self.assertIn('concerne2', schema['Societe2'].subject_relations())
self.assertTrue(self.index_exists('Societe2', 'name'))
# now we should be able to insert and query Societe2
s2eid = self.execute('INSERT Societe2 X: X name "logilab"')[0][0]
@@ -104,8 +103,8 @@
self.commit()
self.execute('DELETE CWRelation X WHERE X eid %(x)s', {'x': concerne2_rdef_eid})
self.commit()
- self.assertTrue('concerne2' in schema['CWUser'].subject_relations())
- self.assertFalse('concerne2' in schema['Societe2'].subject_relations())
+ self.assertIn('concerne2', schema['CWUser'].subject_relations())
+ self.assertNotIn('concerne2', schema['Societe2'].subject_relations())
self.assertFalse(self.execute('Any X WHERE X concerne2 Y'))
# schema should be cleaned on delete (after commit)
self.execute('DELETE CWEType X WHERE X name "Societe2"')
@@ -117,7 +116,7 @@
self.assertFalse(self.index_exists('Societe2', 'name'))
self.assertFalse(schema.has_entity('Societe2'))
self.assertFalse(schema.has_entity('concerne2'))
- self.assertFalse('concerne2' in schema['CWUser'].subject_relations())
+ self.assertNotIn('concerne2', schema['CWUser'].subject_relations())
def test_metartype_with_nordefs(self):
META_RTYPES.add('custom_meta')
@@ -153,9 +152,9 @@
instanceof_etypes = [etype for etype, in self.execute('Any ETN WHERE X eid %s, X is_instance_of ET, ET name ETN' % seid)]
self.assertEqual(sorted(instanceof_etypes), ['BaseTransition', 'Transition'])
snames = [name for name, in self.execute('Any N WHERE S is BaseTransition, S name N')]
- self.assertFalse('subdiv' in snames)
+ self.assertNotIn('subdiv', snames)
snames = [name for name, in self.execute('Any N WHERE S is_instance_of BaseTransition, S name N')]
- self.assertTrue('subdiv' in snames)
+ self.assertIn('subdiv', snames)
def test_perms_synchronization_1(self):
@@ -201,8 +200,8 @@
def test_uninline_relation(self):
self.session.set_cnxset()
- dbhelper = self.session.cnxset.source('system').dbhelper
- sqlcursor = self.session.cnxset['system']
+ dbhelper = self.repo.system_source.dbhelper
+ sqlcursor = self.session.cnxset.cu
self.assertTrue(self.schema['state_of'].inlined)
try:
self.execute('SET X inlined FALSE WHERE X name "state_of"')
@@ -226,8 +225,8 @@
def test_indexed_change(self):
self.session.set_cnxset()
- dbhelper = self.session.cnxset.source('system').dbhelper
- sqlcursor = self.session.cnxset['system']
+ dbhelper = self.repo.system_source.dbhelper
+ sqlcursor = self.session.cnxset.cu
try:
self.execute('SET X indexed FALSE WHERE X relation_type R, R name "name"')
self.assertTrue(self.schema['name'].rdef('Workflow', 'String').indexed)
@@ -245,23 +244,19 @@
def test_unique_change(self):
self.session.set_cnxset()
- dbhelper = self.session.cnxset.source('system').dbhelper
- sqlcursor = self.session.cnxset['system']
+ dbhelper = self.repo.system_source.dbhelper
+ sqlcursor = self.session.cnxset.cu
try:
- self.execute('INSERT CWConstraint X: X cstrtype CT, DEF constrained_by X '
- 'WHERE CT name "UniqueConstraint", DEF relation_type RT, DEF from_entity E,'
- 'RT name "name", E name "Workflow"')
+ eid = self.execute('INSERT CWConstraint X: X cstrtype CT, DEF constrained_by X '
+ 'WHERE CT name "UniqueConstraint", DEF relation_type RT, DEF from_entity E,'
+ 'RT name "name", E name "Workflow"').rows[0][0]
self.assertFalse(self.schema['Workflow'].has_unique_values('name'))
self.assertFalse(self.index_exists('Workflow', 'name', unique=True))
self.commit()
self.assertTrue(self.schema['Workflow'].has_unique_values('name'))
self.assertTrue(self.index_exists('Workflow', 'name', unique=True))
finally:
- self.execute('DELETE DEF constrained_by X WHERE X cstrtype CT, '
- 'CT name "UniqueConstraint", DEF relation_type RT, DEF from_entity E,'
- 'RT name "name", E name "Workflow"')
- self.assertTrue(self.schema['Workflow'].has_unique_values('name'))
- self.assertTrue(self.index_exists('Workflow', 'name', unique=True))
+ self.execute('DELETE CWConstraint C WHERE C eid %(eid)s', {'eid': eid})
self.commit()
self.assertFalse(self.schema['Workflow'].has_unique_values('name'))
self.assertFalse(self.index_exists('Workflow', 'name', unique=True))
@@ -299,8 +294,8 @@
{'x': attreid})
self.commit()
self.schema.rebuild_infered_relations()
- self.assertTrue('Transition' in self.schema['messageid'].subjects())
- self.assertTrue('WorkflowTransition' in self.schema['messageid'].subjects())
+ self.assertIn('Transition', self.schema['messageid'].subjects())
+ self.assertIn('WorkflowTransition', self.schema['messageid'].subjects())
self.execute('Any X WHERE X is_instance_of BaseTransition, X messageid "hop"')
def test_change_fulltextindexed(self):
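
The test_unique_change rewrite above tracks the hook change earlier in this
patch: constraint removal is now observed on before_delete_entity of the
CWConstraint itself, so the test keeps the constraint's eid from the INSERT
and deletes the entity instead of unlinking constrained_by. The two RQL forms
side by side, as plain strings (in a real test the eid mapping is passed to
execute(), not applied with %-formatting):

    # before: drop the relation and rely on the constrained_by relation hook
    old_rql = ('DELETE DEF constrained_by X WHERE X cstrtype CT, '
               'CT name "UniqueConstraint", DEF relation_type RT, '
               'DEF from_entity E, RT name "name", E name "Workflow"')
    # 3.19: delete the CWConstraint entity itself; it is caught by the new
    # BeforeDeleteCWConstraintHook (events = ('before_delete_entity',))
    new_rql = 'DELETE CWConstraint C WHERE C eid %(eid)s'
    assert 'CWConstraint' in new_rql
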
--- a/hooks/workflow.py Tue Jun 10 09:35:26 2014 +0200
+++ b/hooks/workflow.py Tue Jun 10 09:49:45 2014 +0200
@@ -28,17 +28,13 @@
from cubicweb.server import hook
-def _change_state(session, x, oldstate, newstate):
- nocheck = session.transaction_data.setdefault('skip-security', set())
+def _change_state(cnx, x, oldstate, newstate):
+ nocheck = cnx.transaction_data.setdefault('skip-security', set())
nocheck.add((x, 'in_state', oldstate))
nocheck.add((x, 'in_state', newstate))
- # delete previous state first unless in_state isn't stored in the system
- # source
- fromsource = session.describe(x)[1]
- if fromsource == 'system' or \
- not session.repo.sources_by_uri[fromsource].support_relation('in_state'):
- session.delete_relation(x, 'in_state', oldstate)
- session.add_relation(x, 'in_state', newstate)
+ # delete previous state first
+ cnx.delete_relation(x, 'in_state', oldstate)
+ cnx.add_relation(x, 'in_state', newstate)
# operations ###################################################################
@@ -48,17 +44,17 @@
entity = None # make pylint happy
def precommit_event(self):
- session = self.session
+ cnx = self.cnx
entity = self.entity
iworkflowable = entity.cw_adapt_to('IWorkflowable')
# if there is an initial state and the entity's state is not set,
# use the initial state as a default state
- if not (session.deleted_in_transaction(entity.eid) or entity.in_state) \
+ if not (cnx.deleted_in_transaction(entity.eid) or entity.in_state) \
and iworkflowable.current_workflow:
state = iworkflowable.current_workflow.initial
if state:
- session.add_relation(entity.eid, 'in_state', state.eid)
- _FireAutotransitionOp(session, entity=entity)
+ cnx.add_relation(entity.eid, 'in_state', state.eid)
+ _FireAutotransitionOp(cnx, entity=entity)
class _FireAutotransitionOp(hook.Operation):
"""try to fire auto transition after state changes"""
@@ -80,11 +76,11 @@
def precommit_event(self):
        # notice that enforcing that the new workflow applies to the entity's
        # type is done by a schema rule, no need to check it here
- session = self.session
- pendingeids = session.transaction_data.get('pendingeids', ())
+ cnx = self.cnx
+ pendingeids = cnx.transaction_data.get('pendingeids', ())
if self.eid in pendingeids:
return
- entity = session.entity_from_eid(self.eid)
+ entity = cnx.entity_from_eid(self.eid)
iworkflowable = entity.cw_adapt_to('IWorkflowable')
        # check the custom workflow has not been changed to yet another
        # workflow in the same transaction
@@ -100,13 +96,13 @@
        # if there is no history, simply go to the new workflow's initial state
if not iworkflowable.workflow_history:
if iworkflowable.current_state.eid != deststate.eid:
- _change_state(session, entity.eid,
+ _change_state(cnx, entity.eid,
iworkflowable.current_state.eid, deststate.eid)
- _FireAutotransitionOp(session, entity=entity)
+ _FireAutotransitionOp(cnx, entity=entity)
return
- msg = session._('workflow changed to "%s"')
- msg %= session._(mainwf.name)
- session.transaction_data[(entity.eid, 'customwf')] = self.wfeid
+ msg = cnx._('workflow changed to "%s"')
+ msg %= cnx._(mainwf.name)
+ cnx.transaction_data[(entity.eid, 'customwf')] = self.wfeid
iworkflowable.change_state(deststate, msg, u'text/plain')
@@ -114,7 +110,7 @@
treid = None # make pylint happy
def precommit_event(self):
- tr = self.session.entity_from_eid(self.treid)
+ tr = self.cnx.entity_from_eid(self.treid)
outputs = set()
for ep in tr.subworkflow_exit:
if ep.subwf_state.eid in outputs:
@@ -127,7 +123,7 @@
forentity = trinfo = None # make pylint happy
def precommit_event(self):
- session = self.session
+ cnx = self.cnx
forentity = self.forentity
iworkflowable = forentity.cw_adapt_to('IWorkflowable')
trinfo = self.trinfo
@@ -141,8 +137,8 @@
if tostate is not None:
# reached an exit point
msg = _('exiting from subworkflow %s')
- msg %= session._(iworkflowable.current_workflow.name)
- session.transaction_data[(forentity.eid, 'subwfentrytr')] = True
+ msg %= cnx._(iworkflowable.current_workflow.name)
+ cnx.transaction_data[(forentity.eid, 'subwfentrytr')] = True
iworkflowable.change_state(tostate, msg, u'text/plain', tr=wftr)
@@ -177,7 +173,7 @@
events = ('before_add_entity',)
def __call__(self):
- session = self._cw
+ cnx = self._cw
entity = self.entity
        # first retrieve the entity to which the state change applies
try:
@@ -185,15 +181,15 @@
except KeyError:
msg = _('mandatory relation')
raise validation_error(entity, {('wf_info_for', 'subject'): msg})
- forentity = session.entity_from_eid(foreid)
+ forentity = cnx.entity_from_eid(foreid)
# see comment in the TrInfo entity definition
entity.cw_edited['tr_count']=len(forentity.reverse_wf_info_for)
iworkflowable = forentity.cw_adapt_to('IWorkflowable')
# then check it has a workflow set, unless we're in the process of changing
# entity's workflow
- if session.transaction_data.get((forentity.eid, 'customwf')):
- wfeid = session.transaction_data[(forentity.eid, 'customwf')]
- wf = session.entity_from_eid(wfeid)
+ if cnx.transaction_data.get((forentity.eid, 'customwf')):
+ wfeid = cnx.transaction_data[(forentity.eid, 'customwf')]
+ wf = cnx.entity_from_eid(wfeid)
else:
wf = iworkflowable.current_workflow
if wf is None:
@@ -205,9 +201,9 @@
msg = _('related entity has no state')
raise validation_error(entity, {None: msg})
# True if we are coming back from subworkflow
- swtr = session.transaction_data.pop((forentity.eid, 'subwfentrytr'), None)
- cowpowers = (session.user.is_in_group('managers')
- or not session.write_security)
+ swtr = cnx.transaction_data.pop((forentity.eid, 'subwfentrytr'), None)
+ cowpowers = (cnx.user.is_in_group('managers')
+ or not cnx.write_security)
        # now investigate the requested state change...
try:
treid = entity.cw_attr_cache['by_transition']
@@ -228,7 +224,7 @@
else:
# check transition is valid and allowed, unless we're coming back
# from subworkflow
- tr = session.entity_from_eid(treid)
+ tr = cnx.entity_from_eid(treid)
if swtr is None:
qname = ('by_transition', 'subject')
if tr is None:
@@ -247,7 +243,7 @@
msg = _("transition isn't allowed")
raise validation_error(entity, {('by_transition', 'subject'): msg})
if swtr is None:
- deststate = session.entity_from_eid(deststateeid)
+ deststate = cnx.entity_from_eid(deststateeid)
if not cowpowers and deststate is None:
msg = _("state doesn't belong to entity's workflow")
raise validation_error(entity, {('to_state', 'subject'): msg})
@@ -256,10 +252,10 @@
# everything is ok, add missing information on the trinfo entity
entity.cw_edited['from_state'] = fromstate.eid
entity.cw_edited['to_state'] = deststateeid
- nocheck = session.transaction_data.setdefault('skip-security', set())
+ nocheck = cnx.transaction_data.setdefault('skip-security', set())
nocheck.add((entity.eid, 'from_state', fromstate.eid))
nocheck.add((entity.eid, 'to_state', deststateeid))
- _FireAutotransitionOp(session, entity=forentity)
+ _FireAutotransitionOp(cnx, entity=forentity)
class FiredTransitionHook(WorkflowHook):
@@ -289,12 +285,12 @@
category = 'integrity'
def __call__(self):
- session = self._cw
- nocheck = session.transaction_data.get('skip-security', ())
+ cnx = self._cw
+ nocheck = cnx.transaction_data.get('skip-security', ())
if (self.eidfrom, 'in_state', self.eidto) in nocheck:
# state changed through TrInfo insertion, so we already know it's ok
return
- entity = session.entity_from_eid(self.eidfrom)
+ entity = cnx.entity_from_eid(self.eidfrom)
iworkflowable = entity.cw_adapt_to('IWorkflowable')
mainwf = iworkflowable.main_workflow
if mainwf is None:
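
The _change_state rewrite at the top of hooks/workflow.py drops the last
multi-source special case: with a single system source there is no need to
ask where x lives before swapping the in_state relation. The skip-security
idiom it keeps is worth spelling out; a stand-in sketch mirroring the
transaction_data usage in the diff (FakeCnx is illustrative only):

    class FakeCnx(object):
        def __init__(self):
            self.transaction_data = {}
            self.log = []
        def delete_relation(self, x, rtype, y):
            self.log.append(('delete', x, rtype, y))
        def add_relation(self, x, rtype, y):
            self.log.append(('add', x, rtype, y))

    def _change_state(cnx, x, oldstate, newstate):
        # mark both relations as pre-checked so security hooks skip them
        nocheck = cnx.transaction_data.setdefault('skip-security', set())
        nocheck.add((x, 'in_state', oldstate))
        nocheck.add((x, 'in_state', newstate))
        cnx.delete_relation(x, 'in_state', oldstate)  # old state first
        cnx.add_relation(x, 'in_state', newstate)

    cnx = FakeCnx()
    _change_state(cnx, 42, 7, 8)
    assert cnx.log == [('delete', 42, 'in_state', 7),
                       ('add', 42, 'in_state', 8)]
    assert (42, 'in_state', 8) in cnx.transaction_data['skip-security']
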
--- a/i18n/de.po Tue Jun 10 09:35:26 2014 +0200
+++ b/i18n/de.po Tue Jun 10 09:49:45 2014 +0200
@@ -118,10 +118,6 @@
msgstr ""
#, python-format
-msgid "%s relation should not be in mapped"
-msgstr ""
-
-#, python-format
msgid "%s software version of the database"
msgstr "Software-Version der Datenbank %s"
@@ -1276,9 +1272,6 @@
msgid "attribute"
msgstr "Attribut"
-msgid "attribute/relation can't be mapped, only entity and relation types"
-msgstr ""
-
msgid "august"
msgstr "August"
@@ -1401,22 +1394,12 @@
msgstr ""
#, python-format
-msgid "can't connect to source %s, some data may be missing"
-msgstr "Keine Verbindung zu der Quelle %s, einige Daten könnten fehlen"
-
-#, python-format
msgid "can't display data, unexpected error: %s"
msgstr "Kann die Daten aufgrund des folgenden Fehlers nicht anzeigen: %s"
msgid "can't have multiple exits on the same state"
msgstr "Mehrere Ausgänge aus demselben Zustand nicht möglich."
-msgid "can't mix dontcross and maycross options"
-msgstr ""
-
-msgid "can't mix dontcross and write options"
-msgstr ""
-
#, python-format
msgid "can't parse %(value)r (expected %(format)s)"
msgstr ""
@@ -2426,12 +2409,6 @@
msgid "error while publishing ReST text"
msgstr "Fehler beim Ãœbersetzen von reST"
-#, python-format
-msgid "error while querying source %s, some data may be missing"
-msgstr ""
-"Fehler beim Zugriff auf Quelle %s, möglicherweise sind die Daten "
-"unvollständig."
-
msgid "exit state must be a subworkflow state"
msgstr "Exit-Zustand muss ein Subworkflow-Zustand sein."
@@ -2906,10 +2883,6 @@
msgid "inlined"
msgstr "eingereiht"
-#, python-format
-msgid "inlined relation %(rtype)s of %(etype)s should be supported"
-msgstr ""
-
msgid "instance home"
msgstr "Startseite der Instanz"
@@ -3297,10 +3270,6 @@
msgid "no edited fields specified"
msgstr ""
-#, python-format
-msgid "no edited fields specified for entity %s"
-msgstr "kein Eingabefeld spezifiziert Für Entität %s"
-
msgid "no log to display"
msgstr ""
@@ -4275,10 +4244,6 @@
msgstr "(Externe) Entität nicht gefunden"
#, python-format
-msgid "unknown option(s): %s"
-msgstr ""
-
-#, python-format
msgid "unknown options %s"
msgstr ""
@@ -4585,13 +4550,16 @@
msgid "you have been logged out"
msgstr "Sie sind jetzt abgemeldet."
-#, python-format
-msgid "you may want to specify something for %s"
-msgstr ""
-
msgid "you should probably delete that property"
msgstr "Sie sollten diese Eigenschaft wahrscheinlich löschen."
-#, python-format
-msgid "you should un-inline relation %s which is supported and may be crossed "
-msgstr ""
+#~ msgid "can't connect to source %s, some data may be missing"
+#~ msgstr "Keine Verbindung zu der Quelle %s, einige Daten könnten fehlen"
+
+#~ msgid "error while querying source %s, some data may be missing"
+#~ msgstr ""
+#~ "Fehler beim Zugriff auf Quelle %s, möglicherweise sind die Daten "
+#~ "unvollständig."
+
+#~ msgid "no edited fields specified for entity %s"
+#~ msgstr "kein Eingabefeld spezifiziert Für Entität %s"
--- a/i18n/en.po Tue Jun 10 09:35:26 2014 +0200
+++ b/i18n/en.po Tue Jun 10 09:49:45 2014 +0200
@@ -110,10 +110,6 @@
msgstr ""
#, python-format
-msgid "%s relation should not be in mapped"
-msgstr ""
-
-#, python-format
msgid "%s software version of the database"
msgstr ""
@@ -1233,9 +1229,6 @@
msgid "attribute"
msgstr ""
-msgid "attribute/relation can't be mapped, only entity and relation types"
-msgstr ""
-
msgid "august"
msgstr ""
@@ -1358,22 +1351,12 @@
msgstr ""
#, python-format
-msgid "can't connect to source %s, some data may be missing"
-msgstr ""
-
-#, python-format
msgid "can't display data, unexpected error: %s"
msgstr ""
msgid "can't have multiple exits on the same state"
msgstr ""
-msgid "can't mix dontcross and maycross options"
-msgstr ""
-
-msgid "can't mix dontcross and write options"
-msgstr ""
-
#, python-format
msgid "can't parse %(value)r (expected %(format)s)"
msgstr ""
@@ -2375,10 +2358,6 @@
msgid "error while publishing ReST text"
msgstr ""
-#, python-format
-msgid "error while querying source %s, some data may be missing"
-msgstr ""
-
msgid "exit state must be a subworkflow state"
msgstr ""
@@ -2833,10 +2812,6 @@
msgid "inlined"
msgstr "inlined"
-#, python-format
-msgid "inlined relation %(rtype)s of %(etype)s should be supported"
-msgstr ""
-
msgid "instance home"
msgstr ""
@@ -3213,10 +3188,6 @@
msgid "no edited fields specified"
msgstr ""
-#, python-format
-msgid "no edited fields specified for entity %s"
-msgstr ""
-
msgid "no log to display"
msgstr ""
@@ -4175,10 +4146,6 @@
msgstr ""
#, python-format
-msgid "unknown option(s): %s"
-msgstr ""
-
-#, python-format
msgid "unknown options %s"
msgstr ""
@@ -4472,13 +4439,5 @@
msgid "you have been logged out"
msgstr ""
-#, python-format
-msgid "you may want to specify something for %s"
-msgstr ""
-
msgid "you should probably delete that property"
msgstr ""
-
-#, python-format
-msgid "you should un-inline relation %s which is supported and may be crossed "
-msgstr ""
--- a/i18n/es.po Tue Jun 10 09:35:26 2014 +0200
+++ b/i18n/es.po Tue Jun 10 09:49:45 2014 +0200
@@ -1,17 +1,22 @@
# cubicweb i18n catalog
-# Copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# Copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# Logilab <contact@logilab.fr>
+# Translators:
+# CreaLibre <info@crealibre.com>, 2014
msgid ""
msgstr ""
-"Project-Id-Version: cubicweb 2.46.0\n"
-"POT-Creation-Date: \n"
-"PO-Revision-Date: 2011-05-03 12:53-0600\n"
+"Project-Id-Version: Cubicweb\n"
+"POT-Creation-Date: 2006-01-12 17:35+CET\n"
+"PO-Revision-Date: 2014-03-04 08:10+0000\n"
"Last-Translator: CreaLibre <info@crealibre.com>\n"
-"Language-Team: es <contact@logilab.fr>\n"
-"Language: \n"
+"Language-Team: Spanish (http://www.transifex.com/projects/p/cubicweb/"
+"language/es/)\n"
+"Language: es\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: pygettext.py 1.5\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
#, python-format
msgid ""
@@ -26,7 +31,7 @@
msgstr ""
"\n"
"%(user)s ha cambiado su estado de <%(previous_state)s> hacia <"
-"%(current_state)s> por la entidad\n"
+"%(current_state)s> en la entidad\n"
"'%(title)s'\n"
"\n"
"%(comment)s\n"
@@ -52,11 +57,11 @@
#, python-format
msgid "%(KEY-cstr)s constraint failed for value %(KEY-value)r"
-msgstr ""
+msgstr "%(KEY-cstr)s restricción errónea para el valor %(KEY-value)r"
#, python-format
msgid "%(KEY-value)r doesn't match the %(KEY-regexp)r regular expression"
-msgstr ""
+msgstr "%(KEY-value)r no corresponde a la expresión regular %(KEY-regexp)r"
#, python-format
msgid "%(attr)s set to %(newvalue)s"
@@ -116,11 +121,7 @@
#, python-format
msgid "%s is part of violated unicity constraint"
-msgstr ""
-
-#, python-format
-msgid "%s relation should not be in mapped"
-msgstr "la relación %s no deberÃa estar mapeada"
+msgstr "%s pertenece a una restricción de unidad no respectada"
#, python-format
msgid "%s software version of the database"
@@ -138,6 +139,8 @@
msgid ""
"'%s' action for in_state relation should at least have 'linkattr=name' option"
msgstr ""
+"'%s' acción en la relación in_state debe por lo menos tener la opción "
+"'linkattr=name'"
#, python-format
msgid "'%s' action requires 'linkattr' option"
@@ -148,7 +151,7 @@
#, python-format
msgid "(suppressed) entity #%d"
-msgstr ""
+msgstr "(eliminada) entidad #%d"
msgid "**"
msgstr "0..n 0..n"
@@ -200,7 +203,7 @@
"div>"
msgid "<no relation>"
-msgstr ""
+msgstr "<sin relación>"
msgid "<not specified>"
msgstr "<no especificado>"
@@ -225,7 +228,7 @@
#, python-format
msgid "Added relation : %(entity_from)s %(rtype)s %(entity_to)s"
-msgstr ""
+msgstr "Relación agregada : %(entity_from)s %(rtype)s %(entity_to)s"
msgid "Any"
msgstr "Cualquiera"
@@ -243,10 +246,10 @@
msgstr "Transiciones (abstractas)"
msgid "BigInt"
-msgstr ""
+msgstr "Big integer"
msgid "BigInt_plural"
-msgstr ""
+msgstr "Big integers"
msgid "Bookmark"
msgstr "Favorito"
@@ -274,7 +277,7 @@
#, python-format
msgid "By %(user)s on %(dt)s [%(undo_link)s]"
-msgstr ""
+msgstr "Por %(user)s en %(dt)s [%(undo_link)s]"
msgid "Bytes"
msgstr "Bytes"
@@ -307,10 +310,10 @@
msgstr "Restricciones"
msgid "CWDataImport"
-msgstr ""
+msgstr "Importación de Datos"
msgid "CWDataImport_plural"
-msgstr ""
+msgstr "Importaciones de Datos"
msgid "CWEType"
msgstr "Tipo de entidad"
@@ -425,19 +428,21 @@
"tipo ya no existe"
msgid "Click to sort on this column"
-msgstr ""
+msgstr "Seleccione para ordenar columna"
msgid ""
"Configuration of the system source goes to the 'sources' file, not in the "
"database"
msgstr ""
+"La configuración de la fuente sistema va en el archivo \"Sources\"/Fuentes, "
+"y no en la base de datos."
#, python-format
msgid "Created %(etype)s : %(entity)s"
-msgstr ""
+msgstr "Se creó %(etype)s : %(entity)s"
msgid "DEBUG"
-msgstr ""
+msgstr "DEPURAR"
msgid "Date"
msgstr "Fecha"
@@ -459,23 +464,23 @@
#, python-format
msgid "Delete relation : %(entity_from)s %(rtype)s %(entity_to)s"
-msgstr ""
+msgstr "Eliminar relación : %(entity_from)s %(rtype)s %(entity_to)s"
#, python-format
msgid "Deleted %(etype)s : %(entity)s"
-msgstr ""
+msgstr "Se eliminó %(etype)s : %(entity)s"
msgid "Detected problems"
msgstr "Problemas detectados"
msgid "Do you want to delete the following element(s)?"
-msgstr "Desea eliminar el(los) elemento(s) siguiente(s)"
+msgstr "Desea eliminar el/los elemento(s) a continuación?"
msgid "Download schema as OWL"
msgstr "Descargar el esquema en formato OWL"
msgid "ERROR"
-msgstr ""
+msgstr "ERROR"
msgid "EmailAddress"
msgstr "Correo Electrónico"
@@ -500,7 +505,7 @@
msgstr "Uris externos"
msgid "FATAL"
-msgstr ""
+msgstr "FATAL"
msgid "Float"
msgstr "Número flotante"
@@ -521,7 +526,7 @@
msgstr "Ayuda"
msgid "INFO"
-msgstr ""
+msgstr "INFO"
msgid "Instance"
msgstr "Instancia"
@@ -542,7 +547,7 @@
msgstr "Duraciones"
msgid "Link:"
-msgstr ""
+msgstr "Liga:"
msgid "Looked up classes"
msgstr "Clases buscadas"
@@ -554,7 +559,7 @@
msgstr "Gestión de seguridad"
msgid "Message threshold"
-msgstr ""
+msgstr "LÃmite de mensajes"
msgid "Most referenced classes"
msgstr "Clases más referenciadas"
@@ -578,7 +583,7 @@
msgstr "Agregar tipo de Restricción"
msgid "New CWDataImport"
-msgstr ""
+msgstr "Nueva importación de datos"
msgid "New CWEType"
msgstr "Agregar tipo de entidad"
@@ -795,7 +800,7 @@
msgstr "Este tipo de Restricción"
msgid "This CWDataImport"
-msgstr ""
+msgstr "Esta importación de datos"
msgid "This CWEType"
msgstr "Este tipo de Entidad"
@@ -858,6 +863,8 @@
"This action is forbidden. If you think it should be allowed, please contact "
"the site administrator."
msgstr ""
+"Esta acción le es prohibida. Si cree que Ud. deberÃa de tener autorización, "
+"favor de contactar al administrador del sitio. "
msgid "This entity type permissions:"
msgstr "Permisos para este tipo de entidad:"
@@ -886,23 +893,23 @@
"por lÃnea."
msgid "Undoable actions"
-msgstr ""
+msgstr "Acciones irreversibles"
msgid "Undoing"
-msgstr ""
+msgstr "Deshaciendo"
msgid "UniqueConstraint"
msgstr "Restricción de Unicidad"
msgid "Unknown source type"
-msgstr ""
+msgstr "tipo de fuente desconocida"
msgid "Unreachable objects"
msgstr "Objetos inaccesibles"
#, python-format
msgid "Updated %(etype)s : %(entity)s"
-msgstr ""
+msgstr "Se actualizó %(etype)s : %(entity)s"
msgid "Used by:"
msgstr "Utilizado por :"
@@ -911,7 +918,7 @@
msgstr "Usuarios y grupos de administradores"
msgid "WARNING"
-msgstr ""
+msgstr "ADVERTENCIA"
msgid "Web server"
msgstr "Servidor web"
@@ -950,13 +957,13 @@
"en el campo siguiente."
msgid "You can't change this relation"
-msgstr ""
+msgstr "Usted no puede modificar esta relación"
msgid "You cannot remove the system source"
-msgstr ""
+msgstr "Usted no puede eliminar la fuente sistema"
msgid "You cannot rename the system source"
-msgstr ""
+msgstr "Usted no puede Renombrar la fuente sistema"
msgid ""
"You have no access to this view or it can not be used to display the current "
@@ -992,7 +999,7 @@
msgstr "un número flotante es requerido"
msgid "a number (in seconds) or 20s, 10min, 24h or 4d are expected"
-msgstr ""
+msgstr "se espera un número (en segundos) ó 20s, 10min, 24h ó 4d "
msgid ""
"a simple cache entity characterized by a name and a validity date. The "
@@ -1010,7 +1017,7 @@
msgstr "Clase de base abstracta para la transiciones"
msgid "action menu"
-msgstr ""
+msgstr "acciones"
msgid "action(s) on this selection"
msgstr "Acción(es) en esta selección"
@@ -1031,7 +1038,7 @@
msgstr "Agregar a los favoritos "
msgid "add CWAttribute add_permission RQLExpression subject"
-msgstr ""
+msgstr "Expresión RQL de agregación"
msgid "add CWAttribute constrained_by CWConstraint subject"
msgstr "Restricción"
@@ -1143,7 +1150,7 @@
msgctxt "CWAttribute"
msgid "add_permission"
-msgstr ""
+msgstr "Permiso de agregar"
# subject and object forms for each relation type
# (no object form for final relation types)
@@ -1182,7 +1189,7 @@
"ha sido agregada"
msgid "additional type specific properties"
-msgstr ""
+msgstr "propiedades adicionales especÃficas al tipo"
msgid "addrelated"
msgstr "Agregar"
@@ -1212,7 +1219,7 @@
#, python-format
msgid "allowed values for \"action\" are %s"
-msgstr ""
+msgstr "los valores permitidos por \"acción\" son %s"
msgid "allowed_transition"
msgstr "transiciones autorizadas"
@@ -1261,7 +1268,7 @@
msgstr "anónimo"
msgid "anyrsetview"
-msgstr ""
+msgstr "vistas rset"
msgid "april"
msgstr "Abril"
@@ -1283,11 +1290,6 @@
msgid "attribute"
msgstr "Atributo"
-msgid "attribute/relation can't be mapped, only entity and relation types"
-msgstr ""
-"los atributos y las relaciones no pueden ser mapeados, solamente los tipos "
-"de entidad y de relación"
-
msgid "august"
msgstr "Agosto"
@@ -1299,19 +1301,20 @@
msgid "autocomputed attribute used to ensure transition coherency"
msgstr ""
+"Atributo automatizado usado para asegurar la coherencia en la transición"
msgid "automatic"
msgstr "Automático"
#, python-format
msgid "back to pagination (%s results)"
-msgstr ""
+msgstr "regresar a paginación (%s resultados)"
msgid "bad value"
msgstr "Valor erróneo"
msgid "badly formatted url"
-msgstr ""
+msgstr "url mal formateado"
msgid "base url"
msgstr "Url de base"
@@ -1398,7 +1401,7 @@
msgstr "Imposible de interpretar los tipos de entidades:"
msgid "can only have one url"
-msgstr ""
+msgstr "solo puede tener un URL"
msgid "can't be changed"
msgstr "No puede ser modificado"
@@ -1407,11 +1410,7 @@
msgstr "No puede ser eliminado"
msgid "can't change this attribute"
-msgstr ""
-
-#, python-format
-msgid "can't connect to source %s, some data may be missing"
-msgstr "no se puede conectar a la fuente %s, algunos datos pueden faltar"
+msgstr "no puede modificar este atributo"
#, python-format
msgid "can't display data, unexpected error: %s"
@@ -1420,12 +1419,6 @@
msgid "can't have multiple exits on the same state"
msgstr "no puede tener varias salidas en el mismo estado"
-msgid "can't mix dontcross and maycross options"
-msgstr "no puede mezclar las opciones dontcross y maycross"
-
-msgid "can't mix dontcross and write options"
-msgstr "no puede mezclar las opciones dontcross y write"
-
#, python-format
msgid "can't parse %(value)r (expected %(format)s)"
msgstr "no puede analizar %(value)r (formato requerido : %(format)s)"
@@ -1435,16 +1428,22 @@
"can't restore entity %(eid)s of type %(eschema)s, target of %(rtype)s (eid "
"%(value)s) does not exist any longer"
msgstr ""
+"no se pudo restaurar la entidad %(eid)s del tipo %(eschema)s, objetivo de "
+"%(rtype)s (eid %(value)s) pues ésta ya no existe"
#, python-format
msgid ""
"can't restore relation %(rtype)s of entity %(eid)s, this relation does not "
"exist in the schema anymore."
msgstr ""
+"no se pudo restaurar la relación %(rtype)s de la entidad %(eid)s, esta "
+"relación ya no existe en el esquema. "
#, python-format
msgid "can't restore state of entity %s, it has been deleted inbetween"
msgstr ""
+"no se puede restaurar el estado de la entidad %s, se ha borrado desde "
+"entonces"
#, python-format
msgid ""
@@ -1503,7 +1502,7 @@
msgstr "seleccione para editar este campo"
msgid "close all"
-msgstr ""
+msgstr "cerrar todos"
msgid "comment"
msgstr "Comentario"
@@ -1734,7 +1733,7 @@
msgid ""
"creating RQLExpression (CWAttribute %(linkto)s add_permission RQLExpression)"
-msgstr ""
+msgstr "Creación de una expresión RQL para permitir agregar %(linkto)s"
msgid ""
"creating RQLExpression (CWAttribute %(linkto)s read_permission RQLExpression)"
@@ -1849,7 +1848,7 @@
msgstr "Exportar en CSV"
msgid "csv export (entities)"
-msgstr ""
+msgstr "Exportar a CSV (entidades)"
msgid "ctxcomponents"
msgstr "Componentes contextuales"
@@ -1968,10 +1967,10 @@
msgstr "Workflow de"
msgid "cw.groups-management"
-msgstr ""
+msgstr "grupos"
msgid "cw.users-management"
-msgstr ""
+msgstr "usuarios"
msgid "cw_for_source"
msgstr "fuente"
@@ -2002,18 +2001,18 @@
msgstr "tiene la configuración del host"
msgid "cw_import_of"
-msgstr ""
+msgstr "fuente"
msgctxt "CWDataImport"
msgid "cw_import_of"
-msgstr ""
+msgstr "fuente"
msgid "cw_import_of_object"
-msgstr ""
+msgstr "importación"
msgctxt "CWSource"
msgid "cw_import_of_object"
-msgstr ""
+msgstr "importación"
msgid "cw_schema"
msgstr "esquema"
@@ -2071,7 +2070,7 @@
msgstr "Permisos"
msgid "cwsource-imports"
-msgstr ""
+msgstr "importación"
msgid "cwsource-main"
msgstr "descripción"
@@ -2120,7 +2119,7 @@
msgstr "Valor por defecto"
msgid "default value as gziped pickled python object"
-msgstr ""
+msgstr "valor por defecto, en la forma de objeto python, al usar pickle y gzip"
msgid "default workflow for an entity type"
msgstr "Workflow por defecto para un tipo de entidad"
@@ -2409,11 +2408,11 @@
msgstr "Html incrustado"
msgid "end_timestamp"
-msgstr ""
+msgstr "horario final"
msgctxt "CWDataImport"
msgid "end_timestamp"
-msgstr ""
+msgstr "horario final"
msgid "entities deleted"
msgstr "Entidades eliminadas"
@@ -2457,7 +2456,7 @@
msgstr "Actualización de la Entidad"
msgid "entityview"
-msgstr ""
+msgstr "vistas de entidades"
msgid "error"
msgstr "error"
@@ -2466,12 +2465,6 @@
msgstr ""
"Se ha producido un error durante la interpretación del texto en formato ReST"
-#, python-format
-msgid "error while querying source %s, some data may be missing"
-msgstr ""
-"Un error ha ocurrido al interrogar %s, es posible que los \n"
-"datos visibles se encuentren incompletos"
-
msgid "exit state must be a subworkflow state"
msgstr "El estado de salida debe de ser un estado del Sub-Workflow"
@@ -2500,14 +2493,14 @@
msgstr "Tipo"
msgid "extra_props"
-msgstr ""
+msgstr "propiedades adicionales"
msgctxt "CWAttribute"
msgid "extra_props"
-msgstr ""
+msgstr "propiedades adicionales"
msgid "facet-loading-msg"
-msgstr ""
+msgstr "procesando, espere por favor"
msgid "facet.filters"
msgstr "Filtros"
@@ -2534,10 +2527,10 @@
msgstr "Faceta para las entidades \"finales\""
msgid "facets_datafeed.dataimport.status"
-msgstr ""
+msgstr "estado de la importación"
msgid "facets_datafeed.dataimport.status_description"
-msgstr ""
+msgstr "Estado de la importación de datos"
msgid "facets_etype-facet"
msgstr "Faceta \"es de tipo\""
@@ -2564,7 +2557,7 @@
msgstr "Faceta en el estado"
msgid "failed"
-msgstr ""
+msgstr "fallido"
#, python-format
msgid "failed to uniquify path (%s, %s)"
@@ -2601,7 +2594,7 @@
msgstr "Amigo de un Amigo, FOAF"
msgid "focus on this selection"
-msgstr ""
+msgstr "muestre esta selección"
msgid "follow"
msgstr "Seguir la liga"
@@ -2732,16 +2725,16 @@
msgstr "Grupos"
msgid "groups allowed to add entities/relations of this type"
-msgstr ""
+msgstr "grupos autorizados a agregar entidades/relaciones de este tipo"
msgid "groups allowed to delete entities/relations of this type"
-msgstr ""
+msgstr "grupos autorizados a eliminar entidades/relaciones de este tipo"
msgid "groups allowed to read entities/relations of this type"
-msgstr ""
+msgstr "grupos autorizados a leer entidades/relaciones de este tipo"
msgid "groups allowed to update entities/relations of this type"
-msgstr ""
+msgstr "grupos autorizados a actualizar entidades/relaciones de este tipo"
msgid "groups grant permissions to the user"
msgstr "Los grupos otorgan los permisos al usuario"
@@ -2756,7 +2749,7 @@
msgstr "Contiene el texto"
msgid "header-center"
-msgstr ""
+msgstr "header - centro"
msgid "header-left"
msgstr "encabezado (izquierdo)"
@@ -2857,7 +2850,7 @@
msgstr "Imagen"
msgid "in progress"
-msgstr ""
+msgstr "en progreso"
msgid "in_group"
msgstr "En el grupo"
@@ -2880,11 +2873,11 @@
msgstr "Estado de"
msgid "in_synchronization"
-msgstr ""
+msgstr "sincronizado"
msgctxt "CWSource"
msgid "in_synchronization"
-msgstr ""
+msgstr "sincronizado"
msgid "incontext"
msgstr "En el contexto"
@@ -2894,7 +2887,7 @@
#, python-format
msgid "incorrect value (%(KEY-value)r) for type \"%(KEY-type)s\""
-msgstr ""
+msgstr "el valor (%(KEY-value)r) es incorrecto para el tipo \"%(KEY-type)s\""
msgid "index this attribute's value in the plain text index"
msgstr "Indexar el valor de este atributo en el Ãndice de texto simple"
@@ -2945,12 +2938,6 @@
msgid "inlined"
msgstr "Inlined"
-#, python-format
-msgid "inlined relation %(rtype)s of %(etype)s should be supported"
-msgstr ""
-"la relación %(rtype)s del tipo de entidad %(etype)s debe ser aceptada "
-"('inlined')"
-
msgid "instance home"
msgstr "Repertorio de la Instancia"
@@ -2970,7 +2957,7 @@
#, python-format
msgid "invalid value %(KEY-value)s, it must be one of %(KEY-choices)s"
-msgstr ""
+msgstr "Valor %(KEY-value)s es incorrecto, seleccione entre %(KEY-choices)s"
msgid "is"
msgstr "es"
@@ -3014,10 +3001,10 @@
msgstr "Enero"
msgid "json-entities-export-view"
-msgstr ""
+msgstr "Exportación JSON (de entidades)"
msgid "json-export-view"
-msgstr ""
+msgstr "Exportación JSON"
msgid "july"
msgstr "Julio"
@@ -3048,7 +3035,7 @@
msgstr "Ultima conexión"
msgid "latest import"
-msgstr ""
+msgstr "importaciones recientes"
msgid "latest modification time of an entity"
msgstr "Fecha de la última modificación de una entidad "
@@ -3070,7 +3057,7 @@
msgstr "izquierda"
msgid "line"
-msgstr ""
+msgstr "lÃnea"
msgid ""
"link a property to the user which want this property customization. Unless "
@@ -3105,11 +3092,11 @@
msgstr "Lista"
msgid "log"
-msgstr ""
+msgstr "log"
msgctxt "CWDataImport"
msgid "log"
-msgstr ""
+msgstr "log"
msgid "log in"
msgstr "Acceder"
@@ -3194,7 +3181,7 @@
msgstr "depuración (debugging) de fuga de memoria"
msgid "message"
-msgstr ""
+msgstr "mensaje"
#, python-format
msgid "missing parameters for entity %s"
@@ -3253,11 +3240,11 @@
msgctxt "CWSource"
msgid "name"
-msgstr ""
+msgstr "nombre"
msgctxt "CWUniqueTogetherConstraint"
msgid "name"
-msgstr ""
+msgstr "nombre"
msgctxt "State"
msgid "name"
@@ -3319,7 +3306,7 @@
msgstr "Nuevo"
msgid "next page"
-msgstr ""
+msgstr "página siguiente"
msgid "next_results"
msgstr "Siguientes resultados"
@@ -3328,20 +3315,16 @@
msgstr "No"
msgid "no content next link"
-msgstr ""
+msgstr "no hay liga siguiente"
msgid "no content prev link"
-msgstr ""
+msgstr "no existe liga previa"
msgid "no edited fields specified"
-msgstr ""
-
-#, python-format
-msgid "no edited fields specified for entity %s"
-msgstr "Ningún campo editable especificado para la entidad %s"
+msgstr "ningún campo por editar especificado"
msgid "no log to display"
-msgstr ""
+msgstr "no arrojó elementos para mostrar"
msgid "no related entity"
msgstr "No posee entidad asociada"
@@ -3375,7 +3358,7 @@
msgstr "Noviembre"
msgid "num. users"
-msgstr ""
+msgstr "Número de Usuarios"
msgid "object"
msgstr "Objeto"
@@ -3475,7 +3458,7 @@
msgstr "Ruta"
msgid "permalink to this message"
-msgstr ""
+msgstr "liga permanente a este mensaje"
msgid "permission"
msgstr "Permiso"
@@ -3520,7 +3503,7 @@
msgstr "Preferencias"
msgid "previous page"
-msgstr ""
+msgstr "página anterior"
msgid "previous_results"
msgstr "Resultados Anteriores"
@@ -3552,7 +3535,7 @@
msgstr "Permisos"
msgid "rdf export"
-msgstr ""
+msgstr "Exportación RDF"
msgid "read"
msgstr "Lectura"
@@ -3727,22 +3710,22 @@
msgstr "Derecha"
msgid "rql expression allowing to add entities/relations of this type"
-msgstr ""
+msgstr "Expresión RQL que permite AGREGAR entidades/relaciones de este tipo"
msgid "rql expression allowing to delete entities/relations of this type"
-msgstr ""
+msgstr "Expresión RQL que permite ELIMINAR entidades/relaciones de este tipo"
msgid "rql expression allowing to read entities/relations of this type"
-msgstr ""
+msgstr "Expresión RQL que permite LEER entidades/relaciones de este tipo"
msgid "rql expression allowing to update entities/relations of this type"
-msgstr ""
+msgstr "Expresión RQL que permite ACTUALIZAR entidades/relaciones de este tipo"
msgid "rql expressions"
msgstr "Expresiones RQL"
msgid "rss export"
-msgstr ""
+msgstr "Exportación RSS"
msgid "same_as"
msgstr "Idéntico a"
@@ -3829,7 +3812,7 @@
msgstr "Información del servidor"
msgid "severity"
-msgstr ""
+msgstr "severidad"
msgid ""
"should html fields being edited using fckeditor (a HTML WYSIWYG editor). "
@@ -3868,7 +3851,7 @@
"Las transacciones más recientes modificaron esta entidad, anúlelas primero"
msgid "some relations violate a unicity constraint"
-msgstr ""
+msgstr "algunas relaciones no respetan la restricción de unicidad"
msgid "sorry, the server is unable to handle this query"
msgstr "Lo sentimos, el servidor no puede manejar esta consulta"
@@ -3908,19 +3891,21 @@
"start timestamp of the currently in synchronization, or NULL when no "
"synchronization in progress."
msgstr ""
+"horario de inicio de la sincronización en curso, o NULL cuando no existe "
+"sincronización en curso"
msgid "start_timestamp"
-msgstr ""
+msgstr "horario inicio"
msgctxt "CWDataImport"
msgid "start_timestamp"
-msgstr ""
+msgstr "horario inicio"
msgid "startup views"
msgstr "Vistas de inicio"
msgid "startupview"
-msgstr ""
+msgstr "Vistas de Inicio"
msgid "state"
msgstr "Estado"
@@ -3962,11 +3947,11 @@
msgstr "Tiene por Estado"
msgid "status"
-msgstr ""
+msgstr "estado"
msgctxt "CWDataImport"
msgid "status"
-msgstr ""
+msgstr "estado"
msgid "status change"
msgstr "Cambio de Estatus"
@@ -4039,7 +4024,7 @@
msgstr "Estado de Salida de"
msgid "success"
-msgstr ""
+msgstr "éxito"
msgid "sunday"
msgstr "Domingo"
@@ -4097,14 +4082,14 @@
msgstr "El valor \"%s\" ya esta en uso, favor de utilizar otro"
msgid "there is no next page"
-msgstr ""
+msgstr "no existe página siguiente"
msgid "there is no previous page"
-msgstr ""
+msgstr "no existe página anterior"
#, python-format
msgid "there is no transaction #%s"
-msgstr ""
+msgstr "no existe la transacción #%s"
msgid "this action is not reversible!"
msgstr "Esta acción es irreversible!."
@@ -4200,7 +4185,7 @@
msgstr "n° de transición"
msgid "transaction undone"
-msgstr ""
+msgstr "transacción anulada"
#, python-format
msgid "transition %(tr)s isn't allowed from %(st)s"
@@ -4311,7 +4296,7 @@
msgstr "Valor no permitido"
msgid "undefined user"
-msgstr ""
+msgstr "usuario indefinido"
msgid "undo"
msgstr "Anular"
@@ -4323,10 +4308,6 @@
msgstr "Entidad externa desconocida"
#, python-format
-msgid "unknown option(s): %s"
-msgstr "opcion(es) desconocida(s): %s"
-
-#, python-format
msgid "unknown options %s"
msgstr "opciones desconocidas: %s"
@@ -4338,7 +4319,7 @@
msgstr "Vocabulario desconocido: "
msgid "unsupported protocol"
-msgstr ""
+msgstr "protocolo no soportado"
msgid "upassword"
msgstr "Contraseña"
@@ -4450,7 +4431,7 @@
msgstr "Preferencias"
msgid "user's email account"
-msgstr ""
+msgstr "email del usuario"
msgid "users"
msgstr "Usuarios"
@@ -4480,26 +4461,26 @@
#, python-format
msgid "value %(KEY-value)s must be %(KEY-op)s %(KEY-boundary)s"
-msgstr ""
+msgstr "El valor %(KEY-value)s debe ser %(KEY-op)s %(KEY-boundary)s"
#, python-format
msgid "value %(KEY-value)s must be <= %(KEY-boundary)s"
-msgstr ""
+msgstr "el valor %(KEY-value)s debe ser <= %(KEY-boundary)s"
#, python-format
msgid "value %(KEY-value)s must be >= %(KEY-boundary)s"
-msgstr ""
+msgstr "el valor %(KEY-value)s debe ser >= %(KEY-boundary)s"
msgid "value associated to this key is not editable manually"
msgstr "El valor asociado a este elemento no es editable manualmente"
#, python-format
msgid "value should have maximum size of %(KEY-max)s but found %(KEY-size)s"
-msgstr ""
+msgstr "el valor máximo es %(KEY-max)s y encontramos %(KEY-size)s"
#, python-format
msgid "value should have minimum size of %(KEY-min)s but found %(KEY-size)s"
-msgstr ""
+msgstr "el valor mÃnimo debe ser %(KEY-min)s y encontramos %(KEY-size)s"
msgid "vcard"
msgstr "vcard"
@@ -4541,14 +4522,14 @@
msgstr "Aún no podemos manejar este tipo de consulta Sparql"
msgid "web sessions without CNX"
-msgstr ""
+msgstr "sesiones web sin conexión asociada"
msgid "wednesday"
msgstr "Miércoles"
#, python-format
msgid "welcome %s!"
-msgstr "¡ Bienvenido %s !"
+msgstr "Bienvenido %s."
msgid "wf_info_for"
msgstr "Histórico de"
@@ -4575,10 +4556,10 @@
msgstr "Workflow"
msgid "workflow already has a state of that name"
-msgstr ""
+msgstr "el workflow posee ya un estado con ese nombre"
msgid "workflow already has a transition of that name"
-msgstr ""
+msgstr "El Workflow posee ya una transición con ese nombre"
#, python-format
msgid "workflow changed to \"%s\""
@@ -4618,13 +4599,13 @@
msgstr "Parámetro erróneo de consulta lÃnea %s"
msgid "xbel export"
-msgstr ""
+msgstr "Exportación XBEL"
msgid "xml export"
msgstr "Exportar XML"
msgid "xml export (entities)"
-msgstr ""
+msgstr "Exportación XML (entidades)"
msgid "yes"
msgstr "SÃ"
@@ -4632,15 +4613,47 @@
msgid "you have been logged out"
msgstr "Ha terminado la sesión"
-#, python-format
-msgid "you may want to specify something for %s"
-msgstr "usted desea quizás especificar algo para la relación %s"
-
msgid "you should probably delete that property"
msgstr "probablamente deberÃa suprimir esta propriedad"
-#, python-format
-msgid "you should un-inline relation %s which is supported and may be crossed "
-msgstr ""
-"usted debe quitar la puesta en lÃnea de la relación %s que es aceptada y "
-"puede ser cruzada"
+#~ msgid "%s relation should not be in mapped"
+#~ msgstr "la relación %s no deberÃa estar mapeada"
+
+#~ msgid "attribute/relation can't be mapped, only entity and relation types"
+#~ msgstr ""
+#~ "los atributos y las relaciones no pueden ser mapeados, solamente los "
+#~ "tipos de entidad y de relación"
+
+#~ msgid "can't connect to source %s, some data may be missing"
+#~ msgstr "no se puede conectar a la fuente %s, algunos datos pueden faltar"
+
+#~ msgid "can't mix dontcross and maycross options"
+#~ msgstr "no puede mezclar las opciones dontcross y maycross"
+
+#~ msgid "can't mix dontcross and write options"
+#~ msgstr "no puede mezclar las opciones dontcross y write"
+
+#~ msgid "error while querying source %s, some data may be missing"
+#~ msgstr ""
+#~ "Un error ha ocurrido al interrogar %s, es posible que los \n"
+#~ "datos visibles se encuentren incompletos"
+
+#~ msgid "inlined relation %(rtype)s of %(etype)s should be supported"
+#~ msgstr ""
+#~ "la relación %(rtype)s del tipo de entidad %(etype)s debe ser aceptada "
+#~ "('inlined')"
+
+#~ msgid "no edited fields specified for entity %s"
+#~ msgstr "Ningún campo editable especificado para la entidad %s"
+
+#~ msgid "unknown option(s): %s"
+#~ msgstr "opcion(es) desconocida(s): %s"
+
+#~ msgid "you may want to specify something for %s"
+#~ msgstr "usted desea quizás especificar algo para la relación %s"
+
+#~ msgid ""
+#~ "you should un-inline relation %s which is supported and may be crossed "
+#~ msgstr ""
+#~ "usted debe quitar la puesta en lÃnea de la relación %s que es aceptada y "
+#~ "puede ser cruzada"
--- a/i18n/fr.po Tue Jun 10 09:35:26 2014 +0200
+++ b/i18n/fr.po Tue Jun 10 09:49:45 2014 +0200
@@ -119,10 +119,6 @@
msgstr "%s appartient à une contrainte d'unicité transgressée"
#, python-format
-msgid "%s relation should not be in mapped"
-msgstr "la relation %s ne devrait pas ếtre mappé"
-
-#, python-format
msgid "%s software version of the database"
msgstr "version logicielle de la base pour %s"
@@ -1288,11 +1284,6 @@
msgid "attribute"
msgstr "attribut"
-msgid "attribute/relation can't be mapped, only entity and relation types"
-msgstr ""
-"les attributs et relations ne peuvent être mappés, uniquement les types "
-"d'entité et de relation"
-
msgid "august"
msgstr "août"
@@ -1417,22 +1408,12 @@
msgstr "cet attribut ne peut pas être modifié"
#, python-format
-msgid "can't connect to source %s, some data may be missing"
-msgstr "ne peut se connecter à la source %s, des données peuvent manquer"
-
-#, python-format
msgid "can't display data, unexpected error: %s"
msgstr "impossible d'afficher les données à cause de l'erreur suivante: %s"
msgid "can't have multiple exits on the same state"
msgstr "ne peut avoir plusieurs sorties sur le même état"
-msgid "can't mix dontcross and maycross options"
-msgstr "ne peut mélanger dontcross et maycross options"
-
-msgid "can't mix dontcross and write options"
-msgstr "ne peut mélanger dontcross et write options"
-
#, python-format
msgid "can't parse %(value)r (expected %(format)s)"
msgstr "ne peut analyser %(value)r (format attendu : %(format)s)"
@@ -2479,12 +2460,6 @@
msgstr ""
"une erreur s'est produite lors de l'interprétation du texte au format ReST"
-#, python-format
-msgid "error while querying source %s, some data may be missing"
-msgstr ""
-"une erreur est survenue en interrogeant %s, il est possible que les\n"
-"données affichées soient incomplètes"
-
msgid "exit state must be a subworkflow state"
msgstr "l'état de sortie doit être un état du sous-workflow"
@@ -2956,12 +2931,6 @@
msgid "inlined"
msgstr "mise en ligne"
-#, python-format
-msgid "inlined relation %(rtype)s of %(etype)s should be supported"
-msgstr ""
-"la relation %(rtype)s du type d'entité %(etype)s doit être supportée "
-"('inlined')"
-
msgid "instance home"
msgstr "répertoire de l'instance"
@@ -3349,10 +3318,6 @@
msgid "no edited fields specified"
msgstr "aucun champ à éditer spécifié"
-#, python-format
-msgid "no edited fields specified for entity %s"
-msgstr "aucun champ à éditer spécifié pour l'entité %s"
-
msgid "no log to display"
msgstr "rien à afficher"
@@ -4340,10 +4305,6 @@
msgstr "entité (externe) introuvable"
#, python-format
-msgid "unknown option(s): %s"
-msgstr "option(s) inconnue(s) : %s"
-
-#, python-format
msgid "unknown options %s"
msgstr "options inconnues : %s"
@@ -4654,15 +4615,47 @@
msgid "you have been logged out"
msgstr "vous avez été déconnecté"
-#, python-format
-msgid "you may want to specify something for %s"
-msgstr "vous désirez peut-être spécifié quelque chose pour la relation %s"
-
msgid "you should probably delete that property"
msgstr "vous devriez probablement supprimer cette propriété"
-#, python-format
-msgid "you should un-inline relation %s which is supported and may be crossed "
-msgstr ""
-"vous devriez enlevé la mise en ligne de la relation %s qui est supportée et "
-"peut-être croisée"
+#~ msgid "%s relation should not be in mapped"
+#~ msgstr "la relation %s ne devrait pas ếtre mappé"
+
+#~ msgid "attribute/relation can't be mapped, only entity and relation types"
+#~ msgstr ""
+#~ "les attributs et relations ne peuvent être mappés, uniquement les types "
+#~ "d'entité et de relation"
+
+#~ msgid "can't connect to source %s, some data may be missing"
+#~ msgstr "ne peut se connecter à la source %s, des données peuvent manquer"
+
+#~ msgid "can't mix dontcross and maycross options"
+#~ msgstr "ne peut mélanger dontcross et maycross options"
+
+#~ msgid "can't mix dontcross and write options"
+#~ msgstr "ne peut mélanger dontcross et write options"
+
+#~ msgid "error while querying source %s, some data may be missing"
+#~ msgstr ""
+#~ "une erreur est survenue en interrogeant %s, il est possible que les\n"
+#~ "données affichées soient incomplètes"
+
+#~ msgid "inlined relation %(rtype)s of %(etype)s should be supported"
+#~ msgstr ""
+#~ "la relation %(rtype)s du type d'entité %(etype)s doit être supportée "
+#~ "('inlined')"
+
+#~ msgid "no edited fields specified for entity %s"
+#~ msgstr "aucun champ à éditer spécifié pour l'entité %s"
+
+#~ msgid "unknown option(s): %s"
+#~ msgstr "option(s) inconnue(s) : %s"
+
+#~ msgid "you may want to specify something for %s"
+#~ msgstr "vous désirez peut-être spécifié quelque chose pour la relation %s"
+
+#~ msgid ""
+#~ "you should un-inline relation %s which is supported and may be crossed "
+#~ msgstr ""
+#~ "vous devriez enlevé la mise en ligne de la relation %s qui est supportée "
+#~ "et peut-être croisée"
--- a/misc/migration/3.10.0_Any.py Tue Jun 10 09:35:26 2014 +0200
+++ b/misc/migration/3.10.0_Any.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,6 +1,6 @@
from cubicweb.server.session import hooks_control
-for uri, cfg in config.sources().items():
+for uri, cfg in config.read_sources_file().items():
if uri in ('system', 'admin'):
continue
repo.sources_by_uri[uri] = repo.get_source(cfg['adapter'], uri, cfg.copy())
@@ -18,7 +18,7 @@
'WHERE s.cw_name=e.type')
commit()
-for uri, cfg in config.sources().items():
+for uri, cfg in config.read_sources_file().items():
if uri in ('system', 'admin'):
continue
repo.sources_by_uri.pop(uri)
--- a/misc/migration/3.11.0_Any.py Tue Jun 10 09:35:26 2014 +0200
+++ b/misc/migration/3.11.0_Any.py Tue Jun 10 09:49:45 2014 +0200
@@ -9,77 +9,3 @@
add_attribute('CWSource', 'url')
add_attribute('CWSource', 'parser')
add_attribute('CWSource', 'latest_retrieval')
-
-try:
- from cubicweb.server.sources.pyrorql import PyroRQLSource
-except ImportError:
- pass
-else:
-
- from os.path import join
- # function to read old python mapping file
- def load_mapping_file(source):
- mappingfile = source.config['mapping-file']
- mappingfile = join(source.repo.config.apphome, mappingfile)
- mapping = {}
- execfile(mappingfile, mapping)
- for junk in ('__builtins__', '__doc__'):
- mapping.pop(junk, None)
- mapping.setdefault('support_relations', {})
- mapping.setdefault('dont_cross_relations', set())
- mapping.setdefault('cross_relations', set())
- # do some basic checks of the mapping content
- assert 'support_entities' in mapping, \
- 'mapping file should at least define support_entities'
- assert isinstance(mapping['support_entities'], dict)
- assert isinstance(mapping['support_relations'], dict)
- assert isinstance(mapping['dont_cross_relations'], set)
- assert isinstance(mapping['cross_relations'], set)
- unknown = set(mapping) - set( ('support_entities', 'support_relations',
- 'dont_cross_relations', 'cross_relations') )
- assert not unknown, 'unknown mapping attribute(s): %s' % unknown
- # relations that are necessarily not crossed
- for rtype in ('is', 'is_instance_of', 'cw_source'):
- assert rtype not in mapping['dont_cross_relations'], \
- '%s relation should not be in dont_cross_relations' % rtype
- assert rtype not in mapping['support_relations'], \
- '%s relation should not be in support_relations' % rtype
- return mapping
- # for now, only pyrorql sources have a mapping
- for source in repo.sources_by_uri.itervalues():
- if not isinstance(source, PyroRQLSource):
- continue
- sourceentity = session.entity_from_eid(source.eid)
- mapping = load_mapping_file(source)
- # write mapping as entities
- print 'migrating map for', source
- for etype, write in mapping['support_entities'].items():
- create_entity('CWSourceSchemaConfig',
- cw_for_source=sourceentity,
- cw_schema=session.entity_from_eid(schema[etype].eid),
- options=write and u'write' or None,
- ask_confirm=False)
- for rtype, write in mapping['support_relations'].items():
- options = []
- if write:
- options.append(u'write')
- if rtype in mapping['cross_relations']:
- options.append(u'maycross')
- create_entity('CWSourceSchemaConfig',
- cw_for_source=sourceentity,
- cw_schema=session.entity_from_eid(schema[rtype].eid),
- options=u':'.join(options) or None,
- ask_confirm=False)
- for rtype in mapping['dont_cross_relations']:
- create_entity('CWSourceSchemaConfig',
- cw_for_source=source,
- cw_schema=session.entity_from_eid(schema[rtype].eid),
- options=u'dontcross',
- ask_confirm=False)
- # latest update time cwproperty is now a source attribute (latest_retrieval)
- pkey = u'sources.%s.latest-update-time' % source.uri
- rset = session.execute('Any V WHERE X is CWProperty, X value V, X pkey %(k)s',
- {'k': pkey})
- timestamp = int(rset[0][0])
- sourceentity.cw_set(latest_retrieval=datetime.fromtimestamp(timestamp))
- session.execute('DELETE CWProperty X WHERE X pkey %(k)s', {'k': pkey})
--- a/misc/migration/3.14.4_Any.py Tue Jun 10 09:35:26 2014 +0200
+++ b/misc/migration/3.14.4_Any.py Tue Jun 10 09:49:45 2014 +0200
@@ -4,8 +4,7 @@
rdefdef = schema['CWSource'].rdef('name')
attrtype = y2sql.type_from_constraints(dbhelper, rdefdef.object, rdefdef.constraints).split()[0]
-cursor = session.cnxset['system']
+cursor = session.cnxset.cu
sql('UPDATE entities SET asource = source WHERE asource is NULL')
dbhelper.change_col_type(cursor, 'entities', 'asource', attrtype, False)
dbhelper.change_col_type(cursor, 'entities', 'source', attrtype, False)
-dbhelper.change_col_type(cursor, 'deleted_entities', 'source', attrtype, False)
--- a/misc/migration/3.16.0_Any.py Tue Jun 10 09:35:26 2014 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,13 +0,0 @@
-sync_schema_props_perms('EmailAddress')
-
-for source in rql('CWSource X WHERE X type "pyrorql"').entities():
- sconfig = source.dictconfig
- nsid = sconfig.pop('pyro-ns-id', config.appid)
- nshost = sconfig.pop('pyro-ns-host', '')
- nsgroup = sconfig.pop('pyro-ns-group', ':cubicweb')
- if nsgroup:
- nsgroup += '.'
- source.cw_set(url=u'pyro://%s/%s%s' % (nshost, nsgroup, nsid))
- source.update_config(skip_unknown=True, **sconfig)
-
-commit()
--- a/misc/migration/3.17.11_Any.py Tue Jun 10 09:35:26 2014 +0200
+++ b/misc/migration/3.17.11_Any.py Tue Jun 10 09:49:45 2014 +0200
@@ -2,6 +2,6 @@
('transactions', 'tx_time'),
('tx_entity_actions', 'tx_uuid'),
('tx_relation_actions', 'tx_uuid')]:
- session.cnxset.source('system').create_index(session, table, column)
+ repo.system_source.create_index(session, table, column)
commit()
--- a/misc/migration/3.18.0_Any.py Tue Jun 10 09:35:26 2014 +0200
+++ b/misc/migration/3.18.0_Any.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-driver = config.sources()['system']['db-driver']
+driver = config.system_source_config['db-driver']
if not (driver == 'postgres' or driver.startswith('sqlserver')):
import sys
print >>sys.stderr, 'This migration is not supported for backends other than sqlserver or postgres (yet).'
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/misc/migration/3.19.0_Any.py Tue Jun 10 09:49:45 2014 +0200
@@ -0,0 +1,3 @@
+sql('DROP TABLE "deleted_entities"')
+
+commit()
--- a/misc/migration/3.8.5_Any.py Tue Jun 10 09:35:26 2014 +0200
+++ b/misc/migration/3.8.5_Any.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,5 +1,5 @@
def migrate_varchar_to_nvarchar():
- dbdriver = config.sources()['system']['db-driver']
+ dbdriver = config.system_source_config['db-driver']
if dbdriver != "sqlserver2005":
return
--- a/misc/migration/bootstrapmigration_repository.py Tue Jun 10 09:35:26 2014 +0200
+++ b/misc/migration/bootstrapmigration_repository.py Tue Jun 10 09:49:45 2014 +0200
@@ -35,6 +35,28 @@
ss.execschemarql(rql, rdef, ss.rdef2rql(rdef, CSTRMAP, groupmap=None))
commit(ask_confirm=False)
+def replace_eid_sequence_with_eid_numrange(session):
+ dbh = session.repo.system_source.dbhelper
+ cursor = session.cnxset.cu
+ try:
+ cursor.execute(dbh.sql_sequence_current_state('entities_id_seq'))
+ lasteid = cursor.fetchone()[0]
+ except: # programming error, already migrated
+ return
+
+ cursor.execute(dbh.sql_drop_sequence('entities_id_seq'))
+ cursor.execute(dbh.sql_create_numrange('entities_id_seq'))
+ cursor.execute(dbh.sql_restart_numrange('entities_id_seq', initial_value=lasteid))
+ session.commit()
+
+if applcubicwebversion < (3, 19, 0) and cubicwebversion >= (3, 19, 0):
+ sql('ALTER TABLE "entities" DROP COLUMN "mtime"')
+ sql('ALTER TABLE "entities" DROP COLUMN "source"')
+
+ commit()
+
+ replace_eid_sequence_with_eid_numrange(session)
+
if applcubicwebversion < (3, 17, 0) and cubicwebversion >= (3, 17, 0):
try:
add_cube('sioc', update_database=False)
@@ -223,11 +245,11 @@
if applcubicwebversion < (3, 2, 2) and cubicwebversion >= (3, 2, 1):
from base64 import b64encode
- for table in ('entities', 'deleted_entities'):
- for eid, extid in sql('SELECT eid, extid FROM %s WHERE extid is NOT NULL'
- % table, ask_confirm=False):
- sql('UPDATE %s SET extid=%%(extid)s WHERE eid=%%(eid)s' % table,
- {'extid': b64encode(extid), 'eid': eid}, ask_confirm=False)
+ for eid, extid in sql('SELECT eid, extid FROM entities '
+ 'WHERE extid is NOT NULL',
+ ask_confirm=False):
+ sql('UPDATE entities SET extid=%(extid)s WHERE eid=%(eid)s',
+ {'extid': b64encode(extid), 'eid': eid}, ask_confirm=False)
commit()
if applcubicwebversion < (3, 2, 0) and cubicwebversion >= (3, 2, 0):
--- a/misc/scripts/cwuser_ldap2system.py Tue Jun 10 09:35:26 2014 +0200
+++ b/misc/scripts/cwuser_ldap2system.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,7 +1,7 @@
import base64
from cubicweb.server.utils import crypt_password
-dbdriver = config.sources()['system']['db-driver']
+dbdriver = config.system_source_config['db-driver']
from logilab.database import get_db_helper
dbhelper = get_db_helper(dbdriver)
--- a/misc/scripts/drop_external_entities.py Tue Jun 10 09:35:26 2014 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,23 +0,0 @@
-from cubicweb import UnknownEid
-source, = __args__
-
-sql("DELETE FROM entities WHERE type='Int'")
-
-ecnx = session.cnxset.connection(source)
-for e in rql('Any X WHERE X cw_source S, S name %(name)s', {'name': source}).entities():
- meta = e.cw_metainformation()
- assert meta['source']['uri'] == source
- try:
- suri = ecnx.describe(meta['extid'])[1]
- except UnknownEid:
- print 'cant describe', e.cw_etype, e.eid, meta
- continue
- if suri != 'system':
- try:
- print 'deleting', e.cw_etype, e.eid, suri, e.dc_title().encode('utf8')
- repo.delete_info(session, e, suri, scleanup=e.eid)
- except UnknownEid:
- print ' cant delete', e.cw_etype, e.eid, meta
-
-
-commit()
--- a/misc/scripts/ldap_change_base_dn.py Tue Jun 10 09:35:26 2014 +0200
+++ b/misc/scripts/ldap_change_base_dn.py Tue Jun 10 09:49:45 2014 +0200
@@ -6,7 +6,7 @@
print
print 'you should not have updated your sources file yet'
-olddn = repo.config.sources()[uri]['user-base-dn']
+olddn = repo.sources_by_uri[uri].config['user-base-dn']
assert olddn != newdn
--- a/misc/scripts/repair_file_1-9_migration.py Tue Jun 10 09:35:26 2014 +0200
+++ b/misc/scripts/repair_file_1-9_migration.py Tue Jun 10 09:49:45 2014 +0200
@@ -15,11 +15,11 @@
from cubicweb import cwconfig, dbapi
from cubicweb.server.session import hooks_control
-sourcescfg = repo.config.sources()
+defaultadmin = repo.config.default_admin_config
backupcfg = cwconfig.instance_configuration(backupinstance)
backupcfg.repairing = True
-backuprepo, backupcnx = dbapi.in_memory_repo_cnx(backupcfg, sourcescfg['admin']['login'],
- password=sourcescfg['admin']['password'],
+backuprepo, backupcnx = dbapi.in_memory_repo_cnx(backupcfg, defaultadmin['login'],
+ password=defaultadmin['password'],
host='localhost')
backupcu = backupcnx.cursor()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/multipart.py Tue Jun 10 09:49:45 2014 +0200
@@ -0,0 +1,413 @@
+# -*- coding: utf-8 -*-
+'''
+Parser for multipart/form-data
+==============================
+
+This module provides a parser for the multipart/form-data format. It can read
+from a file, a socket or a WSGI environment. The parser can be used to replace
+cgi.FieldStorage (without the bugs) and works with Python 2.5+ and 3.x (2to3).
+
+Licence (MIT)
+-------------
+
+ Copyright (c) 2010, Marcel Hellkamp.
+ Inspired by the Werkzeug library: http://werkzeug.pocoo.org/
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
+
+'''
+
+__author__ = 'Marcel Hellkamp'
+__version__ = '0.1'
+__license__ = 'MIT'
+
+from tempfile import TemporaryFile
+from wsgiref.headers import Headers
+import re, sys
+try:
+ from urlparse import parse_qs
+except ImportError: # pragma: no cover (fallback for Python 2.5)
+ from cgi import parse_qs
+try:
+ from io import BytesIO
+except ImportError: # pragma: no cover (fallback for Python 2.5)
+ from StringIO import StringIO as BytesIO
+
+##############################################################################
+################################ Helper & Misc ################################
+##############################################################################
+# Some of these were copied from bottle: http://bottle.paws.de/
+
+try:
+ from collections import MutableMapping as DictMixin
+except ImportError: # pragma: no cover (fallback for Python 2.5)
+ from UserDict import DictMixin
+
+class MultiDict(DictMixin):
+ """ A dict that remembers old values for each key """
+ def __init__(self, *a, **k):
+ self.dict = dict()
+ for k, v in dict(*a, **k).iteritems():
+ self[k] = v
+
+ def __len__(self): return len(self.dict)
+ def __iter__(self): return iter(self.dict)
+ def __contains__(self, key): return key in self.dict
+ def __delitem__(self, key): del self.dict[key]
+ def keys(self): return self.dict.keys()
+ def __getitem__(self, key): return self.get(key, KeyError, -1)
+ def __setitem__(self, key, value): self.append(key, value)
+
+ def append(self, key, value): self.dict.setdefault(key, []).append(value)
+ def replace(self, key, value): self.dict[key] = [value]
+ def getall(self, key): return self.dict.get(key) or []
+
+ def get(self, key, default=None, index=-1):
+ if key not in self.dict and default != KeyError:
+ return [default][index]
+ return self.dict[key][index]
+
+ def iterallitems(self):
+ for key, values in self.dict.iteritems():
+ for value in values:
+ yield key, value
+
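+# Minimal usage sketch for MultiDict (hypothetical values): assignment
+# appends instead of overwriting, plain access returns the latest value,
+# and getall() exposes the full history.
+#
+#     d = MultiDict()
+#     d['name'] = 'first'
+#     d['name'] = 'second'
+#     d['name']            # -> 'second'
+#     d.getall('name')     # -> ['first', 'second']
+#     d.replace('name', 'only')   # drop history, keep a single value
+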
+def tob(data, enc='utf8'): # Convert strings to bytes (py2 and py3)
+ return data.encode(enc) if isinstance(data, unicode) else data
+
+def copy_file(stream, target, maxread=-1, buffer_size=2**16):
+ ''' Read from :stream and write to :target until :maxread or EOF. '''
+ size, read = 0, stream.read
+ while 1:
+ to_read = buffer_size if maxread < 0 else min(buffer_size, maxread-size)
+ part = read(to_read)
+ if not part: return size
+ target.write(part)
+ size += len(part)
+
+##############################################################################
+################################ Header Parser ################################
+##############################################################################
+
+_special = re.escape('()<>@,;:\\"/[]?={} \t')
+_re_special = re.compile('[%s]' % _special)
+_qstr = '"(?:\\\\.|[^"])*"' # Quoted string
+_value = '(?:[^%s]+|%s)' % (_special, _qstr) # Safe (unquoted) or quoted string
+_option = '(?:;|^)\s*([^%s]+)\s*=\s*(%s)' % (_special, _value)
+_re_option = re.compile(_option) # key=value part of a Content-Type like header
+
+def header_quote(val):
+ if not _re_special.search(val):
+ return val
+ return '"' + val.replace('\\','\\\\').replace('"','\\"') + '"'
+
+def header_unquote(val, filename=False):
+ if val[0] == val[-1] == '"':
+ val = val[1:-1]
+ if val[1:3] == ':\\' or val[:2] == '\\\\':
+ val = val.split('\\')[-1] # fix ie6 bug: full path --> filename
+ return val.replace('\\\\','\\').replace('\\"','"')
+ return val
+
+def parse_options_header(header, options=None):
+ if ';' not in header:
+ return header.lower().strip(), {}
+ ctype, tail = header.split(';', 1)
+ options = options or {}
+ for match in _re_option.finditer(tail):
+ key = match.group(1).lower()
+ value = header_unquote(match.group(2), key=='filename')
+ options[key] = value
+ return ctype, options
+
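+# Sketch of what the option parser above produces (hypothetical header): the
+# main value comes first and the key=value options follow as a dict, with
+# quotes stripped by header_unquote().
+#
+#     parse_options_header('form-data; name="file1"; filename="report.pdf"')
+#     # -> ('form-data', {'name': 'file1', 'filename': 'report.pdf'})
+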
+##############################################################################
+################################## Multipart ##################################
+##############################################################################
+
+
+class MultipartError(ValueError): pass
+
+
+class MultipartParser(object):
+
+ def __init__(self, stream, boundary, content_length=-1,
+ disk_limit=2**30, mem_limit=2**20, memfile_limit=2**18,
+ buffer_size=2**16, charset='latin1'):
+ ''' Parse a multipart/form-data byte stream. This object is an iterator
+ over the parts of the message.
+
+ :param stream: A file-like stream. Must implement ``.read(size)``.
+ :param boundary: The multipart boundary as a byte string.
+ :param content_length: The maximum number of bytes to read.
+ '''
+ self.stream, self.boundary = stream, boundary
+ self.content_length = content_length
+ self.disk_limit = disk_limit
+ self.memfile_limit = memfile_limit
+ self.mem_limit = min(mem_limit, self.disk_limit)
+ self.buffer_size = min(buffer_size, self.mem_limit)
+ self.charset = charset
+ if self.buffer_size - 6 < len(boundary): # "--boundary--\r\n"
+ raise MultipartError('Boundary does not fit into buffer_size.')
+ self._done = []
+ self._part_iter = None
+
+ def __iter__(self):
+ ''' Iterate over the parts of the multipart message. '''
+ if not self._part_iter:
+ self._part_iter = self._iterparse()
+ for part in self._done:
+ yield part
+ for part in self._part_iter:
+ self._done.append(part)
+ yield part
+
+ def parts(self):
+ ''' Returns a list with all parts of the multipart message. '''
+ return list(iter(self))
+
+ def get(self, name, default=None):
+ ''' Return the first part with that name or a default value (None). '''
+ for part in self:
+ if name == part.name:
+ return part
+ return default
+
+ def get_all(self, name):
+ ''' Return a list of parts with that name. '''
+ return [p for p in self if p.name == name]
+
+ def _lineiter(self):
+ ''' Iterate over a binary file-like object line by line. Each line is
+ returned as a (line, line_ending) tuple. If the line does not fit
+ into self.buffer_size, line_ending is empty and the rest of the line
+ is returned with the next iteration.
+ '''
+ read = self.stream.read
+ maxread, maxbuf = self.content_length, self.buffer_size
+ _bcrnl = tob('\r\n')
+ _bcr = _bcrnl[:1]
+ _bnl = _bcrnl[1:]
+        _bempty = _bcrnl[:0] # b'\r\n'[:0] -> b''
+ buffer = _bempty # buffer for the last (partial) line
+ while 1:
+ data = read(maxbuf if maxread < 0 else min(maxbuf, maxread))
+ maxread -= len(data)
+ lines = (buffer+data).splitlines(True)
+ len_first_line = len(lines[0])
+ # be sure that the first line does not become too big
+ if len_first_line > self.buffer_size:
+ # at the same time don't split a '\r\n' accidentally
+ if (len_first_line == self.buffer_size+1 and
+ lines[0].endswith(_bcrnl)):
+ splitpos = self.buffer_size - 1
+ else:
+ splitpos = self.buffer_size
+ lines[:1] = [lines[0][:splitpos],
+ lines[0][splitpos:]]
+ if data:
+ buffer = lines[-1]
+ lines = lines[:-1]
+ for line in lines:
+ if line.endswith(_bcrnl): yield line[:-2], _bcrnl
+ elif line.endswith(_bnl): yield line[:-1], _bnl
+ elif line.endswith(_bcr): yield line[:-1], _bcr
+ else: yield line, _bempty
+ if not data:
+ break
+
+ def _iterparse(self):
+ lines, line = self._lineiter(), ''
+ separator = tob('--') + tob(self.boundary)
+ terminator = tob('--') + tob(self.boundary) + tob('--')
+ # Consume first boundary. Ignore leading blank lines
+ for line, nl in lines:
+ if line: break
+ if line != separator:
+ raise MultipartError("Stream does not start with boundary")
+ # For each part in stream...
+ mem_used, disk_used = 0, 0 # Track used resources to prevent DoS
+    is_tail = False # True if the last line was incomplete (cut short)
+ opts = {'buffer_size': self.buffer_size,
+ 'memfile_limit': self.memfile_limit,
+ 'charset': self.charset}
+ part = MultipartPart(**opts)
+ for line, nl in lines:
+ if line == terminator and not is_tail:
+ part.file.seek(0)
+ yield part
+ break
+ elif line == separator and not is_tail:
+ if part.is_buffered(): mem_used += part.size
+ else: disk_used += part.size
+ part.file.seek(0)
+ yield part
+ part = MultipartPart(**opts)
+ else:
+ is_tail = not nl # The next line continues this one
+ part.feed(line, nl)
+ if part.is_buffered():
+ if part.size + mem_used > self.mem_limit:
+ raise MultipartError("Memory limit reached.")
+ elif part.size + disk_used > self.disk_limit:
+ raise MultipartError("Disk limit reached.")
+ if line != terminator:
+ raise MultipartError("Unexpected end of multipart stream.")
+
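+# Iteration sketch (hypothetical file name and boundary): the parser yields
+# MultipartPart objects lazily; parts() and get() above build on this.
+#
+#     parser = MultipartParser(open('request_body.bin', 'rb'), 'boundary42')
+#     for part in parser:
+#         print part.name, part.filename, part.size
+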
+
+class MultipartPart(object):
+
+ def __init__(self, buffer_size=2**16, memfile_limit=2**18, charset='latin1'):
+ self.headerlist = []
+ self.headers = None
+ self.file = False
+ self.size = 0
+ self._buf = tob('')
+ self.disposition, self.name, self.filename = None, None, None
+ self.content_type, self.charset = None, charset
+ self.memfile_limit = memfile_limit
+ self.buffer_size = buffer_size
+
+ def feed(self, line, nl=''):
+ if self.file:
+ return self.write_body(line, nl)
+ return self.write_header(line, nl)
+
+ def write_header(self, line, nl):
+ line = line.decode(self.charset or 'latin1')
+ if not nl: raise MultipartError('Unexpected end of line in header.')
+ if not line.strip(): # blank line -> end of header segment
+ self.finish_header()
+ elif line[0] in ' \t' and self.headerlist:
+ name, value = self.headerlist.pop()
+ self.headerlist.append((name, value+line.strip()))
+ else:
+ if ':' not in line:
+ raise MultipartError("Syntax error in header: No colon.")
+ name, value = line.split(':', 1)
+ self.headerlist.append((name.strip(), value.strip()))
+
+ def write_body(self, line, nl):
+ if not line and not nl: return # This does not even flush the buffer
+ self.size += len(line) + len(self._buf)
+ self.file.write(self._buf + line)
+ self._buf = nl
+ if self.content_length > 0 and self.size > self.content_length:
+ raise MultipartError('Size of body exceeds Content-Length header.')
+ if self.size > self.memfile_limit and isinstance(self.file, BytesIO):
+ # TODO: What about non-file uploads that exceed the memfile_limit?
+ self.file, old = TemporaryFile(mode='w+b'), self.file
+ old.seek(0)
+ copy_file(old, self.file, self.size, self.buffer_size)
+
+ def finish_header(self):
+ self.file = BytesIO()
+ self.headers = Headers(self.headerlist)
+ cdis = self.headers.get('Content-Disposition','')
+ ctype = self.headers.get('Content-Type','')
+ clen = self.headers.get('Content-Length','-1')
+ if not cdis:
+ raise MultipartError('Content-Disposition header is missing.')
+ self.disposition, self.options = parse_options_header(cdis)
+ self.name = self.options.get('name')
+ self.filename = self.options.get('filename')
+ self.content_type, options = parse_options_header(ctype)
+ self.charset = options.get('charset') or self.charset
+ self.content_length = int(self.headers.get('Content-Length','-1'))
+
+ def is_buffered(self):
+ ''' Return true if the data is fully buffered in memory.'''
+ return isinstance(self.file, BytesIO)
+
+ @property
+ def value(self):
+ ''' Data decoded with the specified charset '''
+ pos = self.file.tell()
+ self.file.seek(0)
+ val = self.file.read()
+ self.file.seek(pos)
+ return val.decode(self.charset)
+
+ def save_as(self, path):
+ fp = open(path, 'wb')
+ pos = self.file.tell()
+ try:
+ self.file.seek(0)
+ size = copy_file(self.file, fp)
+ finally:
+ self.file.seek(pos)
+ return size
+
+##############################################################################
+#################################### WSGI ####################################
+##############################################################################
+
+def parse_form_data(environ, charset='utf8', strict=False, **kw):
+ ''' Parse form data from an environ dict and return a (forms, files) tuple.
+ Both tuple values are dictionaries with the form-field name as a key
+ (unicode) and lists as values (multiple values per key are possible).
+ The forms-dictionary contains form-field values as unicode strings.
+ The files-dictionary contains :class:`MultipartPart` instances, either
+        because the form-field was a file-upload or the value is too big to fit
+ into memory limits.
+
+    :param environ: A WSGI environment dict.
+ :param charset: The charset to use if unsure. (default: utf8)
+ :param strict: If True, raise :exc:`MultipartError` on any parsing
+ errors. These are silently ignored by default.
+ '''
+
+ forms, files = MultiDict(), MultiDict()
+ try:
+ if environ.get('REQUEST_METHOD','GET').upper() not in ('POST', 'PUT'):
+ raise MultipartError("Request method other than POST or PUT.")
+ content_length = int(environ.get('CONTENT_LENGTH', '-1'))
+ content_type = environ.get('CONTENT_TYPE', '')
+ if not content_type:
+ raise MultipartError("Missing Content-Type header.")
+ content_type, options = parse_options_header(content_type)
+ stream = environ.get('wsgi.input') or BytesIO()
+ kw['charset'] = charset = options.get('charset', charset)
+ if content_type == 'multipart/form-data':
+ boundary = options.get('boundary','')
+ if not boundary:
+ raise MultipartError("No boundary for multipart/form-data.")
+ for part in MultipartParser(stream, boundary, content_length, **kw):
+ if part.filename or not part.is_buffered():
+ files[part.name] = part
+ else: # TODO: Big form-fields are in the files dict. really?
+ forms[part.name] = part.value
+ elif content_type in ('application/x-www-form-urlencoded',
+ 'application/x-url-encoded'):
+ mem_limit = kw.get('mem_limit', 2**20)
+ if content_length > mem_limit:
+ raise MultipartError("Request to big. Increase MAXMEM.")
+ data = stream.read(mem_limit).decode(charset)
+ if stream.read(1): # These is more that does not fit mem_limit
+ raise MultipartError("Request to big. Increase MAXMEM.")
+ data = parse_qs(data, keep_blank_values=True)
+ for key, values in data.iteritems():
+ for value in values:
+ forms[key] = value
+ else:
+ raise MultipartError("Unsupported content type.")
+ except MultipartError:
+ if strict: raise
+ return forms, files
+
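+# End-to-end sketch for parse_form_data with a hand-built WSGI environ
+# (hypothetical field data, urlencoded for brevity):
+#
+#     from io import BytesIO
+#     environ = {'REQUEST_METHOD': 'POST',
+#                'CONTENT_TYPE': 'application/x-www-form-urlencoded',
+#                'CONTENT_LENGTH': '8',
+#                'wsgi.input': BytesIO(tob('name=Bob'))}
+#     forms, files = parse_form_data(environ)
+#     forms.get('name')    # -> u'Bob'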
--- a/predicates.py Tue Jun 10 09:35:26 2014 +0200
+++ b/predicates.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -707,6 +707,22 @@
# entity predicates #############################################################
+class composite_etype(Predicate):
+ """Return 1 for composite entities.
+
+    An entity type is composite when at least one relation definition
+    points in its direction with the composite='subject'/'object'
+    notation.
+ """
+
+ def __call__(self, cls, req, **kwargs):
+ entity = kwargs.pop('entity', None)
+ if entity is None:
+ return 0
+ return entity.e_schema.is_composite
+
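+# Schema sketch of the composite notation this predicate relies on (names are
+# hypothetical, in the usual yams RelationDefinition style):
+#
+#     class entries(RelationDefinition):
+#         subject = 'Blog'
+#         object = 'Entry'
+#         composite = 'subject'  # the Blog owns its entries, so Blog is composite
+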
+
+
class non_final_entity(EClassPredicate):
"""Return 1 for entity of a non final entity type(s). Remember, "final"
entity types are String, Int, etc... This is equivalent to
@@ -1274,7 +1290,7 @@
@objectify_predicate
def authenticated_user(cls, req, **kwargs):
- """Return 1 if the user is authenticated (e.g. not the anonymous user).
+ """Return 1 if the user is authenticated (i.e. not the anonymous user).
May only be used on the web side, not on the data repository side.
"""
@@ -1285,7 +1301,7 @@
# XXX == ~ authenticated_user()
def anonymous_user():
- """Return 1 if the user is not authenticated (e.g. is the anonymous user).
+ """Return 1 if the user is not authenticated (i.e. is the anonymous user).
May only be used on the web side, not on the data repository side.
"""
--- a/pytestconf.py Tue Jun 10 09:35:26 2014 +0200
+++ b/pytestconf.py Tue Jun 10 09:49:45 2014 +0200
@@ -43,6 +43,6 @@
if not cls.repo.shutting_down:
cls.repo.shutdown()
del cls.repo
- for clsattr in ('cnx', '_orig_cnx', 'config', '_config', 'vreg', 'schema'):
+ for clsattr in ('cnx', 'config', '_config', 'vreg', 'schema'):
if clsattr in cls.__dict__:
delattr(cls, clsattr)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/repoapi.py Tue Jun 10 09:49:45 2014 +0200
@@ -0,0 +1,396 @@
+# copyright 2013-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""Official API to access the content of a repository
+"""
+from logilab.common.deprecation import deprecated
+
+from cubicweb.utils import parse_repo_uri
+from cubicweb import ConnectionError, ProgrammingError, AuthenticationError
+from uuid import uuid4
+from contextlib import contextmanager
+from cubicweb.req import RequestSessionBase
+from functools import wraps
+
+### private function for specific method ############################
+
+def _get_inmemory_repo(config, vreg=None):
+ from cubicweb.server.repository import Repository
+ from cubicweb.server.utils import TasksManager
+ return Repository(config, TasksManager(), vreg=vreg)
+
+
+### public API ######################################################
+
+def get_repository(uri=None, config=None, vreg=None):
+ """get a repository for the given URI or config/vregistry (in case we're
+ loading the repository for a client, eg web server, configuration).
+
+ The returned repository may be an in-memory repository or a proxy object
+ using a specific RPC method, depending on the given URI (pyro or zmq).
+ """
+ if uri is None:
+ return _get_inmemory_repo(config, vreg)
+
+ protocol, hostport, appid = parse_repo_uri(uri)
+
+ if protocol == 'inmemory':
+        # we may have been called with a dummy 'inmemory://' uri ...
+ return _get_inmemory_repo(config, vreg)
+
+ if protocol == 'pyroloc': # direct connection to the instance
+ from logilab.common.pyro_ext import get_proxy
+ uri = uri.replace('pyroloc', 'PYRO')
+ return get_proxy(uri)
+
+ if protocol == 'pyro': # connection mediated through the pyro ns
+ from logilab.common.pyro_ext import ns_get_proxy
+ path = appid.strip('/')
+ if not path:
+ raise ConnectionError(
+ "can't find instance name in %s (expected to be the path component)"
+ % uri)
+ if '.' in path:
+ nsgroup, nsid = path.rsplit('.', 1)
+ else:
+ nsgroup = 'cubicweb'
+ nsid = path
+ return ns_get_proxy(nsid, defaultnsgroup=nsgroup, nshost=hostport)
+
+ if protocol.startswith('zmqpickle-'):
+ from cubicweb.zmqclient import ZMQRepositoryClient
+ return ZMQRepositoryClient(uri)
+ else:
+ raise ConnectionError('unknown protocol: `%s`' % protocol)
+
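+# URI sketch (hypothetical hosts and instance ids) for the dispatch above:
+#
+#     get_repository('inmemory://', config=config)    # in-memory repository
+#     get_repository('pyro://nshost/myapp')           # through the pyro name server
+#     get_repository('pyroloc://host:7777/myapp')     # direct pyro connection
+#     get_repository('zmqpickle-tcp://host:8181')     # zmq RPC client
+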
+def connect(repo, login, **kwargs):
+ """Take credential and return associated ClientConnection.
+
+ The ClientConnection is associated to a new Session object that will be
+ closed when the ClientConnection is closed.
+
+    raise AuthenticationError if the credentials are invalid."""
+ sessionid = repo.connect(login, **kwargs)
+ session = repo._get_session(sessionid)
+    # XXX the autoclose_session should probably be handled on the session directly;
+ # this is something to consider once we have proper server side Connection.
+ return ClientConnection(session, autoclose_session=True)
+
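+# Connection sketch (hypothetical credentials): ClientConnection is a context
+# manager, so the session opened by connect() is released on exit.
+#
+#     repo = get_repository('inmemory://', config=config)
+#     with connect(repo, 'admin', password='admin') as cnx:
+#         rset = cnx.execute('Any X WHERE X is CWUser')
+#         cnx.commit()
+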
+def anonymous_cnx(repo):
+ """return a ClientConnection for Anonymous user.
+
+ The ClientConnection is associated to a new Session object that will be
+ closed when the ClientConnection is closed.
+
+ raises an AuthenticationError if anonymous usage is not allowed
+ """
+ anoninfo = getattr(repo.config, 'anonymous_user', lambda: None)()
+ if anoninfo is None: # no anonymous user
+ raise AuthenticationError('anonymous access is not authorized')
+ anon_login, anon_password = anoninfo
+ # use vreg's repository cache
+ return connect(repo, anon_login, password=anon_password)
+
+def _srv_cnx_func(name):
+ """Decorate ClientConnection method blindly forward to Connection
+ THIS TRANSITIONAL PURPOSE
+
+ will be dropped when we have standalone connection"""
+ def proxy(clt_cnx, *args, **kwargs):
+        # the ``with`` dance is transitional. We do not have a standalone
+        # Connection yet, so we use this trick to ensure the session has the
+        # proper cnx loaded. This can be simplified once we have a standalone
+        # Connection object.
+ if not clt_cnx._open:
+ raise ProgrammingError('Closed client connection')
+ return getattr(clt_cnx._cnx, name)(*args, **kwargs)
+ return proxy
+
+def _open_only(func):
+ """decorator for ClientConnection method that check it is open"""
+ @wraps(func)
+ def check_open(clt_cnx, *args, **kwargs):
+ if not clt_cnx._open:
+ raise ProgrammingError('Closed client connection')
+ return func(clt_cnx, *args, **kwargs)
+ return check_open
+
+
+class ClientConnection(RequestSessionBase):
+ """A Connection object to be used Client side.
+
+ This object is aimed to be used client side (so potential communication
+ with the repo through RPC) and aims to offer some compatibility with the
+ cubicweb.dbapi.Connection interface.
+
+ The autoclose_session parameter informs the connection that this session
+ has been opened explicitly and only for this client connection. The
+ connection will close the session on exit.
+ """
+ # make exceptions available through the connection object
+ ProgrammingError = ProgrammingError
+    # attributes that may be overridden per connection instance
+ anonymous_connection = False # XXX really needed ?
+ is_repo_in_memory = True # BC, always true
+
+ def __init__(self, session, autoclose_session=False):
+        self._session = session # XXX there is no real reason to keep the
+                                # session around; functions still using it
+                                # should be rewritten and migrated.
+ self._cnx = None
+ self._open = None
+ self._web_request = False
+ #: cache entities built during the connection
+ self._eid_cache = {}
+ self.vreg = session.vreg
+ self._set_user(session.user)
+ self._autoclose_session = autoclose_session
+
+ def __enter__(self):
+ assert self._open is None
+ self._open = True
+ self._cnx = self._session.new_cnx()
+ self._cnx.__enter__()
+ self._cnx.ctx_count += 1
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self._open = False
+ self._cnx.ctx_count -= 1
+ self._cnx.__exit__(exc_type, exc_val, exc_tb)
+ self._cnx = None
+ if self._autoclose_session:
+ # we have to call repo.close to ensure the repo properly forgets the
+ # session; calling session.close() is not enough :-(
+ self._session.repo.close(self._session.sessionid)
+
+
+ # begin silly BC
+ @property
+ def _closed(self):
+ return not self._open
+
+ def close(self):
+ if self._open:
+ self.__exit__(None, None, None)
+
+ def __repr__(self):
+ # XXX we probably want to reference the user of the session here
+ if self._open is None:
+ return '<ClientConnection (not open yet)>'
+ elif not self._open:
+ return '<ClientConnection (closed)>'
+ elif self.anonymous_connection:
+ return '<ClientConnection %s (anonymous)>' % self._cnx.connectionid
+ else:
+ return '<ClientConnection %s>' % self._cnx.connectionid
+ # end silly BC
+
+ # Main Connection purpose in life #########################################
+
+ call_service = _srv_cnx_func('call_service')
+
+ @_open_only
+ def execute(self, *args, **kwargs):
+        # the ``with`` dance is transitional. We do not have a standalone
+        # Connection yet, so we use this trick to ensure the session has the
+        # proper cnx loaded. This can be simplified once we have a standalone
+        # Connection object.
+ rset = self._cnx.execute(*args, **kwargs)
+ rset.req = self
+ # XXX keep the same behavior as the old dbapi
+ # otherwise multiple tests break.
+ # The little internet kitten is very sad about this situation.
+ rset._rqlst = None
+ return rset
+
+ @_open_only
+ def commit(self, *args, **kwargs):
+ try:
+ return self._cnx.commit(*args, **kwargs)
+ finally:
+ self.drop_entity_cache()
+
+ @_open_only
+ def rollback(self, *args, **kwargs):
+ try:
+ return self._cnx.rollback(*args, **kwargs)
+ finally:
+ self.drop_entity_cache()
+
+ # security #################################################################
+
+ allow_all_hooks_but = _srv_cnx_func('allow_all_hooks_but')
+ deny_all_hooks_but = _srv_cnx_func('deny_all_hooks_but')
+ security_enabled = _srv_cnx_func('security_enabled')
+
+ # direct sql ###############################################################
+
+ system_sql = _srv_cnx_func('system_sql')
+
+ # session data methods #####################################################
+
+ get_shared_data = _srv_cnx_func('get_shared_data')
+ set_shared_data = _srv_cnx_func('set_shared_data')
+
+ # meta-data accessors ######################################################
+
+ @_open_only
+ def source_defs(self):
+ """Return the definition of sources used by the repository."""
+ return self._session.repo.source_defs()
+
+ @_open_only
+ def get_schema(self):
+ """Return the schema currently used by the repository."""
+        return self._session.repo.get_schema()
+
+ @_open_only
+ def get_option_value(self, option):
+ """Return the value for `option` in the configuration."""
+ return self._session.repo.get_option_value(option)
+
+ entity_metas = _srv_cnx_func('entity_metas')
+ describe = _srv_cnx_func('describe') # XXX deprecated in 3.19
+
+ # undo support ############################################################
+
+ @_open_only
+ def undoable_transactions(self, ueid=None, req=None, **actionfilters):
+ """Return a list of undoable transaction objects by the connection's
+ user, ordered by descendant transaction time.
+
+ Managers may filter according to user (eid) who has done the transaction
+ using the `ueid` argument. Others will only see their own transactions.
+
+        Additional filtering capabilities are provided by using the following
+ named arguments:
+
+ * `etype` to get only transactions creating/updating/deleting entities
+ of the given type
+
+ * `eid` to get only transactions applied to entity of the given eid
+
+ * `action` to get only transactions doing the given action (action in
+ 'C', 'U', 'D', 'A', 'R'). If `etype`, action can only be 'C', 'U' or
+ 'D'.
+
+        * `public`: when additional filtering is provided, they are by default
+ only searched in 'public' actions, unless a `public` argument is given
+ and set to false.
+ """
+ # the ``with`` dance is transitional. We do not have a Standalone
+ # Connection yet, so we use this trick to ensure the session has the
+ # proper cnx loaded. This can be simplified once we have a Standalone
+ # Connection object.
+ source = self._cnx.repo.system_source
+ txinfos = source.undoable_transactions(self._cnx, ueid, **actionfilters)
+ for txinfo in txinfos:
+ txinfo.req = req or self # XXX mostly wrong
+ return txinfos
+
+ @_open_only
+ def transaction_info(self, txuuid, req=None):
+ """Return transaction object for the given uid.
+
+ raise `NoSuchTransaction` if not found or if the session's user is not
+ allowed (e.g. not in the managers group and the transaction doesn't
+ belong to them).
+ """
+ # the ``with`` dance is transitional. We do not have a Standalone
+ # Connection yet, so we use this trick to ensure the session has the
+ # proper cnx loaded. This can be simplified once we have a Standalone
+ # Connection object.
+ txinfo = self._cnx.repo.system_source.tx_info(self._cnx, txuuid)
+ if req:
+ txinfo.req = req
+ else:
+ txinfo.cnx = self
+ return txinfo
+
+ @_open_only
+ def transaction_actions(self, txuuid, public=True):
+ """Return an ordered list of action effectued during that transaction.
+
+ If public is true, return only 'public' actions, i.e. not ones triggered
+ behind the scenes by hooks; else return all actions.
+
+ raise `NoSuchTransaction` if the transaction is not found or if the
+ session's user is not allowed (e.g. not in the managers group and the
+ transaction doesn't belong to them).
+ """
+ # the ``with`` dance is transitional. We do not have a Standalone
+ # Connection yet, so we use this trick to ensure the session has the
+ # proper cnx loaded. This can be simplified once we have a Standalone
+ # Connection object.
+ return self._cnx.repo.system_source.tx_actions(self._cnx, txuuid, public)
+
+ @_open_only
+ def undo_transaction(self, txuuid):
+ """Undo the given transaction. Return potential restoration errors.
+
+ raise `NoSuchTransaction` if not found or if the session's user is not
+ allowed (e.g. not in the managers group and the transaction doesn't
+ belong to them).
+ """
+ # the ``with`` dance is transitional. We do not have a Standalone
+ # Connection yet, so we use this trick to ensure the session has the
+ # proper cnx loaded. This can be simplified once we have a Standalone
+ # Connection object.
+ return self._cnx.repo.system_source.undo_transaction(self._cnx, txuuid)
+
+ # cache management
+
+ def entity_cache(self, eid):
+ return self._eid_cache[eid]
+
+ def set_entity_cache(self, entity):
+ self._eid_cache[entity.eid] = entity
+
+ def cached_entities(self):
+ return self._eid_cache.values()
+
+ def drop_entity_cache(self, eid=None):
+ if eid is None:
+ self._eid_cache = {}
+ else:
+ del self._eid_cache[eid]
+
+ # deprecated stuff
+
+ @deprecated('[3.19] This is a repoapi.ClientConnection object not a dbapi one')
+ def request(self):
+ return self
+
+ @deprecated('[3.19] This is a repoapi.ClientConnection object not a dbapi one')
+ def cursor(self):
+ return self
+
+ @property
+ @deprecated('[3.19] This is a repoapi.ClientConnection object not a dbapi one')
+ def sessionid(self):
+ return self._session.sessionid
+
+ @property
+ @deprecated('[3.19] This is a repoapi.ClientConnection object not a dbapi one')
+ def connection(self):
+ return self
+
+ @property
+ @deprecated('[3.19] This is a repoapi.ClientConnection object not a dbapi one')
+ def _repo(self):
+ return self._session.repo
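A minimal sketch of driving the new ClientConnection end to end, assuming a local instance id 'myapp' and admin credentials (all placeholders) and undo support enabled in the instance configuration:

    from cubicweb import repoapi
    from cubicweb.cwconfig import CubicWebConfiguration

    config = CubicWebConfiguration.config_for('myapp')  # placeholder appid
    repo = repoapi.get_repository(config=config)
    with repoapi.connect(repo, 'admin', password='admin') as cnx:
        rset = cnx.execute('Any X WHERE X is CWUser')
        print len(rset)
        # the undo API lives on the same object
        for tx in cnx.undoable_transactions():
            print tx.uuid, cnx.transaction_actions(tx.uuid)
        cnx.commit()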
--- a/req.py Tue Jun 10 09:35:26 2014 +0200
+++ b/req.py Tue Jun 10 09:49:45 2014 +0200
@@ -75,6 +75,23 @@
self.local_perm_cache = {}
self._ = unicode
+ def _set_user(self, orig_user):
+ """set the user for this req_session_base
+
+ A special method is needed to ensure the linked user is linked to the
+ connection too.
+ """
+ # cnx validity is checked by the call to .user_info
+ rset = self.eid_rset(orig_user.eid, 'CWUser')
+ user_cls = self.vreg['etypes'].etype_class('CWUser')
+ user = user_cls(self, rset, row=0, groups=orig_user.groups,
+ properties=orig_user.properties)
+ user.cw_attr_cache['login'] = orig_user.login # cache login
+ self.user = user
+ self.set_entity_cache(user)
+ self.set_language(user.prefered_language())
+
+
def set_language(self, lang):
"""install i18n configuration for `lang` translation.
@@ -86,7 +103,7 @@
self._ = self.__ = gettext
self.pgettext = pgettext
- def get_option_value(self, option, foreid=None):
+ def get_option_value(self, option):
raise NotImplementedError
def property_value(self, key):
@@ -94,7 +111,9 @@
user specific value if any, else using site value
"""
if self.user:
- return self.user.property_value(key)
+ val = self.user.property_value(key)
+ if val is not None:
+ return val
return self.vreg.property_value(key)
def etype_rset(self, etype, size=1):
@@ -114,7 +133,7 @@
"""
eid = int(eid)
if etype is None:
- etype = self.describe(eid)[0]
+ etype = self.entity_metas(eid)['type']
rset = ResultSet([(eid,)], 'Any X WHERE X eid %(x)s', {'x': eid},
[(etype,)])
rset.req = self
@@ -188,7 +207,7 @@
"""
parts = ['Any X WHERE X is %s' % etype]
varmaker = rqlvar_maker(defined='X')
- eschema = self.vreg.schema[etype]
+ eschema = self.vreg.schema.eschema(etype)
for attr, value in kwargs.items():
if isinstance(value, list) or isinstance(value, tuple):
raise NotImplementedError("List of values are not supported")
@@ -224,6 +243,11 @@
- cubes.blog.mycache
- etc.
"""
+ warn('[3.19] .get_cache will disappear soon. '
+ 'Distributed caching mechanisms are being introduced instead. '
+ 'Other caching mechanisms can be used more reliably '
+ 'to the same effect.',
+ DeprecationWarning)
if cachename in CACHE_REGISTRY:
cache = CACHE_REGISTRY[cachename]
else:
@@ -253,24 +277,20 @@
"""
# use *args since we don't want first argument to be "anonymous" to
# avoid potential clash with kwargs
+ method = None
if args:
assert len(args) == 1, 'only 0 or 1 non-named-argument expected'
method = args[0]
- else:
- method = None
+ if method is None:
+ method = 'view'
# XXX I (adim) think that if method is passed explicitly, we should
# not try to process it and directly call req.build_url()
- if method is None:
- if self.from_controller() == 'view' and not '_restpath' in kwargs:
- method = self.relative_path(includeparams=False) or 'view'
- else:
- method = 'view'
base_url = kwargs.pop('base_url', None)
if base_url is None:
secure = kwargs.pop('__secure__', None)
base_url = self.base_url(secure=secure)
if '_restpath' in kwargs:
- assert method == 'view', method
+ assert method == 'view', repr(method)
path = kwargs.pop('_restpath')
else:
path = method
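The `describe()` to `entity_metas()` migration seen in this hunk (and repeated throughout the patch) is mechanical; a sketch, assuming `req` is any request or connection object:

    # old, deprecated in 3.19 (returned a (type, source, extid) tuple):
    #     etype = req.describe(eid)[0]
    # new, a dict carrying at least the entity's type:
    etype = req.entity_metas(eid)['type']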
--- a/schema.py Tue Jun 10 09:35:26 2014 +0200
+++ b/schema.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -68,10 +68,13 @@
'allowed_transition', 'destination_state',
'from_state', 'to_state', 'condition',
'subworkflow', 'subworkflow_state', 'subworkflow_exit',
+ 'by_transition',
))
SYSTEM_RTYPES = set(('in_group', 'require_group',
# cwproperty
'for_user',
+ 'cw_schema', 'cw_import_of', 'cw_for_source',
+ 'cw_host_config_of',
)) | WORKFLOW_RTYPES
NO_I18NCONTEXT = META_RTYPES | WORKFLOW_RTYPES
@@ -676,6 +679,34 @@
eid = getattr(edef, 'eid', None)
self.eid = eid
+ def targets(self, role):
+ assert role in ('subject', 'object')
+ if role == 'subject':
+ return self.subjrels.values()
+ return self.objrels.values()
+
+ @cachedproperty
+ def composite_rdef_roles(self):
+ """Return all relation definitions that define the current entity
+ type as a composite.
+ """
+ rdef_roles = []
+ for role in ('subject', 'object'):
+ for rschema in self.targets(role):
+ if rschema.final:
+ continue
+ for rdef in rschema.rdefs.values():
+ if (role == 'subject' and rdef.subject == self) or \
+ (role == 'object' and rdef.object == self):
+ crole = rdef.composite
+ if crole == role:
+ rdef_roles.append((rdef, role))
+ return rdef_roles
+
+ @cachedproperty
+ def is_composite(self):
+ return bool(self.composite_rdef_roles)
+
def check_permission_definitions(self):
super(CubicWebEntitySchema, self).check_permission_definitions()
for groups in self.permissions.itervalues():
@@ -819,20 +850,20 @@
assert not ('fromeid' in kwargs or 'toeid' in kwargs), kwargs
assert action in ('read', 'update')
if 'eid' in kwargs:
- subjtype = _cw.describe(kwargs['eid'])[0]
+ subjtype = _cw.entity_metas(kwargs['eid'])['type']
else:
subjtype = objtype = None
else:
assert not 'eid' in kwargs, kwargs
assert action in ('read', 'add', 'delete')
if 'fromeid' in kwargs:
- subjtype = _cw.describe(kwargs['fromeid'])[0]
+ subjtype = _cw.entity_metas(kwargs['fromeid'])['type']
elif 'frometype' in kwargs:
subjtype = kwargs.pop('frometype')
else:
subjtype = None
if 'toeid' in kwargs:
- objtype = _cw.describe(kwargs['toeid'])[0]
+ objtype = _cw.entity_metas(kwargs['toeid'])['type']
elif 'toetype' in kwargs:
objtype = kwargs.pop('toetype')
else:
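The new `is_composite` / `composite_rdef_roles` cached properties make composite detection declarative; a sketch against a loaded schema, where 'Blog' is an illustrative entity type:

    eschema = schema.eschema('Blog')      # 'Blog' is illustrative
    if eschema.is_composite:              # cachedproperty: no call parentheses
        for rdef, role in eschema.composite_rdef_roles:
            print 'composite as %s in %s' % (role, rdef)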
--- a/server/__init__.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/__init__.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -31,8 +31,6 @@
from logilab.common.modutils import LazyObject
from logilab.common.textutils import splitstrip
from logilab.common.registry import yes
-from logilab import database
-
from yams import BASE_GROUPS
from cubicweb import CW_SOFTWARE_ROOT
@@ -204,7 +202,7 @@
with the minimal set of entities (ie at least the schema, base groups and
a initial user)
"""
- from cubicweb.dbapi import in_memory_repo_cnx
+ from cubicweb.repoapi import get_repository, connect
from cubicweb.server.repository import Repository
from cubicweb.server.utils import manager_userpasswd
from cubicweb.server.sqlutils import sqlexec, sqlschema, sql_drop_all_user_tables
@@ -218,7 +216,7 @@
# only enable the system source at initialization time
repo = Repository(config, vreg=vreg)
schema = repo.schema
- sourcescfg = config.sources()
+ sourcescfg = config.read_sources_file()
source = sourcescfg['system']
driver = source['db-driver']
sqlcnx = repo.system_source.get_connection()
@@ -257,49 +255,48 @@
sqlcursor.close()
sqlcnx.commit()
sqlcnx.close()
- session = repo.internal_session()
- # insert entity representing the system source
- ssource = session.create_entity('CWSource', type=u'native', name=u'system')
- repo.system_source.eid = ssource.eid
- session.execute('SET X cw_source X WHERE X eid %(x)s', {'x': ssource.eid})
- # insert base groups and default admin
- print '-> inserting default user and default groups.'
- try:
- login = unicode(sourcescfg['admin']['login'])
- pwd = sourcescfg['admin']['password']
- except KeyError:
- if interactive:
- msg = 'enter login and password of the initial manager account'
- login, pwd = manager_userpasswd(msg=msg, confirm=True)
- else:
- login, pwd = unicode(source['db-user']), source['db-password']
- # sort for eid predicatability as expected in some server tests
- for group in sorted(BASE_GROUPS):
- session.create_entity('CWGroup', name=unicode(group))
- admin = create_user(session, login, pwd, 'managers')
- session.execute('SET X owned_by U WHERE X is IN (CWGroup,CWSource), U eid %(u)s',
- {'u': admin.eid})
- session.commit()
- session.close()
+ with repo.internal_cnx() as cnx:
+ # insert entity representing the system source
+ ssource = cnx.create_entity('CWSource', type=u'native', name=u'system')
+ repo.system_source.eid = ssource.eid
+ cnx.execute('SET X cw_source X WHERE X eid %(x)s', {'x': ssource.eid})
+ # insert base groups and default admin
+ print '-> inserting default user and default groups.'
+ try:
+ login = unicode(sourcescfg['admin']['login'])
+ pwd = sourcescfg['admin']['password']
+ except KeyError:
+ if interactive:
+ msg = 'enter login and password of the initial manager account'
+ login, pwd = manager_userpasswd(msg=msg, confirm=True)
+ else:
+ login, pwd = unicode(source['db-user']), source['db-password']
+ # sort for eid predictability as expected in some server tests
+ for group in sorted(BASE_GROUPS):
+ cnx.create_entity('CWGroup', name=unicode(group))
+ admin = create_user(cnx, login, pwd, 'managers')
+ cnx.execute('SET X owned_by U WHERE X is IN (CWGroup,CWSource), U eid %(u)s',
+ {'u': admin.eid})
+ cnx.commit()
repo.shutdown()
# reloging using the admin user
config._cubes = None # avoid assertion error
- repo, cnx = in_memory_repo_cnx(config, login, password=pwd)
- repo.system_source.eid = ssource.eid # redo this manually
- assert len(repo.sources) == 1, repo.sources
- handler = config.migration_handler(schema, interactive=False,
- cnx=cnx, repo=repo)
- # install additional driver specific sql files
- handler.cmd_install_custom_sql_scripts()
- for cube in reversed(config.cubes()):
- handler.cmd_install_custom_sql_scripts(cube)
- # serialize the schema
- initialize_schema(config, schema, handler)
- # yoo !
- cnx.commit()
- repo.system_source.init_creating()
- cnx.commit()
- cnx.close()
+ repo = get_repository(config=config)
+ with connect(repo, login, password=pwd) as cnx:
+ with cnx.security_enabled(False, False):
+ repo.system_source.eid = ssource.eid # redo this manually
+ handler = config.migration_handler(schema, interactive=False,
+ cnx=cnx, repo=repo)
+ # install additional driver specific sql files
+ handler.cmd_install_custom_sql_scripts()
+ for cube in reversed(config.cubes()):
+ handler.cmd_install_custom_sql_scripts(cube)
+ # serialize the schema
+ initialize_schema(config, schema, handler)
+ # yoo !
+ cnx.commit()
+ repo.system_source.init_creating()
+ cnx.commit()
repo.shutdown()
# restore initial configuration
config.creating = False
@@ -312,13 +309,13 @@
def initialize_schema(config, schema, mhandler, event='create'):
from cubicweb.server.schemaserial import serialize_schema
- session = mhandler.session
+ cnx = mhandler.cnx
cubes = config.cubes()
# deactivate every hooks but those responsible to set metadata
# so, NO INTEGRITY CHECKS are done, to have quicker db creation.
# Active integrity is kept else we may pb such as two default
# workflows for one entity type.
- with session.deny_all_hooks_but('metadata', 'activeintegrity'):
+ with cnx.deny_all_hooks_but('metadata', 'activeintegrity'):
# execute cubicweb's pre<event> script
mhandler.cmd_exec_event_script('pre%s' % event)
# execute cubes pre<event> script if any
@@ -327,8 +324,7 @@
# execute instance's pre<event> script (useful in tests)
mhandler.cmd_exec_event_script('pre%s' % event, apphome=True)
# enter instance'schema into the database
- session.set_cnxset()
- serialize_schema(session, schema)
+ serialize_schema(cnx, schema)
# execute cubicweb's post<event> script
mhandler.cmd_exec_event_script('post%s' % event)
# execute cubes'post<event> script if any
@@ -353,6 +349,4 @@
SOURCE_TYPES = {'native': LazyObject('cubicweb.server.sources.native', 'NativeSQLSource'),
'datafeed': LazyObject('cubicweb.server.sources.datafeed', 'DataFeedSource'),
'ldapfeed': LazyObject('cubicweb.server.sources.ldapfeed', 'LDAPFeedSource'),
- 'pyrorql': LazyObject('cubicweb.server.sources.pyrorql', 'PyroRQLSource'),
- 'zmqrql': LazyObject('cubicweb.server.sources.zmqrql', 'ZMQRQLSource'),
}
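The bootstrap above replaces `repo.internal_session()` with the connection-based `repo.internal_cnx()` context manager; the same pattern applies to any privileged server-side work (the entity created here is illustrative):

    with repo.internal_cnx() as cnx:
        cnx.create_entity('CWGroup', name=u'reviewers')  # illustrative payload
        cnx.commit()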
--- a/server/checkintegrity.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/checkintegrity.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -35,29 +35,17 @@
sys.stderr.write(' [FIXED]')
sys.stderr.write('\n')
-def has_eid(session, sqlcursor, eid, eids):
+def has_eid(cnx, sqlcursor, eid, eids):
"""return true if the eid is a valid eid"""
if eid in eids:
return eids[eid]
- sqlcursor.execute('SELECT type, source FROM entities WHERE eid=%s' % eid)
+ sqlcursor.execute('SELECT type FROM entities WHERE eid=%s' % eid)
try:
- etype, source = sqlcursor.fetchone()
+ etype = sqlcursor.fetchone()[0]
except Exception:
eids[eid] = False
return False
- if source and source != 'system':
- try:
- # insert eid *and* etype to attempt checking entity has not been
- # replaced by another subsquently to a restore of an old dump
- if session.execute('Any X WHERE X is %s, X eid %%(x)s' % etype,
- {'x': eid}):
- eids[eid] = True
- return True
- except Exception: # TypeResolverError, Unauthorized...
- pass
- eids[eid] = False
- return False
- if etype not in session.vreg.schema:
+ if etype not in cnx.vreg.schema:
eids[eid] = False
return False
sqlcursor.execute('SELECT * FROM %s%s WHERE %seid=%s' % (SQL_PREFIX, etype,
@@ -94,16 +82,17 @@
else:
yield eschema
-def reindex_entities(schema, session, withpb=True, etypes=None):
+def reindex_entities(schema, cnx, withpb=True, etypes=None):
"""reindex all entities in the repository"""
# deactivate modification_date hook since we don't want them
# to be updated due to the reindexation
- repo = session.repo
- cursor = session.cnxset['system']
- dbhelper = session.repo.system_source.dbhelper
- if not dbhelper.has_fti_table(cursor):
- print 'no text index table'
- dbhelper.init_fti(cursor)
+ repo = cnx.repo
+ dbhelper = repo.system_source.dbhelper
+ with cnx.ensure_cnx_set:
+ cursor = cnx.cnxset.cu
+ if not dbhelper.has_fti_table(cursor):
+ print 'no text index table'
+ dbhelper.init_fti(cursor)
repo.system_source.do_fti = True # ensure full-text indexation is activated
if etypes is None:
print 'Reindexing entities'
@@ -117,15 +106,15 @@
for container in etype_fti_containers(eschema):
etypes.add(container)
# clear fti table first
- session.system_sql('DELETE FROM %s' % dbhelper.fti_table)
+ cnx.system_sql('DELETE FROM %s' % dbhelper.fti_table)
else:
print 'Reindexing entities of type %s' % \
', '.join(sorted(str(e) for e in etypes))
# clear fti table first. Use subquery for sql compatibility
- session.system_sql("DELETE FROM %s WHERE EXISTS(SELECT 1 FROM ENTITIES "
- "WHERE eid=%s AND type IN (%s))" % (
- dbhelper.fti_table, dbhelper.fti_uid_attr,
- ','.join("'%s'" % etype for etype in etypes)))
+ cnx.system_sql("DELETE FROM %s WHERE EXISTS(SELECT 1 FROM ENTITIES "
+ "WHERE eid=%s AND type IN (%s))" % (
+ dbhelper.fti_table, dbhelper.fti_uid_attr,
+ ','.join("'%s'" % etype for etype in etypes)))
if withpb:
pb = ProgressBar(len(etypes) + 1)
pb.update()
@@ -133,17 +122,19 @@
# attribute to their current value
source = repo.system_source
for eschema in etypes:
- etype_class = session.vreg['etypes'].etype_class(str(eschema))
- for fti_rql in etype_class.cw_fti_index_rql_queries(session):
- rset = session.execute(fti_rql)
- source.fti_index_entities(session, rset.entities())
+ etype_class = cnx.vreg['etypes'].etype_class(str(eschema))
+ for fti_rql in etype_class.cw_fti_index_rql_queries(cnx):
+ rset = cnx.execute(fti_rql)
+ source.fti_index_entities(cnx, rset.entities())
# clear entity cache to avoid high memory consumption on big tables
- session.drop_entity_cache()
+ cnx.drop_entity_cache()
if withpb:
pb.update()
+ if withpb:
+ pb.finish()
-def check_schema(schema, session, eids, fix=1):
+def check_schema(schema, cnx, eids, fix=1):
"""check serialized schema"""
print 'Checking serialized schema'
unique_constraints = ('SizeConstraint', 'FormatConstraint',
@@ -153,7 +144,7 @@
'WHERE X is CWConstraint, R constrained_by X, '
'R relation_type RT, RT name RN, R from_entity ST, ST name SN, '
'R to_entity OT, OT name ON, X cstrtype CT, CT name CTN')
- for count, rn, sn, on, cstrname in session.execute(rql):
+ for count, rn, sn, on, cstrname in cnx.execute(rql):
if count == 1:
continue
if cstrname in unique_constraints:
@@ -164,37 +155,38 @@
-def check_text_index(schema, session, eids, fix=1):
+def check_text_index(schema, cnx, eids, fix=1):
"""check all entities registered in the text index"""
print 'Checking text index'
msg = ' Entity with eid %s exists in the text index but in no source (autofix will remove from text index)'
- cursor = session.system_sql('SELECT uid FROM appears;')
+ cursor = cnx.system_sql('SELECT uid FROM appears;')
for row in cursor.fetchall():
eid = row[0]
- if not has_eid(session, cursor, eid, eids):
+ if not has_eid(cnx, cursor, eid, eids):
sys.stderr.write(msg % eid)
if fix:
- session.system_sql('DELETE FROM appears WHERE uid=%s;' % eid)
+ cnx.system_sql('DELETE FROM appears WHERE uid=%s;' % eid)
notify_fixed(fix)
-def check_entities(schema, session, eids, fix=1):
+def check_entities(schema, cnx, eids, fix=1):
"""check all entities registered in the repo system table"""
print 'Checking entities system table'
# system table but no source
msg = ' Entity %s with eid %s exists in the system table but in no source (autofix will delete the entity)'
- cursor = session.system_sql('SELECT eid,type FROM entities;')
+ cursor = cnx.system_sql('SELECT eid,type FROM entities;')
for row in cursor.fetchall():
eid, etype = row
- if not has_eid(session, cursor, eid, eids):
+ if not has_eid(cnx, cursor, eid, eids):
sys.stderr.write(msg % (etype, eid))
if fix:
- session.system_sql('DELETE FROM entities WHERE eid=%s;' % eid)
+ cnx.system_sql('DELETE FROM entities WHERE eid=%s;' % eid)
notify_fixed(fix)
# source in entities, but no relation cw_source
- applcwversion = session.repo.get_versions().get('cubicweb')
- if applcwversion >= (3,13,1): # entities.asource appeared in 3.13.1
- cursor = session.system_sql('SELECT e.eid FROM entities as e, cw_CWSource as s '
+ # XXX this (get_versions) requires a second connection to the db when we already have one open
+ applcwversion = cnx.repo.get_versions().get('cubicweb')
+ if applcwversion >= (3, 13, 1): # entities.asource appeared in 3.13.1
+ cursor = cnx.system_sql('SELECT e.eid FROM entities as e, cw_CWSource as s '
'WHERE s.cw_name=e.asource AND '
'NOT EXISTS(SELECT 1 FROM cw_source_relation as cs '
' WHERE cs.eid_from=e.eid AND cs.eid_to=s.cw_eid) '
@@ -204,35 +196,35 @@
for row in cursor.fetchall():
sys.stderr.write(msg % row[0])
if fix:
- session.system_sql('INSERT INTO cw_source_relation (eid_from, eid_to) '
+ cnx.system_sql('INSERT INTO cw_source_relation (eid_from, eid_to) '
'SELECT e.eid, s.cw_eid FROM entities as e, cw_CWSource as s '
'WHERE s.cw_name=e.asource AND NOT EXISTS(SELECT 1 FROM cw_source_relation as cs '
' WHERE cs.eid_from=e.eid AND cs.eid_to=s.cw_eid)')
notify_fixed(True)
# inconsistencies for 'is'
msg = ' %s #%s is missing relation "is" (autofix will create the relation)\n'
- cursor = session.system_sql('SELECT e.type, e.eid FROM entities as e, cw_CWEType as s '
+ cursor = cnx.system_sql('SELECT e.type, e.eid FROM entities as e, cw_CWEType as s '
'WHERE s.cw_name=e.type AND NOT EXISTS(SELECT 1 FROM is_relation as cs '
' WHERE cs.eid_from=e.eid AND cs.eid_to=s.cw_eid) '
'ORDER BY e.eid')
for row in cursor.fetchall():
sys.stderr.write(msg % row)
if fix:
- session.system_sql('INSERT INTO is_relation (eid_from, eid_to) '
+ cnx.system_sql('INSERT INTO is_relation (eid_from, eid_to) '
'SELECT e.eid, s.cw_eid FROM entities as e, cw_CWEType as s '
'WHERE s.cw_name=e.type AND NOT EXISTS(SELECT 1 FROM is_relation as cs '
' WHERE cs.eid_from=e.eid AND cs.eid_to=s.cw_eid)')
notify_fixed(True)
# inconsistencies for 'is_instance_of'
msg = ' %s #%s is missing relation "is_instance_of" (autofix will create the relation)\n'
- cursor = session.system_sql('SELECT e.type, e.eid FROM entities as e, cw_CWEType as s '
+ cursor = cnx.system_sql('SELECT e.type, e.eid FROM entities as e, cw_CWEType as s '
'WHERE s.cw_name=e.type AND NOT EXISTS(SELECT 1 FROM is_instance_of_relation as cs '
' WHERE cs.eid_from=e.eid AND cs.eid_to=s.cw_eid) '
'ORDER BY e.eid')
for row in cursor.fetchall():
sys.stderr.write(msg % row)
if fix:
- session.system_sql('INSERT INTO is_instance_of_relation (eid_from, eid_to) '
+ cnx.system_sql('INSERT INTO is_instance_of_relation (eid_from, eid_to) '
'SELECT e.eid, s.cw_eid FROM entities as e, cw_CWEType as s '
'WHERE s.cw_name=e.type AND NOT EXISTS(SELECT 1 FROM is_instance_of_relation as cs '
' WHERE cs.eid_from=e.eid AND cs.eid_to=s.cw_eid)')
@@ -244,7 +236,7 @@
continue
table = SQL_PREFIX + eschema.type
column = SQL_PREFIX + 'eid'
- cursor = session.system_sql('SELECT %s FROM %s;' % (column, table))
+ cursor = cnx.system_sql('SELECT %s FROM %s;' % (column, table))
for row in cursor.fetchall():
eid = row[0]
# eids is full since we have fetched everything from the entities table,
@@ -252,7 +244,7 @@
if not eid in eids or not eids[eid]:
sys.stderr.write(msg % (eid, eschema.type))
if fix:
- session.system_sql('DELETE FROM %s WHERE %s=%s;' % (table, column, eid))
+ cnx.system_sql('DELETE FROM %s WHERE %s=%s;' % (table, column, eid))
notify_fixed(fix)
@@ -268,7 +260,7 @@
notify_fixed(fix)
-def check_relations(schema, session, eids, fix=1):
+def check_relations(schema, cnx, eids, fix=1):
"""check that eids referenced by relations are registered in the repo system
table
"""
@@ -282,42 +274,42 @@
column = SQL_PREFIX + str(rschema)
sql = 'SELECT cw_eid,%s FROM %s WHERE %s IS NOT NULL;' % (
column, table, column)
- cursor = session.system_sql(sql)
+ cursor = cnx.system_sql(sql)
for row in cursor.fetchall():
parent_eid, eid = row
- if not has_eid(session, cursor, eid, eids):
+ if not has_eid(cnx, cursor, eid, eids):
bad_inlined_msg(rschema, parent_eid, eid, fix)
if fix:
sql = 'UPDATE %s SET %s=NULL WHERE %s=%s;' % (
table, column, column, eid)
- session.system_sql(sql)
+ cnx.system_sql(sql)
continue
try:
- cursor = session.system_sql('SELECT eid_from FROM %s_relation;' % rschema)
+ cursor = cnx.system_sql('SELECT eid_from FROM %s_relation;' % rschema)
except Exception as ex:
# usually because table doesn't exist
print 'ERROR', ex
continue
for row in cursor.fetchall():
eid = row[0]
- if not has_eid(session, cursor, eid, eids):
+ if not has_eid(cnx, cursor, eid, eids):
bad_related_msg(rschema, 'subject', eid, fix)
if fix:
sql = 'DELETE FROM %s_relation WHERE eid_from=%s;' % (
rschema, eid)
- session.system_sql(sql)
- cursor = session.system_sql('SELECT eid_to FROM %s_relation;' % rschema)
+ cnx.system_sql(sql)
+ cursor = cnx.system_sql('SELECT eid_to FROM %s_relation;' % rschema)
for row in cursor.fetchall():
eid = row[0]
- if not has_eid(session, cursor, eid, eids):
+ if not has_eid(cnx, cursor, eid, eids):
bad_related_msg(rschema, 'object', eid, fix)
if fix:
sql = 'DELETE FROM %s_relation WHERE eid_to=%s;' % (
rschema, eid)
- session.system_sql(sql)
+ cnx.system_sql(sql)
-def check_mandatory_relations(schema, session, eids, fix=1):
+def check_mandatory_relations(schema, cnx, eids, fix=1):
"""check entities missing some mandatory relation"""
print 'Checking mandatory relations'
msg = '%s #%s is missing mandatory %s relation %s (autofix will delete the entity)'
@@ -337,7 +329,7 @@
rql = 'Any X WHERE NOT X %s Y, X is %s' % (rschema, etype)
else:
rql = 'Any X WHERE NOT Y %s X, X is %s' % (rschema, etype)
- for entity in session.execute(rql).entities():
+ for entity in cnx.execute(rql).entities():
sys.stderr.write(msg % (entity.cw_etype, entity.eid, role, rschema))
if fix:
#if entity.cw_describe()['source']['uri'] == 'system': XXX
@@ -345,7 +337,7 @@
notify_fixed(fix)
-def check_mandatory_attributes(schema, session, eids, fix=1):
+def check_mandatory_attributes(schema, cnx, eids, fix=1):
"""check for entities stored in the system source missing some mandatory
attribute
"""
@@ -358,40 +350,40 @@
if rdef.cardinality[0] in '1+':
rql = 'Any X WHERE X %s NULL, X is %s, X cw_source S, S name "system"' % (
rschema, rdef.subject)
- for entity in session.execute(rql).entities():
+ for entity in cnx.execute(rql).entities():
sys.stderr.write(msg % (entity.cw_etype, entity.eid, rschema))
if fix:
entity.cw_delete()
notify_fixed(fix)
-def check_metadata(schema, session, eids, fix=1):
+def check_metadata(schema, cnx, eids, fix=1):
"""check entities has required metadata
FIXME: rewrite using RQL queries ?
"""
print 'Checking metadata'
- cursor = session.system_sql("SELECT DISTINCT type FROM entities;")
+ cursor = cnx.system_sql("SELECT DISTINCT type FROM entities;")
eidcolumn = SQL_PREFIX + 'eid'
msg = ' %s with eid %s has no %s (autofix will set it to now)'
for etype, in cursor.fetchall():
- if etype not in session.vreg.schema:
+ if etype not in cnx.vreg.schema:
sys.stderr.write('entities table references unknown type %s\n' %
etype)
if fix:
- session.system_sql("DELETE FROM entities WHERE type = %(type)s",
+ cnx.system_sql("DELETE FROM entities WHERE type = %(type)s",
{'type': etype})
continue
table = SQL_PREFIX + etype
for rel, default in ( ('creation_date', datetime.now()),
('modification_date', datetime.now()), ):
column = SQL_PREFIX + rel
- cursor = session.system_sql("SELECT %s FROM %s WHERE %s is NULL"
+ cursor = cnx.system_sql("SELECT %s FROM %s WHERE %s is NULL"
% (eidcolumn, table, column))
for eid, in cursor.fetchall():
sys.stderr.write(msg % (etype, eid, rel))
if fix:
- session.system_sql("UPDATE %s SET %s=%%(v)s WHERE %s=%s ;"
+ cnx.system_sql("UPDATE %s SET %s=%%(v)s WHERE %s=%s ;"
% (table, column, eidcolumn, eid),
{'v': default})
notify_fixed(fix)
@@ -402,22 +394,23 @@
using given user and password to locally connect to the repository
(no running cubicweb server needed)
"""
- session = repo._get_session(cnx.sessionid, setcnxset=True)
# yo, launch checks
+ srvcnx = cnx._cnx
if checks:
eids_cache = {}
- with session.security_enabled(read=False, write=False): # ensure no read security
+ with srvcnx.security_enabled(read=False, write=False): # ensure no read security
for check in checks:
check_func = globals()['check_%s' % check]
- check_func(repo.schema, session, eids_cache, fix=fix)
+ with srvcnx.ensure_cnx_set:
+ check_func(repo.schema, srvcnx, eids_cache, fix=fix)
if fix:
- session.commit()
+ srvcnx.commit()
else:
print
if not fix:
print 'WARNING: Diagnostic run, nothing has been corrected'
if reindex:
- session.rollback()
- session.set_cnxset()
- reindex_entities(repo.schema, session, withpb=withpb)
- session.commit()
+ srvcnx.rollback()
+ with srvcnx.ensure_cnx_set:
+ reindex_entities(repo.schema, srvcnx, withpb=withpb)
+ srvcnx.commit()
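All checkers above now take a connection rather than a session. A sketch of driving them through this module's `check()` entry point (normally reached via `cubicweb-ctl db-check`), with `repo` and `cnx` obtained as in the repoapi sketch earlier and illustrative argument values:

    from cubicweb.server.checkintegrity import check

    check(repo, cnx,
          checks=('entities', 'relations', 'text_index', 'metadata'),
          reindex=False, fix=1, withpb=False)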
--- a/server/cwzmq.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/cwzmq.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# copyright 2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2012-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -17,17 +17,17 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-from threading import Thread
import cPickle
import traceback
+from threading import Thread
+from logging import getLogger
import zmq
from zmq.eventloop import ioloop
import zmq.eventloop.zmqstream
-from logging import getLogger
from cubicweb import set_log_methods
-from cubicweb.server.server import QuitEvent
+from cubicweb.server.server import QuitEvent, Finished
ctx = zmq.Context()
--- a/server/hook.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/hook.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -203,8 +203,8 @@
instance if you want to disable some integrity checking hook. This can be
controlled more finely through the `category` class attribute, which is a string
giving a category name. One can then use the
-:meth:`~cubicweb.server.session.Session.deny_all_hooks_but` and
-:meth:`~cubicweb.server.session.Session.allow_all_hooks_but` context managers to
+:meth:`~cubicweb.server.session.Connection.deny_all_hooks_but` and
+:meth:`~cubicweb.server.session.Connection.allow_all_hooks_but` context managers to
explicitly enable or disable some categories.
The existing categories are:
@@ -257,8 +257,8 @@
from logilab.common.decorators import classproperty, cached
from logilab.common.deprecation import deprecated, class_renamed
from logilab.common.logging_ext import set_log_methods
-from logilab.common.registry import (Predicate, NotPredicate, OrPredicate,
- objectify_predicate, yes)
+from logilab.common.registry import (NotPredicate, OrPredicate,
+ objectify_predicate)
from cubicweb import RegistryNotFound, server
from cubicweb.cwvreg import CWRegistry, CWRegistryStore
@@ -295,13 +295,13 @@
obj.check_events()
super(HooksRegistry, self).register(obj, **kwargs)
- def call_hooks(self, event, session=None, **kwargs):
+ def call_hooks(self, event, cnx=None, **kwargs):
"""call `event` hooks for an entity or a list of entities (passed
respectively as the `entity` or ``entities`` keyword argument).
"""
kwargs['event'] = event
- if session is None: # True for events such as server_start
- for hook in sorted(self.possible_objects(session, **kwargs),
+ if cnx is None: # True for events such as server_start
+ for hook in sorted(self.possible_objects(cnx, **kwargs),
key=lambda x: x.order):
hook()
else:
@@ -318,28 +318,28 @@
else:
entities = []
eids_from_to = []
- pruned = self.get_pruned_hooks(session, event,
+ pruned = self.get_pruned_hooks(cnx, event,
entities, eids_from_to, kwargs)
# by default, hooks are executed with security turned off
- with session.security_enabled(read=False):
+ with cnx.security_enabled(read=False):
for _kwargs in _iter_kwargs(entities, eids_from_to, kwargs):
- hooks = sorted(self.filtered_possible_objects(pruned, session, **_kwargs),
+ hooks = sorted(self.filtered_possible_objects(pruned, cnx, **_kwargs),
key=lambda x: x.order)
debug = server.DEBUG & server.DBG_HOOKS
- with session.security_enabled(write=False):
+ with cnx.security_enabled(write=False):
for hook in hooks:
if debug:
print event, _kwargs, hook
hook()
- def get_pruned_hooks(self, session, event, entities, eids_from_to, kwargs):
+ def get_pruned_hooks(self, cnx, event, entities, eids_from_to, kwargs):
"""return a set of hooks that should not be considered by filtered_possible objects
the idea is to make a first pass over all the hooks in the
registry and to mark put some of them in a pruned list. The
pruned hooks are the one which:
- * are disabled at the session level
+ * are disabled at the connection level
* have a selector containing a :class:`match_rtype` or an
:class:`is_instance` predicate which does not match the rtype / etype
@@ -362,17 +362,17 @@
else: # nothing to prune, how did we get there ???
return set()
cache_key = (event, kwargs.get('rtype'), etype)
- pruned = session.pruned_hooks_cache.get(cache_key)
+ pruned = cnx.pruned_hooks_cache.get(cache_key)
if pruned is not None:
return pruned
pruned = set()
- session.pruned_hooks_cache[cache_key] = pruned
+ cnx.pruned_hooks_cache[cache_key] = pruned
if look_for_selector is not None:
for id, hooks in self.iteritems():
for hook in hooks:
enabled_cat, main_filter = hook.filterable_selectors()
if enabled_cat is not None:
- if not enabled_cat(hook, session):
+ if not enabled_cat(hook, cnx):
pruned.add(hook)
continue
if main_filter is not None:
@@ -381,7 +381,7 @@
main_filter.toetypes is not None):
continue
first_kwargs = _iter_kwargs(entities, eids_from_to, kwargs).next()
- if not main_filter(hook, session, **first_kwargs):
+ if not main_filter(hook, cnx, **first_kwargs):
pruned.add(hook)
return pruned
@@ -404,12 +404,12 @@
def __init__(self, vreg):
self.vreg = vreg
- def call_hooks(self, event, session=None, **kwargs):
+ def call_hooks(self, event, cnx=None, **kwargs):
try:
registry = self.vreg['%s_hooks' % event]
except RegistryNotFound:
return # no hooks for this event
- registry.call_hooks(event, session, **kwargs)
+ registry.call_hooks(event, cnx, **kwargs)
for event in ALL_HOOKS:
@@ -460,10 +460,10 @@
if kwargs.get('rtype') not in self.expected:
return 0
if self.frometypes is not None and \
- req.describe(kwargs['eidfrom'])[0] not in self.frometypes:
+ req.entity_metas(kwargs['eidfrom'])['type'] not in self.frometypes:
return 0
if self.toetypes is not None and \
- req.describe(kwargs['eidto'])[0] not in self.toetypes:
+ req.entity_metas(kwargs['eidto'])['type'] not in self.toetypes:
return 0
return 1
@@ -507,7 +507,7 @@
Hooks being appobjects like views, they have a `__regid__` and a `__select__`
class attribute. Like all appobjects, hooks have the `self._cw` attribute which
- represents the current session. In entity hooks, a `self.entity` attribute is
+ represents the current connection. In entity hooks, a `self.entity` attribute is
also present.
The `events` tuple is used by the base class selector to dispatch the hook
@@ -604,7 +604,7 @@
def __call__(self):
assert self.main_rtype
for eid in (self.eidfrom, self.eidto):
- etype = self._cw.describe(eid)[0]
+ etype = self._cw.entity_metas(eid)['type']
if self.main_rtype not in self._cw.vreg.schema.eschema(etype).subjrels:
return
if self.rtype in self.subject_relations:
@@ -640,7 +640,7 @@
skip_object_relations = ()
def __call__(self):
- eschema = self._cw.vreg.schema.eschema(self._cw.describe(self.eidfrom)[0])
+ eschema = self._cw.vreg.schema.eschema(self._cw.entity_metas(self.eidfrom)['type'])
execute = self._cw.execute
for rel in self.subject_relations:
if rel in eschema.subjrels and not rel in self.skip_subject_relations:
@@ -664,7 +664,7 @@
events = ('after_delete_relation',)
def __call__(self):
- eschema = self._cw.vreg.schema.eschema(self._cw.describe(self.eidfrom)[0])
+ eschema = self._cw.vreg.schema.eschema(self._cw.entity_metas(self.eidfrom)['type'])
execute = self._cw.execute
for rel in self.subject_relations:
if rel in eschema.subjrels and not rel in self.skip_subject_relations:
@@ -685,7 +685,7 @@
"""Base class for operations.
Operation may be instantiated in the hooks' `__call__` method. It always
- takes a session object as first argument (accessible as `.session` from the
+ takes a connection object as first argument (accessible as `.cnx` from the
operation instance), and optionally all keyword arguments needed by the
operation. These keyword arguments will be accessible as attributes from the
operation instance.
@@ -720,8 +720,8 @@
the transaction is over. All the ORM entities accessed by the earlier
transaction are invalid. If you need to work on the database, you need to
- start a new transaction, for instance using a new internal session, which
- you will need to commit (and close!).
+ start a new transaction, for instance using a new internal connection,
+ which you will need to commit.
For an operation to support an event, one has to implement the `<event
name>_event` method with no arguments.
@@ -731,24 +731,29 @@
base hook class used).
"""
- def __init__(self, session, **kwargs):
- self.session = session
+ def __init__(self, cnx, **kwargs):
+ self.cnx = cnx
self.__dict__.update(kwargs)
- self.register(session)
+ self.register(cnx)
# execution information
self.processed = None # 'precommit', 'commit'
self.failed = False
- def register(self, session):
- session.add_operation(self, self.insert_index())
+ @property
+ @deprecated('[3.19] Operation.session is deprecated, use Operation.cnx instead')
+ def session(self):
+ return self.cnx
+
+ def register(self, cnx):
+ cnx.add_operation(self, self.insert_index())
def insert_index(self):
- """return the index of the lastest instance which is not a
+ """return the index of the latest instance which is not a
LateOperation instance
"""
# faster by inspecting operation in reverse order for heavy transactions
i = None
- for i, op in enumerate(reversed(self.session.pending_operations)):
+ for i, op in enumerate(reversed(self.cnx.pending_operations)):
if isinstance(op, (LateOperation, SingleLastOperation)):
continue
return -i or None
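A minimal sketch of an operation written against the new `cnx`-based API; the class name, entity type and RQL are illustrative:

    from cubicweb.server.hook import Operation

    class CleanupTmpOp(Operation):        # illustrative
        def precommit_event(self):
            # runs once, just before the transaction commits
            self.cnx.execute('DELETE Person X WHERE X name "tmp"')

    # typically instantiated from a hook's __call__:
    #     CleanupTmpOp(self._cw)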
@@ -849,12 +854,12 @@
return ('cw.dataops', cls.__name__)
@classmethod
- def get_instance(cls, session, **kwargs):
+ def get_instance(cls, cnx, **kwargs):
# no need to lock: transaction_data already comes from thread's local storage
try:
- return session.transaction_data[cls.data_key]
+ return cnx.transaction_data[cls.data_key]
except KeyError:
- op = session.transaction_data[cls.data_key] = cls(session, **kwargs)
+ op = cnx.transaction_data[cls.data_key] = cls(cnx, **kwargs)
return op
def __init__(self, *args, **kwargs):
@@ -892,14 +897,14 @@
Iterating over operation data closed it and should be reserved to precommit /
postcommit method of the operation."""
self._processed = True
- op = self.session.transaction_data.pop(self.data_key)
+ op = self.cnx.transaction_data.pop(self.data_key)
assert op is self, "Bad handling of operation data, found %s instead of %s for key %s" % (
op, self, self.data_key)
return self._container
-@deprecated('[3.10] use opcls.get_instance(session, **opkwargs).add_data(value)')
-def set_operation(session, datakey, value, opcls, containercls=set, **opkwargs):
+@deprecated('[3.10] use opcls.get_instance(cnx, **opkwargs).add_data(value)')
+def set_operation(cnx, datakey, value, opcls, containercls=set, **opkwargs):
"""Function to ease applying a single operation on a set of data, avoiding
to create as many as operation as they are individual modification. You
should try to use this instead of creating on operation for each `value`,
@@ -907,10 +912,10 @@
Arguments are:
- * the `session` object
+ * `cnx`, the current connection
* `datakey`, a specially forged key that will be used as key in
- session.transaction_data
+ cnx.transaction_data
* `value` that is the actual payload of an individual operation
@@ -940,15 +945,15 @@
get unexpected data loss in some cases of nested hooks.
"""
try:
- # Search for session.transaction_data[`datakey`] (expected to be a set):
+ # Search for cnx.transaction_data[`datakey`] (expected to be a set):
# if found, simply append `value`
- _container_add(session.transaction_data[datakey], value)
+ _container_add(cnx.transaction_data[datakey], value)
except KeyError:
# else, initialize it to containercls([`value`]) and instantiate the given
# `opcls` operation class with additional keyword arguments
- opcls(session, **opkwargs)
- session.transaction_data[datakey] = containercls()
- _container_add(session.transaction_data[datakey], value)
+ opcls(cnx, **opkwargs)
+ cnx.transaction_data[datakey] = containercls()
+ _container_add(cnx.transaction_data[datakey], value)
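The deprecation message above points at the data-operation pattern; a sketch using `DataOperationMixIn`, with an illustrative class name and payload:

    from cubicweb.server.hook import DataOperationMixIn, Operation

    class ReindexOp(DataOperationMixIn, Operation):   # illustrative
        def precommit_event(self):
            for eid in self.get_data():   # each queued value, exactly once
                pass                      # process entity `eid` here

    # from a hook, queue values instead of creating one operation per value:
    #     ReindexOp.get_instance(self._cw).add_data(self.entity.eid)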
class LateOperation(Operation):
@@ -961,7 +966,7 @@
"""
# faster by inspecting operation in reverse order for heavy transactions
i = None
- for i, op in enumerate(reversed(self.session.pending_operations)):
+ for i, op in enumerate(reversed(self.cnx.pending_operations)):
if isinstance(op, SingleLastOperation):
continue
return -i or None
@@ -976,17 +981,17 @@
operations
"""
- def register(self, session):
+ def register(self, cnx):
"""override register to handle cases where this operation has already
been added
"""
- operations = session.pending_operations
+ operations = cnx.pending_operations
index = self.equivalent_index(operations)
if index is not None:
equivalent = operations.pop(index)
else:
equivalent = None
- session.add_operation(self, self.insert_index())
+ cnx.add_operation(self, self.insert_index())
return equivalent
def equivalent_index(self, operations):
@@ -1001,7 +1006,7 @@
class SendMailOp(SingleLastOperation):
- def __init__(self, session, msg=None, recipients=None, **kwargs):
+ def __init__(self, cnx, msg=None, recipients=None, **kwargs):
# may not specify msg yet, as
# `cubicweb.sobjects.supervision.SupervisionMailOp`
if msg is not None:
@@ -1010,18 +1015,18 @@
else:
assert recipients is None
self.to_send = []
- super(SendMailOp, self).__init__(session, **kwargs)
+ super(SendMailOp, self).__init__(cnx, **kwargs)
- def register(self, session):
- previous = super(SendMailOp, self).register(session)
+ def register(self, cnx):
+ previous = super(SendMailOp, self).register(cnx)
if previous:
self.to_send = previous.to_send + self.to_send
def postcommit_event(self):
- self.session.repo.threaded_task(self.sendmails)
+ self.cnx.repo.threaded_task(self.sendmails)
def sendmails(self):
- self.session.vreg.config.sendmails(self.to_send)
+ self.cnx.vreg.config.sendmails(self.to_send)
class RQLPrecommitOperation(Operation):
@@ -1029,7 +1034,7 @@
rqls = None
def precommit_event(self):
- execute = self.session.execute
+ execute = self.cnx.execute
for rql in self.rqls:
execute(*rql)
@@ -1051,7 +1056,7 @@
remove inserted eid from repository type/source cache
"""
try:
- self.session.repo.clear_caches(self.get_data())
+ self.cnx.repo.clear_caches(self.get_data())
except KeyError:
pass
@@ -1066,7 +1071,7 @@
"""
try:
eids = self.get_data()
- self.session.repo.clear_caches(eids)
- self.session.repo.app_instances_bus.publish(['delete'] + list(str(eid) for eid in eids))
+ self.cnx.repo.clear_caches(eids)
+ self.cnx.repo.app_instances_bus.publish(['delete'] + list(str(eid) for eid in eids))
except KeyError:
pass
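Hook categories are now toggled on the connection through the `deny_all_hooks_but` / `allow_all_hooks_but` context managers documented earlier in this file; a sketch of a bulk import keeping only metadata hooks (entity type and loop are illustrative):

    with cnx.deny_all_hooks_but('metadata'):
        for i in xrange(1000):
            cnx.create_entity('Person', name=u'p%d' % i)  # illustrative
        cnx.commit()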
--- a/server/ldaputils.py Tue Jun 10 09:35:26 2014 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,360 +0,0 @@
-# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
-# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
-#
-# This file is part of CubicWeb.
-#
-# CubicWeb is free software: you can redistribute it and/or modify it under the
-# terms of the GNU Lesser General Public License as published by the Free
-# Software Foundation, either version 2.1 of the License, or (at your option)
-# any later version.
-#
-# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
-# details.
-#
-# You should have received a copy of the GNU Lesser General Public License along
-# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""cubicweb utilities for ldap sources
-
-Part of the code is coming form Zope's LDAPUserFolder
-
-Copyright (c) 2004 Jens Vagelpohl.
-All Rights Reserved.
-
-This software is subject to the provisions of the Zope Public License,
-Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
-THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-FOR A PARTICULAR PURPOSE.
-"""
-
-from __future__ import division # XXX why?
-
-from datetime import datetime
-
-import ldap
-from ldap.ldapobject import ReconnectLDAPObject
-from ldap.filter import filter_format
-from ldapurl import LDAPUrl
-
-from cubicweb import ValidationError, AuthenticationError, Binary
-from cubicweb.server import utils
-from cubicweb.server.sources import ConnectionWrapper
-
-_ = unicode
-
-# search scopes
-BASE = ldap.SCOPE_BASE
-ONELEVEL = ldap.SCOPE_ONELEVEL
-SUBTREE = ldap.SCOPE_SUBTREE
-
-# map ldap protocol to their standard port
-PROTO_PORT = {'ldap': 389,
- 'ldaps': 636,
- 'ldapi': None,
- }
-
-
-class LDAPSourceMixIn(object):
- """a mix-in for LDAP based source"""
- options = (
- ('auth-mode',
- {'type' : 'choice',
- 'default': 'simple',
- 'choices': ('simple', 'cram_md5', 'digest_md5', 'gssapi'),
- 'help': 'authentication mode used to authenticate user to the ldap.',
- 'group': 'ldap-source', 'level': 3,
- }),
- ('auth-realm',
- {'type' : 'string',
- 'default': None,
- 'help': 'realm to use when using gssapi/kerberos authentication.',
- 'group': 'ldap-source', 'level': 3,
- }),
-
- ('data-cnx-dn',
- {'type' : 'string',
- 'default': '',
- 'help': 'user dn to use to open data connection to the ldap (eg used \
-to respond to rql queries). Leave empty for anonymous bind',
- 'group': 'ldap-source', 'level': 1,
- }),
- ('data-cnx-password',
- {'type' : 'string',
- 'default': '',
- 'help': 'password to use to open data connection to the ldap (eg used to respond to rql queries). Leave empty for anonymous bind.',
- 'group': 'ldap-source', 'level': 1,
- }),
-
- ('user-base-dn',
- {'type' : 'string',
- 'default': '',
- 'help': 'base DN to lookup for users; disable user importation mechanism if unset',
- 'group': 'ldap-source', 'level': 1,
- }),
- ('user-scope',
- {'type' : 'choice',
- 'default': 'ONELEVEL',
- 'choices': ('BASE', 'ONELEVEL', 'SUBTREE'),
- 'help': 'user search scope (valid values: "BASE", "ONELEVEL", "SUBTREE")',
- 'group': 'ldap-source', 'level': 1,
- }),
- ('user-classes',
- {'type' : 'csv',
- 'default': ('top', 'posixAccount'),
- 'help': 'classes of user (with Active Directory, you want to say "user" here)',
- 'group': 'ldap-source', 'level': 1,
- }),
- ('user-filter',
- {'type': 'string',
- 'default': '',
- 'help': 'additional filters to be set in the ldap query to find valid users',
- 'group': 'ldap-source', 'level': 2,
- }),
- ('user-login-attr',
- {'type' : 'string',
- 'default': 'uid',
- 'help': 'attribute used as login on authentication (with Active Directory, you want to use "sAMAccountName" here)',
- 'group': 'ldap-source', 'level': 1,
- }),
- ('user-default-group',
- {'type' : 'csv',
- 'default': ('users',),
- 'help': 'name of a group in which ldap users will be by default. \
-You can set multiple groups by separating them by a comma.',
- 'group': 'ldap-source', 'level': 1,
- }),
- ('user-attrs-map',
- {'type' : 'named',
- 'default': {'uid': 'login', 'gecos': 'email', 'userPassword': 'upassword'},
- 'help': 'map from ldap user attributes to cubicweb attributes (with Active Directory, you want to use sAMAccountName:login,mail:email,givenName:firstname,sn:surname)',
- 'group': 'ldap-source', 'level': 1,
- }),
-
- )
-
- _conn = None
-
- def _entity_update(self, source_entity):
- super(LDAPSourceMixIn, self)._entity_update(source_entity)
- if self.urls:
- if len(self.urls) > 1:
- raise ValidationError(source_entity.eid, {'url': _('can only have one url')})
- try:
- protocol, hostport = self.urls[0].split('://')
- except ValueError:
- raise ValidationError(source_entity.eid, {'url': _('badly formatted url')})
- if protocol not in PROTO_PORT:
- raise ValidationError(source_entity.eid, {'url': _('unsupported protocol')})
-
- def update_config(self, source_entity, typedconfig):
- """update configuration from source entity. `typedconfig` is config
- properly typed with defaults set
- """
- super(LDAPSourceMixIn, self).update_config(source_entity, typedconfig)
- self.authmode = typedconfig['auth-mode']
- self._authenticate = getattr(self, '_auth_%s' % self.authmode)
- self.cnx_dn = typedconfig['data-cnx-dn']
- self.cnx_pwd = typedconfig['data-cnx-password']
- self.user_base_dn = str(typedconfig['user-base-dn'])
- self.user_base_scope = globals()[typedconfig['user-scope']]
- self.user_login_attr = typedconfig['user-login-attr']
- self.user_default_groups = typedconfig['user-default-group']
- self.user_attrs = {'dn': 'eid', 'modifyTimestamp': 'modification_date'}
- self.user_attrs.update(typedconfig['user-attrs-map'])
- self.user_rev_attrs = dict((v, k) for k, v in self.user_attrs.iteritems())
- self.base_filters = [filter_format('(%s=%s)', ('objectClass', o))
- for o in typedconfig['user-classes']]
- if typedconfig['user-filter']:
- self.base_filters.append(typedconfig['user-filter'])
- self._conn = None
-
- def connection_info(self):
- assert len(self.urls) == 1, self.urls
- protocol, hostport = self.urls[0].split('://')
- if protocol != 'ldapi' and not ':' in hostport:
- hostport = '%s:%s' % (hostport, PROTO_PORT[protocol])
- return protocol, hostport
-
- def get_connection(self):
- """open and return a connection to the source"""
- if self._conn is None:
- try:
- self._connect()
- except Exception:
- self.exception('unable to connect to ldap')
- return ConnectionWrapper(self._conn)
-
- def authenticate(self, session, login, password=None, **kwargs):
- """return CWUser eid for the given login/password if this account is
- defined in this source, else raise `AuthenticationError`
-
- two queries are needed since passwords are stored crypted, so we have
- to fetch the salt first
- """
- self.info('ldap authenticate %s', login)
- if not password:
- # On Windows + ADAM this would have succeeded (!!!)
- # You get Authenticated as: 'NT AUTHORITY\ANONYMOUS LOGON'.
- # we really really don't want that
- raise AuthenticationError()
- searchfilter = [filter_format('(%s=%s)', (self.user_login_attr, login))]
- searchfilter.extend(self.base_filters)
- searchstr = '(&%s)' % ''.join(searchfilter)
- # first search the user
- try:
- user = self._search(session, self.user_base_dn,
- self.user_base_scope, searchstr)[0]
- except (IndexError, ldap.SERVER_DOWN):
- # no such user
- raise AuthenticationError()
- # check password by establishing a (unused) connection
- try:
- self._connect(user, password)
- except ldap.LDAPError as ex:
- # Something went wrong, most likely bad credentials
- self.info('while trying to authenticate %s: %s', user, ex)
- raise AuthenticationError()
- except Exception:
- self.error('while trying to authenticate %s', user, exc_info=True)
- raise AuthenticationError()
- eid = self.repo.extid2eid(self, user['dn'], 'CWUser', session, {})
- if eid < 0:
- # user has been moved away from this source
- raise AuthenticationError()
- return eid
-
- def _connect(self, user=None, userpwd=None):
- protocol, hostport = self.connection_info()
- self.info('connecting %s://%s as %s', protocol, hostport,
- user and user['dn'] or 'anonymous')
- # don't require server certificate when using ldaps (will
- # enable self signed certs)
- ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
- url = LDAPUrl(urlscheme=protocol, hostport=hostport)
- conn = ReconnectLDAPObject(url.initializeUrl())
- # Set the protocol version - version 3 is preferred
- try:
- conn.set_option(ldap.OPT_PROTOCOL_VERSION, ldap.VERSION3)
- except ldap.LDAPError: # Invalid protocol version, fall back safely
- conn.set_option(ldap.OPT_PROTOCOL_VERSION, ldap.VERSION2)
- # Deny auto-chasing of referrals to be safe, we handle them instead
- # Required for AD
- try:
- conn.set_option(ldap.OPT_REFERRALS, 0)
- except ldap.LDAPError: # Cannot set referrals, so do nothing
- pass
- #conn.set_option(ldap.OPT_NETWORK_TIMEOUT, conn_timeout)
- #conn.timeout = op_timeout
- # Now bind with the credentials given. Let exceptions propagate out.
- if user is None:
- # no user specified, we want to initialize the 'data' connection,
- assert self._conn is None
- self._conn = conn
- # XXX always use simple bind for data connection
- if not self.cnx_dn:
- conn.simple_bind_s(self.cnx_dn, self.cnx_pwd)
- else:
- self._authenticate(conn, {'dn': self.cnx_dn}, self.cnx_pwd)
- else:
- # user specified, we want to check user/password, no need to return
- # the connection which will be thrown out
- self._authenticate(conn, user, userpwd)
- return conn
-
- def _auth_simple(self, conn, user, userpwd):
- conn.simple_bind_s(user['dn'], userpwd)
-
- def _auth_cram_md5(self, conn, user, userpwd):
- from ldap import sasl
- auth_token = sasl.cram_md5(user['dn'], userpwd)
- conn.sasl_interactive_bind_s('', auth_token)
-
- def _auth_digest_md5(self, conn, user, userpwd):
- from ldap import sasl
- auth_token = sasl.digest_md5(user['dn'], userpwd)
- conn.sasl_interactive_bind_s('', auth_token)
-
- def _auth_gssapi(self, conn, user, userpwd):
- # print XXX not proper sasl/gssapi
- import kerberos
- if not kerberos.checkPassword(user[self.user_login_attr], userpwd):
- raise Exception('BAD login / mdp')
- #from ldap import sasl
- #conn.sasl_interactive_bind_s('', sasl.gssapi())
-
- def _search(self, session, base, scope,
- searchstr='(objectClass=*)', attrs=()):
- """make an ldap query"""
- self.debug('ldap search %s %s %s %s %s', self.uri, base, scope,
- searchstr, list(attrs))
- # XXX for now, we do not have connections set support for LDAP, so
- # this is always self._conn
- cnx = self.get_connection().cnx #session.cnxset.connection(self.uri).cnx
- if cnx is None:
- # cant connect to server
- msg = session._("can't connect to source %s, some data may be missing")
- session.set_shared_data('sources_error', msg % self.uri, txdata=True)
- return []
- try:
- res = cnx.search_s(base, scope, searchstr, attrs)
- except ldap.PARTIAL_RESULTS:
- res = cnx.result(all=0)[1]
- except ldap.NO_SUCH_OBJECT:
- self.info('ldap NO SUCH OBJECT %s %s %s', base, scope, searchstr)
- self._process_no_such_object(session, base)
- return []
- # except ldap.REFERRAL as e:
- # cnx = self.handle_referral(e)
- # try:
- # res = cnx.search_s(base, scope, searchstr, attrs)
- # except ldap.PARTIAL_RESULTS:
- # res_type, res = cnx.result(all=0)
- result = []
- for rec_dn, rec_dict in res:
- # When used against Active Directory, "rec_dict" may not be
- # be a dictionary in some cases (instead, it can be a list)
- #
- # An example of a useless "res" entry that can be ignored
- # from AD is
- # (None, ['ldap://ForestDnsZones.PORTAL.LOCAL/DC=ForestDnsZones,DC=PORTAL,DC=LOCAL'])
- # This appears to be some sort of internal referral, but
- # we can't handle it, so we need to skip over it.
- try:
- items = rec_dict.iteritems()
- except AttributeError:
- continue
- else:
- itemdict = self._process_ldap_item(rec_dn, items)
- result.append(itemdict)
- self.debug('ldap built results %s', len(result))
- return result
-
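
The Active Directory quirk handled in the result loop above, shown in isolation (a hedged sketch; `iter_real_entries` is not a CubicWeb name):

```python
def iter_real_entries(res):
    # search_s() results from AD may contain internal referral entries
    # such as (None, ['ldap://ForestDnsZones...']) whose second element
    # is a list rather than a dict; those must be skipped
    for rec_dn, rec_dict in res:
        if isinstance(rec_dict, dict):
            yield rec_dn, rec_dict
```
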
- def _process_ldap_item(self, dn, iterator):
- """Turn an ldap received item into a proper dict."""
- itemdict = {'dn': dn}
- for key, value in iterator:
- if self.user_attrs.get(key) == 'upassword': # XXX better password detection
- value = value[0].encode('utf-8')
- # we only support ldap_salted_sha1 for ldap sources, see: server/utils.py
- if not value.startswith('{SSHA}'):
- value = utils.crypt_password(value)
- itemdict[key] = Binary(value)
- elif self.user_attrs.get(key) == 'modification_date':
- itemdict[key] = datetime.strptime(value[0], '%Y%m%d%H%M%SZ')
- else:
- value = [unicode(val, 'utf-8', 'replace') for val in value]
- if len(value) == 1:
- itemdict[key] = value = value[0]
- else:
- itemdict[key] = value
- return itemdict
-
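
The value normalization performed above, standalone: LDAP returns every attribute as a list of byte strings, and single-valued attributes get unwrapped (a sketch in the module's Python 2 idiom; `normalize` is an illustrative name):

```python
def normalize(values):
    values = [unicode(val, 'utf-8', 'replace') for val in values]
    return values[0] if len(values) == 1 else values

assert normalize(['jdoe']) == u'jdoe'
assert normalize(['a', 'b']) == [u'a', u'b']
```
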
- def _process_no_such_object(self, session, dn):
- """Some search return NO_SUCH_OBJECT error, handle this (usually because
- an object whose dn is no more existent in ldap as been encountered).
-
- Do nothing by default, let sub-classes handle that.
- """
--- a/server/migractions.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/migractions.py Tue Jun 10 09:49:45 2014 +0200
@@ -53,15 +53,11 @@
PURE_VIRTUAL_RTYPES,
CubicWebRelationSchema, order_eschemas)
from cubicweb.cwvreg import CW_EVENT_MANAGER
-from cubicweb.dbapi import get_repository, _repo_connect
+from cubicweb import repoapi
from cubicweb.migration import MigrationHelper, yes
-from cubicweb.server import hook
-try:
- from cubicweb.server import SOURCE_TYPES, schemaserial as ss
- from cubicweb.server.utils import manager_userpasswd
- from cubicweb.server.sqlutils import sqlexec, SQL_PREFIX
-except ImportError: # LAX
- pass
+from cubicweb.server import hook, schemaserial as ss
+from cubicweb.server.utils import manager_userpasswd
+from cubicweb.server.sqlutils import sqlexec, SQL_PREFIX
def mock_object(**params):
@@ -82,6 +78,7 @@
if not cls.__regid__ in repo.vreg['after_add_entity_hooks']:
repo.vreg.register(ClearGroupMap)
+
class ServerMigrationHelper(MigrationHelper):
"""specific migration helper for server side migration scripts,
providing actions related to schema/data migration
@@ -95,10 +92,14 @@
assert repo
if cnx is not None:
assert repo
- self._cnx = cnx
+ self.cnx = cnx
self.repo = repo
+ self.session = cnx._session
elif connect:
self.repo_connect()
+ self.set_session()
+ else:
+ self.session = None
# no config on shell to a remote instance
if config is not None and (cnx or connect):
repo = self.repo
@@ -124,11 +125,38 @@
self.fs_schema = schema
self._synchronized = set()
+ def set_session(self):
+ try:
+ login = self.repo.config.default_admin_config['login']
+ pwd = self.repo.config.default_admin_config['password']
+ except KeyError:
+ login, pwd = manager_userpasswd()
+ while True:
+ try:
+ self.cnx = repoapi.connect(self.repo, login, password=pwd)
+ if not 'managers' in self.cnx.user.groups:
+ print 'migration needs an account in the managers group'
+ else:
+ break
+ except AuthenticationError:
+ print 'wrong user/password'
+ except (KeyboardInterrupt, EOFError):
+ print 'aborting...'
+ sys.exit(0)
+ try:
+ login, pwd = manager_userpasswd()
+ except (KeyboardInterrupt, EOFError):
+ print 'aborting...'
+ sys.exit(0)
+ self.session = self.repo._get_session(self.cnx.sessionid)
+ self.session.keep_cnxset_mode('transaction')
+ self.session.set_shared_data('rebuild-infered', False)
+
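
`set_session` relies on the new repoapi entry points imported at the top of this hunk; typical in-process usage looks roughly like this (a hedged sketch against the 3.19 API, config and credentials illustrative):

```python
from cubicweb import repoapi

repo = repoapi.get_repository(config=config)            # in-process repository
cnx = repoapi.connect(repo, 'admin', password='admin')  # ClientConnection
with cnx:
    rset = cnx.execute('Any X WHERE X is CWUser')
    cnx.commit()
```
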
# overriden from base MigrationHelper ######################################
@cached
def repo_connect(self):
- self.repo = get_repository(config=self.config)
+ self.repo = repoapi.get_repository(config=self.config)
return self.repo
def cube_upgraded(self, cube, version):
@@ -147,18 +175,19 @@
elif options.backup_db:
self.backup_database(askconfirm=False)
# disable notification during migration
- with self.session.allow_all_hooks_but('notification'):
+ with self.cnx.allow_all_hooks_but('notification'):
super(ServerMigrationHelper, self).migrate(vcconf, toupgrade, options)
def cmd_process_script(self, migrscript, funcname=None, *args, **kwargs):
- try:
- return super(ServerMigrationHelper, self).cmd_process_script(
- migrscript, funcname, *args, **kwargs)
- except ExecutionError as err:
- sys.stderr.write("-> %s\n" % err)
- except BaseException:
- self.rollback()
- raise
+ with self.cnx._cnx.ensure_cnx_set:
+ try:
+ return super(ServerMigrationHelper, self).cmd_process_script(
+ migrscript, funcname, *args, **kwargs)
+ except ExecutionError as err:
+ sys.stderr.write("-> %s\n" % err)
+ except BaseException:
+ self.rollback()
+ raise
# Adjust docstring
cmd_process_script.__doc__ = MigrationHelper.cmd_process_script.__doc__
@@ -186,18 +215,18 @@
open(backupfile,'w').close() # kinda lock
os.chmod(backupfile, 0600)
# backup
+ source = repo.system_source
tmpdir = tempfile.mkdtemp()
try:
failed = False
- for source in repo.sources:
- try:
- source.backup(osp.join(tmpdir, source.uri), self.confirm, format=format)
- except Exception as ex:
- print '-> error trying to backup %s [%s]' % (source.uri, ex)
- if not self.confirm('Continue anyway?', default='n'):
- raise SystemExit(1)
- else:
- failed = True
+ try:
+ source.backup(osp.join(tmpdir, source.uri), self.confirm, format=format)
+ except Exception as ex:
+ print '-> error trying to backup %s [%s]' % (source.uri, ex)
+ if not self.confirm('Continue anyway?', default='n'):
+ raise SystemExit(1)
+ else:
+ failed = True
with open(osp.join(tmpdir, 'format.txt'), 'w') as format_file:
format_file.write('%s\n' % format)
with open(osp.join(tmpdir, 'versions.txt'), 'w') as version_file:
@@ -216,8 +245,7 @@
finally:
shutil.rmtree(tmpdir)
- def restore_database(self, backupfile, drop=True, systemonly=True,
- askconfirm=True, format='native'):
+ def restore_database(self, backupfile, drop=True, askconfirm=True, format='native'):
# check
if not osp.exists(backupfile):
raise ExecutionError("Backup file %s doesn't exist" % backupfile)
@@ -246,76 +274,26 @@
format = written_format
self.config.init_cnxset_pool = False
repo = self.repo_connect()
- for source in repo.sources:
- if systemonly and source.uri != 'system':
- continue
- try:
- source.restore(osp.join(tmpdir, source.uri), self.confirm, drop, format)
- except Exception as exc:
- print '-> error trying to restore %s [%s]' % (source.uri, exc)
- if not self.confirm('Continue anyway?', default='n'):
- raise SystemExit(1)
+ source = repo.system_source
+ try:
+ source.restore(osp.join(tmpdir, source.uri), self.confirm, drop, format)
+ except Exception as exc:
+ print '-> error trying to restore %s [%s]' % (source.uri, exc)
+ if not self.confirm('Continue anyway?', default='n'):
+ raise SystemExit(1)
shutil.rmtree(tmpdir)
# call hooks
repo.init_cnxset_pool()
repo.hm.call_hooks('server_restore', repo=repo, timestamp=backupfile)
print '-> database restored.'
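
Both the backup and restore hunks follow from the same 3.19 change: there is exactly one persistent source left, so the old per-source loops reduce to direct use of `repo.system_source`. Schematically (names as in the diff, error handling elided):

```python
# before (3.18): every configured source was backed up / restored
for source in repo.sources:
    source.backup(osp.join(tmpdir, source.uri), confirm, format=format)

# after (3.19): only the system source remains
source = repo.system_source
source.backup(osp.join(tmpdir, source.uri), confirm, format=format)
```
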
- @property
- def cnx(self):
- """lazy connection"""
- try:
- return self._cnx
- except AttributeError:
- sourcescfg = self.repo.config.sources()
- try:
- login = sourcescfg['admin']['login']
- pwd = sourcescfg['admin']['password']
- except KeyError:
- login, pwd = manager_userpasswd()
- while True:
- try:
- self._cnx = _repo_connect(self.repo, login, password=pwd)
- if not 'managers' in self._cnx.user(self.session).groups:
- print 'migration needs an account in the managers group'
- else:
- break
- except AuthenticationError:
- print 'wrong user/password'
- except (KeyboardInterrupt, EOFError):
- print 'aborting...'
- sys.exit(0)
- try:
- login, pwd = manager_userpasswd()
- except (KeyboardInterrupt, EOFError):
- print 'aborting...'
- sys.exit(0)
- self.session.keep_cnxset_mode('transaction')
- return self._cnx
-
- @property
- def session(self):
- if self.config is not None:
- session = self.repo._get_session(self.cnx.sessionid)
- if session.cnxset is None:
- session.read_security = False
- session.write_security = False
- session.set_cnxset()
- return session
- # no access to session on remote instance
- return None
-
def commit(self):
- if hasattr(self, '_cnx'):
- self._cnx.commit()
- if self.session:
- self.session.set_cnxset()
+ if hasattr(self, 'cnx'):
+ self.cnx.commit(free_cnxset=False)
def rollback(self):
- if hasattr(self, '_cnx'):
- self._cnx.rollback()
- if self.session:
- self.session.set_cnxset()
+ if hasattr(self, 'cnx'):
+ self.cnx.rollback(free_cnxset=False)
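
The `free_cnxset=False` argument used above asks commit/rollback to keep the connections set bound, so consecutive migration commands can reuse it instead of re-acquiring one per transaction; an illustrative call sequence (`helper` being a ServerMigrationHelper instance):

```python
helper.cmd_add_relation_type('my_relation')  # 'my_relation' is illustrative
helper.commit()            # cnx.commit(free_cnxset=False) under the hood
helper.cmd_sync_schema_props_perms()
helper.commit()
```
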
def rqlexecall(self, rqliter, ask_confirm=False):
for rql, kwargs in rqliter:
@@ -333,7 +311,7 @@
'schema': self.repo.get_schema(),
'cnx': self.cnx,
'fsschema': self.fs_schema,
- 'session' : self.session,
+ 'session' : self.cnx._cnx,
'repo' : self.repo,
})
return context
@@ -341,12 +319,12 @@
@cached
def group_mapping(self):
"""cached group mapping"""
- return ss.group_mapping(self._cw)
+ return ss.group_mapping(self.cnx)
@cached
def cstrtype_mapping(self):
"""cached constraint types mapping"""
- return ss.cstrtype_mapping(self._cw)
+ return ss.cstrtype_mapping(self.cnx)
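
Both helpers return plain name-to-eid dictionaries computed by `schemaserial`, now fed the connection instead of the old `_cw` shim; a hedged illustration of the cached values (eids invented):

```python
gmap = self.group_mapping()      # e.g. {u'managers': 123, u'users': 124}
cmap = self.cstrtype_mapping()   # e.g. {u'SizeConstraint': 345, ...}
```
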
def cmd_exec_event_script(self, event, cube=None, funcname=None,
*args, **kwargs):
@@ -371,7 +349,7 @@
self.execscript_confirm = yes
try:
if event == 'postcreate':
- with self.session.allow_all_hooks_but():
+ with self.cnx.allow_all_hooks_but():
return self.cmd_process_script(apc, funcname, *args, **kwargs)
return self.cmd_process_script(apc, funcname, *args, **kwargs)
finally:
@@ -393,7 +371,7 @@
sql_scripts = glob(osp.join(directory, '*.%s.sql' % driver))
for fpath in sql_scripts:
print '-> installing', fpath
- failed = sqlexec(open(fpath).read(), self.session.system_sql, False,
+ failed = sqlexec(open(fpath).read(), self.cnx.system_sql, False,
delimiter=';;')
if failed:
print '-> ERROR, skipping', fpath
@@ -562,7 +540,7 @@
repo = {}
for cols in eschema._unique_together or ():
fs[unique_index_name(repoeschema, cols)] = sorted(cols)
- schemaentity = self.session.entity_from_eid(repoeschema.eid)
+ schemaentity = self.cnx.entity_from_eid(repoeschema.eid)
for entity in schemaentity.related('constraint_of', 'object',
targettypes=('CWUniqueTogetherConstraint',)).entities():
repo[entity.name] = sorted(rel.name for rel in entity.relations)
@@ -630,21 +608,8 @@
# out of sync with newconstraints when multiple
# constraints of the same type are used
for cstr in oldconstraints:
- for newcstr in newconstraints:
- if newcstr.type() == cstr.type():
- break
- else:
- newcstr = None
- if newcstr is None:
- self.rqlexec('DELETE X constrained_by C WHERE C eid %(x)s',
- {'x': cstr.eid}, ask_confirm=confirm)
- else:
- newconstraints.remove(newcstr)
- value = unicode(newcstr.serialize())
- if value != unicode(cstr.serialize()):
- self.rqlexec('SET X value %(v)s WHERE X eid %(x)s',
- {'x': cstr.eid, 'v': value},
- ask_confirm=confirm)
+ self.rqlexec('DELETE CWConstraint C WHERE C eid %(x)s',
+ {'x': cstr.eid}, ask_confirm=confirm)
# 2. add new constraints
cstrtype_map = self.cstrtype_mapping()
self.rqlexecall(ss.constraints2rql(cstrtype_map, newconstraints,
@@ -719,7 +684,7 @@
str(totype))
# execute post-create files
for cube in reversed(newcubes):
- with self.session.allow_all_hooks_but():
+ with self.cnx.allow_all_hooks_but():
self.cmd_exec_event_script('postcreate', cube)
self.commit()
@@ -821,7 +786,7 @@
groupmap = self.group_mapping()
cstrtypemap = self.cstrtype_mapping()
# register the entity into CWEType
- execute = self._cw.execute
+ execute = self.cnx.execute
ss.execschemarql(execute, eschema, ss.eschema2rql(eschema, groupmap))
# add specializes relation if needed
specialized = eschema.specializes()
@@ -1001,8 +966,8 @@
# repository caches are properly cleanup
hook.CleanupDeletedEidsCacheOp.get_instance(session).union(thispending)
# and don't forget to remove record from system tables
- entities = [session.entity_from_eid(eid, rdeftype) for eid in thispending]
- self.repo.system_source.delete_info_multi(session, entities, 'system')
+ entities = [self.cnx.entity_from_eid(eid, rdeftype) for eid in thispending]
+ self.repo.system_source.delete_info_multi(self.cnx._cnx, entities)
self.sqlexec('DELETE FROM cw_%s WHERE cw_from_entity=%%(eid)s OR '
'cw_to_entity=%%(eid)s' % rdeftype,
{'eid': oldeid}, ask_confirm=False)
@@ -1050,7 +1015,7 @@
"""
reposchema = self.repo.schema
rschema = self.fs_schema.rschema(rtype)
- execute = self._cw.execute
+ execute = self.cnx.execute
if rtype in reposchema:
print 'warning: relation type %s is already known, skip addition' % (
rtype)
@@ -1129,7 +1094,7 @@
subjtype, rtype, objtype)
return
rdef = self._get_rdef(rschema, subjtype, objtype)
- ss.execschemarql(self._cw.execute, rdef,
+ ss.execschemarql(self.cnx.execute, rdef,
ss.rdef2rql(rdef, self.cstrtype_mapping(),
self.group_mapping()))
if commit:
@@ -1356,14 +1321,6 @@
# other data migration commands ###########################################
- @property
- def _cw(self):
- session = self.session
- if session is not None:
- session.set_cnxset()
- return session
- return self.cnx.request()
-
def cmd_storage_changed(self, etype, attribute):
"""migrate entities to a custom storage. The new storage is expected to
be set, it will be temporarily removed for the migration.
@@ -1387,22 +1344,28 @@
def cmd_create_entity(self, etype, commit=False, **kwargs):
"""add a new entity of the given type"""
- entity = self._cw.create_entity(etype, **kwargs)
+ entity = self.cnx.create_entity(etype, **kwargs)
if commit:
self.commit()
return entity
+ def cmd_find(self, etype, **kwargs):
+ """find entities of the given type and attribute values"""
+ return self.cnx.find(etype, **kwargs)
+
+ @deprecated("[3.19] use find(*args, **kwargs).entities() instead")
def cmd_find_entities(self, etype, **kwargs):
"""find entities of the given type and attribute values"""
- return self._cw.find_entities(etype, **kwargs)
+ return self.cnx.find(etype, **kwargs).entities()
+ @deprecated("[3.19] use find(*args, **kwargs).one() instead")
def cmd_find_one_entity(self, etype, **kwargs):
"""find one entity of the given type and attribute values.
raise :exc:`cubicweb.req.FindEntityError` if can not return one and only
one entity.
"""
- return self._cw.find_one_entity(etype, **kwargs)
+ return self.cnx.find(etype, **kwargs).one()
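
In 3.19 migration scripts the single `find` entry point replaces both deprecated helpers, as the wrappers above show; usage sketch:

```python
admin = self.cnx.find('CWUser', login=u'admin').one()  # exactly one expected
users = list(self.cnx.find('CWUser').entities())       # iterate over them all
```
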
def cmd_update_etype_fti_weight(self, etype, weight):
if self.repo.system_source.dbdriver == 'postgres':
@@ -1416,7 +1379,7 @@
indexable entity types
"""
from cubicweb.server.checkintegrity import reindex_entities
- reindex_entities(self.repo.schema, self.session, etypes=etypes)
+ reindex_entities(self.repo.schema, self.cnx._cnx, etypes=etypes)
@contextmanager
def cmd_dropped_constraints(self, etype, attrname, cstrtype=None,
@@ -1461,7 +1424,7 @@
"""
if not ask_confirm or self.confirm('Execute sql: %s ?' % sql):
try:
- cu = self.session.system_sql(sql, args)
+ cu = self.cnx.system_sql(sql, args)
except Exception:
ex = sys.exc_info()[1]
if self.confirm('Error: %s\nabort?' % ex, pdb=True):
@@ -1479,7 +1442,7 @@
if not isinstance(rql, (tuple, list)):
rql = ( (rql, kwargs), )
res = None
- execute = self._cw.execute
+ execute = self.cnx.execute
for rql, kwargs in rql:
if kwargs:
msg = '%s (%s)' % (rql, kwargs)
@@ -1515,7 +1478,7 @@
self.sqlexec(sql, ask_confirm=False)
dbhelper = self.repo.system_source.dbhelper
sqltype = dbhelper.TYPE_MAPPING[newtype]
- cursor = self.session.cnxset[self.repo.system_source.uri]
+ cursor = self.cnx._cnx.cnxset.cu
dbhelper.change_col_type(cursor, 'cw_%s' % etype, 'cw_%s' % attr, sqltype, allownull)
if commit:
self.commit()
--- a/server/msplanner.py Tue Jun 10 09:35:26 2014 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1822 +0,0 @@
-# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
-# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
-#
-# This file is part of CubicWeb.
-#
-# CubicWeb is free software: you can redistribute it and/or modify it under the
-# terms of the GNU Lesser General Public License as published by the Free
-# Software Foundation, either version 2.1 of the License, or (at your option)
-# any later version.
-#
-# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
-# details.
-#
-# You should have received a copy of the GNU Lesser General Public License along
-# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""plan execution of rql queries on multiple sources
-
-the best way to understand what we are trying to achieve here is to read the
-unit-tests in unittest_msplanner.py
-
-
-What you need to know
-~~~~~~~~~~~~~~~~~~~~~
-1. The system source is expected to support every entity and relation types
-
-2. Given "X relation Y":
-
- * if the relation and the X and Y types are supported by the external source,
- we suppose by default that X and Y should both come from the same source as
- the relation. You can specify otherwise by adding the relation to the
- "cross_relations" set in the source's mapping file; in that case, we'll
- consider that we can also find in the system source some relation between
- X and Y coming from different sources.
-
- * if "relation" isn't supported by the external source but X or Y
- types (or both) are, we suppose by default that can find in the system
- source some relation where X and/or Y come from the external source. You
- can specify otherwise by adding relation into the "dont_cross_relations"
- set in the source's mapping file and it that case, we'll consider that we
- can only find in the system source some relation between X and Y coming
- the system source.
-
-
-Implementation
-~~~~~~~~~~~~~~
-XXX explain algorithm
-
-
-Examples of multi-source query execution
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-For a system source and an ldap user source (only CWUser and its attributes
-are supported, no groups or such):
-
-:CWUser X:
-1. fetch CWUser X from both sources and return concatenation of results
-
-:CWUser X WHERE X in_group G, G name 'users':
-* catch 1
- 1. fetch CWUser X from both sources, store concatenation of results into a
- temporary table
- 2. return the result of TMP X WHERE X in_group G, G name 'users' from the
- system source
-* catch 2
- 1. return the result of CWUser X WHERE X in_group G, G name 'users' from system
- source, that's enough (optimization of the sql querier will avoid join on
- CWUser, so we will directly get local eids)
-
-:CWUser X,L WHERE X in_group G, X login L, G name 'users':
-1. fetch Any X,L WHERE X is CWUser, X login L from both sources, store
- concatenation of results into a temporary table
-2. return the result of Any X, L WHERE X is TMP, X login L, X in_group G,
- G name 'users' from the system source
-
-
-:Any X WHERE X owned_by Y:
-* catch 1
- 1. fetch CWUser X from both sources, store concatenation of results into a
- temporary table
- 2. return the result of Any X WHERE X owned_by Y, Y is TMP from the system
- source
-* catch 2
- 1. return the result of Any X WHERE X owned_by Y from system source, that's
- enough (optimization of the sql querier will avoid join on CWUser, so we
- will directly get local eids)
-"""
-
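
To make the plans sketched above concrete, here is the third example rendered as plain data (purely illustrative: the real planner builds step objects from mssteps, not tuples):

```python
# CWUser X,L WHERE X in_group G, X login L, G name 'users', executed
# against a system source plus an ldap source supporting only CWUser:
steps = [
    # step 1: fetch from both sources into a temporary table
    (('system', 'ldap'), 'Any X,L WHERE X is CWUser, X login L', 'TMP'),
    # step 2: final join performed on the system source only
    (('system',),
     "Any X,L WHERE X is TMP, X login L, X in_group G, G name 'users'",
     None),
]
```
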
-__docformat__ = "restructuredtext en"
-
-from itertools import imap, ifilterfalse
-
-from logilab.common.compat import any
-from logilab.common.decorators import cached
-from logilab.common.deprecation import deprecated
-
-from rql import BadRQLQuery
-from rql.stmts import Union, Select
-from rql.nodes import (VariableRef, Comparison, Relation, Constant, Variable,
- Not, Exists, SortTerm, Function)
-
-from cubicweb import server
-from cubicweb.utils import make_uid
-from cubicweb.rqlrewrite import add_types_restriction, cleanup_solutions
-from cubicweb.server.ssplanner import SSPlanner, OneFetchStep
-from cubicweb.server.mssteps import *
-
-Variable._ms_table_key = lambda x: x.name
-Relation._ms_table_key = lambda x: x.r_type
-# str() Constant.value to ensure generated table name won't be unicode
-Constant._ms_table_key = lambda x: str(x.value)
-
-Variable._ms_may_be_processed = lambda x, terms, linkedterms: any(
- t for t in terms if t in linkedterms.get(x, ()))
-Relation._ms_may_be_processed = lambda x, terms, linkedterms: all(
- getattr(hs, 'variable', hs) in terms for hs in x.get_variable_parts())
-
-def ms_scope(term):
- rel = None
- scope = term.scope
- if isinstance(term, Variable) and len(term.stinfo['relations']) == 1:
- rel = iter(term.stinfo['relations']).next().relation()
- elif isinstance(term, Constant):
- rel = term.relation()
- elif isinstance(term, Relation):
- rel = term
- if rel is not None and (
- rel.r_type != 'identity' and rel.scope is scope
- and isinstance(rel.parent, Exists) and rel.parent.neged(strict=True)):
- return scope.parent.scope
- return scope
-
-def need_intersect(select, getrschema):
- for rel in select.iget_nodes(Relation):
- if isinstance(rel.parent, Exists) and rel.parent.neged(strict=True) and not rel.is_types_restriction():
- rschema = getrschema(rel.r_type)
- if not rschema.final:
- # if one of the relation's variable is ambiguous but not
- # invariant, an intersection will be necessary
- for vref in rel.get_nodes(VariableRef):
- var = vref.variable
- if (var.valuable_references() == 1
- and len(var.stinfo['possibletypes']) > 1):
- return True
- return False
-
-def neged_relation(rel):
- parent = rel.parent
- return isinstance(parent, Not) or (isinstance(parent, Exists) and
- isinstance(parent.parent, Not))
-
-def need_source_access_relation(vargraph):
- if not vargraph:
- return False
- # check vargraph contains some other relation than the identity relation
- # test the key's nature since it may be a variable name (don't care about
- # that) or a 2-tuple (var1, var2) associated with the relation to traverse
- # to go from var1 to var2
- return any(key for key, val in vargraph.iteritems()
- if isinstance(key, tuple) and val != 'identity')
-
-def need_aggr_step(select, sources, stepdefs=None):
- """return True if a temporary table is necessary to store some partial
- results to execute the given query
- """
- if len(sources) == 1:
- # can do everything at once with a single source
- return False
- if select.orderby or select.groupby or select.has_aggregat:
- # if there is more than one source, we need a temp table to deal with
- # sort / groups / aggregate if:
- # * the rqlst won't be split (otherwise the last query using the
- # partial temporary table can do sort/groups/aggregate without
- # the need for a later AggrStep)
- # * the rqlst is split into multiple steps and there is more than one
- # final step
- if stepdefs is None:
- return True
- has_one_final = False
- fstepsolindices = set()
- for stepdef in stepdefs:
- if stepdef[-1]:
- if has_one_final or frozenset(stepdef[2]) != fstepsolindices:
- return True
- has_one_final = True
- else:
- fstepsolindices.update(stepdef[2])
- return False
-
-def select_group_sort(select): # XXX something similar done in rql2sql
- # add variables used in groups and sort terms to the selection
- # if necessary
- if select.groupby:
- for vref in select.groupby:
- if not vref in select.selection:
- select.append_selected(vref.copy(select))
- for sortterm in select.orderby:
- for vref in sortterm.iget_nodes(VariableRef):
- if not vref in select.get_selected_variables():
- # we can't directly insert sortterm.term because it references
- # a variable of the select before the copy.
- # XXX if constant terms are used to define sort, their value
- # may necessitate a decay
- select.append_selected(vref.copy(select))
- if select.groupby and not vref in select.groupby:
- select.add_group_var(vref.copy(select))
-
-def allequals(solutions):
- """return true if all solutions are identical"""
- sol = solutions.next()
- for sol_ in solutions:
- if sol_ != sol:
- return False
- return True
-
-# XXX move functions below to rql ##############################################
-
-def is_ancestor(n1, n2):
- """return True if n2 is a parent scope of n1"""
- p = n1.parent
- while p is not None:
- if p is n2:
- return True
- p = p.parent
- return False
-
-def copy_node(newroot, node, subparts=()):
- newnode = node.__class__(*node.initargs(newroot))
- for part in subparts:
- newnode.append(part)
- return newnode
-
-def used_in_outer_scope(var, scope):
- """return true if the variable is used in an outer scope of the given scope
- """
- for rel in var.stinfo['relations']:
- rscope = ms_scope(rel)
- if not rscope is scope and is_ancestor(scope, rscope):
- return True
- return False
-
-################################################################################
-
-class PartPlanInformation(object):
- """regroups necessary information to execute some part of a "global" rql
- query ("global" means as received by the querier, which may result in
- several internal queries, e.g. parts, due to security insertions). Actually
- a PPI is created for each subquery and for each query in a union.
-
- It exposes as well some methods helping in executing this part on a
- multi-sources repository, modifying its internal structure during the
- process.
-
- :attr plan:
- the execution plan
- :attr rqlst:
- the original rql syntax tree handled by this part
-
- :attr needsplit:
- bool telling if the query has to be split into multiple steps for
- execution or if it can be executed at once
-
- :attr temptable:
- a SQL temporary table name or None, if necessary to handle aggregate /
- sorting for this part of the query
-
- :attr finaltable:
- a SQL table name or None, if results for this part of the query should be
- written into a temporary table (usually shared by multiple PPI)
-
- :attr sourcesterms:
- a dictionary {source : {term: set([solution index, ])}} telling for each
- source which terms are supported for which solutions. A "term" may be
- either a rql Variable, Constant or Relation node.
- """
- def __init__(self, plan, rqlst, rqlhelper=None):
- self.plan = plan
- self.rqlst = rqlst
- self.needsplit = False
- self.temptable = None
- self.finaltable = None
- # shortcuts
- self._schema = plan.schema
- self._session = plan.session
- self._repo = self._session.repo
- self._solutions = rqlst.solutions
- self._solindices = range(len(self._solutions))
- self.system_source = self._repo.system_source
- # source : {term: [solution index, ]}
- self.sourcesterms = self._sourcesterms = {}
- # source : {relation: set(child variable and constant)}
- self._crossrelations = {}
- # term : set(sources)
- self._discarded_sources = {}
- # dictionary of variables and constants which are linked to each other
- # using a non final relation supported by multiple sources (crossed or
- # not).
- self._linkedterms = {}
- # processing
- termssources = self._compute_sourcesterms()
- self._remove_invalid_sources(termssources)
- self._compute_needsplit()
- # after initialisation, .sourcesterms contains the same thing as
- # ._sourcesterms though during plan construction, ._sourcesterms will
- # be modified while .sourcesterms will be kept unmodified
- self.sourcesterms = {}
- for k, v in self._sourcesterms.iteritems():
- self.sourcesterms[k] = {}
- for k2, v2 in v.iteritems():
- self.sourcesterms[k][k2] = v2.copy()
- # cleanup linked var
- for var, linkedrelsinfo in self._linkedterms.iteritems():
- self._linkedterms[var] = frozenset(x[0] for x in linkedrelsinfo)
- # map output of a step to input of a following step
- self._inputmaps = {}
- # record input map conflicts to resolve them on final step generation
- self._conflicts = []
- if rqlhelper is not None: # else test
- self._insert_identity_variable = rqlhelper._annotator.rewrite_shared_optional
- if server.DEBUG & server.DBG_MS:
- print 'sourcesterms:'
- self._debug_sourcesterms()
-
- def _debug_sourcesterms(self):
- for source in self._sourcesterms:
- print '-', source
- for term, sols in self._sourcesterms[source].items():
- print ' -', term, id(term), ':', sols
-
- def copy_solutions(self, solindices):
- return [self._solutions[solidx].copy() for solidx in solindices]
-
- @property
- @cached
- def part_sources(self):
- if self._sourcesterms:
- return tuple(sorted(self._sourcesterms))
- return (self.system_source,)
-
- @property
- @cached
- def _sys_source_set(self):
- return frozenset((self.system_source, solindex)
- for solindex in self._solindices)
-
- @cached
- def _norel_support_set(self, relation):
- """return a set of (source, solindex) where source doesn't support the
- relation
- """
- return frozenset((source, solidx) for source in self._repo.sources
- for solidx in self._solindices
- if not ((source.support_relation(relation.r_type))
- or relation.r_type in source.dont_cross_relations))
-
- def _compute_sourcesterms(self):
- """compute for each term (variable, rewritten constant, relation) and
- for each solution in the rqlst which sources support them
- """
- repo = self._repo
- eschema = self._schema.eschema
- sourcesterms = self._sourcesterms
- # find for each source which variable/solution are supported
- for varname, varobj in self.rqlst.defined_vars.items():
- # if variable has an eid specified, we can get its source directly
- # NOTE: use uidrel and not constnode to deal with "X eid IN(1,2,3,4)"
- if varobj.stinfo['uidrel'] is not None:
- rel = varobj.stinfo['uidrel']
- hasrel = len(varobj.stinfo['relations']) > 1
- for const in rel.children[1].get_nodes(Constant):
- eid = const.eval(self.plan.args)
- source = self._session.source_from_eid(eid)
- if (source is self.system_source
- or (hasrel and varobj._q_invariant and
- not any(source.support_relation(r.r_type)
- for r in varobj.stinfo['relations']
- if not r is rel))):
- self._set_source_for_term(self.system_source, varobj)
- else:
- self._set_source_for_term(source, varobj)
- continue
- rels = varobj.stinfo['relations']
- if not rels and varobj.stinfo['typerel'] is None:
- # (rare) case where the variable has no type specified nor
- # relation accessed, e.g. "Any MAX(X)"
- self._set_source_for_term(self.system_source, varobj)
- continue
- for i, sol in enumerate(self._solutions):
- vartype = sol[varname]
- # skip final variable
- if eschema(vartype).final:
- break
- for source in repo.sources:
- if source.support_entity(vartype):
- # the source supports the entity type, though we will
- # actually have to fetch from it only if
- # * the variable isn't invariant
- # * at least one supported relation specified
- if not varobj._q_invariant or \
- any(imap(source.support_relation,
- (r.r_type for r in rels if r.r_type not in ('identity', 'eid')))):
- sourcesterms.setdefault(source, {}).setdefault(varobj, set()).add(i)
- # if variable is not invariant and is used by a relation
- # not supported by this source, we'll have to split the
- # query
- if not varobj._q_invariant and any(ifilterfalse(
- source.support_relation, (r.r_type for r in rels))):
- self.needsplit = True
- # add source for rewritten constants to sourcesterms
- self._const_vars = {}
- for vconsts in self.rqlst.stinfo['rewritten'].itervalues():
- # remember those consts come from the same variable
- for const in vconsts:
- self._const_vars[const] = vconsts
- source = self._session.source_from_eid(const.eval(self.plan.args))
- if source is self.system_source:
- for const in vconsts:
- self._set_source_for_term(source, const)
- elif not self._sourcesterms:
- for const in vconsts:
- self._set_source_for_term(source, const)
- elif source in self._sourcesterms:
- source_scopes = frozenset(ms_scope(t) for t in self._sourcesterms[source])
- for const in vconsts:
- if ms_scope(const) in source_scopes:
- self._set_source_for_term(source, const)
- # if the system source is used, add every rewritten constant
- # to its supported terms even when the associated entity
- # doesn't actually come from it, so that allequals will
- # return True as expected when computing needsplit
- # check const is used in a relation restriction
- if const.relation() and self.system_source in sourcesterms:
- self._set_source_for_term(self.system_source, const)
- # add source for relations
- rschema = self._schema.rschema
- termssources = {}
- sourcerels = []
- for rel in self.rqlst.iget_nodes(Relation):
- # process non final relations only
- # note: don't try to get schema for 'is' relation (not available
- # during bootstrap)
- if rel.r_type == 'cw_source':
- sourcerels.append(rel)
- if not (rel.is_types_restriction() or rschema(rel.r_type).final):
- # nothing to do if relation is not supported by multiple sources
- # or if some source has it listed in its cross_relations
- # attribute
- #
- # XXX the code below doesn't handle the case where one source
- # allows relation crossing but another one doesn't
- relsources = [s for s in repo.rel_type_sources(rel.r_type)
- if s is self.system_source
- or s in self._sourcesterms]
- if len(relsources) < 2:
- # filter out sources being there because they have this
- # relation in their dont_cross_relations attribute
- relsources = [source for source in relsources
- if source.support_relation(rel.r_type)]
- if relsources:
- # this means the relation is using a variable inlined as
- # a constant and another unsupported variable, in which
- # case we put the relation in sourcesterms
- self._sourcesterms.setdefault(relsources[0], {})[rel] = set(self._solindices)
- continue
- lhs, rhs = rel.get_variable_parts()
- lhsv, rhsv = getattr(lhs, 'variable', lhs), getattr(rhs, 'variable', rhs)
- # update dictionary of sources supporting lhs and rhs vars
- if not lhsv in termssources:
- termssources[lhsv] = self._term_sources(lhs)
- if not rhsv in termssources:
- termssources[rhsv] = self._term_sources(rhs)
- self._handle_cross_relation(rel, relsources, termssources)
- self._linkedterms.setdefault(lhsv, set()).add((rhsv, rel))
- self._linkedterms.setdefault(rhsv, set()).add((lhsv, rel))
- # extract information from cw_source relation
- for srel in sourcerels:
- vref = srel.children[1].children[0]
- sourceeids, sourcenames = [], []
- if isinstance(vref, Constant):
- # simplified variable
- sourceeids = None, (vref.eval(self.plan.args),)
- var = vref
- else:
- var = vref.variable
- for rel in var.stinfo['relations'] - var.stinfo['rhsrelations']:
- # skip neged eid relation since it's the kind of query
- # generated when clearing the old value of a '?1' relation,
- # cw_source included. See
- # unittest_ldapuser.test_copy_to_system_source
- if rel.r_type == 'name' or \
- (rel.r_type == 'eid' and not rel.neged(strict=True)):
- if rel.r_type == 'eid':
- slist = sourceeids
- else:
- slist = sourcenames
- sources = [cst.eval(self.plan.args)
- for cst in rel.children[1].get_nodes(Constant)]
- if sources:
- if slist:
- # don't attempt to do anything
- sourcenames = sourceeids = None
- break
- slist[:] = (rel, sources)
- if sourceeids:
- rel, values = sourceeids
- sourcesdict = self._repo.sources_by_eid
- elif sourcenames:
- rel, values = sourcenames
- sourcesdict = self._repo.sources_by_uri
- else:
- sourcesdict = None
- if sourcesdict is not None:
- lhs = srel.children[0]
- try:
- sources = [sourcesdict[key] for key in values]
- except KeyError:
- raise BadRQLQuery('source conflict for term %s' % lhs.as_string())
- if isinstance(lhs, Constant):
- source = self._session.source_from_eid(lhs.eval(self.plan.args))
- if not source in sources:
- raise BadRQLQuery('source conflict for term %s' % lhs.as_string())
- else:
- lhs = getattr(lhs, 'variable', lhs)
- invariant = getattr(lhs, '_q_invariant', False)
- # XXX NOT NOT
- neged = srel.neged(traverse_scope=True) or (rel and rel.neged(strict=True))
- has_copy_based_source = False
- sources_ = []
- for source in sources:
- if source.copy_based_source:
- has_copy_based_source = True
- if not self.system_source in sources_:
- sources_.append(self.system_source)
- else:
- sources_.append(source)
- sources = sources_
- if neged:
- for source in sources:
- if invariant and source is self.system_source:
- continue
- self._remove_source_term(source, lhs)
- self._discarded_sources.setdefault(lhs, set()).add(source)
- usesys = self.system_source not in sources
- else:
- for source, terms in sourcesterms.items():
- if lhs in terms and not source in sources:
- if invariant and source is self.system_source:
- continue
- self._remove_source_term(source, lhs)
- self._discarded_sources.setdefault(lhs, set()).add(source)
- usesys = self.system_source in sources
- if rel is None or (len(var.stinfo['relations']) == 2 and
- not var.stinfo['selected']):
- self._remove_source_term(self.system_source, var)
- if not (has_copy_based_source or len(sources) > 1
- or usesys or invariant):
- if rel is None:
- srel.parent.remove(srel)
- else:
- self.rqlst.undefine_variable(var)
- self._remove_source_term(self.system_source, srel)
- return termssources
-
- def _handle_cross_relation(self, rel, relsources, termssources):
- for source in relsources:
- if rel.r_type in source.cross_relations:
- ssource = self.system_source
- crossvars = set(x.variable for x in rel.get_nodes(VariableRef))
- for const in rel.get_nodes(Constant):
- if source.uri != 'system' and not const in self._sourcesterms.get(source, ()):
- continue
- crossvars.add(const)
- self._crossrelations.setdefault(source, {})[rel] = crossvars
- if len(crossvars) < 2:
- # this means there is a constant in the relation which is
- # not supported by the source, so we can stop here
- continue
- self._sourcesterms.setdefault(ssource, {})[rel] = set(self._solindices)
- solindices = None
- for term in crossvars:
- if len(termssources[term]) == 1 and iter(termssources[term]).next()[0].uri == 'system':
- for ov in crossvars:
- if ov is not term and (isinstance(ov, Constant) or ov._q_invariant):
- ssset = frozenset((ssource,))
- self._remove_sources(ov, termssources[ov] - ssset)
- break
- if solindices is None:
- solindices = set(sol for s, sol in termssources[term]
- if s is source)
- else:
- solindices &= set(sol for s, sol in termssources[term]
- if s is source)
- else:
- self._sourcesterms.setdefault(source, {})[rel] = solindices
-
- def _remove_invalid_sources(self, termssources):
- """removes invalid sources from `sourcesterms` member according to
- traversed relations and their properties (which sources support them,
- can they cross sources, etc.)
- """
- for term in self._linkedterms:
- self._remove_sources_until_stable(term, termssources)
- if len(self._sourcesterms) > 1 and hasattr(self.plan.rqlst, 'main_relations'):
- # the querier doesn't annotate write queries, need to do it here
- self.plan.annotate_rqlst()
- # insert/update/delete queries, we may get extra information from
- # the main relation (e.g. relations to the left of the WHERE)
- if self.plan.rqlst.TYPE == 'insert':
- inserted = dict((vref.variable, etype)
- for etype, vref in self.plan.rqlst.main_variables)
- else:
- inserted = {}
- repo = self._repo
- rschema = self._schema.rschema
- for rel in self.plan.rqlst.main_relations:
- if not rschema(rel.r_type).final:
- # nothing to do if relation is not supported by multiple sources
- if len(repo.rel_type_sources(rel.r_type)) < 2:
- continue
- lhs, rhs = rel.get_variable_parts()
- try:
- lhsv = self._extern_term(lhs, termssources, inserted)
- rhsv = self._extern_term(rhs, termssources, inserted)
- except KeyError:
- continue
- self._remove_term_sources(lhsv, rel, rhsv, termssources)
- self._remove_term_sources(rhsv, rel, lhsv, termssources)
-
- def _extern_term(self, term, termssources, inserted):
- var = term.variable
- if var.stinfo['constnode']:
- termv = var.stinfo['constnode']
- termssources[termv] = self._term_sources(termv)
- elif var in inserted:
- termv = var
- source = self._repo.locate_etype_source(inserted[var])
- termssources[termv] = set((source, solindex)
- for solindex in self._solindices)
- else:
- termv = self.rqlst.defined_vars[var.name]
- if not termv in termssources:
- termssources[termv] = self._term_sources(termv)
- return termv
-
- def _remove_sources_until_stable(self, term, termssources):
- sourcesterms = self._sourcesterms
- for oterm, rel in self._linkedterms.get(term, ()):
- tscope = ms_scope(term)
- otscope = ms_scope(oterm)
- rscope = ms_scope(rel)
- if not tscope is otscope and rscope.neged(strict=True):
- # can't get information from relation inside a NOT exists
- # where terms don't belong to the same scope
- continue
- need_ancestor_scope = False
- if not (tscope is rscope and otscope is rscope):
- if rel.ored():
- continue
- if rel.ored(traverse_scope=True):
- # if relation has some OR as parent, constraints should only
- # propagate from parent scope to child scope, nothing else
- need_ancestor_scope = True
- relsources = self._repo.rel_type_sources(rel.r_type)
- if neged_relation(rel) and (
- len(relsources) < 2
- or not isinstance(oterm, Variable)
- or oterm.valuable_references() != 1
- or any(sourcesterms[source][term] != sourcesterms[source][oterm]
- for source in relsources
- if term in sourcesterms.get(source, ())
- and oterm in sourcesterms.get(source, ()))):
- # a neged relation doesn't allow inferring term sources unless
- # we're on a multisource relation for a term only used by this
- # relation (e.g. "Any X WHERE NOT X multisource_rel Y" where the
- # term is Y)
- continue
- # compute invalid sources for terms and remove them
- if not need_ancestor_scope or is_ancestor(tscope, otscope):
- self._remove_term_sources(term, rel, oterm, termssources)
- if not need_ancestor_scope or is_ancestor(otscope, tscope):
- self._remove_term_sources(oterm, rel, term, termssources)
-
- def _remove_term_sources(self, term, rel, oterm, termssources):
- """remove invalid sources for term according to oterm's sources and the
- relation between those two terms.
- """
- norelsup = self._norel_support_set(rel)
- termsources = termssources[term]
- invalid_sources = termsources - (termssources[oterm] | norelsup)
- if invalid_sources and self._repo.can_cross_relation(rel.r_type):
- invalid_sources -= self._sys_source_set
- if invalid_sources and isinstance(term, Variable) \
- and self._need_ext_source_access(term, rel):
- # if the term is a non-invariant variable, we should filter
- # sources where the relation is a cross relation out of the
- # invalid sources
- invalid_sources = frozenset((s, solidx) for s, solidx in invalid_sources
- if not (s in self._crossrelations and
- rel in self._crossrelations[s]))
- if invalid_sources:
- self._remove_sources(term, invalid_sources)
- discarded = self._discarded_sources.get(term)
- if discarded is not None and not any(x[0] for x in (termsources-invalid_sources)
- if not x[0] in discarded):
- raise BadRQLQuery('relation %s cannot be crossed but %s and %s should '
- 'come from different sources' %
- (rel.r_type, term.as_string(), oterm.as_string()))
- # if term is a rewritten const, we can apply the same changes to
- # all other consts inserted from the same original variable
- for const in self._const_vars.get(term, ()):
- if const is not term:
- self._remove_sources(const, invalid_sources)
- termsources -= invalid_sources
- self._remove_sources_until_stable(term, termssources)
- if isinstance(oterm, Constant):
- self._remove_sources(oterm, invalid_sources)
-
- def _compute_needsplit(self):
- """tell according to sourcesterms if the rqlst has to be splitted for
- execution among multiple sources
-
- the execution has to be split if
- * a source support an entity (non invariant) but doesn't support a
- relation on it
- * a source support an entity which is accessed by an optional relation
- * there is more than one source and either all sources'supported
- variable/solutions are not equivalent or multiple variables have to
- be fetched from some source
- """
- # NOTE: < 2 since it may be 0 on queries such as Any X WHERE X eid 2
- if len(self._sourcesterms) < 2:
- self.needsplit = False
- # if this is not the system source but we have only constant terms
- # and no relation (other than eid), apply query on the system source
- #
- # testing for a rqlst with nothing in vargraph nor defined_vars is the
- # simplest way to check the condition explained below
- if not self.system_source in self._sourcesterms and \
- not self.rqlst.defined_vars and \
- not need_source_access_relation(self.rqlst.vargraph):
- self._sourcesterms = {self.system_source: {}}
- elif not self.needsplit:
- if not allequals(self._sourcesterms.itervalues()):
- for source, terms in self._sourcesterms.iteritems():
- if source is self.system_source:
- continue
- if any(x for x in terms if not isinstance(x, Constant)):
- self.needsplit = True
- return
- self._sourcesterms = {self.system_source: {}}
- self.needsplit = False
- else:
- sample = self._sourcesterms.itervalues().next()
- if len(sample) > 1:
- for term in sample:
- # need split if unlinked variable
- if isinstance(term, Variable) and not term in self._linkedterms:
- self.needsplit = True
- break
- else:
- # need split if there are some cross relations on a
- # non-invariant variable or if the variable is used in
- # a multi-sources relation
- if self._crossrelations:
- for reldict in self._crossrelations.itervalues():
- for rel, terms in reldict.iteritems():
- for term in terms:
- if isinstance(term, Variable) \
- and self._need_ext_source_access(term, rel):
- self.needsplit = True
- return
- else:
- # remove sources only accessing constant nodes
- for source, terms in self._sourcesterms.items():
- if source is self.system_source:
- continue
- if not any(x for x in terms if not isinstance(x, Constant)):
- del self._sourcesterms[source]
- if len(self._sourcesterms) < 2:
- self.needsplit = False
-
- @cached
- def _need_ext_source_access(self, var, rel):
- if not var._q_invariant:
- return True
- if any(r for x, r in self._linkedterms[var]
- if not r is rel and self._repo.is_multi_sources_relation(r.r_type)):
- return True
- return False
-
- def _set_source_for_term(self, source, term):
- self._sourcesterms.setdefault(source, {})[term] = set(self._solindices)
-
- def _term_sources(self, term):
- """returns possible sources for terms `term`"""
- if isinstance(term, Constant):
- source = self._session.source_from_eid(term.eval(self.plan.args))
- return set((source, solindex) for solindex in self._solindices)
- else:
- var = getattr(term, 'variable', term)
- sources = [source for source, varobjs in self.sourcesterms.iteritems()
- if var in varobjs]
- return set((source, solindex) for source in sources
- for solindex in self.sourcesterms[source][var])
-
- def _remove_sources(self, term, sources):
- """removes invalid sources (`sources`) from `sourcesterms`
-
- :param sources: the list of sources to remove
- :param term: the analyzed term
- """
- sourcesterms = self._sourcesterms
- for source, solindex in sources:
- try:
- sourcesterms[source][term].remove(solindex)
- except KeyError:
- import rql.base as rqlb
- assert isinstance(term, (rqlb.BaseNode, Variable)), repr(term)
- continue # may occur with subquery column alias
- if not sourcesterms[source][term]:
- self._remove_source_term(source, term)
-
- def _remove_source_term(self, source, term):
- try:
- poped = self._sourcesterms[source].pop(term, None)
- except KeyError:
- pass
- else:
- if not self._sourcesterms[source]:
- del self._sourcesterms[source]
-
- def crossed_relation(self, source, relation):
- return relation in self._crossrelations.get(source, ())
-
- def part_steps(self):
- """precompute necessary part steps before generating actual rql for
- each step. This is necessary to know if an aggregate step will be
- necessary or not.
- """
- steps = []
- select = self.rqlst
- rschema = self._schema.rschema
- for source in self.part_sources:
- try:
- sourceterms = self._sourcesterms[source]
- except KeyError:
- continue # already processed
- while sourceterms:
- # take a term randomly, and all terms supporting the
- # same solutions
- term, solindices = self._choose_term(source, sourceterms)
- if source.uri == 'system':
- # ensure all variables are available for the latest step
- # (missing ones will be available from temporary tables
- # of previous steps)
- scope = select
- terms = scope.defined_vars.values() + scope.aliases.values()
- sourceterms.clear()
- sources = [source]
- else:
- scope = ms_scope(term)
- # find which sources support the same term and solutions
- sources = self._expand_sources(source, term, solindices)
- # now try to get as many terms as possible
- terms = self._expand_terms(term, sources, sourceterms,
- scope, solindices)
- if len(terms) == 1 and isinstance(terms[0], Constant):
- # we can't generate anything interesting with a single
- # constant term (will generate an empty "Any" query),
- # go to the next iteration directly!
- continue
- if not sourceterms:
- try:
- del self._sourcesterms[source]
- except KeyError:
- # XXX already cleaned
- pass
- # set of terms which should be additionally selected when
- # possible
- needsel = set()
- if not self._sourcesterms and scope is select:
- terms += scope.defined_vars.values() + scope.aliases.values()
- if isinstance(term, Relation) and len(sources) > 1:
- variants = set()
- partterms = [term]
- for vref in term.get_nodes(VariableRef):
- if not vref.variable._q_invariant:
- variants.add(vref.name)
- if len(variants) == 2:
- # we need an extra-step to fetch relations from each source
- # before a join with prefetched inputs
- # (see test_crossed_relation_noeid_needattr in
- # unittest_msplanner / unittest_multisources)
- lhs, rhs = term.get_variable_parts()
- steps.append( (sources, [term, getattr(lhs, 'variable', lhs),
- getattr(rhs, 'variable', rhs)],
- solindices, scope, variants, False) )
- sources = [self.system_source]
- final = True
- else:
- # suppose this is a final step until the contrary is proven
- final = scope is select
- # add attribute variables and mark variables which should be
- # additionally selected when possible
- for var in select.defined_vars.itervalues():
- if not var in terms:
- stinfo = var.stinfo
- for ovar, rtype in stinfo.get('attrvars', ()):
- if ovar in terms:
- needsel.add(var.name)
- terms.append(var)
- break
- else:
- needsel.add(var.name)
- final = False
- # check all relations are supported by the sources
- for rel in scope.iget_nodes(Relation):
- if rel.is_types_restriction():
- continue
- # take care not to overwrite the existing "source" identifier
- for _source in sources:
- if not _source.support_relation(rel.r_type) or (
- self.crossed_relation(_source, rel) and not rel in terms):
- for vref in rel.iget_nodes(VariableRef):
- needsel.add(vref.name)
- final = False
- break
- else:
- if not scope is select:
- self._exists_relation(rel, terms, needsel, source)
- # if the relation is supported by all sources and one of
- # its lhs/rhs variables isn't in "terms", while the
- # other end *is* in "terms", mark it as having to be
- # selected
- if source.uri != 'system' and not rschema(rel.r_type).final:
- lhs, rhs = rel.get_variable_parts()
- try:
- lhsvar = lhs.variable
- except AttributeError:
- lhsvar = lhs
- try:
- rhsvar = rhs.variable
- except AttributeError:
- rhsvar = rhs
- try:
- if lhsvar in terms and not rhsvar in terms:
- needsel.add(lhsvar.name)
- elif rhsvar in terms and not lhsvar in terms:
- needsel.add(rhsvar.name)
- except AttributeError:
- continue # not an attribute, no selection needed
- if final and source.uri != 'system':
- # check rewritten constants
- for vconsts in select.stinfo['rewritten'].itervalues():
- const = vconsts[0]
- eid = const.eval(self.plan.args)
- _source = self._session.source_from_eid(eid)
- if len(sources) > 1 or not _source in sources:
- # if there is some rewritten constant used by a non-
- # neged relation while there are some sources not
- # supporting the associated entity, this step can't
- # be final (unless the relation is explicitly in
- # `terms`, e.g. cross relations)
- for c in vconsts:
- rel = c.relation()
- if rel is None or not (rel in terms or neged_relation(rel)):
- final = False
- break
- break
- if final:
- self._cleanup_sourcesterms(sources, solindices)
- steps.append((sources, terms, solindices, scope, needsel, final)
- )
- if not steps[-1][-1]:
- # add a final step
- terms = select.defined_vars.values() + select.aliases.values()
- steps.append( ([self.system_source], terms, set(self._solindices),
- select, set(), True) )
- return steps
-
- def _exists_relation(self, rel, terms, needsel, source):
- rschema = self._schema.rschema(rel.r_type)
- lhs, rhs = rel.get_variable_parts()
- try:
- lhsvar, rhsvar = lhs.variable, rhs.variable
- except AttributeError:
- pass
- else:
- # supported relation with at least one end supported, check the
- # other end is in as well. If not, this usually means the
- # variable is refed by an outer scope and should be substituted
- # using an 'identity' relation (else we'll get a conflict of
- # temporary tables)
- relscope = ms_scope(rel)
- lhsscope = ms_scope(lhsvar)
- rhsscope = ms_scope(rhsvar)
- if rhsvar in terms and not lhsvar in terms and lhsscope is lhsvar.stmt:
- self._identity_substitute(rel, lhsvar, terms, needsel, relscope)
- elif lhsvar in terms and not rhsvar in terms and rhsscope is rhsvar.stmt:
- self._identity_substitute(rel, rhsvar, terms, needsel, relscope)
- elif self.crossed_relation(source, rel):
- if lhsscope is not relscope:
- self._identity_substitute(rel, lhsvar, terms, needsel,
- relscope, lhsscope)
- if rhsscope is not relscope:
- self._identity_substitute(rel, rhsvar, terms, needsel,
- relscope, rhsscope)
-
- def _identity_substitute(self, relation, var, terms, needsel, exist,
- idrelscope=None):
- newvar = self._insert_identity_variable(exist, var, idrelscope)
- # ensure relation is using '=' operator, else we rely on a
- # sqlgenerator side effect (it won't insert an inequality operator
- # in this case)
- relation.children[1].operator = '='
- terms.append(newvar)
- needsel.add(newvar.name)
-
- def _choose_term(self, source, sourceterms):
- """pick one term among terms supported by a source, which will be used
- as a base to generate an execution step
- """
- secondchoice = None
- if len(self._sourcesterms) > 1:
- # first, return a non-invariant variable of a crossed relation, then the
- # crossed relation itself
- for term in sourceterms:
- if (isinstance(term, Relation)
- and self.crossed_relation(source, term)
- and not ms_scope(term) is self.rqlst):
- for vref in term.get_variable_parts():
- try:
- var = vref.variable
- except AttributeError:
- # Constant
- continue
- if ((len(var.stinfo['relations']) > 1 or var.stinfo['selected'])
- and var in sourceterms):
- return var, sourceterms.pop(var)
- return term, sourceterms.pop(term)
- # priority to variable from subscopes
- for term in sourceterms:
- if not ms_scope(term) is self.rqlst:
- if isinstance(term, Variable):
- return term, sourceterms.pop(term)
- secondchoice = term
- else:
- # priority to variable from outer scope
- for term in sourceterms:
- if ms_scope(term) is self.rqlst:
- if isinstance(term, Variable):
- return term, sourceterms.pop(term)
- secondchoice = term
- if secondchoice is not None:
- return secondchoice, sourceterms.pop(secondchoice)
- # priority to the variable with the fewest solutions supported and with
- # the most valuable refs. Add variable name for test predictability
- variables = sorted([(var, sols) for (var, sols) in sourceterms.items()
- if isinstance(var, Variable)],
- key=lambda (v, s): (len(s), -v.valuable_references(), v.name))
- if variables:
- var = variables[0][0]
- return var, sourceterms.pop(var)
- # priority to constant
- for term in sourceterms:
- if isinstance(term, Constant):
- return term, sourceterms.pop(term)
- # whatever (relation)
- term = iter(sourceterms).next()
- return term, sourceterms.pop(term)
-
- def _expand_sources(self, selected_source, term, solindices):
- """return all sources supporting given term / solindices"""
- sources = [selected_source]
- sourcesterms = self._sourcesterms
- for source in list(sourcesterms):
- if source is selected_source:
- continue
- if not (term in sourcesterms[source] and
- solindices.issubset(sourcesterms[source][term])):
- continue
- sources.append(source)
- if source.uri != 'system' or not (isinstance(term, Variable) and not term in self._linkedterms):
- termsolindices = sourcesterms[source][term]
- termsolindices -= solindices
- if not termsolindices:
- del sourcesterms[source][term]
- if not sourcesterms[source]:
- del sourcesterms[source]
- return sources
-
- def _expand_terms(self, term, sources, sourceterms, scope, solindices):
- terms = [term]
- sources = sorted(sources)
- sourcesterms = self._sourcesterms
- linkedterms = self._linkedterms
- # term has to belong to the same scope if there is more
- # than the system source remaining
- if len(sourcesterms) > 1 and not scope is self.rqlst:
- candidates = (t for t in sourceterms if scope is ms_scope(t))
- else:
- candidates = sourceterms
- # we only want one unlinked term in each generated query
- candidates = [t for t in candidates
- if isinstance(t, (Constant, Relation)) or
- (solindices.issubset(sourceterms[t]) and t in linkedterms)]
- cross_rels = {}
- for source in sources:
- cross_rels.update(self._crossrelations.get(source, {}))
- exclude = {}
- for crossvars in cross_rels.itervalues():
- vars = [t for t in crossvars if isinstance(t, Variable)]
- try:
- exclude[vars[0]] = vars[1]
- exclude[vars[1]] = vars[0]
- except IndexError:
- pass
- accept_term = lambda x: (not any(s for s in sources
- if not x in sourcesterms.get(s, ()))
- and x._ms_may_be_processed(terms, linkedterms)
- and not exclude.get(x) in terms)
- if isinstance(term, Relation) and term in cross_rels:
- cross_terms = cross_rels.pop(term)
- base_accept_term = accept_term
- accept_term = lambda x: (base_accept_term(x) or x in cross_terms)
- for refed in cross_terms:
- if not refed in candidates:
- terms.append(refed)
-        # repeat until no more terms can be added, since adding a new
-        # term may allow another one to be added
- modified = True
- while modified and candidates:
- modified = False
- for term in candidates[:]:
- if isinstance(term, Constant):
- termsources = set(x[0] for x in self._term_sources(term))
- # ensure system source is there for constant
- if self.system_source in sources:
- termsources.add(self.system_source)
- if sorted(termsources) != sources:
- continue
- terms.append(term)
- candidates.remove(term)
- modified = True
- del sourceterms[term]
- elif accept_term(term):
- terms.append(term)
- candidates.remove(term)
- modified = True
- self._cleanup_sourcesterms(sources, solindices, term)
- return terms
-
- def _cleanup_sourcesterms(self, sources, solindices, term=None):
- """remove solutions so we know they are already processed"""
- for source in sources:
- try:
- sourceterms = self._sourcesterms[source]
- except KeyError:
- continue
- if term is None:
- for term, termsolindices in sourceterms.items():
- if isinstance(term, Relation) and self.crossed_relation(source, term):
- continue
- termsolindices -= solindices
- if not termsolindices:
- del sourceterms[term]
- else:
- try:
- sourceterms[term] -= solindices
- if not sourceterms[term]:
- del sourceterms[term]
- except KeyError:
- pass
- #assert term in cross_terms
- if not sourceterms:
- del self._sourcesterms[source]
-
- def merge_input_maps(self, allsolindices, complete=True):
- """inputmaps is a dictionary with tuple of solution indices as key with
- an associated input map as value. This function compute for each
- solution its necessary input map and return them grouped
-
- ex:
- inputmaps = {(0, 1, 2): {'A': 't1.login1', 'U': 't1.C0', 'U.login': 't1.login1'},
- (1,): {'X': 't2.C0', 'T': 't2.C1'}}
- return : [([1], {'A': 't1.login1', 'U': 't1.C0', 'U.login': 't1.login1',
- 'X': 't2.C0', 'T': 't2.C1'}),
- ([0,2], {'A': 't1.login1', 'U': 't1.C0', 'U.login': 't1.login1'})]
- """
- if not self._inputmaps:
- return [(allsolindices, None)]
- _allsolindices = allsolindices.copy()
- mapbysol = {}
- # compute a single map for each solution
- for solindices, basemap in self._inputmaps.iteritems():
- for solindex in solindices:
- if not (complete or solindex in allsolindices):
- continue
- solmap = mapbysol.setdefault(solindex, {})
- solmap.update(basemap)
- try:
- _allsolindices.remove(solindex)
- except KeyError:
- continue # already removed
- # group results by identical input map
- result = []
- for solindex, solmap in mapbysol.iteritems():
- for solindices, commonmap in result:
- if commonmap == solmap:
- solindices.append(solindex)
- break
- else:
- result.append( ([solindex], solmap) )
- if _allsolindices:
- result.append( (list(_allsolindices), None) )
- return result
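
The grouping can be reproduced with plain dicts; a sketch following the
docstring example above::

    inputmaps = {(0, 1, 2): {'A': 't1.login1', 'U': 't1.C0', 'U.login': 't1.login1'},
                 (1,): {'X': 't2.C0', 'T': 't2.C1'}}
    mapbysol = {}
    # merge every applicable map into a single map per solution index
    for solindices, basemap in inputmaps.items():
        for solindex in solindices:
            mapbysol.setdefault(solindex, {}).update(basemap)
    # then group solution indices sharing an identical merged map
    result = []
    for solindex, solmap in mapbysol.items():
        for solindices, commonmap in result:
            if commonmap == solmap:
                solindices.append(solindex)
                break
        else:
            result.append(([solindex], solmap))
    # solution 1 gets t1's and t2's maps merged; solutions 0 and 2 share t1's map
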
-
- def build_final_part(self, select, solindices, inputmap, sources,
- insertedvars):
- solutions = [self._solutions[i] for i in solindices]
- if self._conflicts and inputmap:
- for varname, mappedto in self._conflicts:
- var = select.defined_vars[varname]
- newvar = select.make_variable()
- # XXX should use var.scope but scope hasn't been computed yet
- select.add_relation(var, 'identity', newvar)
- for sol in solutions:
- sol[newvar.name] = sol[varname]
- inputmap[newvar.name] = mappedto
- rqlst = self.plan.finalize(select, solutions, insertedvars)
- if self.temptable is None and self.finaltable is None:
- return OneFetchStep(self.plan, rqlst, sources, inputmap=inputmap)
- table = self.temptable or self.finaltable
- return FetchStep(self.plan, rqlst, sources, table, True, inputmap)
-
- def build_non_final_part(self, select, solindices, sources, insertedvars,
- table):
- """non final step, will have to store results in a temporary table"""
- inputmapkey = tuple(sorted(solindices))
- solutions = [self._solutions[i] for i in solindices]
- # XXX be smarter vs rql comparison
- idx_key = (select.as_string(), inputmapkey,
- tuple(sorted(sources)), tuple(sorted(insertedvars)))
- try:
-            # if a similar step has already been processed, simply backport its
-            # input map
- step = self.plan.ms_steps_idx[idx_key]
- except KeyError:
- # processing needed
- rqlst = self.plan.finalize(select, solutions, insertedvars)
- step = FetchStep(self.plan, rqlst, sources, table, False)
- self.plan.ms_steps_idx[idx_key] = step
- self.plan.add_step(step)
- # update input map for following steps, according to processed solutions
- inputmap = self._inputmaps.setdefault(inputmapkey, {})
- for varname, mapping in step.outputmap.iteritems():
- if varname in inputmap and not '.' in varname and \
- not (mapping == inputmap[varname] or
- self._schema.eschema(solutions[0][varname]).final):
- self._conflicts.append((varname, inputmap[varname]))
- inputmap.update(step.outputmap)
-
-
-@deprecated('[3.18] old multi-source system will go away in the next version')
-class MSPlanner(SSPlanner):
- """MultiSourcesPlanner: build execution plan for rql queries
-
-    decompose the RQL query according to the sources' schemas
- """
-
- def build_select_plan(self, plan, rqlst):
- """build execution plan for a SELECT RQL query
-
- the rqlst should not be tagged at this point
- """
-        # preprocess deals with security insertion and returns a new syntax tree
-        # which has to be executed to fulfill the query: according
-        # to permissions for each variable's type, different rql queries may
-        # have to be executed
- plan.preprocess(rqlst)
- if server.DEBUG & server.DBG_MS:
- print '-'*80
- print 'PLANNING', rqlst
- ppis = [PartPlanInformation(plan, select, self.rqlhelper)
- for select in rqlst.children]
- plan.ms_steps_idx = {}
- steps = self._union_plan(plan, ppis)
- if server.DEBUG & server.DBG_MS:
- from pprint import pprint
- for step in plan.steps:
- pprint(step.test_repr())
- pprint(steps[0].test_repr())
- return steps
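
The DBG_MS traces printed above are gated by the server debug flags; assuming
the usual switches in cubicweb.server, they can be toggled like this (a
sketch)::

    from cubicweb import server

    server.set_debug(server.DBG_MS)    # planner prints 'PLANNING <rqlst>' + steps
    # ... execute some queries ...
    server.set_debug(server.DBG_NONE)  # back to silence
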
-
- def _ppi_subqueries(self, ppi):
- # part plan info for subqueries
- plan = ppi.plan
- inputmap = {}
- for subquery in ppi.rqlst.with_[:]:
- sppis = [PartPlanInformation(plan, select)
- for select in subquery.query.children]
- for sppi in sppis:
- if sppi.needsplit or sppi.part_sources != ppi.part_sources:
- temptable = plan.make_temp_table_name('T%s' % make_uid(id(subquery)))
- sstep = self._union_plan(plan, sppis, temptable)[0]
- break
- else:
- sstep = None
- if sstep is not None:
- ppi.rqlst.with_.remove(subquery)
- for i, colalias in enumerate(subquery.aliases):
- inputmap[colalias.name] = '%s.C%s' % (temptable, i)
- ppi.plan.add_step(sstep)
- return inputmap
-
- def _union_plan(self, plan, ppis, temptable=None):
- tosplit, cango, allsources = [], {}, set()
- for planinfo in ppis:
- if planinfo.needsplit:
- tosplit.append(planinfo)
- else:
- cango.setdefault(planinfo.part_sources, []).append(planinfo)
- for source in planinfo.part_sources:
- allsources.add(source)
-        # first add steps for query parts which don't need to be split
- steps = []
- for sources, cppis in cango.iteritems():
- byinputmap = {}
- for ppi in cppis:
- select = ppi.rqlst
- if sources != (ppi.system_source,):
- add_types_restriction(self.schema, select)
- # part plan info for subqueries
- inputmap = self._ppi_subqueries(ppi)
- aggrstep = need_aggr_step(select, sources)
- if aggrstep:
- atemptable = plan.make_temp_table_name('T%s' % make_uid(id(select)))
- sunion = Union()
- sunion.append(select)
- selected = select.selection[:]
- select_group_sort(select)
- step = AggrStep(plan, selected, select, atemptable, temptable)
- step.set_limit_offset(select.limit, select.offset)
- select.limit = None
- select.offset = 0
- fstep = FetchStep(plan, sunion, sources, atemptable, True, inputmap)
- step.children.append(fstep)
- steps.append(step)
- else:
- byinputmap.setdefault(tuple(inputmap.iteritems()), []).append( (select) )
- for inputmap, queries in byinputmap.iteritems():
- inputmap = dict(inputmap)
- sunion = Union()
- for select in queries:
- sunion.append(select)
- if temptable:
- steps.append(FetchStep(plan, sunion, sources, temptable, True, inputmap))
- else:
- steps.append(OneFetchStep(plan, sunion, sources, inputmap))
-        # then add steps for split query parts
- for planinfo in tosplit:
- steps.append(self.split_part(planinfo, temptable))
- if len(steps) > 1:
- if temptable:
- step = UnionFetchStep(plan)
- else:
- step = UnionStep(plan)
- step.children = steps
- return (step,)
- return steps
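
Since dicts are not hashable, `byinputmap` above is keyed on
`tuple(inputmap.iteritems())`. A sketch of that grouping idiom (with sorting
added here to make the key canonical)::

    queries = [({'A': 't1.C0'}, 'q1'), ({'A': 't1.C0'}, 'q2'), ({}, 'q3')]
    byinputmap = {}
    for inputmap, query in queries:
        # freeze the dict into a hashable, canonical key
        byinputmap.setdefault(tuple(sorted(inputmap.items())), []).append(query)
    assert byinputmap == {(('A', 't1.C0'),): ['q1', 'q2'], (): ['q3']}
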
-
- # internal methods for multisources decomposition #########################
-
- def split_part(self, ppi, temptable):
- ppi.finaltable = temptable
- plan = ppi.plan
- select = ppi.rqlst
- subinputmap = self._ppi_subqueries(ppi)
- stepdefs = ppi.part_steps()
- if need_aggr_step(select, ppi.part_sources, stepdefs):
- atemptable = plan.make_temp_table_name('T%s' % make_uid(id(select)))
- selection = select.selection[:]
- select_group_sort(select)
- else:
- atemptable = None
- selection = select.selection
- ppi.temptable = atemptable
- vfilter = TermsFiltererVisitor(self.schema, ppi)
- steps = []
- multifinal = len([x for x in stepdefs if x[-1]]) >= 2
- for sources, terms, solindices, scope, needsel, final in stepdefs:
- # extract an executable query using only the specified terms
- if sources[0].uri == 'system':
-                # in this case we have to merge input maps before calling
-                # filter so already processed restrictions are correctly
-                # removed
- solsinputmaps = ppi.merge_input_maps(
- solindices, complete=not (final and multifinal))
- for solindices, inputmap in solsinputmaps:
- minrqlst, insertedvars = vfilter.filter(
- sources, terms, scope, set(solindices), needsel, final)
- if inputmap is None:
- inputmap = subinputmap
- else:
- inputmap.update(subinputmap)
- steps.append(ppi.build_final_part(minrqlst, solindices, inputmap,
- sources, insertedvars))
- else:
-                # this is a final part (i.e. retrieving results for the
-                # original query part) if all terms / sources have been
-                # treated or if this is the last shot for used solutions
- minrqlst, insertedvars = vfilter.filter(
- sources, terms, scope, solindices, needsel, final)
- if final:
- solsinputmaps = ppi.merge_input_maps(
- solindices, complete=not (final and multifinal))
- if len(solsinputmaps) > 1:
- refrqlst = minrqlst
- for solindices, inputmap in solsinputmaps:
- if inputmap is None:
- inputmap = subinputmap
- else:
- inputmap.update(subinputmap)
- if len(solsinputmaps) > 1:
- minrqlst = refrqlst.copy()
- sources = sources[:]
- if inputmap and len(sources) > 1:
- sources.remove(ppi.system_source)
- steps.append(ppi.build_final_part(minrqlst, solindices, None,
- sources, insertedvars))
- steps.append(ppi.build_final_part(minrqlst, solindices, inputmap,
- [ppi.system_source], insertedvars))
- else:
- steps.append(ppi.build_final_part(minrqlst, solindices, inputmap,
- sources, insertedvars))
- else:
- table = plan.make_temp_table_name('T%s' % make_uid(id(select)))
- ppi.build_non_final_part(minrqlst, solindices, sources,
- insertedvars, table)
-        # finally: join parts, deal with aggregate/group/sort if necessary
- if atemptable is not None:
- step = AggrStep(plan, selection, select, atemptable, temptable)
- step.children = steps
- elif len(steps) > 1:
- getrschema = self.schema.rschema
- if need_intersect(select, getrschema) or any(need_intersect(select, getrschema)
- for step in steps
- for select in step.union.children):
- if temptable:
- raise NotImplementedError('oops') # IntersectFetchStep(plan)
- else:
- step = IntersectStep(plan)
- else:
- if temptable:
- step = UnionFetchStep(plan)
- else:
- step = UnionStep(plan)
- step.children = steps
- else:
- step = steps[0]
- if select.limit is not None or select.offset:
- step.set_limit_offset(select.limit, select.offset)
- return step
-
-
-class UnsupportedBranch(Exception):
- pass
-
-
-class TermsFiltererVisitor(object):
- def __init__(self, schema, ppi):
- self.schema = schema
- self.ppi = ppi
- self.skip = {}
- self.hasaggrstep = self.ppi.temptable
- self.extneedsel = frozenset(vref.name for sortterm in ppi.rqlst.orderby
- for vref in sortterm.iget_nodes(VariableRef))
-
- def _rqlst_accept(self, rqlst, node, newroot, terms, setfunc=None):
- try:
- newrestr, node_ = node.accept(self, newroot, terms[:])
- except UnsupportedBranch:
- return rqlst
- if setfunc is not None and newrestr is not None:
- setfunc(newrestr)
- if not node_ is node:
- rqlst = node.parent
- return rqlst
-
- def filter(self, sources, terms, rqlst, solindices, needsel, final):
- if server.DEBUG & server.DBG_MS:
- print 'filter', final and 'final' or '', sources, terms, rqlst, solindices, needsel
- newroot = Select()
- self.sources = sorted(sources)
- self.terms = terms
- self.solindices = solindices
- self.final = final
- self._pending_vrefs = []
- # terms which appear in unsupported branches
- needsel |= self.extneedsel
- self.needsel = needsel
- # terms which appear in supported branches
- self.mayneedsel = set()
- # new inserted variables
- self.insertedvars = []
- # other structures (XXX document)
- self.mayneedvar, self.hasvar = {}, {}
- self.use_only_defined = False
- self.scopes = {rqlst: newroot}
- self.current_scope = rqlst
- if rqlst.where:
- rqlst = self._rqlst_accept(rqlst, rqlst.where, newroot, terms,
- newroot.set_where)
- if isinstance(rqlst, Select):
- self.use_only_defined = True
- if rqlst.groupby:
- groupby = []
- for node in rqlst.groupby:
- rqlst = self._rqlst_accept(rqlst, node, newroot, terms,
- groupby.append)
- if groupby:
- newroot.set_groupby(groupby)
- if rqlst.having:
- having = []
- for node in rqlst.having:
- rqlst = self._rqlst_accept(rqlst, node, newroot, terms,
- having.append)
- if having:
- newroot.set_having(having)
- if final and rqlst.orderby and not self.hasaggrstep:
- orderby = []
- for node in rqlst.orderby:
- rqlst = self._rqlst_accept(rqlst, node, newroot, terms,
- orderby.append)
- if orderby:
- newroot.set_orderby(orderby)
- elif rqlst.orderby:
- for sortterm in rqlst.orderby:
- if any(f for f in sortterm.iget_nodes(Function) if f.name == 'FTIRANK'):
- newnode, oldnode = sortterm.accept(self, newroot, terms)
- if newnode is not None:
- newroot.add_sort_term(newnode)
- self.process_selection(newroot, terms, rqlst)
- elif not newroot.where:
- # no restrictions have been copied, just select terms and add
- # type restriction (done later by add_types_restriction)
- for v in terms:
- if not isinstance(v, Variable):
- continue
- newroot.append_selected(VariableRef(newroot.get_variable(v.name)))
- solutions = self.ppi.copy_solutions(solindices)
- cleanup_solutions(newroot, solutions)
- newroot.set_possible_types(solutions)
- if final:
- if self.hasaggrstep:
- self.add_necessary_selection(newroot, self.mayneedsel & self.extneedsel)
- newroot.distinct = rqlst.distinct
- else:
- self.add_necessary_selection(newroot, self.mayneedsel & self.needsel)
- # insert vars to fetch constant values when needed
- for (varname, rschema), reldefs in self.mayneedvar.iteritems():
- for rel, ored in reldefs:
- if not (varname, rschema) in self.hasvar:
- self.hasvar[(varname, rschema)] = None # just to avoid further insertion
- cvar = newroot.make_variable()
- for sol in newroot.solutions:
- sol[cvar.name] = rschema.objects(sol[varname])[0]
- # if the current restriction is not used in a OR branch,
- # we can keep it, else we have to drop the constant
- # restriction (or we may miss some results)
- if not ored:
- rel = rel.copy(newroot)
- newroot.add_restriction(rel)
- # add a relation to link the variable
- newroot.remove_node(rel.children[1])
- cmp = Comparison('=')
- rel.append(cmp)
- cmp.append(VariableRef(cvar))
- self.insertedvars.append((varname, rschema, cvar.name))
- newroot.append_selected(VariableRef(newroot.get_variable(cvar.name)))
- # NOTE: even if the restriction is done by this query, we have
- # to let it in the original rqlst so that it appears anyway in
- # the "final" query, else we may change the meaning of the query
-                    # if there is a NOT somewhere:
- # 'NOT X relation Y, Y name "toto"' means X WHERE X isn't related
- # to Y whose name is toto while
- # 'NOT X relation Y' means X WHERE X has no 'relation' (whatever Y)
- elif ored:
- newroot.remove_node(rel)
- add_types_restriction(self.schema, rqlst, newroot, solutions)
- if server.DEBUG & server.DBG_MS:
- print '--->', newroot
- return newroot, self.insertedvars
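
The NOT subtlety documented in the long note above can be modelled with plain
sets; a toy sketch showing why the constant restriction cannot simply be
dropped under a NOT::

    related = {('X1', 'Ytoto'), ('X2', 'Ytata')}   # (X, Y) pairs for 'relation'
    name = {'Ytoto': 'toto', 'Ytata': 'tata'}
    xs = {'X1', 'X2', 'X3'}
    # NOT (X relation Y, Y name "toto"): X related to no Y named toto
    q1 = {x for x in xs
          if not any(s == x and name[y] == 'toto' for s, y in related)}
    # NOT X relation Y: X related to nothing at all
    q2 = {x for x in xs if not any(s == x for s, y in related)}
    assert q1 == {'X2', 'X3'} and q2 == {'X3'}     # different queries indeed
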
-
- def visit_and(self, node, newroot, terms):
- subparts = []
- for i in xrange(len(node.children)):
- child = node.children[i]
- try:
- newchild, child_ = child.accept(self, newroot, terms)
- if not child_ is child:
- node = child_.parent
- if newchild is None:
- continue
- subparts.append(newchild)
- except UnsupportedBranch:
- continue
- if not subparts:
- return None, node
- if len(subparts) == 1:
- return subparts[0], node
- return copy_node(newroot, node, subparts), node
-
- visit_or = visit_and
-
- def _relation_supported(self, relation):
- rtype = relation.r_type
- for source in self.sources:
- if not source.support_relation(rtype) or (
- rtype in source.cross_relations and not relation in self.terms):
- return False
- if not self.final and not relation in self.terms:
- rschema = self.schema.rschema(relation.r_type)
- if not rschema.final:
- for term in relation.get_nodes((VariableRef, Constant)):
- term = getattr(term, 'variable', term)
- termsources = sorted(set(x[0] for x in self.ppi._term_sources(term)))
- if termsources and termsources != self.sources:
- return False
- return True
-
- def visit_relation(self, node, newroot, terms):
- if not node.is_types_restriction():
- if not node in terms and node in self.skip and self.solindices.issubset(self.skip[node]):
- return None, node
- if not self._relation_supported(node):
- raise UnsupportedBranch()
- # don't copy type restriction unless this is the only supported relation
- # for the lhs variable, else they'll be reinserted later as needed (in
- # other cases we may copy a type restriction while the variable is not
- # actually used)
- elif not (node.neged(strict=True) or
- any(self._relation_supported(rel)
- for rel in node.children[0].variable.stinfo['relations'])):
- return self.visit_default(node, newroot, terms)
- else:
- raise UnsupportedBranch()
- rschema = self.schema.rschema(node.r_type)
- self._pending_vrefs = []
- try:
- res = self.visit_default(node, newroot, terms)[0]
- except Exception:
- # when a relation isn't supported, we should dereference potentially
- # introduced variable refs
- for vref in self._pending_vrefs:
- vref.unregister_reference()
- raise
- ored = node.ored()
- if rschema.final or rschema.inlined:
- vrefs = node.children[1].get_nodes(VariableRef)
- if not vrefs:
- if not ored:
- self.skip.setdefault(node, set()).update(self.solindices)
- else:
- self.mayneedvar.setdefault((node.children[0].name, rschema), []).append( (res, ored) )
- else:
- assert len(vrefs) == 1
- vref = vrefs[0]
- # XXX check operator ?
- self.hasvar[(node.children[0].name, rschema)] = vref
- if self._may_skip_attr_rel(rschema, node, vref, ored, terms, res):
- self.skip.setdefault(node, set()).update(self.solindices)
- elif not ored:
- self.skip.setdefault(node, set()).update(self.solindices)
- return res, node
-
- def _may_skip_attr_rel(self, rschema, rel, vref, ored, terms, res):
- var = vref.variable
- if ored:
- return False
- if var.name in self.extneedsel or var.stinfo['selected']:
- return False
- if not var in terms or used_in_outer_scope(var, self.current_scope):
- return False
- if any(v for v, _ in var.stinfo.get('attrvars', ()) if not v in terms):
- return False
- return True
-
- def visit_exists(self, node, newroot, terms):
- newexists = node.__class__()
- self.scopes = {node: newexists}
- subparts, node = self._visit_children(node, newroot, terms)
- if not subparts:
- return None, node
- newexists.set_where(subparts[0])
- return newexists, node
-
- def visit_not(self, node, newroot, terms):
- subparts, node = self._visit_children(node, newroot, terms)
- if not subparts:
- return None, node
- return copy_node(newroot, node, subparts), node
-
- def visit_group(self, node, newroot, terms):
- if not self.final:
- return None, node
- return self.visit_default(node, newroot, terms)
-
- def visit_variableref(self, node, newroot, terms):
- if self.use_only_defined:
- if not node.variable.name in newroot.defined_vars:
- raise UnsupportedBranch(node.name)
- elif not node.variable in terms:
- raise UnsupportedBranch(node.name)
- self.mayneedsel.add(node.name)
- # set scope so we can insert types restriction properly
- newvar = newroot.get_variable(node.name)
- newvar.stinfo['scope'] = self.scopes.get(node.variable.scope, newroot)
- vref = VariableRef(newvar)
- self._pending_vrefs.append(vref)
- return vref, node
-
- def visit_constant(self, node, newroot, terms):
- return copy_node(newroot, node), node
-
- def visit_comparison(self, node, newroot, terms):
- subparts, node = self._visit_children(node, newroot, terms)
- copy = copy_node(newroot, node, subparts)
- # ignore comparison operator when fetching non final query
- if not self.final and isinstance(node.children[0], VariableRef):
- copy.operator = '='
- return copy, node
-
- def visit_function(self, node, newroot, terms):
- if node.name == 'FTIRANK':
-            # FTIRANK is somewhat special... The rank function should be included
-            # in the same query as the has_text relation, and potentially added
-            # to the selection for later usage
- if not self.hasaggrstep and self.final and node not in self.skip:
- return self.visit_default(node, newroot, terms)
- elif any(s for s in self.sources if s.uri != 'system'):
- return None, node
- # p = node.parent
- # while p is not None and not isinstance(p, SortTerm):
- # p = p.parent
- # if isinstance(p, SortTerm):
- if not self.hasaggrstep and self.final and node in self.skip:
- return Constant(self.skip[node], 'Int'), node
- # XXX only if not yet selected
- newroot.append_selected(node.copy(newroot))
- self.skip[node] = len(newroot.selection)
- return None, node
- return self.visit_default(node, newroot, terms)
-
- def visit_default(self, node, newroot, terms):
- subparts, node = self._visit_children(node, newroot, terms)
- return copy_node(newroot, node, subparts), node
-
- visit_mathexpression = visit_constant = visit_default
-
- def visit_sortterm(self, node, newroot, terms):
- subparts, node = self._visit_children(node, newroot, terms)
- if not subparts:
- return None, node
- return copy_node(newroot, node, subparts), node
-
- def _visit_children(self, node, newroot, terms):
- subparts = []
- for i in xrange(len(node.children)):
- child = node.children[i]
- newchild, child_ = child.accept(self, newroot, terms)
- if not child is child_:
- node = child_.parent
- if newchild is not None:
- subparts.append(newchild)
- return subparts, node
-
- def process_selection(self, newroot, terms, rqlst):
- if self.final:
- for term in rqlst.selection:
- newroot.append_selected(term.copy(newroot))
- for vref in term.get_nodes(VariableRef):
- self.needsel.add(vref.name)
- return
- for term in rqlst.selection:
- vrefs = term.get_nodes(VariableRef)
- if vrefs:
- supportedvars = []
- for vref in vrefs:
- var = vref.variable
- if var in terms:
- supportedvars.append(vref)
- continue
- else:
- self.needsel.add(vref.name)
- break
- else:
- for vref in vrefs:
- newroot.append_selected(vref.copy(newroot))
- supportedvars = []
- for vref in supportedvars:
- if not vref in newroot.get_selected_variables():
- newroot.append_selected(VariableRef(newroot.get_variable(vref.name)))
- elif term in self.terms:
- newroot.append_selected(term.copy(newroot))
-
- def add_necessary_selection(self, newroot, terms):
- selected = tuple(newroot.get_selected_variables())
- for varname in terms:
- var = newroot.defined_vars[varname]
- for vref in var.references():
- rel = vref.relation()
- if rel is None and vref in selected:
- # already selected
- break
- else:
- selvref = VariableRef(var)
- newroot.append_selected(selvref)
- if newroot.groupby:
- newroot.add_group_var(VariableRef(selvref.variable, noautoref=1))
--- a/server/mssteps.py Tue Jun 10 09:35:26 2014 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,309 +0,0 @@
-# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
-# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
-#
-# This file is part of CubicWeb.
-#
-# CubicWeb is free software: you can redistribute it and/or modify it under the
-# terms of the GNU Lesser General Public License as published by the Free
-# Software Foundation, either version 2.1 of the License, or (at your option)
-# any later version.
-#
-# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
-# details.
-#
-# You should have received a copy of the GNU Lesser General Public License along
-# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""Defines the diferent querier steps usable in plans.
-
-FIXME: this code needs refactoring. Some problems:
-* get data from the parent plan, the latest step, temporary table...
-* each step has its own members (this is not necessarily bad, but a bit messy
-  for now)
-"""
-__docformat__ = "restructuredtext en"
-
-from rql.nodes import VariableRef, Variable, Function
-
-from cubicweb.server.ssplanner import (LimitOffsetMixIn, Step, OneFetchStep,
- varmap_test_repr, offset_result)
-
-AGGR_TRANSFORMS = {'COUNT':'SUM', 'MIN':'MIN', 'MAX':'MAX', 'SUM': 'SUM'}
-
-class remove_and_restore_clauses(object):
- def __init__(self, union, keepgroup):
- self.union = union
- self.keepgroup = keepgroup
- self.clauses = None
-
- def __enter__(self):
- self.clauses = clauses = []
- for select in self.union.children:
- if self.keepgroup:
- having, orderby = select.having, select.orderby
- select.having, select.orderby = (), ()
- clauses.append( (having, orderby) )
- else:
- groupby, having, orderby = select.groupby, select.having, select.orderby
- select.groupby, select.having, select.orderby = (), (), ()
- clauses.append( (groupby, having, orderby) )
-
- def __exit__(self, exctype, exc, traceback):
- for i, select in enumerate(self.union.children):
- if self.keepgroup:
- select.having, select.orderby = self.clauses[i]
- else:
- select.groupby, select.having, select.orderby = self.clauses[i]
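
For illustration, the same save-and-restore pattern can be written generically
with contextlib (a sketch, not how the class above is implemented)::

    from contextlib import contextmanager

    @contextmanager
    def stripped(node, *attrs):
        # blank the given clause attributes, restoring them on exit
        saved = [(attr, getattr(node, attr)) for attr in attrs]
        for attr in attrs:
            setattr(node, attr, ())
        try:
            yield node
        finally:
            for attr, value in saved:
                setattr(node, attr, value)

    class FakeSelect(object):            # hypothetical stand-in for rql's Select
        groupby, having, orderby = ('X',), (), ('X',)

    select = FakeSelect()
    with stripped(select, 'groupby', 'having', 'orderby'):
        assert select.orderby == ()      # clauses removed inside the block
    assert select.orderby == ('X',)      # and restored afterwards
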
-
-
-class FetchStep(OneFetchStep):
- """step consisting in fetching data from sources, and storing result in
- a temporary table
- """
- def __init__(self, plan, union, sources, table, keepgroup, inputmap=None):
- OneFetchStep.__init__(self, plan, union, sources)
- # temporary table to store step result
- self.table = table
- # should groupby clause be kept or not
- self.keepgroup = keepgroup
- # variables mapping to use as input
- self.inputmap = inputmap
- # output variable mapping
- srqlst = union.children[0] # sample select node
- # add additional information to the output mapping
- self.outputmap = plan.init_temp_table(table, srqlst.selection,
- srqlst.solutions[0])
- for vref in srqlst.selection:
- if not isinstance(vref, VariableRef):
- continue
- var = vref.variable
- if var.stinfo.get('attrvars'):
- for lhsvar, rtype in var.stinfo['attrvars']:
- if lhsvar.name in srqlst.defined_vars:
- key = '%s.%s' % (lhsvar.name, rtype)
- self.outputmap[key] = self.outputmap[var.name]
- else:
- rschema = self.plan.schema.rschema
- for rel in var.stinfo['rhsrelations']:
- if rschema(rel.r_type).inlined:
- lhsvar = rel.children[0]
- if lhsvar.name in srqlst.defined_vars:
- key = '%s.%s' % (lhsvar.name, rel.r_type)
- self.outputmap[key] = self.outputmap[var.name]
-
- def execute(self):
- """execute this step"""
- self.execute_children()
- plan = self.plan
- plan.create_temp_table(self.table)
- union = self.union
- with remove_and_restore_clauses(union, self.keepgroup):
- for source in self.sources:
- source.flying_insert(self.table, plan.session, union, plan.args,
- self.inputmap)
-
- def mytest_repr(self):
- """return a representation of this step suitable for test"""
- with remove_and_restore_clauses(self.union, self.keepgroup):
- try:
- inputmap = varmap_test_repr(self.inputmap, self.plan.tablesinorder)
- outputmap = varmap_test_repr(self.outputmap, self.plan.tablesinorder)
- except AttributeError:
- inputmap = self.inputmap
- outputmap = self.outputmap
- return (self.__class__.__name__,
- sorted((r.as_string(kwargs=self.plan.args), r.solutions)
- for r in self.union.children),
- sorted(self.sources), inputmap, outputmap)
-
-
-class AggrStep(LimitOffsetMixIn, Step):
- """step consisting in making aggregat from temporary data in the system
- source
- """
- def __init__(self, plan, selection, select, table, outputtable=None):
- Step.__init__(self, plan)
- # original selection
- self.selection = selection
- # original Select RQL tree
- self.select = select
-        # table where temporary results are located
- self.table = table
- # optional table where to write results
- self.outputtable = outputtable
- if outputtable is not None:
- plan.init_temp_table(outputtable, selection, select.solutions[0])
-
- #self.inputmap = inputmap
-
- def mytest_repr(self):
- """return a representation of this step suitable for test"""
- try:
- # rely on a monkey patch (cf unittest_querier)
- table = self.plan.tablesinorder[self.table]
- outputtable = self.outputtable and self.plan.tablesinorder[self.outputtable]
- except AttributeError:
- # not monkey patched
- table = self.table
- outputtable = self.outputtable
- sql = self.get_sql().replace(self.table, table)
- return (self.__class__.__name__, sql, outputtable)
-
- def execute(self):
- """execute this step"""
- self.execute_children()
- sql = self.get_sql()
- if self.outputtable:
- self.plan.create_temp_table(self.outputtable)
- sql = 'INSERT INTO %s %s' % (self.outputtable, sql)
- self.plan.syssource.doexec(self.plan.session, sql, self.plan.args)
- else:
- return self.plan.sqlexec(sql, self.plan.args)
-
- def get_sql(self):
- self.inputmap = inputmap = self.children[-1].outputmap
- dbhelper=self.plan.syssource.dbhelper
- # get the select clause
- clause = []
- for i, term in enumerate(self.selection):
- try:
- var_name = inputmap[term.as_string()]
- except KeyError:
- var_name = 'C%s' % i
- if isinstance(term, Function):
-                # we have to translate some aggregate functions
-                # (for instance COUNT -> SUM)
- orig_name = term.name
- try:
- term.name = AGGR_TRANSFORMS[term.name]
- # backup and reduce children
- orig_children = term.children
- term.children = [VariableRef(Variable(var_name))]
- clause.append(term.accept(self))
-                    # restore the tree XXX necessary?
- term.name = orig_name
- term.children = orig_children
- except KeyError:
- clause.append(var_name)
- else:
- clause.append(var_name)
- for vref in term.iget_nodes(VariableRef):
- inputmap[vref.name] = var_name
- # XXX handle distinct with non selected sort term
- if self.select.distinct:
- sql = ['SELECT DISTINCT %s' % ', '.join(clause)]
- else:
- sql = ['SELECT %s' % ', '.join(clause)]
- sql.append("FROM %s" % self.table)
- # get the group/having clauses
- if self.select.groupby:
- clause = [inputmap[var.name] for var in self.select.groupby]
- grouped = set(var.name for var in self.select.groupby)
- sql.append('GROUP BY %s' % ', '.join(clause))
- else:
- grouped = None
- if self.select.having:
- clause = [term.accept(self) for term in self.select.having]
- sql.append('HAVING %s' % ', '.join(clause))
- # get the orderby clause
- if self.select.orderby:
- clause = []
- for sortterm in self.select.orderby:
- sqlterm = sortterm.term.accept(self)
- if sortterm.asc:
- clause.append(sqlterm)
- else:
- clause.append('%s DESC' % sqlterm)
- if grouped is not None:
- for vref in sortterm.iget_nodes(VariableRef):
- if not vref.name in grouped:
- sql[-1] += ', ' + self.inputmap[vref.name]
- grouped.add(vref.name)
- sql = dbhelper.sql_add_order_by(' '.join(sql),
- clause,
- None, False,
- self.limit or self.offset)
- else:
- sql = ' '.join(sql)
- clause = None
-
- sql = dbhelper.sql_add_limit_offset(sql, self.limit, self.offset, clause)
- return sql
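
The COUNT -> SUM entry of AGGR_TRANSFORMS used above is the key rewrite: each
fetch step has already counted its own rows into the temporary table, so the
aggregation step must sum those partial counts instead of counting them again.
A toy illustration with sqlite::

    import sqlite3

    cnx = sqlite3.connect(':memory:')
    cnx.execute('CREATE TABLE T0 (grp TEXT, partial_count INTEGER)')
    # pretend two sources contributed pre-counted rows for group 'a'
    cnx.executemany('INSERT INTO T0 VALUES (?, ?)',
                    [('a', 2), ('a', 3), ('b', 1)])
    rows = cnx.execute('SELECT grp, SUM(partial_count) FROM T0 GROUP BY grp')
    assert sorted(rows) == [('a', 5), ('b', 1)]  # COUNT would give 2 and 1
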
-
- def visit_function(self, function):
- """generate SQL name for a function"""
- try:
- return self.children[0].outputmap[str(function)]
- except KeyError:
- return '%s(%s)' % (function.name,
- ','.join(c.accept(self) for c in function.children))
-
- def visit_variableref(self, variableref):
- """get the sql name for a variable reference"""
- try:
- return self.inputmap[variableref.name]
- except KeyError: # XXX duh? explain
- return variableref.variable.name
-
- def visit_constant(self, constant):
- """generate SQL name for a constant"""
- assert constant.type == 'Int'
- return str(constant.value)
-
-
-class UnionStep(LimitOffsetMixIn, Step):
- """union results of child in-memory steps (e.g. OneFetchStep / AggrStep)"""
-
- def execute(self):
- """execute this step"""
- result = []
- limit = olimit = self.limit
- offset = self.offset
- assert offset != 0
- if offset is not None:
- limit = limit + offset
- for step in self.children:
- if limit is not None:
- if offset is None:
- limit = olimit - len(result)
- step.set_limit_offset(limit, None)
- result_ = step.execute()
- if offset is not None:
- offset, result_ = offset_result(offset, result_)
- result += result_
- if limit is not None:
- if len(result) >= olimit:
- return result[:olimit]
- return result
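
The limit/offset bookkeeping above relies on the offset being progressively
consumed across successive child results. A simplified stand-in for the
offset_result helper imported from ssplanner, to show the idea::

    def consume_offset(offset, rows):
        # drop up to `offset` rows, returning the offset left to consume
        return max(offset - len(rows), 0), rows[offset:]

    offset, merged = 3, []
    for chunk in ([1, 2], [3, 4, 5], [6]):
        offset, kept = consume_offset(offset, chunk)
        merged += kept
    assert merged == [4, 5, 6]
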
-
- def mytest_repr(self):
- """return a representation of this step suitable for test"""
- return (self.__class__.__name__, self.limit, self.offset)
-
-
-class IntersectStep(UnionStep):
- """return intersection of results of child in-memory steps (e.g. OneFetchStep / AggrStep)"""
-
- def execute(self):
- """execute this step"""
-        result = None
-        for step in self.children:
-            # intersect from the first step's results: starting from an
-            # empty set would always yield an empty intersection
-            rows = frozenset(step.execute())
-            result = rows if result is None else result & rows
-        result = list(result or ())
- if self.offset:
- result = result[self.offset:]
- if self.limit:
- result = result[:self.limit]
- return result
-
-
-class UnionFetchStep(Step):
- """union results of child steps using temporary tables (e.g. FetchStep)"""
-
- def execute(self):
- """execute this step"""
- self.execute_children()
-
-
-__all__ = ('FetchStep', 'AggrStep', 'UnionStep', 'UnionFetchStep', 'IntersectStep')
--- a/server/pool.py Tue Jun 10 09:35:26 2014 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,160 +0,0 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
-# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
-#
-# This file is part of CubicWeb.
-#
-# CubicWeb is free software: you can redistribute it and/or modify it under the
-# terms of the GNU Lesser General Public License as published by the Free
-# Software Foundation, either version 2.1 of the License, or (at your option)
-# any later version.
-#
-# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
-# details.
-#
-# You should have received a copy of the GNU Lesser General Public License along
-# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""CubicWeb server connections set : the repository has a limited number of
-:class:`ConnectionsSet` (defined in configuration, default to 4). Each of them
-hold a connection for each source used by the repository.
-"""
-
-__docformat__ = "restructuredtext en"
-
-import sys
-
-class ConnectionsSet(object):
- """handle connections on a set of sources, at some point associated to a
- :class:`Session`
- """
-
- def __init__(self, sources):
-        # dictionary of (source, connection), indexed by source uri
- self.source_cnxs = {}
- for source in sources:
- self.add_source(source)
- if not 'system' in self.source_cnxs:
- self.source_cnxs['system'] = self.source_cnxs[sources[0].uri]
- self._cursors = {}
-
- def __getitem__(self, uri):
- """subscription notation provide access to sources'cursors"""
- try:
- cursor = self._cursors[uri]
- except KeyError:
- cursor = self.source_cnxs[uri][1].cursor()
- if cursor is not None:
- # None possible on sources without cursor support such as ldap
- self._cursors[uri] = cursor
- return cursor
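
Cursors are thus created lazily per source uri and cached for reuse. The same
pattern in a minimal form (a generic sketch, ignoring the cursor-less ldap
case handled above)::

    class LazyCursors(object):
        def __init__(self, cnxs):
            self._cnxs = cnxs        # uri -> DB-API connection
            self._cursors = {}       # uri -> cached cursor
        def __getitem__(self, uri):
            if uri not in self._cursors:
                self._cursors[uri] = self._cnxs[uri].cursor()
            return self._cursors[uri]
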
-
- def add_source(self, source):
- assert not source.uri in self.source_cnxs
- self.source_cnxs[source.uri] = (source, source.get_connection())
-
- def remove_source(self, source):
- source, cnx = self.source_cnxs.pop(source.uri)
- cnx.close()
- self._cursors.pop(source.uri, None)
-
- def commit(self):
- """commit the current transaction for this user"""
-        # FIXME: what happens if a commit fails?
-        # would need a two-phase commit or the like, but I don't know how to
-        # do this using the db-api...
- for source, cnx in self.source_cnxs.itervalues():
-            # let exceptions propagate
- cnx.commit()
-
- def rollback(self):
- """rollback the current transaction for this user"""
- for source, cnx in self.source_cnxs.itervalues():
- # catch exceptions, rollback other sources anyway
- try:
- cnx.rollback()
- except Exception:
- source.critical('rollback error', exc_info=sys.exc_info())
-                # error on rollback, the connection is most probably in a really
-                # bad state. Replace it with a new one.
- self.reconnect(source)
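
The loop above does a best-effort rollback: a failure is logged and the
offending connection replaced, rather than aborting the remaining rollbacks.
The same pattern in isolation (a generic sketch)::

    def rollback_all(connections, reconnect):
        # connections: name -> DB-API connection; reconnect: name -> fresh one
        for name, cnx in list(connections.items()):
            try:
                cnx.rollback()
            except Exception:
                # a failed rollback usually means the connection is dead:
                # replace it instead of propagating and skipping the others
                connections[name] = reconnect(name)
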
-
- def close(self, i_know_what_i_do=False):
- """close all connections in the set"""
- if i_know_what_i_do is not True: # unexpected closing safety belt
- raise RuntimeError('connections set shouldn\'t be closed')
- for cu in self._cursors.itervalues():
- try:
- cu.close()
- except Exception:
- continue
- for _, cnx in self.source_cnxs.itervalues():
- try:
- cnx.close()
- except Exception:
- continue
-
- # internals ###############################################################
-
- def cnxset_set(self):
- """connections set is being set on a session"""
- self.check_connections()
-
- def cnxset_freed(self):
- """connections set is being freed from a session"""
- for source, cnx in self.source_cnxs.itervalues():
- source.cnxset_freed(cnx)
-
- def sources(self):
- """return the source objects handled by this connections set"""
-        # implementation details of flying insert require the system source
-        # first
- yield self.source_cnxs['system'][0]
- for uri, (source, cnx) in self.source_cnxs.items():
- if uri == 'system':
- continue
- yield source
- #return [source_cnx[0] for source_cnx in self.source_cnxs.itervalues()]
-
- def source(self, uid):
- """return the source object with the given uri"""
- return self.source_cnxs[uid][0]
-
- def connection(self, uid):
- """return the connection on the source object with the given uri"""
- return self.source_cnxs[uid][1]
-
- def reconnect(self, source=None):
- """reopen a connection for this source or all sources if none specified
- """
- if source is None:
- sources = self.sources()
- else:
- sources = (source,)
- for source in sources:
- try:
- # properly close existing connection if any
- self.source_cnxs[source.uri][1].close()
- except Exception:
- pass
- source.info('trying to reconnect')
- self.source_cnxs[source.uri] = (source, source.get_connection())
- self._cursors.pop(source.uri, None)
-
- def check_connections(self):
- for source, cnx in self.source_cnxs.itervalues():
- newcnx = source.check_connection(cnx)
- if newcnx is not None:
- self.reset_connection(source, newcnx)
-
- def reset_connection(self, source, cnx):
- self.source_cnxs[source.uri] = (source, cnx)
- self._cursors.pop(source.uri, None)
-
-
-from cubicweb.server.hook import Operation, LateOperation, SingleLastOperation
-from logilab.common.deprecation import class_moved, class_renamed
-Operation = class_moved(Operation)
-PreCommitOperation = class_renamed('PreCommitOperation', Operation)
-LateOperation = class_moved(LateOperation)
-SingleLastOperation = class_moved(SingleLastOperation)
--- a/server/querier.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/querier.py Tue Jun 10 09:49:45 2014 +0200
@@ -36,7 +36,7 @@
from cubicweb.server.rqlannotation import SQLGenAnnotator, set_qdata
from cubicweb.server.ssplanner import READ_ONLY_RTYPES, add_types_restriction
from cubicweb.server.edition import EditedEntity
-
+from cubicweb.server.ssplanner import SSPlanner
ETYPE_PYOBJ_MAP[Binary] = 'Bytes'
@@ -64,16 +64,16 @@
if etype == 'Password':
raise Unauthorized('Password selection is not allowed (%s)' % var)
-def term_etype(session, term, solution, args):
+def term_etype(cnx, term, solution, args):
"""return the entity type for the given term (a VariableRef or a Constant
node)
"""
try:
return solution[term.name]
except AttributeError:
- return session.describe(term.eval(args))[0]
+ return cnx.entity_metas(term.eval(args))['type']
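
This is the recurring 3.19 API migration in this changeset: the describe()
tuple gives way to the entity_metas() dict. A compatibility sketch (key names
as used in this changeset)::

    def entity_type(cnx, eid):
        # entity_metas(eid) returns a dict with at least a 'type' key,
        # replacing the old describe(eid)[0] tuple access
        return cnx.entity_metas(eid)['type']
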
-def check_read_access(session, rqlst, solution, args):
+def check_read_access(cnx, rqlst, solution, args):
"""Check that the given user has credentials to access data read by the
query and return a dict defining necessary "local checks" (i.e. rql
expression in read permission defined in the schema) where no group grants
@@ -86,7 +86,7 @@
# when used as an external source by another repository.
# XXX what about local read security w/ those rewritten constants...
DBG = (server.DEBUG & server.DBG_SEC) and 'read' in server._SECURITY_CAPS
- schema = session.repo.schema
+ schema = cnx.repo.schema
if rqlst.where is not None:
for rel in rqlst.where.iget_nodes(Relation):
# XXX has_text may have specific perm ?
@@ -94,37 +94,37 @@
continue
rschema = schema.rschema(rel.r_type)
if rschema.final:
- eschema = schema.eschema(term_etype(session, rel.children[0],
+ eschema = schema.eschema(term_etype(cnx, rel.children[0],
solution, args))
rdef = eschema.rdef(rschema)
else:
- rdef = rschema.rdef(term_etype(session, rel.children[0],
+ rdef = rschema.rdef(term_etype(cnx, rel.children[0],
solution, args),
- term_etype(session, rel.children[1].children[0],
+ term_etype(cnx, rel.children[1].children[0],
solution, args))
- if not session.user.matching_groups(rdef.get_groups('read')):
+ if not cnx.user.matching_groups(rdef.get_groups('read')):
if DBG:
print ('check_read_access: %s %s does not match %s' %
- (rdef, session.user.groups, rdef.get_groups('read')))
+ (rdef, cnx.user.groups, rdef.get_groups('read')))
# XXX rqlexpr not allowed
raise Unauthorized('read', rel.r_type)
if DBG:
print ('check_read_access: %s %s matches %s' %
- (rdef, session.user.groups, rdef.get_groups('read')))
+ (rdef, cnx.user.groups, rdef.get_groups('read')))
localchecks = {}
# iterate on defined_vars and not on solutions to ignore column aliases
for varname in rqlst.defined_vars:
eschema = schema.eschema(solution[varname])
if eschema.final:
continue
- if not session.user.matching_groups(eschema.get_groups('read')):
+ if not cnx.user.matching_groups(eschema.get_groups('read')):
erqlexprs = eschema.get_rqlexprs('read')
if not erqlexprs:
ex = Unauthorized('read', solution[varname])
ex.var = varname
if DBG:
print ('check_read_access: %s %s %s %s' %
- (varname, eschema, session.user.groups, eschema.get_groups('read')))
+ (varname, eschema, cnx.user.groups, eschema.get_groups('read')))
raise ex
# don't insert security on variable only referenced by 'NOT X relation Y' or
# 'NOT EXISTS(X relation Y)'
@@ -144,23 +144,21 @@
class ExecutionPlan(object):
"""the execution model of a rql query, composed of querier steps"""
- def __init__(self, querier, rqlst, args, session):
+ def __init__(self, querier, rqlst, args, cnx):
# original rql syntax tree
self.rqlst = rqlst
self.args = args or {}
- # session executing the query
- self.session = session
+ # cnx executing the query
+ self.cnx = cnx
# quick reference to the system source
- self.syssource = session.cnxset.source('system')
+ self.syssource = cnx.repo.system_source
# execution steps
self.steps = []
- # index of temporary tables created during execution
- self.temp_tables = {}
         # various resource accessors
self.querier = querier
self.schema = querier.schema
self.sqlannotate = querier.sqlgen_annotate
- self.rqlhelper = session.vreg.rqlhelper
+ self.rqlhelper = cnx.vreg.rqlhelper
def annotate_rqlst(self):
if not self.rqlst.annotated:
@@ -170,49 +168,15 @@
"""add a step to the plan"""
self.steps.append(step)
- def clean(self):
- """remove temporary tables"""
- self.syssource.clean_temp_data(self.session, self.temp_tables)
-
def sqlexec(self, sql, args=None):
- return self.syssource.sqlexec(self.session, sql, args)
+ return self.syssource.sqlexec(self.cnx, sql, args)
def execute(self):
"""execute a plan and return resulting rows"""
- try:
- for step in self.steps:
- result = step.execute()
- # the latest executed step contains the full query result
- return result
- finally:
- self.clean()
-
- def make_temp_table_name(self, table):
- """
- return a temp table name according to db backend
- """
- return self.syssource.make_temp_table_name(table)
-
-
- def init_temp_table(self, table, selected, sol):
- """initialize sql schema and variable map for a temporary table which
- will be used to store result for the given rqlst
- """
- try:
- outputmap, sqlschema, _ = self.temp_tables[table]
- update_varmap(outputmap, selected, table)
- except KeyError:
- sqlschema, outputmap = self.syssource.temp_table_def(selected, sol,
- table)
- self.temp_tables[table] = [outputmap, sqlschema, False]
- return outputmap
-
- def create_temp_table(self, table):
- """create a temporary table to store result for the given rqlst"""
- if not self.temp_tables[table][-1]:
- sqlschema = self.temp_tables[table][1]
- self.syssource.create_temp_table(self.session, table, sqlschema)
- self.temp_tables[table][-1] = True
+ for step in self.steps:
+ result = step.execute()
+ # the latest executed step contains the full query result
+ return result
def preprocess(self, union, security=True):
"""insert security when necessary then annotate rql st for sql generation
@@ -220,15 +184,15 @@
return rqlst to actually execute
"""
cached = None
- if security and self.session.read_security:
+ if security and self.cnx.read_security:
             # ensure security is turned off when security is inserted,
             # else we may loop forever...
- if self.session.transaction_data.get('security-rqlst-cache'):
+ if self.cnx.transaction_data.get('security-rqlst-cache'):
key = self.cache_key
else:
key = None
- if key is not None and key in self.session.transaction_data:
- cachedunion, args = self.session.transaction_data[key]
+ if key is not None and key in self.cnx.transaction_data:
+ cachedunion, args = self.cnx.transaction_data[key]
union.children[:] = []
for select in cachedunion.children:
union.append(select)
@@ -237,10 +201,10 @@
self.args = args
cached = True
else:
- with self.session.security_enabled(read=False):
+ with self.cnx.security_enabled(read=False):
noinvariant = self._insert_security(union)
if key is not None:
- self.session.transaction_data[key] = (union, self.args)
+ self.cnx.transaction_data[key] = (union, self.args)
else:
noinvariant = ()
if cached is None:
@@ -257,7 +221,7 @@
self._insert_security(subquery.query)
localchecks, restricted = self._check_permissions(select)
if any(localchecks):
- self.session.rql_rewriter.insert_local_checks(
+ self.cnx.rql_rewriter.insert_local_checks(
select, self.args, localchecks, restricted, noinvariant)
return noinvariant
@@ -279,12 +243,12 @@
Note rqlst should not have been simplified at this point.
"""
- session = self.session
+ cnx = self.cnx
msgs = []
         # dict(varname: eid), allowing us to check rql expressions for
         # variables which have a known eid
varkwargs = {}
- if not session.transaction_data.get('security-rqlst-cache'):
+ if not cnx.transaction_data.get('security-rqlst-cache'):
for var in rqlst.defined_vars.itervalues():
if var.stinfo['constnode'] is not None:
eid = var.stinfo['constnode'].eval(self.args)
@@ -295,10 +259,10 @@
newsolutions = []
for solution in rqlst.solutions:
try:
- localcheck = check_read_access(session, rqlst, solution, self.args)
+ localcheck = check_read_access(cnx, rqlst, solution, self.args)
except Unauthorized as ex:
msg = 'remove %s from solutions since %s has no %s access to %s'
- msg %= (solution, session.user.login, ex.args[0], ex.args[1])
+ msg %= (solution, cnx.user.login, ex.args[0], ex.args[1])
msgs.append(msg)
LOGGER.info(msg)
else:
@@ -313,10 +277,10 @@
# if entity has been added in the current transaction, the
# user can read it whatever rql expressions are associated
# to its type
- if session.added_in_transaction(eid):
+ if cnx.added_in_transaction(eid):
continue
for rqlexpr in rqlexprs:
- if rqlexpr.check(session, eid):
+ if rqlexpr.check(cnx, eid):
break
else:
                         raise Unauthorized('No read access on %r with eid %i.' % (var, eid))
@@ -352,8 +316,8 @@
"""an execution model specific to the INSERT rql query
"""
- def __init__(self, querier, rqlst, args, session):
- ExecutionPlan.__init__(self, querier, rqlst, args, session)
+ def __init__(self, querier, rqlst, args, cnx):
+ ExecutionPlan.__init__(self, querier, rqlst, args, cnx)
         # save originally selected variables, we may modify this
# dictionary for substitution (query parameters)
self.selected = rqlst.selection
@@ -451,17 +415,17 @@
         if there are two entities matching U, the result set will look like
[(eidX1, eidY1), (eidX2, eidY2)]
"""
- session = self.session
- repo = session.repo
+ cnx = self.cnx
+ repo = cnx.repo
results = []
for row in self.e_defs:
- results.append([repo.glob_add_entity(session, edef)
+ results.append([repo.glob_add_entity(cnx, edef)
for edef in row])
return results
def insert_relation_defs(self):
- session = self.session
- repo = session.repo
+ cnx = self.cnx
+ repo = cnx.repo
edited_entities = {}
relations = {}
for subj, rtype, obj in self.relation_defs():
@@ -476,7 +440,7 @@
obj = obj.entity.eid
if repo.schema.rschema(rtype).inlined:
if subj not in edited_entities:
- entity = session.entity_from_eid(subj)
+ entity = cnx.entity_from_eid(subj)
edited = EditedEntity(entity)
edited_entities[subj] = edited
else:
@@ -487,9 +451,9 @@
relations[rtype].append((subj, obj))
else:
relations[rtype] = [(subj, obj)]
- repo.glob_add_relations(session, relations)
+ repo.glob_add_relations(cnx, relations)
for edited in edited_entities.itervalues():
- repo.glob_update_entity(session, edited)
+ repo.glob_update_entity(cnx, edited)
class QuerierHelper(object):
@@ -516,27 +480,14 @@
self.solutions = repo.vreg.solutions
rqlhelper = repo.vreg.rqlhelper
# set backend on the rql helper, will be used for function checking
- rqlhelper.backend = repo.config.sources()['system']['db-driver']
+ rqlhelper.backend = repo.config.system_source_config['db-driver']
self._parse = rqlhelper.parse
self._annotate = rqlhelper.annotate
# rql planner
- if len(repo.sources) < 2:
- from cubicweb.server.ssplanner import SSPlanner
- self._planner = SSPlanner(schema, rqlhelper)
- else:
- from cubicweb.server.msplanner import MSPlanner
- self._planner = MSPlanner(schema, rqlhelper)
+ self._planner = SSPlanner(schema, rqlhelper)
# sql generation annotator
self.sqlgen_annotate = SQLGenAnnotator(schema).annotate
- def set_planner(self):
- if len(self._repo.sources) < 2:
- from cubicweb.server.ssplanner import SSPlanner
- self._planner = SSPlanner(self.schema, self._repo.vreg.rqlhelper)
- else:
- from cubicweb.server.msplanner import MSPlanner
- self._planner = MSPlanner(self.schema, self._repo.vreg.rqlhelper)
-
def parse(self, rql, annotate=False):
"""return a rql syntax tree for the given rql"""
try:
@@ -544,13 +495,13 @@
except UnicodeError:
raise RQLSyntaxError(rql)
- def plan_factory(self, rqlst, args, session):
+ def plan_factory(self, rqlst, args, cnx):
"""create an execution plan for an INSERT RQL query"""
if rqlst.TYPE == 'insert':
- return InsertPlan(self, rqlst, args, session)
- return ExecutionPlan(self, rqlst, args, session)
+ return InsertPlan(self, rqlst, args, cnx)
+ return ExecutionPlan(self, rqlst, args, cnx)
- def execute(self, session, rql, args=None, build_descr=True):
+ def execute(self, cnx, rql, args=None, build_descr=True):
"""execute a rql query, return resulting rows and their description in
a `ResultSet` object
@@ -584,7 +535,7 @@
# if there are some, we need a better cache key, eg (rql +
# entity type of each eid)
try:
- cachekey = self._repo.querier_cache_key(session, rql,
+ cachekey = self._repo.querier_cache_key(cnx, rql,
args, eidkeys)
except UnknownEid:
# we want queries such as "Any X WHERE X eid 9999"
@@ -600,7 +551,7 @@
             # which are eids. Notice that even if you don't need `eidkeys`, we
# have to compute solutions anyway (kept as annotation on the
# tree)
- eidkeys = self.solutions(session, rqlst, args)
+ eidkeys = self.solutions(cnx, rqlst, args)
except UnknownEid:
# we want queries such as "Any X WHERE X eid 9999" return an
# empty result instead of raising UnknownEid
@@ -608,19 +559,19 @@
if args and rql not in self._rql_ck_cache:
self._rql_ck_cache[rql] = eidkeys
if eidkeys:
- cachekey = self._repo.querier_cache_key(session, rql, args,
+ cachekey = self._repo.querier_cache_key(cnx, rql, args,
eidkeys)
self._rql_cache[cachekey] = rqlst
orig_rqlst = rqlst
if rqlst.TYPE != 'select':
- if session.read_security:
+ if cnx.read_security:
check_no_password_selected(rqlst)
- # write query, ensure session's mode is 'write' so connections won't
- # be released until commit/rollback
- session.mode = 'write'
+ # write query, ensure connection's mode is 'write' so connections
+ # won't be released until commit/rollback
+ cnx.mode = 'write'
cachekey = None
else:
- if session.read_security:
+ if cnx.read_security:
for select in rqlst.children:
check_no_password_selected(select)
# on select query, always copy the cached rqlst so we don't have to
@@ -634,7 +585,7 @@
cachekey += tuple(sorted([k for k, v in args.iteritems()
if v is None]))
# make an execution plan
- plan = self.plan_factory(rqlst, args, session)
+ plan = self.plan_factory(rqlst, args, cnx)
plan.cache_key = cachekey
self._planner.build_plan(plan)
# execute the plan
@@ -646,11 +597,11 @@
#
# notes:
# * we should not reset the connections set here, since we don't want the
- # session to loose it during processing
+        #   connection to lose it during processing
# * don't rollback if we're in the commit process, will be handled
- # by the session
- if session.commit_state is None:
- session.commit_state = 'uncommitable'
+ # by the connection
+ if cnx.commit_state is None:
+ cnx.commit_state = 'uncommitable'
raise
# build a description for the results if necessary
descr = ()
@@ -665,14 +616,14 @@
descr = RepeatList(len(results), tuple(description))
else:
# hard, delegate the work :o)
- descr = manual_build_descr(session, rqlst, args, results)
+ descr = manual_build_descr(cnx, rqlst, args, results)
elif rqlst.TYPE == 'insert':
# on insert plan, some entities may have been auto-casted,
# so compute description manually even if there is only
# one solution
basedescr = [None] * len(plan.selected)
todetermine = zip(xrange(len(plan.selected)), repeat(False))
- descr = _build_descr(session, results, basedescr, todetermine)
+ descr = _build_descr(cnx, results, basedescr, todetermine)
# FIXME: get number of affected entities / relations on non
# selection queries ?
# return a result set object
@@ -688,7 +639,7 @@
set_log_methods(QuerierHelper, LOGGER)
-def manual_build_descr(tx, rqlst, args, result):
+def manual_build_descr(cnx, rqlst, args, result):
"""build a description for a given result by analysing each row
XXX could probably be done more efficiently during execution of query
@@ -712,11 +663,11 @@
basedescr.append(ttype)
if not todetermine:
return RepeatList(len(result), tuple(basedescr))
- return _build_descr(tx, result, basedescr, todetermine)
+ return _build_descr(cnx, result, basedescr, todetermine)
-def _build_descr(tx, result, basedescription, todetermine):
+def _build_descr(cnx, result, basedescription, todetermine):
description = []
- etype_from_eid = tx.describe
+ entity_metas = cnx.entity_metas
todel = []
for i, row in enumerate(result):
row_descr = basedescription[:]
@@ -730,9 +681,9 @@
row_descr[index] = etype_from_pyobj(value)
else:
try:
- row_descr[index] = etype_from_eid(value)[0]
+ row_descr[index] = entity_metas(value)['type']
except UnknownEid:
- tx.error('wrong eid %s in repository, you should '
+ cnx.error('wrong eid %s in repository, you should '
'db-check the database' % value)
todel.append(i)
break
--- a/server/repository.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/repository.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -33,27 +33,23 @@
import Queue
from warnings import warn
from itertools import chain
-from os.path import join
-from datetime import datetime
from time import time, localtime, strftime
+from contextlib import contextmanager
from warnings import warn
from logilab.common.decorators import cached, clear_cache
-from logilab.common.compat import any
-from logilab.common import flatten
+from logilab.common.deprecation import deprecated
from yams import BadSchemaDefinition
-from yams.schema import role_name
from rql import RQLSyntaxError
from rql.utils import rqlvar_maker
-from cubicweb import (CW_SOFTWARE_ROOT, CW_MIGRATION_MAP, QueryError,
+from cubicweb import (CW_MIGRATION_MAP, QueryError,
UnknownEid, AuthenticationError, ExecutionError,
- ETypeNotSupportedBySources, MultiSourcesError,
BadConnectionId, Unauthorized, ValidationError,
- RepositoryError, UniqueTogetherError, onevent)
+ UniqueTogetherError, onevent)
from cubicweb import cwvreg, schema, server
-from cubicweb.server import ShuttingDown, utils, hook, pool, querier, sources
+from cubicweb.server import ShuttingDown, utils, hook, querier, sources
from cubicweb.server.session import Session, InternalSession, InternalManager
from cubicweb.server.ssplanner import EditedEntity
@@ -187,14 +183,13 @@
self.shutting_down = False
# sources (additional sources info in the system database)
self.system_source = self.get_source('native', 'system',
- config.sources()['system'].copy())
- self.sources = [self.system_source]
+ config.system_source_config.copy())
self.sources_by_uri = {'system': self.system_source}
# querier helper, need to be created after sources initialization
self.querier = querier.QuerierHelper(self, self.schema)
- # cache eid -> (type, physical source, extid, actual source)
+ # cache eid -> (type, extid, actual source)
self._type_source_cache = {}
- # cache (extid, source uri) -> eid
+ # cache extid -> eid
self._extid_cache = {}
# open some connection sets
if config.init_cnxset_pool:
@@ -218,7 +213,7 @@
self._cnxsets_pool = Queue.Queue()
# 0. init a cnxset that will be used to fetch bootstrap information from
# the database
- self._cnxsets_pool.put_nowait(pool.ConnectionsSet(self.sources))
+ self._cnxsets_pool.put_nowait(self.system_source.wrapped_connection())
# 1. set used cubes
if config.creating or not config.read_instance_schema:
config.bootstrap_cubes()
@@ -249,8 +244,7 @@
if config.creating:
# call init_creating so that for instance native source can
# configurate tsearch according to postgres version
- for source in self.sources:
- source.init_creating()
+ self.system_source.init_creating()
else:
self.init_sources_from_database()
if 'CWProperty' in self.schema:
@@ -260,7 +254,7 @@
self._get_cnxset().close(True)
self.cnxsets = [] # list of available cnxsets (can't iterate on a Queue)
for i in xrange(config['connections-pool-size']):
- self.cnxsets.append(pool.ConnectionsSet(self.sources))
+ self.cnxsets.append(self.system_source.wrapped_connection())
self._cnxsets_pool.put_nowait(self.cnxsets[-1])
# internals ###############################################################
@@ -271,9 +265,9 @@
or not 'CWSource' in self.schema: # # 3.10 migration
self.system_source.init_creating()
return
- with self.internal_session() as session:
+ with self.internal_cnx() as cnx:
# FIXME: sources should be ordered (add_entity priority)
- for sourceent in session.execute(
+ for sourceent in cnx.execute(
'Any S, SN, SA, SC WHERE S is_instance_of CWSource, '
'S name SN, S type SA, S config SC').entities():
if sourceent.name == 'system':
@@ -281,16 +275,20 @@
self.sources_by_eid[sourceent.eid] = self.system_source
self.system_source.init(True, sourceent)
continue
- self.add_source(sourceent, add_to_cnxsets=False)
+ self.add_source(sourceent)
def _clear_planning_caches(self):
- for cache in ('source_defs', 'is_multi_sources_relation',
- 'can_cross_relation', 'rel_type_sources'):
- clear_cache(self, cache)
+ clear_cache(self, 'source_defs')
- def add_source(self, sourceent, add_to_cnxsets=True):
- source = self.get_source(sourceent.type, sourceent.name,
- sourceent.host_config, sourceent.eid)
+ def add_source(self, sourceent):
+ try:
+ source = self.get_source(sourceent.type, sourceent.name,
+ sourceent.host_config, sourceent.eid)
+ except RuntimeError:
+ if self.config.repairing:
+ self.exception("can't setup source %s, skipped", sourceent.name)
+ return
+ raise
self.sources_by_eid[sourceent.eid] = source
self.sources_by_uri[sourceent.name] = source
if self.config.source_enabled(source):
@@ -299,14 +297,6 @@
# internal session, which is not possible until connections sets have been
# initialized)
source.init(True, sourceent)
- if not source.copy_based_source:
- warn('[3.18] old multi-source system will go away in the next version',
- DeprecationWarning)
- self.sources.append(source)
- self.querier.set_planner()
- if add_to_cnxsets:
- for cnxset in self.cnxsets:
- cnxset.add_source(source)
else:
source.init(False, sourceent)
self._clear_planning_caches()
@@ -314,11 +304,6 @@
def remove_source(self, uri):
source = self.sources_by_uri.pop(uri)
del self.sources_by_eid[source.eid]
- if self.config.source_enabled(source) and not source.copy_based_source:
- self.sources.remove(source)
- self.querier.set_planner()
- for cnxset in self.cnxsets:
- cnxset.remove_source(source)
self._clear_planning_caches()
def get_source(self, type, uri, source_config, eid=None):
@@ -336,8 +321,6 @@
else:
self.vreg._set_schema(schema)
self.querier.set_schema(schema)
- # don't use self.sources, we may want to give schema even to disabled
- # sources
for source in self.sources_by_uri.itervalues():
source.set_schema(schema)
self.schema = schema
@@ -347,9 +330,9 @@
from cubicweb.server.schemaserial import deserialize_schema
appschema = schema.CubicWebSchema(self.config.appid)
self.debug('deserializing db schema into %s %#x', appschema.name, id(appschema))
- with self.internal_session() as session:
+ with self.internal_cnx() as cnx:
try:
- deserialize_schema(appschema, session)
+ deserialize_schema(appschema, cnx)
except BadSchemaDefinition:
raise
except Exception as ex:
@@ -470,7 +453,7 @@
except ZeroDivisionError:
pass
- def check_auth_info(self, session, login, authinfo):
+ def check_auth_info(self, cnx, login, authinfo):
"""validate authentication, raise AuthenticationError on failure, return
associated CWUser's eid on success.
"""
@@ -479,70 +462,55 @@
for source in self.sources_by_uri.itervalues():
if self.config.source_enabled(source) and source.support_entity('CWUser'):
try:
- return source.authenticate(session, login, **authinfo)
+ with cnx.ensure_cnx_set:
+ return source.authenticate(cnx, login, **authinfo)
except AuthenticationError:
continue
else:
raise AuthenticationError('authentication failed with all sources')
- def authenticate_user(self, session, login, **authinfo):
+ def authenticate_user(self, cnx, login, **authinfo):
"""validate login / password, raise AuthenticationError on failure
return associated CWUser instance on success
"""
- eid = self.check_auth_info(session, login, authinfo)
- cwuser = self._build_user(session, eid)
+ eid = self.check_auth_info(cnx, login, authinfo)
+ cwuser = self._build_user(cnx, eid)
if self.config.consider_user_state and \
not cwuser.cw_adapt_to('IWorkflowable').state in cwuser.AUTHENTICABLE_STATES:
raise AuthenticationError('user is not in authenticable state')
return cwuser
- def _build_user(self, session, eid):
+ def _build_user(self, cnx, eid):
"""return a CWUser entity for user with the given eid"""
- cls = self.vreg['etypes'].etype_class('CWUser')
- st = cls.fetch_rqlst(session.user, ordermethod=None)
- st.add_eid_restriction(st.get_variable('X'), 'x', 'Substitute')
- rset = session.execute(st.as_string(), {'x': eid})
- assert len(rset) == 1, rset
- cwuser = rset.get_entity(0, 0)
- # pylint: disable=W0104
- # prefetch / cache cwuser's groups and properties. This is especially
- # useful for internal sessions to avoid security insertions
- cwuser.groups
- cwuser.properties
- return cwuser
+ with cnx.ensure_cnx_set:
+ cls = self.vreg['etypes'].etype_class('CWUser')
+ st = cls.fetch_rqlst(cnx.user, ordermethod=None)
+ st.add_eid_restriction(st.get_variable('X'), 'x', 'Substitute')
+ rset = cnx.execute(st.as_string(), {'x': eid})
+ assert len(rset) == 1, rset
+ cwuser = rset.get_entity(0, 0)
+ # pylint: disable=W0104
+ # prefetch / cache cwuser's groups and properties. This is especially
+ # useful for internal sessions to avoid security insertions
+ cwuser.groups
+ cwuser.properties
+ return cwuser
# public (dbapi) interface ################################################
+ @deprecated("[3.19] use _cw.call_service('repo_stats')")
def stats(self): # XXX restrict to managers session?
"""Return a dictionary containing some statistics about the repository
resources usage.
This is a public method, not requiring a session id.
+
+ This method is deprecated in favor of using _cw.call_service('repo_stats')
"""
- results = {}
- querier = self.querier
- source = self.system_source
- for size, maxsize, hits, misses, title in (
- (len(querier._rql_cache), self.config['rql-cache-size'],
- querier.cache_hit, querier.cache_miss, 'rqlt_st'),
- (len(source._cache), self.config['rql-cache-size'],
- source.cache_hit, source.cache_miss, 'sql'),
- ):
- results['%s_cache_size' % title] = '%s / %s' % (size, maxsize)
- results['%s_cache_hit' % title] = hits
- results['%s_cache_miss' % title] = misses
- results['%s_cache_hit_percent' % title] = (hits * 100) / (hits + misses)
- results['type_source_cache_size'] = len(self._type_source_cache)
- results['extid_cache_size'] = len(self._extid_cache)
- results['sql_no_cache'] = self.system_source.no_cache
- results['nb_open_sessions'] = len(self._sessions)
- results['nb_active_threads'] = threading.activeCount()
- looping_tasks = self._tasks_manager._looping_tasks
- results['looping_tasks'] = ', '.join(str(t) for t in looping_tasks)
- results['available_cnxsets'] = self._cnxsets_pool.qsize()
- results['threads'] = ', '.join(sorted(str(t) for t in threading.enumerate()))
- return results
+ with self.internal_cnx() as cnx:
+ return cnx.call_service('repo_stats')
+ @deprecated("[3.19] use _cw.call_service('repo_gc_stats')")
def gc_stats(self, nmax=20):
"""Return a dictionary containing some statistics about the repository
memory usage.
@@ -552,33 +520,8 @@
nmax is the max number of (most) referenced object returned as
the 'referenced' result
"""
-
- from cubicweb._gcdebug import gc_info
- from cubicweb.appobject import AppObject
- from cubicweb.rset import ResultSet
- from cubicweb.dbapi import Connection, Cursor
- from cubicweb.web.request import CubicWebRequestBase
- from rql.stmts import Union
-
- lookupclasses = (AppObject,
- Union, ResultSet,
- Connection, Cursor,
- CubicWebRequestBase)
- try:
- from cubicweb.server.session import Session, InternalSession
- lookupclasses += (InternalSession, Session)
- except ImportError:
- pass # no server part installed
-
- results = {}
- counters, ocounters, garbage = gc_info(lookupclasses,
- viewreferrersclasses=())
- values = sorted(counters.iteritems(), key=lambda x: x[1], reverse=True)
- results['lookupclasses'] = values
- values = sorted(ocounters.iteritems(), key=lambda x: x[1], reverse=True)[:nmax]
- results['referenced'] = values
- results['unreachable'] = len(garbage)
- return results
+ with self.internal_cnx() as cnx:
+ return cnx.call_service('repo_gc_stats', nmax=nmax)
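Both deprecated wrappers above now just delegate to services. A minimal sketch of the new calling convention, assuming `repo` is a running Repository instance; the result keys shown are taken from the removed inline implementations and are assumed to survive in the services:

    with repo.internal_cnx() as cnx:
        # 'repo_stats' replaces the old stats() body, 'repo_gc_stats' gc_stats()
        stats = cnx.call_service('repo_stats')
        gcstats = cnx.call_service('repo_gc_stats', nmax=10)
    print stats.get('available_cnxsets'), gcstats.get('unreachable')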
def get_schema(self):
"""Return the instance schema.
@@ -601,31 +544,17 @@
return cubes
def get_option_value(self, option, foreid=None):
- """Return the value for `option` in the configuration. If `foreid` is
- specified, the actual repository to which this entity belongs is
- derefenced and the option value retrieved from it.
+ """Return the value for `option` in the configuration.
This is a public method, not requiring a session id.
+
+ The `foreid` argument is deprecated and now ignored (as of 3.19).
"""
+ if foreid is not None:
+ warn('[3.19] foreid argument is deprecated', DeprecationWarning,
+ stacklevel=2)
# XXX we may want to check we don't give sensible information
- # XXX the only cube using 'foreid', apycot, stop used this, we probably
- # want to drop this argument
- if foreid is None:
- return self.config[option]
- _, sourceuri, extid, _ = self.type_and_source_from_eid(foreid)
- if sourceuri == 'system':
- return self.config[option]
- cnxset = self._get_cnxset()
- try:
- cnx = cnxset.connection(sourceuri)
- # needed to check connection is valid and usable by the current
- # thread
- newcnx = self.sources_by_uri[sourceuri].check_connection(cnx)
- if newcnx is not None:
- cnx = newcnx
- return cnx.get_option_value(option, extid)
- finally:
- self._free_cnxset(cnxset)
+ return self.config[option]
@cached
def get_versions(self, checkversions=False):
@@ -636,8 +565,8 @@
"""
from logilab.common.changelog import Version
vcconf = {}
- with self.internal_session() as session:
- for pk, version in session.execute(
+ with self.internal_cnx() as cnx:
+ for pk, version in cnx.execute(
'Any K,V WHERE P is CWProperty, P value V, P pkey K, '
'P pkey ~="system.version.%"', build_descr=False):
cube = pk.split('.')[-1]
@@ -675,49 +604,22 @@
This is a public method, not requiring a session id.
"""
- with self.internal_session() as session:
- # don't use session.execute, we don't want rset.req set
- return self.querier.execute(session, 'Any K,V WHERE P is CWProperty,'
+ with self.internal_cnx() as cnx:
+ # don't use cnx.execute, we don't want rset.req set
+ return self.querier.execute(cnx, 'Any K,V WHERE P is CWProperty,'
'P pkey K, P value V, NOT P for_user U',
build_descr=False)
- # XXX protect this method: anonymous should be allowed and registration
- # plugged
+ @deprecated("[3.19] Use session.call_service('register_user') instead'")
def register_user(self, login, password, email=None, **kwargs):
"""check a user with the given login exists, if not create it with the
given password. This method is designed to be used for anonymous
registration on public web site.
"""
- with self.internal_session() as session:
- # for consistency, keep same error as unique check hook (although not required)
- errmsg = session._('the value "%s" is already used, use another one')
- if (session.execute('CWUser X WHERE X login %(login)s', {'login': login},
- build_descr=False)
- or session.execute('CWUser X WHERE X use_email C, C address %(login)s',
- {'login': login}, build_descr=False)):
- qname = role_name('login', 'subject')
- raise ValidationError(None, {qname: errmsg % login})
- # we have to create the user
- user = self.vreg['etypes'].etype_class('CWUser')(session)
- if isinstance(password, unicode):
- # password should *always* be utf8 encoded
- password = password.encode('UTF8')
- kwargs['login'] = login
- kwargs['upassword'] = password
- self.glob_add_entity(session, EditedEntity(user, **kwargs))
- session.execute('SET X in_group G WHERE X eid %(x)s, G name "users"',
- {'x': user.eid})
- if email or '@' in login:
- d = {'login': login, 'email': email or login}
- if session.execute('EmailAddress X WHERE X address %(email)s', d,
- build_descr=False):
- qname = role_name('address', 'subject')
- raise ValidationError(None, {qname: errmsg % d['email']})
- session.execute('INSERT EmailAddress X: X address %(email)s, '
- 'U primary_email X, U use_email X '
- 'WHERE U login %(login)s', d, build_descr=False)
- session.commit()
- return True
+ with self.internal_cnx() as cnx:
+ cnx.call_service('register_user', login=login, password=password,
+ email=email, **kwargs)
+ cnx.commit()
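Callers migrating off `register_user()` can invoke the service themselves; a sketch assuming `repo` is a Repository instance, mirroring the old keyword signature (credentials illustrative):

    with repo.internal_cnx() as cnx:
        cnx.call_service('register_user', login=u'jdoe', password=u'secret',
                         email=u'jdoe@example.org')
        cnx.commit()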
def find_users(self, fetch_attrs, **query_attrs):
"""yield user attributes for cwusers matching the given query_attrs
@@ -750,16 +652,16 @@
return rset.rows
def connect(self, login, **kwargs):
- """open a connection for a given user
+ """open a session for a given user
raise `AuthenticationError` if the authentication failed
raise `ConnectionError` if we can't open a connection
"""
cnxprops = kwargs.pop('cnxprops', None)
# use an internal connection
- with self.internal_session() as session:
+ with self.internal_cnx() as cnx:
# try to get a user object
- user = self.authenticate_user(session, login, **kwargs)
+ user = self.authenticate_user(cnx, login, **kwargs)
session = Session(user, self, cnxprops)
if threading.currentThread() in self._pyro_sessions:
# assume no pyro client does one get_repository followed by
@@ -769,13 +671,14 @@
self._pyro_sessions[threading.currentThread()] = session
user._cw = user.cw_rset.req = session
user.cw_clear_relation_cache()
- self._sessions[session.id] = session
- self.info('opened session %s for user %s', session.id, login)
- self.hm.call_hooks('session_open', session)
- # commit session at this point in case write operation has been done
- # during `session_open` hooks
- session.commit()
- return session.id
+ self._sessions[session.sessionid] = session
+ self.info('opened session %s for user %s', session.sessionid, login)
+ with session.new_cnx() as cnx:
+ self.hm.call_hooks('session_open', cnx)
+ # commit connection at this point in case write operation has been
+ # done during `session_open` hooks
+ cnx.commit()
+ return session.sessionid
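A usage sketch of the session-id based flow around `connect()` (login and password are illustrative; `password` is forwarded to the source's authenticate method through **kwargs):

    sessionid = repo.connect(u'admin', password=u'admin')
    try:
        rset = repo.execute(sessionid, 'Any X WHERE X is CWUser')
    finally:
        repo.close(sessionid)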
def execute(self, sessionid, rqlstring, args=None, build_descr=True,
txid=None):
@@ -805,13 +708,35 @@
finally:
session.free_cnxset()
+ @deprecated('[3.19] use .entity_metas(sessionid, eid, txid) instead')
def describe(self, sessionid, eid, txid=None):
"""return a tuple `(type, physical source uri, extid, actual source
uri)` for the entity of the given `eid`
+
+ As of 3.19, the physical source uri is always equal to the actual source uri.
"""
session = self._get_session(sessionid, setcnxset=True, txid=txid)
try:
- return self.type_and_source_from_eid(eid, session)
+ etype, extid, source = self.type_and_source_from_eid(eid, session)
+ return etype, source, extid, source
+ finally:
+ session.free_cnxset()
+
+ def entity_metas(self, sessionid, eid, txid=None):
+ """return a dictionary containing meta-datas for the entity of the given
+ `eid`. Available keys are:
+
+ * 'type', the entity's type name,
+
+ * 'source', the name of the source from which this entity's coming from,
+
+ * 'extid', the identifierfor this entity in its originating source, as
+ an encoded string or `None` for entities from the 'system' source.
+ """
+ session = self._get_session(sessionid, setcnxset=True, txid=txid)
+ try:
+ etype, extid, source = self.type_and_source_from_eid(eid, session)
+ return {'type': etype, 'source': source, 'extid': extid}
finally:
session.free_cnxset()
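How the deprecated tuple maps onto the new dictionary, sketched for illustration (`sessionid` and `eid` assumed in scope):

    # 3.18 API: (type, physical source uri, extid, actual source uri);
    # as of 3.19 the physical slot simply repeats the actual source uri
    etype, _, extid, source = repo.describe(sessionid, eid)
    # 3.19 API: explicit keys instead of tuple positions
    metas = repo.entity_metas(sessionid, eid)
    assert (metas['type'], metas['extid'], metas['source']) == (etype, extid, source)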
@@ -848,7 +773,7 @@
self.debug('begin commit for session %s', sessionid)
try:
session = self._get_session(sessionid)
- session.set_tx(txid)
+ session.set_cnx(txid)
return session.commit()
except (ValidationError, Unauthorized):
raise
@@ -861,7 +786,7 @@
self.debug('begin rollback for session %s', sessionid)
try:
session = self._get_session(sessionid)
- session.set_tx(txid)
+ session.set_cnx(txid)
session.rollback()
except Exception:
self.exception('unexpected error')
@@ -869,47 +794,30 @@
def close(self, sessionid, txid=None, checkshuttingdown=True):
"""close the session with the given id"""
- session = self._get_session(sessionid, setcnxset=True, txid=txid,
+ session = self._get_session(sessionid, txid=txid,
checkshuttingdown=checkshuttingdown)
# operation uncommited before close are rolled back before hook is called
- session.rollback(free_cnxset=False)
- self.hm.call_hooks('session_close', session)
- # commit session at this point in case write operation has been done
- # during `session_close` hooks
- session.commit()
+ if session._cnx._session_handled:
+ session._cnx.rollback(free_cnxset=False)
+ with session.new_cnx() as cnx:
+ self.hm.call_hooks('session_close', cnx)
+ # commit connection at this point in case write operation has been
+ # done during `session_close` hooks
+ cnx.commit()
session.close()
if threading.currentThread() in self._pyro_sessions:
self._pyro_sessions[threading.currentThread()] = None
del self._sessions[sessionid]
self.info('closed session %s for user %s', sessionid, session.user.login)
- def call_service(self, sessionid, regid, async, **kwargs):
+ def call_service(self, sessionid, regid, **kwargs):
"""
See :class:`cubicweb.dbapi.Connection.call_service`
and :class:`cubicweb.server.Service`
"""
+ # XXX lack a txid
session = self._get_session(sessionid)
- return self._call_service_with_session(session, regid, async, **kwargs)
-
- def _call_service_with_session(self, session, regid, async, **kwargs):
- if async:
- self.info('calling service %s asynchronously', regid)
- def task():
- session.set_cnxset()
- try:
- service = session.vreg['services'].select(regid, session, **kwargs)
- return service.call(**kwargs)
- finally:
- session.rollback() # free cnxset
- self.threaded_task(task)
- else:
- self.info('calling service %s synchronously', regid)
- session.set_cnxset()
- try:
- service = session.vreg['services'].select(regid, session, **kwargs)
- return service.call(**kwargs)
- finally:
- session.free_cnxset()
+ return session._cnx.call_service(regid, **kwargs)
def user_info(self, sessionid, props=None):
"""this method should be used by client to:
@@ -954,25 +862,6 @@
finally:
session.free_cnxset()
- # public (inter-repository) interface #####################################
-
- def entities_modified_since(self, etypes, mtime):
- """function designed to be called from an external repository which
- is using this one as a rql source for synchronization, and return a
- 3-uple containing :
- * the local date
- * list of (etype, eid) of entities of the given types which have been
- modified since the given timestamp (actually entities whose full text
- index content has changed)
- * list of (etype, eid) of entities of the given types which have been
- deleted since the given timestamp
- """
- with self.internal_session() as session:
- updatetime = datetime.utcnow()
- modentities, delentities = self.system_source.modified_entities(
- session, etypes, mtime)
- return updatetime, modentities, delentities
-
# session handling ########################################################
def close_sessions(self):
@@ -993,23 +882,45 @@
nbclosed = 0
for session in self._sessions.values():
if session.timestamp < mintime:
- self.close(session.id)
+ self.close(session.sessionid)
nbclosed += 1
return nbclosed
+ @deprecated("[3.19] use internal_cnx now\n"
+ "(Beware that integrity hook are now enabled by default)")
def internal_session(self, cnxprops=None, safe=False):
"""return a dbapi like connection/cursor using internal user which have
every rights on the repository. The `safe` argument is a boolean flag
telling if integrity hooks should be activated or not.
+ /!\ the safe argument is False by default.
+
*YOU HAVE TO* commit/rollback or close (rollback implicitly) the
session once the job's done, else you'll leak connections set up to the
time where no one is available, causing irremediable freeze...
"""
- session = InternalSession(self, cnxprops, safe)
+ session = InternalSession(self, cnxprops)
+ if not safe:
+ session.disable_hook_categories('integrity')
+ session.disable_hook_categories('security')
+ session._cnx.ctx_count += 1
session.set_cnxset()
return session
+ @contextmanager
+ def internal_cnx(self):
+ """Context manager returning a Connection using internal user which have
+ every access rights on the repository.
+
+ Beware that unlike the older :meth:`internal_session`, internal
+ connections have all hooks besides security enabled.
+ """
+ with InternalSession(self) as session:
+ with session.new_cnx() as cnx:
+ with cnx.security_enabled(read=False, write=False):
+ with cnx.ensure_cnx_set:
+ yield cnx
+
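A migration sketch from the deprecated `internal_session()` to `internal_cnx()`; the RQL query is illustrative, and note that integrity hooks now run by default:

    # 3.18: manual lifecycle, hooks off unless safe=True
    # session = repo.internal_session()
    # try:
    #     session.execute(...); session.commit()
    # finally:
    #     session.close()
    # 3.19: scoped connection, security disabled, other hooks enabled
    with repo.internal_cnx() as cnx:
        rset = cnx.execute('Any COUNT(X) WHERE X is CWUser')
        # write queries would need an explicit cnx.commit() here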
def _get_session(self, sessionid, setcnxset=False, txid=None,
checkshuttingdown=True):
"""return the session associated with the given session identifier"""
@@ -1020,7 +931,7 @@
except KeyError:
raise BadConnectionId('No such session %s' % sessionid)
if setcnxset:
- session.set_tx(txid) # must be done before set_cnxset
+ session.set_cnx(txid) # must be done before set_cnxset
session.set_cnxset()
return session
@@ -1028,9 +939,9 @@
# * correspondance between eid and (type, source)
# * correspondance between eid and local id (i.e. specific to a given source)
- def type_and_source_from_eid(self, eid, session=None):
- """return a tuple `(type, physical source uri, extid, actual source
- uri)` for the entity of the given `eid`
+ def type_and_source_from_eid(self, eid, session):
+ """return a tuple `(type, extid, actual source uri)` for the entity of
+ the given `eid`
"""
try:
eid = int(eid)
@@ -1039,21 +950,10 @@
try:
return self._type_source_cache[eid]
except KeyError:
- if session is None:
- session = self.internal_session()
- free_cnxset = True
- else:
- free_cnxset = False
- try:
- etype, uri, extid, auri = self.system_source.eid_type_source(
- session, eid)
- finally:
- if free_cnxset:
- session.free_cnxset()
- self._type_source_cache[eid] = (etype, uri, extid, auri)
- if uri != 'system':
- self._extid_cache[(extid, uri)] = eid
- return etype, uri, extid, auri
+ etype, extid, auri = self.system_source.eid_type_source(session,
+ eid)
+ self._type_source_cache[eid] = (etype, extid, auri)
+ return etype, extid, auri
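The eid cache entry lost its physical-source member; a sketch of the before/after shapes (`eid` and `session` assumed in scope):

    # 3.18: eid -> (type, physical source uri, extid, actual source uri)
    # 3.19: eid -> (type, extid, actual source uri)
    etype, extid, source = repo.type_and_source_from_eid(eid, session)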
def clear_caches(self, eids):
etcache = self._type_source_cache
@@ -1061,23 +961,18 @@
rqlcache = self.querier._rql_cache
for eid in eids:
try:
- etype, uri, extid, auri = etcache.pop(int(eid)) # may be a string in some cases
+ etype, extid, auri = etcache.pop(int(eid)) # may be a string in some cases
rqlcache.pop( ('%s X WHERE X eid %s' % (etype, eid),), None)
- extidcache.pop((extid, uri), None)
+ extidcache.pop(extid, None)
except KeyError:
etype = None
rqlcache.pop( ('Any X WHERE X eid %s' % eid,), None)
- for source in self.sources:
- source.clear_eid_cache(eid, etype)
+ self.system_source.clear_eid_cache(eid, etype)
- def type_from_eid(self, eid, session=None):
+ def type_from_eid(self, eid, session):
"""return the type of the entity with id <eid>"""
return self.type_and_source_from_eid(eid, session)[0]
- def source_from_eid(self, eid, session=None):
- """return the source for the given entity's eid"""
- return self.sources_by_uri[self.type_and_source_from_eid(eid, session)[1]]
-
def querier_cache_key(self, session, rql, args, eidkeys):
cachekey = [rql]
for key in sorted(eidkeys):
@@ -1093,16 +988,8 @@
args[key] = int(args[key])
return tuple(cachekey)
- def eid2extid(self, source, eid, session=None):
- """get local id from an eid"""
- etype, uri, extid, _ = self.type_and_source_from_eid(eid, session)
- if source.uri != uri:
- # eid not from the given source
- raise UnknownEid(eid)
- return extid
-
- def extid2eid(self, source, extid, etype, session=None, insert=True,
- complete=True, commit=True, sourceparams=None):
+ def extid2eid(self, source, extid, etype, cnx, insert=True,
+ sourceparams=None):
"""Return eid from a local id. If the eid is a negative integer, that
means the entity is known but has been copied back to the system source
hence should be ignored.
@@ -1125,101 +1012,80 @@
6. unless source's :attr:`should_call_hooks` tell otherwise,
'before_add_entity' hooks are called
"""
- uri = 'system' if source.copy_based_source else source.uri
- cachekey = (extid, uri)
try:
- return self._extid_cache[cachekey]
+ return self._extid_cache[extid]
except KeyError:
pass
- free_cnxset = False
- if session is None:
- session = self.internal_session()
- free_cnxset = True
- eid = self.system_source.extid2eid(session, uri, extid)
+ try:
+ # bw compat: cnx may be a session, get at the Connection
+ cnx = cnx._cnx
+ except AttributeError:
+ pass
+ with cnx.ensure_cnx_set:
+ eid = self.system_source.extid2eid(cnx, extid)
if eid is not None:
- self._extid_cache[cachekey] = eid
- self._type_source_cache[eid] = (etype, uri, extid, source.uri)
- if free_cnxset:
- session.free_cnxset()
+ self._extid_cache[extid] = eid
+ self._type_source_cache[eid] = (etype, extid, source.uri)
return eid
if not insert:
return
- # no link between extid and eid, create one using an internal session
- # since the current session user may not have required permissions to
- # do necessary stuff and we don't want to commit user session.
- #
- # Moreover, even if session is already an internal session but is
- # processing a commit, we have to use another one
- if not session.is_internal_session:
- session = self.internal_session()
- free_cnxset = True
- try:
- eid = self.system_source.create_eid(session)
- self._extid_cache[cachekey] = eid
- self._type_source_cache[eid] = (etype, uri, extid, source.uri)
- entity = source.before_entity_insertion(
- session, extid, etype, eid, sourceparams)
- if source.should_call_hooks:
- # get back a copy of operation for later restore if necessary,
- # see below
- pending_operations = session.pending_operations[:]
- self.hm.call_hooks('before_add_entity', session, entity=entity)
- self.add_info(session, entity, source, extid, complete=complete)
- source.after_entity_insertion(session, extid, entity, sourceparams)
- if source.should_call_hooks:
- self.hm.call_hooks('after_add_entity', session, entity=entity)
- if commit or free_cnxset:
- session.commit(free_cnxset)
- return eid
- except Exception:
- if commit or free_cnxset:
- session.rollback(free_cnxset)
- else:
+ # no link between extid and eid, create one
+ with cnx.ensure_cnx_set:
+ # write query, ensure connection's mode is 'write' so connections
+ # won't be released until commit/rollback
+ cnx.mode = 'write'
+ try:
+ eid = self.system_source.create_eid(cnx)
+ self._extid_cache[extid] = eid
+ self._type_source_cache[eid] = (etype, extid, source.uri)
+ entity = source.before_entity_insertion(
+ cnx, extid, etype, eid, sourceparams)
+ if source.should_call_hooks:
+ # get back a copy of operation for later restore if
+ # necessary, see below
+ pending_operations = cnx.pending_operations[:]
+ self.hm.call_hooks('before_add_entity', cnx, entity=entity)
+ self.add_info(cnx, entity, source, extid)
+ source.after_entity_insertion(cnx, extid, entity, sourceparams)
+ if source.should_call_hooks:
+ self.hm.call_hooks('after_add_entity', cnx, entity=entity)
+ return eid
+ except Exception:
# XXX do some cleanup manually so that the transaction has a
# chance to be commited, with simply this entity discarded
- self._extid_cache.pop(cachekey, None)
+ self._extid_cache.pop(extid, None)
self._type_source_cache.pop(eid, None)
if 'entity' in locals():
- hook.CleanupDeletedEidsCacheOp.get_instance(session).add_data(entity.eid)
- self.system_source.delete_info_multi(session, [entity], uri)
+ hook.CleanupDeletedEidsCacheOp.get_instance(cnx).add_data(entity.eid)
+ self.system_source.delete_info_multi(cnx, [entity])
if source.should_call_hooks:
- session._tx.pending_operations = pending_operations
- raise
+ cnx.pending_operations = pending_operations
+ raise
- def add_info(self, session, entity, source, extid=None, complete=True):
+ def add_info(self, session, entity, source, extid=None):
"""add type and source info for an eid into the system table,
and index the entity with the full text index
"""
# begin by inserting eid/type/source/extid into the entities table
hook.CleanupNewEidsCacheOp.get_instance(session).add_data(entity.eid)
- self.system_source.add_info(session, entity, source, extid, complete)
+ self.system_source.add_info(session, entity, source, extid)
- def delete_info(self, session, entity, sourceuri, scleanup=None):
+ def delete_info(self, session, entity, sourceuri):
"""called by external source when some entity known by the system source
has been deleted in the external source
"""
# mark eid as being deleted in session info and setup cache update
# operation
hook.CleanupDeletedEidsCacheOp.get_instance(session).add_data(entity.eid)
- self._delete_info(session, entity, sourceuri, scleanup)
+ self._delete_info(session, entity, sourceuri)
- def _delete_info(self, session, entity, sourceuri, scleanup=None):
+ def _delete_info(self, session, entity, sourceuri):
"""delete system information on deletion of an entity:
* delete all remaining relations from/to this entity
-
- * call delete info on the system source which will transfer record from
- the entities table to the deleted_entities table
-
- When scleanup is specified, it's expected to be the source's eid, in
- which case we'll specify the target's relation source so that this
- source is ignored. E.g. we want to delete relations stored locally, as
- the deletion information comes from the external source, it's its
- responsability to have cleaned-up its own relations.
+ * call delete info on the system source
"""
pendingrtypes = session.transaction_data.get('pendingrtypes', ())
- if scleanup is not None:
- source = self.sources_by_eid[scleanup]
# delete remaining relations: if user can delete the entity, he can
# delete all its relations without security checking
with session.security_enabled(read=False, write=False):
@@ -1234,34 +1100,20 @@
rql = 'DELETE X %s Y WHERE X eid %%(x)s' % rtype
else:
rql = 'DELETE Y %s X WHERE X eid %%(x)s' % rtype
- if scleanup is not None:
- # if the relation can't be crossed, nothing to cleanup (we
- # would get a BadRQLQuery from the multi-sources planner).
- # This may still leave some junk if the mapping has changed
- # at some point, but one can still run db-check to catch
- # those
- if not source in self.can_cross_relation(rtype):
- continue
- # source cleaning: only delete relations stored locally
- # (here, scleanup
- rql += ', NOT (Y cw_source S, S eid %(seid)s)'
try:
- session.execute(rql, {'x': eid, 'seid': scleanup},
- build_descr=False)
+ session.execute(rql, {'x': eid}, build_descr=False)
except Exception:
if self.config.mode == 'test':
raise
self.exception('error while cascading delete for entity %s '
'from %s. RQL: %s', entity, sourceuri, rql)
- self.system_source.delete_info_multi(session, [entity], sourceuri)
+ self.system_source.delete_info_multi(session, [entity])
- def _delete_info_multi(self, session, entities, sourceuri, scleanup=None):
+ def _delete_info_multi(self, session, entities):
"""same as _delete_info but accepts a list of entities with
the same etype and belinging to the same source.
"""
pendingrtypes = session.transaction_data.get('pendingrtypes', ())
- if scleanup is not None:
- source = self.sources_by_eid[scleanup]
# delete remaining relations: if user can delete the entity, he can
# delete all its relations without security checking
with session.security_enabled(read=False, write=False):
@@ -1276,77 +1128,36 @@
rql = 'DELETE X %s Y WHERE X eid IN (%s)' % (rtype, in_eids)
else:
rql = 'DELETE Y %s X WHERE X eid IN (%s)' % (rtype, in_eids)
- if scleanup is not None:
- # if the relation can't be crossed, nothing to cleanup (we
- # would get a BadRQLQuery from the multi-sources planner).
- # This may still leave some junk if the mapping has changed
- # at some point, but one can still run db-check to catch
- # those
- if not source in self.can_cross_relation(rtype):
- continue
- # source cleaning: only delete relations stored locally
- rql += ', NOT (Y cw_source S, S eid %(seid)s)'
try:
- session.execute(rql, {'seid': scleanup}, build_descr=False)
+ session.execute(rql, build_descr=False)
except ValidationError:
raise
except Unauthorized:
- self.exception('Unauthorized exception while cascading delete for entity %s '
- 'from %s. RQL: %s.\nThis should not happen since security is disabled here.',
- entities, sourceuri, rql)
+ self.exception('Unauthorized exception while cascading delete for entity %s. '
+ 'RQL: %s.\nThis should not happen since security is disabled here.',
+ entities, rql)
raise
except Exception:
if self.config.mode == 'test':
raise
- self.exception('error while cascading delete for entity %s '
- 'from %s. RQL: %s', entities, sourceuri, rql)
- self.system_source.delete_info_multi(session, entities, sourceuri)
+ self.exception('error while cascading delete for entity %s. RQL: %s',
+ entities, rql)
+ self.system_source.delete_info_multi(session, entities)
- def locate_relation_source(self, session, subject, rtype, object):
- subjsource = self.source_from_eid(subject, session)
- objsource = self.source_from_eid(object, session)
- if not subjsource is objsource:
- source = self.system_source
- if not (subjsource.may_cross_relation(rtype)
- and objsource.may_cross_relation(rtype)):
- raise MultiSourcesError(
- "relation %s can't be crossed among sources"
- % rtype)
- elif not subjsource.support_relation(rtype):
- source = self.system_source
- else:
- source = subjsource
- if not source.support_relation(rtype, True):
- raise MultiSourcesError(
- "source %s doesn't support write of %s relation"
- % (source.uri, rtype))
- return source
-
- def locate_etype_source(self, etype):
- for source in self.sources:
- if source.support_entity(etype, 1):
- return source
- else:
- raise ETypeNotSupportedBySources(etype)
-
- def init_entity_caches(self, session, entity, source):
- """add entity to session entities cache and repo's extid cache.
+ def init_entity_caches(self, cnx, entity, source):
+ """add entity to connection entities cache and repo's extid cache.
Return entity's ext id if the source isn't the system source.
"""
- session.set_entity_cache(entity)
- suri = source.uri
- if suri == 'system':
+ cnx.set_entity_cache(entity)
+ if source.uri == 'system':
extid = None
else:
- if source.copy_based_source:
- suri = 'system'
extid = source.get_extid(entity)
- self._extid_cache[(str(extid), suri)] = entity.eid
- self._type_source_cache[entity.eid] = (entity.cw_etype, suri, extid,
- source.uri)
+ self._extid_cache[str(extid)] = entity.eid
+ self._type_source_cache[entity.eid] = (entity.cw_etype, extid, source.uri)
return extid
- def glob_add_entity(self, session, edited):
+ def glob_add_entity(self, cnx, edited):
"""add an entity to the repository
the entity eid should originaly be None and a unique eid is assigned to
@@ -1356,40 +1167,38 @@
entity._cw_is_saved = False # entity has an eid but is not yet saved
# init edited_attributes before calling before_add_entity hooks
entity.cw_edited = edited
- source = self.locate_etype_source(entity.cw_etype)
+ source = self.system_source
# allocate an eid to the entity before calling hooks
- entity.eid = self.system_source.create_eid(session)
+ entity.eid = self.system_source.create_eid(cnx)
# set caches asap
- extid = self.init_entity_caches(session, entity, source)
+ extid = self.init_entity_caches(cnx, entity, source)
if server.DEBUG & server.DBG_REPO:
print 'ADD entity', self, entity.cw_etype, entity.eid, edited
prefill_entity_caches(entity)
- if source.should_call_hooks:
- self.hm.call_hooks('before_add_entity', session, entity=entity)
- relations = preprocess_inlined_relations(session, entity)
+ self.hm.call_hooks('before_add_entity', cnx, entity=entity)
+ relations = preprocess_inlined_relations(cnx, entity)
edited.set_defaults()
- if session.is_hook_category_activated('integrity'):
+ if cnx.is_hook_category_activated('integrity'):
edited.check(creation=True)
try:
- source.add_entity(session, entity)
+ source.add_entity(cnx, entity)
except UniqueTogetherError as exc:
- userhdlr = session.vreg['adapters'].select(
- 'IUserFriendlyError', session, entity=entity, exc=exc)
+ userhdlr = cnx.vreg['adapters'].select(
+ 'IUserFriendlyError', cnx, entity=entity, exc=exc)
userhdlr.raise_user_exception()
- self.add_info(session, entity, source, extid, complete=False)
+ self.add_info(cnx, entity, source, extid)
edited.saved = entity._cw_is_saved = True
# trigger after_add_entity after after_add_relation
- if source.should_call_hooks:
- self.hm.call_hooks('after_add_entity', session, entity=entity)
- # call hooks for inlined relations
- for attr, value in relations:
- self.hm.call_hooks('before_add_relation', session,
- eidfrom=entity.eid, rtype=attr, eidto=value)
- self.hm.call_hooks('after_add_relation', session,
- eidfrom=entity.eid, rtype=attr, eidto=value)
+ self.hm.call_hooks('after_add_entity', cnx, entity=entity)
+ # call hooks for inlined relations
+ for attr, value in relations:
+ self.hm.call_hooks('before_add_relation', cnx,
+ eidfrom=entity.eid, rtype=attr, eidto=value)
+ self.hm.call_hooks('after_add_relation', cnx,
+ eidfrom=entity.eid, rtype=attr, eidto=value)
return entity.eid
- def glob_update_entity(self, session, edited):
+ def glob_update_entity(self, cnx, edited):
"""replace an entity in the repository
the type and the eid of an entity must not be changed
"""
@@ -1399,13 +1208,13 @@
entity.cw_attr_cache, edited
hm = self.hm
eschema = entity.e_schema
- session.set_entity_cache(entity)
+ cnx.set_entity_cache(entity)
orig_edited = getattr(entity, 'cw_edited', None)
entity.cw_edited = edited
+ source = self.system_source
try:
only_inline_rels, need_fti_update = True, False
relations = []
- source = self.source_from_eid(entity.eid, session)
for attr in list(edited):
if attr == 'eid':
continue
@@ -1421,117 +1230,101 @@
previous_value = previous_value[0][0] # got a result set
if previous_value == entity.cw_attr_cache[attr]:
previous_value = None
- elif source.should_call_hooks:
- hm.call_hooks('before_delete_relation', session,
+ else:
+ hm.call_hooks('before_delete_relation', cnx,
eidfrom=entity.eid, rtype=attr,
eidto=previous_value)
relations.append((attr, edited[attr], previous_value))
- if source.should_call_hooks:
- # call hooks for inlined relations
- for attr, value, _t in relations:
- hm.call_hooks('before_add_relation', session,
- eidfrom=entity.eid, rtype=attr, eidto=value)
- if not only_inline_rels:
- hm.call_hooks('before_update_entity', session, entity=entity)
- if session.is_hook_category_activated('integrity'):
+ # call hooks for inlined relations
+ for attr, value, _t in relations:
+ hm.call_hooks('before_add_relation', cnx,
+ eidfrom=entity.eid, rtype=attr, eidto=value)
+ if not only_inline_rels:
+ hm.call_hooks('before_update_entity', cnx, entity=entity)
+ if cnx.is_hook_category_activated('integrity'):
edited.check()
try:
- source.update_entity(session, entity)
+ source.update_entity(cnx, entity)
edited.saved = True
except UniqueTogetherError as exc:
- userhdlr = session.vreg['adapters'].select(
- 'IUserFriendlyError', session, entity=entity, exc=exc)
+ userhdlr = cnx.vreg['adapters'].select(
+ 'IUserFriendlyError', cnx, entity=entity, exc=exc)
userhdlr.raise_user_exception()
- self.system_source.update_info(session, entity, need_fti_update)
- if source.should_call_hooks:
- if not only_inline_rels:
- hm.call_hooks('after_update_entity', session, entity=entity)
- for attr, value, prevvalue in relations:
- # if the relation is already cached, update existant cache
- relcache = entity.cw_relation_cached(attr, 'subject')
- if prevvalue is not None:
- hm.call_hooks('after_delete_relation', session,
- eidfrom=entity.eid, rtype=attr, eidto=prevvalue)
- if relcache is not None:
- session.update_rel_cache_del(entity.eid, attr, prevvalue)
- del_existing_rel_if_needed(session, entity.eid, attr, value)
- session.update_rel_cache_add(entity.eid, attr, value)
- hm.call_hooks('after_add_relation', session,
- eidfrom=entity.eid, rtype=attr, eidto=value)
+ self.system_source.update_info(cnx, entity, need_fti_update)
+ if not only_inline_rels:
+ hm.call_hooks('after_update_entity', cnx, entity=entity)
+ for attr, value, prevvalue in relations:
+ # if the relation is already cached, update existant cache
+ relcache = entity.cw_relation_cached(attr, 'subject')
+ if prevvalue is not None:
+ hm.call_hooks('after_delete_relation', cnx,
+ eidfrom=entity.eid, rtype=attr, eidto=prevvalue)
+ if relcache is not None:
+ cnx.update_rel_cache_del(entity.eid, attr, prevvalue)
+ del_existing_rel_if_needed(cnx, entity.eid, attr, value)
+ cnx.update_rel_cache_add(entity.eid, attr, value)
+ hm.call_hooks('after_add_relation', cnx,
+ eidfrom=entity.eid, rtype=attr, eidto=value)
finally:
if orig_edited is not None:
entity.cw_edited = orig_edited
- def glob_delete_entities(self, session, eids):
+ def glob_delete_entities(self, cnx, eids):
"""delete a list of entities and all related entities from the repository"""
- # mark eids as being deleted in session info and setup cache update
+ # mark eids as being deleted in cnx info and set up cache update
# operation (register pending eids before actual deletion to avoid
# multiple call to glob_delete_entities)
- op = hook.CleanupDeletedEidsCacheOp.get_instance(session)
+ op = hook.CleanupDeletedEidsCacheOp.get_instance(cnx)
if not isinstance(eids, (set, frozenset)):
warn('[3.13] eids should be given as a set', DeprecationWarning,
stacklevel=2)
eids = frozenset(eids)
eids = eids - op._container
op._container |= eids
- data_by_etype_source = {} # values are ([list of eids],
- # [list of extid],
- # [list of entities])
+ data_by_etype = {} # values are [list of entities]
#
# WARNING: the way this dictionary is populated is heavily optimized
# and does not use setdefault on purpose. Unless a new release
# of the Python interpreter advertises large perf improvements
# in setdefault, this should not be changed without profiling.
-
for eid in eids:
- etype, sourceuri, extid, _ = self.type_and_source_from_eid(eid, session)
+ etype = self.type_from_eid(eid, cnx)
# XXX should cache entity's cw_metainformation
- entity = session.entity_from_eid(eid, etype)
+ entity = cnx.entity_from_eid(eid, etype)
try:
- data_by_etype_source[(etype, sourceuri)].append(entity)
+ data_by_etype[etype].append(entity)
except KeyError:
- data_by_etype_source[(etype, sourceuri)] = [entity]
- for (etype, sourceuri), entities in data_by_etype_source.iteritems():
+ data_by_etype[etype] = [entity]
+ source = self.system_source
+ for etype, entities in data_by_etype.iteritems():
if server.DEBUG & server.DBG_REPO:
print 'DELETE entities', etype, [entity.eid for entity in entities]
- source = self.sources_by_uri[sourceuri]
- if source.should_call_hooks:
- self.hm.call_hooks('before_delete_entity', session, entities=entities)
- if session.deleted_in_transaction(source.eid):
- # source is being deleted, think to give scleanup argument
- self._delete_info_multi(session, entities, sourceuri, scleanup=source.eid)
- else:
- self._delete_info_multi(session, entities, sourceuri)
- source.delete_entities(session, entities)
- if source.should_call_hooks:
- self.hm.call_hooks('after_delete_entity', session, entities=entities)
+ self.hm.call_hooks('before_delete_entity', cnx, entities=entities)
+ self._delete_info_multi(cnx, entities)
+ source.delete_entities(cnx, entities)
+ self.hm.call_hooks('after_delete_entity', cnx, entities=entities)
# don't clear cache here, it is done in a hook on commit
- def glob_add_relation(self, session, subject, rtype, object):
+ def glob_add_relation(self, cnx, subject, rtype, object):
"""add a relation to the repository"""
- self.glob_add_relations(session, {rtype: [(subject, object)]})
+ self.glob_add_relations(cnx, {rtype: [(subject, object)]})
- def glob_add_relations(self, session, relations):
+ def glob_add_relations(self, cnx, relations):
"""add several relations to the repository
relations is a dictionary rtype: [(subj_eid, obj_eid), ...]
"""
- sources = {}
+ source = self.system_source
+ relations_by_rtype = {}
subjects_by_types = {}
objects_by_types = {}
- activintegrity = session.is_hook_category_activated('activeintegrity')
+ activintegrity = cnx.is_hook_category_activated('activeintegrity')
for rtype, eids_subj_obj in relations.iteritems():
if server.DEBUG & server.DBG_REPO:
for subjeid, objeid in eids_subj_obj:
print 'ADD relation', subjeid, rtype, objeid
for subjeid, objeid in eids_subj_obj:
- source = self.locate_relation_source(session, subjeid, rtype, objeid)
- if source not in sources:
- relations_by_rtype = {}
- sources[source] = relations_by_rtype
- else:
- relations_by_rtype = sources[source]
if rtype in relations_by_rtype:
relations_by_rtype[rtype].append((subjeid, objeid))
else:
@@ -1541,13 +1334,13 @@
# take care to relation of cardinality '?1', as all eids will
# be inserted later, we've remove duplicated eids since they
# won't be catched by `del_existing_rel_if_needed`
- rdef = session.rtype_eids_rdef(rtype, subjeid, objeid)
+ rdef = cnx.rtype_eids_rdef(rtype, subjeid, objeid)
card = rdef.cardinality
if card[0] in '?1':
- with session.security_enabled(read=False):
- session.execute('DELETE X %s Y WHERE X eid %%(x)s, '
- 'NOT Y eid %%(y)s' % rtype,
- {'x': subjeid, 'y': objeid})
+ with cnx.security_enabled(read=False):
+ cnx.execute('DELETE X %s Y WHERE X eid %%(x)s, '
+ 'NOT Y eid %%(y)s' % rtype,
+ {'x': subjeid, 'y': objeid})
subjects = subjects_by_types.setdefault(rdef, {})
if subjeid in subjects:
del relations_by_rtype[rtype][subjects[subjeid]]
@@ -1555,45 +1348,40 @@
continue
subjects[subjeid] = len(relations_by_rtype[rtype]) - 1
if card[1] in '?1':
- with session.security_enabled(read=False):
- session.execute('DELETE X %s Y WHERE Y eid %%(y)s, '
- 'NOT X eid %%(x)s' % rtype,
- {'x': subjeid, 'y': objeid})
+ with cnx.security_enabled(read=False):
+ cnx.execute('DELETE X %s Y WHERE Y eid %%(y)s, '
+ 'NOT X eid %%(x)s' % rtype,
+ {'x': subjeid, 'y': objeid})
objects = objects_by_types.setdefault(rdef, {})
if objeid in objects:
del relations_by_rtype[rtype][objects[objeid]]
objects[objeid] = len(relations_by_rtype[rtype])
continue
objects[objeid] = len(relations_by_rtype[rtype])
- for source, relations_by_rtype in sources.iteritems():
- if source.should_call_hooks:
- for rtype, source_relations in relations_by_rtype.iteritems():
- self.hm.call_hooks('before_add_relation', session,
- rtype=rtype, eids_from_to=source_relations)
- for rtype, source_relations in relations_by_rtype.iteritems():
- source.add_relations(session, rtype, source_relations)
- rschema = self.schema.rschema(rtype)
- for subjeid, objeid in source_relations:
- session.update_rel_cache_add(subjeid, rtype, objeid, rschema.symmetric)
- if source.should_call_hooks:
- for rtype, source_relations in relations_by_rtype.iteritems():
- self.hm.call_hooks('after_add_relation', session,
- rtype=rtype, eids_from_to=source_relations)
+ for rtype, source_relations in relations_by_rtype.iteritems():
+ self.hm.call_hooks('before_add_relation', cnx,
+ rtype=rtype, eids_from_to=source_relations)
+ for rtype, source_relations in relations_by_rtype.iteritems():
+ source.add_relations(cnx, rtype, source_relations)
+ rschema = self.schema.rschema(rtype)
+ for subjeid, objeid in source_relations:
+ cnx.update_rel_cache_add(subjeid, rtype, objeid, rschema.symmetric)
+ for rtype, source_relations in relations_by_rtype.iteritems():
+ self.hm.call_hooks('after_add_relation', cnx,
+ rtype=rtype, eids_from_to=source_relations)
- def glob_delete_relation(self, session, subject, rtype, object):
+ def glob_delete_relation(self, cnx, subject, rtype, object):
"""delete a relation from the repository"""
if server.DEBUG & server.DBG_REPO:
print 'DELETE relation', subject, rtype, object
- source = self.locate_relation_source(session, subject, rtype, object)
- if source.should_call_hooks:
- self.hm.call_hooks('before_delete_relation', session,
- eidfrom=subject, rtype=rtype, eidto=object)
- source.delete_relation(session, subject, rtype, object)
+ source = self.system_source
+ self.hm.call_hooks('before_delete_relation', cnx,
+ eidfrom=subject, rtype=rtype, eidto=object)
+ source.delete_relation(cnx, subject, rtype, object)
rschema = self.schema.rschema(rtype)
- session.update_rel_cache_del(subject, rtype, object, rschema.symmetric)
- if source.should_call_hooks:
- self.hm.call_hooks('after_delete_relation', session,
- eidfrom=subject, rtype=rtype, eidto=object)
+ cnx.update_rel_cache_del(subject, rtype, object, rschema.symmetric)
+ self.hm.call_hooks('after_delete_relation', cnx,
+ eidfrom=subject, rtype=rtype, eidto=object)
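With per-source dispatch gone, `glob_add_relations()` still takes the same mapping shape; an illustrative payload, given a Connection `cnx` (rtypes and eids made up):

    relations = {
        'knows': [(123, 456), (123, 789)],  # rtype -> [(subject eid, object eid), ...]
        'in_group': [(123, 42)],
    }
    repo.glob_add_relations(cnx, relations)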
# pyro handling ###########################################################
@@ -1644,7 +1432,7 @@
# client was not yet connected to the repo
return
if not session.closed:
- self.close(session.id)
+ self.close(session.sessionid)
daemon.removeConnection = removeConnection
return daemon
@@ -1656,35 +1444,10 @@
self.info('repository re-registered as a pyro object %s',
self.pyro_appid)
- # multi-sources planner helpers ###########################################
-
- @cached
- def rel_type_sources(self, rtype):
- warn('[3.18] old multi-source system will go away in the next version',
- DeprecationWarning)
- return tuple([source for source in self.sources
- if source.support_relation(rtype)
- or rtype in source.dont_cross_relations])
-
- @cached
- def can_cross_relation(self, rtype):
- warn('[3.18] old multi-source system will go away in the next version',
- DeprecationWarning)
- return tuple([source for source in self.sources
- if source.support_relation(rtype)
- and rtype in source.cross_relations])
-
- @cached
- def is_multi_sources_relation(self, rtype):
- warn('[3.18] old multi-source system will go away in the next version',
- DeprecationWarning)
- return any(source for source in self.sources
- if not source is self.system_source
- and source.support_relation(rtype))
# these are overridden by set_log_methods below
# only defining here to prevent pylint from complaining
- info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
+ info = warning = error = critical = exception = debug = lambda msg, *a, **kw: None
def pyro_unregister(config):
--- a/server/schemaserial.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/schemaserial.py Tue Jun 10 09:49:45 2014 +0200
@@ -20,7 +20,6 @@
__docformat__ = "restructuredtext en"
import os
-from itertools import chain
import json
from logilab.common.shellutils import ProgressBar
@@ -28,13 +27,13 @@
from yams import (BadSchemaDefinition, schema as schemamod, buildobjs as ybo,
schema2sql as y2sql)
-from cubicweb import CW_SOFTWARE_ROOT, Binary, typed_eid
+from cubicweb import Binary
from cubicweb.schema import (KNOWN_RPROPERTIES, CONSTRAINTS, ETYPE_NAME_MAP,
- VIRTUAL_RTYPES, PURE_VIRTUAL_RTYPES)
+ VIRTUAL_RTYPES)
from cubicweb.server import sqlutils
-def group_mapping(cursor, interactive=True):
+def group_mapping(cnx, interactive=True):
"""create a group mapping from an rql cursor
A group mapping has standard group names as key (managers, owners at least)
@@ -43,7 +42,7 @@
from the user.
"""
res = {}
- for eid, name in cursor.execute('Any G, N WHERE G is CWGroup, G name N',
+ for eid, name in cnx.execute('Any G, N WHERE G is CWGroup, G name N',
build_descr=False):
res[name] = eid
if not interactive:
@@ -75,33 +74,33 @@
break
return res
-def cstrtype_mapping(cursor):
+def cstrtype_mapping(cnx):
"""cached constraint types mapping"""
- map = dict(cursor.execute('Any T, X WHERE X is CWConstraintType, X name T'))
+ map = dict(cnx.execute('Any T, X WHERE X is CWConstraintType, X name T'))
return map
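Both helpers now take a connection instead of a raw cursor; the result shapes are unchanged, mapping names to eids (eids illustrative):

    gmap = group_mapping(cnx, interactive=False)
    # e.g. {u'managers': 3, u'owners': 4, u'users': 5, u'guests': 6}
    cmap = cstrtype_mapping(cnx)
    # e.g. {u'SizeConstraint': 17, u'UniqueConstraint': 18}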
# schema / perms deserialization ##############################################
-def deserialize_schema(schema, session):
+def deserialize_schema(schema, cnx):
"""return a schema according to information stored in an rql database
as CWRType and CWEType entities
"""
- repo = session.repo
+ repo = cnx.repo
dbhelper = repo.system_source.dbhelper
# XXX bw compat (3.6 migration)
- sqlcu = session.cnxset['system']
- sqlcu.execute("SELECT * FROM cw_CWRType WHERE cw_name='symetric'")
- if sqlcu.fetchall():
- sql = dbhelper.sql_rename_col('cw_CWRType', 'cw_symetric', 'cw_symmetric',
- dbhelper.TYPE_MAPPING['Boolean'], True)
- sqlcu.execute(sql)
- sqlcu.execute("UPDATE cw_CWRType SET cw_name='symmetric' WHERE cw_name='symetric'")
- session.commit(False)
+ with cnx.ensure_cnx_set:
+ sqlcu = cnx.system_sql("SELECT * FROM cw_CWRType WHERE cw_name='symetric'")
+ if sqlcu.fetchall():
+ sql = dbhelper.sql_rename_col('cw_CWRType', 'cw_symetric', 'cw_symmetric',
+ dbhelper.TYPE_MAPPING['Boolean'], True)
+ sqlcu.execute(sql)
+ sqlcu.execute("UPDATE cw_CWRType SET cw_name='symmetric' WHERE cw_name='symetric'")
+ cnx.commit(False)
ertidx = {}
copiedeids = set()
- permsidx = deserialize_ertype_permissions(session)
+ permsidx = deserialize_ertype_permissions(cnx)
schema.reading_from_database = True
- for eid, etype, desc in session.execute(
+ for eid, etype, desc in cnx.execute(
'Any X, N, D WHERE X is CWEType, X name N, X description D',
build_descr=False):
# base types are already in the schema, skip them
@@ -115,7 +114,7 @@
needcopy = False
netype = ETYPE_NAME_MAP[etype]
# can't use write rql queries at this point, use raw sql
- sqlexec = session.system_sql
+ sqlexec = cnx.system_sql
if sqlexec('SELECT 1 FROM %(p)sCWEType WHERE %(p)sname=%%(n)s'
% {'p': sqlutils.SQL_PREFIX}, {'n': netype}).fetchone():
# the new type already exists, we should copy (eg make existing
@@ -132,17 +131,12 @@
sqlexec(alter_table_sql)
sqlexec('UPDATE entities SET type=%(n)s WHERE type=%(x)s',
{'x': etype, 'n': netype})
- session.commit(False)
- try:
- sqlexec('UPDATE deleted_entities SET type=%(n)s WHERE type=%(x)s',
- {'x': etype, 'n': netype})
- except Exception:
- pass
+ cnx.commit(False)
tocleanup = [eid]
tocleanup += (eid for eid, cached in repo._type_source_cache.iteritems()
if etype == cached[0])
repo.clear_caches(tocleanup)
- session.commit(False)
+ cnx.commit(False)
if needcopy:
ertidx[eid] = netype
copiedeids.add(eid)
@@ -154,14 +148,14 @@
eschema = schema.add_entity_type(
ybo.EntityType(name=etype, description=desc, eid=eid))
set_perms(eschema, permsidx)
- for etype, stype in session.execute(
+ for etype, stype in cnx.execute(
'Any XN, ETN WHERE X is CWEType, X name XN, X specializes ET, ET name ETN',
build_descr=False):
etype = ETYPE_NAME_MAP.get(etype, etype)
stype = ETYPE_NAME_MAP.get(stype, stype)
schema.eschema(etype)._specialized_type = stype
schema.eschema(stype)._specialized_by.append(etype)
- for eid, rtype, desc, sym, il, ftc in session.execute(
+ for eid, rtype, desc, sym, il, ftc in cnx.execute(
'Any X,N,D,S,I,FTC WHERE X is CWRType, X name N, X description D, '
'X symmetric S, X inlined I, X fulltext_container FTC', build_descr=False):
ertidx[eid] = rtype
@@ -169,7 +163,7 @@
ybo.RelationType(name=rtype, description=desc,
symmetric=bool(sym), inlined=bool(il),
fulltext_container=ftc, eid=eid))
- cstrsidx = deserialize_rdef_constraints(session)
+ cstrsidx = deserialize_rdef_constraints(cnx)
pendingrdefs = []
# closure to factorize common code of attribute/relation rdef addition
def _add_rdef(rdefeid, seid, reid, oeid, **kwargs):
@@ -198,13 +192,13 @@
set_perms(rdefs, permsidx)
# Get the type parameters for additional base types.
try:
- extra_props = dict(session.execute('Any X, XTP WHERE X is CWAttribute, '
+ extra_props = dict(cnx.execute('Any X, XTP WHERE X is CWAttribute, '
'X extra_props XTP'))
except Exception:
- session.critical('Previous CRITICAL notification about extra_props is not '
+ cnx.critical('Previous CRITICAL notification about extra_props is not '
'a problem if you are migrating to cubicweb 3.17')
extra_props = {} # not yet in the schema (introduced by 3.17 migration)
- for values in session.execute(
+ for values in cnx.execute(
'Any X,SE,RT,OE,CARD,ORD,DESC,IDX,FTIDX,I18N,DFLT WHERE X is CWAttribute,'
'X relation_type RT, X cardinality CARD, X ordernum ORD, X indexed IDX,'
'X description DESC, X internationalizable I18N, X defaultval DFLT,'
@@ -222,7 +216,7 @@
cardinality=card, description=desc, order=ord,
indexed=idx, fulltextindexed=ftidx, internationalizable=i18n,
default=default, **typeparams)
- for values in session.execute(
+ for values in cnx.execute(
'Any X,SE,RT,OE,CARD,ORD,DESC,C WHERE X is CWRelation, X relation_type RT,'
'X cardinality CARD, X ordernum ORD, X description DESC, '
'X from_entity SE, X to_entity OE, X composite C', build_descr=False):
@@ -238,7 +232,7 @@
if rdefs is not None:
set_perms(rdefs, permsidx)
unique_togethers = {}
- rset = session.execute(
+ rset = cnx.execute(
'Any X,E,R WHERE '
'X is CWUniqueTogetherConstraint, '
'X constraint_of E, X relations R', build_descr=False)
@@ -257,11 +251,11 @@
for eschema, unique_together in unique_togethers.itervalues():
eschema._unique_together.append(tuple(sorted(unique_together)))
schema.infer_specialization_rules()
- session.commit()
+ cnx.commit()
schema.reading_from_database = False
-def deserialize_ertype_permissions(session):
+def deserialize_ertype_permissions(cnx):
"""return sect action:groups associations for the given
entity or relation schema with its eid, according to schema's
permissions stored in the database as [read|add|delete|update]_permission
@@ -270,21 +264,21 @@
res = {}
for action in ('read', 'add', 'update', 'delete'):
rql = 'Any E,N WHERE G is CWGroup, G name N, E %s_permission G' % action
- for eid, gname in session.execute(rql, build_descr=False):
+ for eid, gname in cnx.execute(rql, build_descr=False):
res.setdefault(eid, {}).setdefault(action, []).append(gname)
rql = ('Any E,X,EXPR,V WHERE X is RQLExpression, X expression EXPR, '
'E %s_permission X, X mainvars V' % action)
- for eid, expreid, expr, mainvars in session.execute(rql, build_descr=False):
+ for eid, expreid, expr, mainvars in cnx.execute(rql, build_descr=False):
# we don't know yet if it's a rql expr for an entity or a relation,
# so append a tuple to differentiate from groups and so we'll be
# able to instantiate it later
res.setdefault(eid, {}).setdefault(action, []).append( (expr, mainvars, expreid) )
return res
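The returned index maps an entity or relation type eid to its permissions; a hedged sketch of the shape (the eids below are hypothetical):

.. sourcecode:: python

    permsidx = deserialize_ertype_permissions(cnx)
    # e.g. permsidx[1234] == {
    #     'read': ['managers', 'users'],          # plain group names
    #     'add': [('X owned_by U', u'X', 5678)],  # (expr, mainvars, expreid)
    # }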
-def deserialize_rdef_constraints(session):
+def deserialize_rdef_constraints(cnx):
"""return the list of relation definition's constraints as instances"""
res = {}
- for rdefeid, ceid, ct, val in session.execute(
+ for rdefeid, ceid, ct, val in cnx.execute(
'Any E, X,TN,V WHERE E constrained_by X, X is CWConstraint, '
'X cstrtype T, T name TN, X value V', build_descr=False):
cstr = CONSTRAINTS[ct].deserialize(val)
@@ -311,7 +305,7 @@
# schema / perms serialization ################################################
-def serialize_schema(cursor, schema):
+def serialize_schema(cnx, schema):
"""synchronize schema and permissions in the database according to
current schema
"""
@@ -319,7 +313,7 @@
if not quiet:
_title = '-> storing the schema in the database '
print _title,
- execute = cursor.execute
+ execute = cnx.execute
eschemas = schema.entities()
if not quiet:
pb_size = (len(eschemas + schema.relations())
@@ -328,7 +322,7 @@
pb = ProgressBar(pb_size, title=_title)
else:
pb = None
- groupmap = group_mapping(cursor, interactive=False)
+ groupmap = group_mapping(cnx, interactive=False)
# serialize all entity types, assuring CWEType is serialized first for proper
# is / is_instance_of insertion
eschemas.remove(schema.eschema('CWEType'))
--- a/server/server.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/server.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -19,13 +19,9 @@
__docformat__ = "restructuredtext en"
-import os
-import sys
import select
-import warnings
from time import localtime, mktime
-from cubicweb.cwconfig import CubicWebConfiguration
from cubicweb.server.utils import TasksManager
from cubicweb.server.repository import Repository
--- a/server/serverconfig.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/serverconfig.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -24,7 +24,7 @@
from StringIO import StringIO
import logilab.common.configuration as lgconfig
-from logilab.common.decorators import wproperty, cached
+from logilab.common.decorators import cached
from cubicweb.toolsutils import read_config, restrict_perms_to_user
from cubicweb.cwconfig import CONFIGURATIONS, CubicWebConfiguration
@@ -297,13 +297,16 @@
# configuration file (#16102)
@cached
def read_sources_file(self):
+ """return a dictionary of values found in the sources file"""
return read_config(self.sources_file(), raise_if_unreadable=True)
- def sources(self):
- """return a dictionnaries containing sources definitions indexed by
- sources'uri
- """
- return self.read_sources_file()
+ @property
+ def system_source_config(self):
+ return self.read_sources_file()['system']
+
+ @property
+ def default_admin_config(self):
+ return self.read_sources_file()['admin']
def source_enabled(self, source):
if self.sources_mode is not None:
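A minimal sketch of the resulting configuration API (the instance name and option values are hypothetical; 'system' and 'admin' are the section names read from the sources file):

.. sourcecode:: python

    config = ServerConfiguration.config_for('myinstance')
    system = config.system_source_config   # replaces config.sources()['system']
    admin = config.default_admin_config    # replaces config.sources()['admin']
    driver = system['db-driver']
    login, pwd = admin['login'], admin['password']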
--- a/server/serverctl.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/serverctl.py Tue Jun 10 09:49:45 2014 +0200
@@ -132,17 +132,19 @@
return cnx
def repo_cnx(config):
- """return a in-memory repository and a db api connection it"""
- from cubicweb.dbapi import in_memory_repo_cnx
+ """return a in-memory repository and a repoapi connection to it"""
+ from cubicweb import repoapi
from cubicweb.server.utils import manager_userpasswd
try:
- login = config.sources()['admin']['login']
- pwd = config.sources()['admin']['password']
+ login = config.default_admin_config['login']
+ pwd = config.default_admin_config['password']
except KeyError:
login, pwd = manager_userpasswd()
while True:
try:
- return in_memory_repo_cnx(config, login, password=pwd)
+ repo = repoapi.get_repository(config=config)
+ cnx = repoapi.connect(repo, login, password=pwd)
+ return repo, cnx
except AuthenticationError:
print '-> Error: wrong user/password.'
# reset cubes else we'll have an assertion error on next retry
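The repoapi replacement thus boils down to the following pattern (assuming a ServerConfiguration in `config` and credentials in `login`/`pwd`, as above):

.. sourcecode:: python

    from cubicweb import repoapi
    repo = repoapi.get_repository(config=config)
    cnx = repoapi.connect(repo, login, password=pwd)
    with cnx:
        rset = cnx.execute('Any X WHERE X is CWUser')
        cnx.commit()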
@@ -221,7 +223,7 @@
def cleanup(self):
"""remove instance's configuration and database"""
from logilab.database import get_db_helper
- source = self.config.sources()['system']
+ source = self.config.system_source_config
dbname = source['db-name']
helper = get_db_helper(source['db-driver'])
if ASK.confirm('Delete database %s ?' % dbname):
@@ -334,7 +336,7 @@
automatic = self.get('automatic')
appid = args.pop()
config = ServerConfiguration.config_for(appid)
- source = config.sources()['system']
+ source = config.system_source_config
dbname = source['db-name']
driver = source['db-driver']
helper = get_db_helper(driver)
@@ -441,7 +443,7 @@
appid = args[0]
config = ServerConfiguration.config_for(appid)
try:
- system = config.sources()['system']
+ system = config.system_source_config
extra_args = system.get('db-extra-arguments')
extra = extra_args and {'extra_args': extra_args} or {}
get_connection(
@@ -457,7 +459,7 @@
init_repository(config, drop=self.config.drop)
if not self.config.automatic:
while ASK.confirm('Enter another source ?', default_is_yes=False):
- CWCTL.run(['add-source', '--config-level',
+ CWCTL.run(['source-add', '--config-level',
str(self.config.config_level), config.appid])
@@ -467,7 +469,7 @@
<instance>
the identifier of the instance to initialize.
"""
- name = 'add-source'
+ name = 'source-add'
arguments = '<instance>'
min_args = max_args = 1
options = (
@@ -482,43 +484,43 @@
config = ServerConfiguration.config_for(appid)
config.quick_start = True
repo, cnx = repo_cnx(config)
- req = cnx.request()
- used = set(n for n, in req.execute('Any SN WHERE S is CWSource, S name SN'))
- cubes = repo.get_cubes()
- while True:
- type = raw_input('source type (%s): '
- % ', '.join(sorted(SOURCE_TYPES)))
- if type not in SOURCE_TYPES:
- print '-> unknown source type, use one of the available types.'
- continue
- sourcemodule = SOURCE_TYPES[type].module
- if not sourcemodule.startswith('cubicweb.'):
- # module names look like cubes.mycube.themodule
- sourcecube = SOURCE_TYPES[type].module.split('.', 2)[1]
- # if the source adapter is coming from an external component,
- # ensure it's specified in used cubes
- if not sourcecube in cubes:
- print ('-> this source type require the %s cube which is '
- 'not used by the instance.')
+ with cnx:
+ used = set(n for n, in cnx.execute('Any SN WHERE S is CWSource, S name SN'))
+ cubes = repo.get_cubes()
+ while True:
+ type = raw_input('source type (%s): '
+ % ', '.join(sorted(SOURCE_TYPES)))
+ if type not in SOURCE_TYPES:
+ print '-> unknown source type, use one of the available types.'
continue
- break
- while True:
- sourceuri = raw_input('source identifier (a unique name used to '
- 'tell sources apart): ').strip()
- if not sourceuri:
- print '-> mandatory.'
- else:
- sourceuri = unicode(sourceuri, sys.stdin.encoding)
- if sourceuri in used:
- print '-> uri already used, choose another one.'
+ sourcemodule = SOURCE_TYPES[type].module
+ if not sourcemodule.startswith('cubicweb.'):
+ # module names look like cubes.mycube.themodule
+ sourcecube = SOURCE_TYPES[type].module.split('.', 2)[1]
+ # if the source adapter is coming from an external component,
+ # ensure it's specified in used cubes
+ if not sourcecube in cubes:
+                    print ('-> this source type requires the %s cube which is '
+                           'not used by the instance.' % sourcecube)
+ continue
+ break
+ while True:
+ sourceuri = raw_input('source identifier (a unique name used to '
+ 'tell sources apart): ').strip()
+ if not sourceuri:
+ print '-> mandatory.'
else:
- break
- # XXX configurable inputlevel
- sconfig = ask_source_config(config, type, inputlevel=self.config.config_level)
- cfgstr = unicode(generate_source_config(sconfig), sys.stdin.encoding)
- req.create_entity('CWSource', name=sourceuri,
- type=unicode(type), config=cfgstr)
- cnx.commit()
+ sourceuri = unicode(sourceuri, sys.stdin.encoding)
+ if sourceuri in used:
+ print '-> uri already used, choose another one.'
+ else:
+ break
+ # XXX configurable inputlevel
+ sconfig = ask_source_config(config, type, inputlevel=self.config.config_level)
+ cfgstr = unicode(generate_source_config(sconfig), sys.stdin.encoding)
+ cnx.create_entity('CWSource', name=sourceuri,
+ type=unicode(type), config=cfgstr)
+ cnx.commit()
class GrantUserOnInstanceCommand(Command):
@@ -544,7 +546,7 @@
from cubicweb.server.sqlutils import sqlexec, sqlgrants
appid, user = args
config = ServerConfiguration.config_for(appid)
- source = config.sources()['system']
+ source = config.system_source_config
set_owner = self.config.set_owner
cnx = system_source_cnx(source, special_privs='GRANT')
cursor = cnx.cursor()
@@ -734,12 +736,12 @@
mih.backup_database(output, askconfirm=False, format=format)
mih.shutdown()
-def _local_restore(appid, backupfile, drop, systemonly=True, format='native'):
+def _local_restore(appid, backupfile, drop, format='native'):
config = ServerConfiguration.config_for(appid)
config.verbosity = 1 # else we won't be asked for confirmation on problems
config.quick_start = True
mih = config.migration_handler(connect=False, verbosity=1)
- mih.restore_database(backupfile, drop, systemonly, askconfirm=False, format=format)
+ mih.restore_database(backupfile, drop, askconfirm=False, format=format)
repo = mih.repo_connect()
# version of the database
dbversions = repo.get_versions()
@@ -848,13 +850,6 @@
'help': 'for some reason the database doesn\'t exist and so '
'should not be dropped.'}
),
- ('restore-all',
- {'short': 'r', 'action' : 'store_true', 'default' : False,
- 'help': 'restore everything, eg not only the system source database '
- 'but also data for all sources supporting backup/restore and custom '
- 'instance data. In that case, <backupfile> is expected to be the '
- 'timestamp of the backup to restore, not a file'}
- ),
('format',
{'short': 'f', 'default': 'native', 'type': 'choice',
'choices': ('native', 'portable'),
@@ -874,7 +869,6 @@
raise
_local_restore(appid, backupfile,
drop=not self.config.no_drop,
- systemonly=not self.config.restore_all,
format=self.config.format)
if self.config.format == 'portable':
try:
@@ -986,8 +980,9 @@
config = ServerConfiguration.config_for(appid)
config.repairing = self.config.force
repo, cnx = repo_cnx(config)
- check(repo, cnx,
- self.config.checks, self.config.reindex, self.config.autofix)
+ with cnx:
+ check(repo, cnx,
+ self.config.checks, self.config.reindex, self.config.autofix)
class RebuildFTICommand(Command):
@@ -1009,29 +1004,9 @@
etypes = args or None
config = ServerConfiguration.config_for(appid)
repo, cnx = repo_cnx(config)
- session = repo._get_session(cnx.sessionid, setcnxset=True)
- reindex_entities(repo.schema, session, etypes=etypes)
- cnx.commit()
-
-
-class SynchronizeInstanceSchemaCommand(Command):
- """Synchronize persistent schema with cube schema.
-
- Will synchronize common stuff between the cube schema and the
- actual persistent schema, but will not add/remove any entity or relation.
-
- <instance>
- the identifier of the instance to synchronize.
- """
- name = 'schema-sync'
- arguments = '<instance>'
- min_args = max_args = 1
-
- def run(self, args):
- appid = args[0]
- config = ServerConfiguration.config_for(appid)
- mih = config.migration_handler()
- mih.cmd_synchronize_schema()
+ with cnx:
+ reindex_entities(repo.schema, cnx._cnx, etypes=etypes)
+ cnx.commit()
class SynchronizeSourceCommand(Command):
@@ -1102,7 +1077,7 @@
diff_tool = args.pop(0)
config = ServerConfiguration.config_for(appid)
repo, cnx = repo_cnx(config)
- session = repo._get_session(cnx.sessionid, setcnxset=True)
+ cnx.close()
fsschema = config.load_schema(expand_cubes=True)
schema_diff(fsschema, repo.schema, permissionshandler, diff_tool, ignore=('eid',))
@@ -1112,7 +1087,7 @@
StartRepositoryCommand,
DBDumpCommand, DBRestoreCommand, DBCopyCommand,
AddSourceCommand, CheckRepositoryCommand, RebuildFTICommand,
- SynchronizeInstanceSchemaCommand, SynchronizeSourceCommand, SchemaDiffCommand,
+ SynchronizeSourceCommand, SchemaDiffCommand,
):
CWCTL.register(cmdclass)
--- a/server/session.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/session.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -23,12 +23,14 @@
from time import time
from uuid import uuid4
from warnings import warn
+import functools
+from contextlib import contextmanager
from logilab.common.deprecation import deprecated
from logilab.common.textutils import unormalize
from logilab.common.registry import objectify_predicate
-from cubicweb import UnknownEid, QueryError, schema, server
+from cubicweb import QueryError, schema, server, ProgrammingError
from cubicweb.req import RequestSessionBase
from cubicweb.utils import make_uid
from cubicweb.rqlrewrite import RQLRewriter
@@ -96,59 +98,75 @@
return obj.deny_all_hooks_but(*categories)
-class _hooks_control(object):
+class _hooks_control(object): # XXX repoapi: remove me when
+ # session stop being connection
"""context manager to control activated hooks categories.
- If mode is session.`HOOKS_DENY_ALL`, given hooks categories will
+ If mode is `HOOKS_DENY_ALL`, given hooks categories will
be enabled.
- If mode is session.`HOOKS_ALLOW_ALL`, given hooks categories will
+ If mode is `HOOKS_ALLOW_ALL`, given hooks categories will
be disabled.
.. sourcecode:: python
- with _hooks_control(self.session, self.session.HOOKS_ALLOW_ALL, 'integrity'):
+ with _hooks_control(cnx, HOOKS_ALLOW_ALL, 'integrity'):
# ... do stuff with all but 'integrity' hooks activated
- with _hooks_control(self.session, self.session.HOOKS_DENY_ALL, 'integrity'):
+ with _hooks_control(cnx, HOOKS_DENY_ALL, 'integrity'):
# ... do stuff with none but 'integrity' hooks activated
- This is an internal api, you should rather use
- :meth:`~cubicweb.server.session.Session.deny_all_hooks_but` or
- :meth:`~cubicweb.server.session.Session.allow_all_hooks_but` session
- methods.
+ This is an internal API, you should rather use
+ :meth:`~cubicweb.server.session.Connection.deny_all_hooks_but` or
+ :meth:`~cubicweb.server.session.Connection.allow_all_hooks_but`
+ Connection methods.
"""
- def __init__(self, session, mode, *categories):
+ def __init__(self, cnx, mode, *categories):
assert mode in (HOOKS_ALLOW_ALL, HOOKS_DENY_ALL)
- self.session = session
- self.tx = session._tx
+ self.cnx = cnx
self.mode = mode
self.categories = categories
self.oldmode = None
self.changes = ()
def __enter__(self):
- self.oldmode = self.tx.hooks_mode
- self.tx.hooks_mode = self.mode
+ self.oldmode = self.cnx.hooks_mode
+ self.cnx.hooks_mode = self.mode
if self.mode is HOOKS_DENY_ALL:
- self.changes = self.tx.enable_hook_categories(*self.categories)
+ self.changes = self.cnx.enable_hook_categories(*self.categories)
else:
- self.changes = self.tx.disable_hook_categories(*self.categories)
- self.tx.ctx_count += 1
+ self.changes = self.cnx.disable_hook_categories(*self.categories)
+ self.cnx.ctx_count += 1
def __exit__(self, exctype, exc, traceback):
- self.tx.ctx_count -= 1
- if self.tx.ctx_count == 0:
- self.session._clear_thread_storage(self.tx)
- else:
- try:
- if self.categories:
- if self.mode is HOOKS_DENY_ALL:
- self.tx.disable_hook_categories(*self.categories)
- else:
- self.tx.enable_hook_categories(*self.categories)
- finally:
- self.tx.hooks_mode = self.oldmode
+ self.cnx.ctx_count -= 1
+ try:
+ if self.categories:
+ if self.mode is HOOKS_DENY_ALL:
+ self.cnx.disable_hook_categories(*self.categories)
+ else:
+ self.cnx.enable_hook_categories(*self.categories)
+ finally:
+ self.cnx.hooks_mode = self.oldmode
+
+class _session_hooks_control(_hooks_control): # XXX repoapi: remove me when
+ # session stop being connection
+ """hook control context manager for session
+
+ Necessary to handle some unholy transaction scope logic."""
+
+
+ def __init__(self, session, mode, *categories):
+ self.session = session
+ super_init = super(_session_hooks_control, self).__init__
+ super_init(session._cnx, mode, *categories)
+
+ def __exit__(self, exctype, exc, traceback):
+ super_exit = super(_session_hooks_control, self).__exit__
+ ret = super_exit(exctype, exc, traceback)
+ if self.cnx.ctx_count == 0:
+ self.session._close_cnx(self.cnx)
+ return ret
@deprecated('[3.17] use <object>.security_enabled instead')
def security_enabled(obj, *args, **kwargs):
@@ -160,9 +178,8 @@
By default security is disabled on queries executed on the repository
side.
"""
- def __init__(self, session, read=None, write=None):
- self.session = session
- self.tx = session._tx
+ def __init__(self, cnx, read=None, write=None):
+ self.cnx = cnx
self.read = read
self.write = write
self.oldread = None
@@ -172,24 +189,39 @@
if self.read is None:
self.oldread = None
else:
- self.oldread = self.tx.read_security
- self.tx.read_security = self.read
+ self.oldread = self.cnx.read_security
+ self.cnx.read_security = self.read
if self.write is None:
self.oldwrite = None
else:
- self.oldwrite = self.tx.write_security
- self.tx.write_security = self.write
- self.tx.ctx_count += 1
+ self.oldwrite = self.cnx.write_security
+ self.cnx.write_security = self.write
+ self.cnx.ctx_count += 1
def __exit__(self, exctype, exc, traceback):
- self.tx.ctx_count -= 1
- if self.tx.ctx_count == 0:
- self.session._clear_thread_storage(self.tx)
- else:
- if self.oldread is not None:
- self.tx.read_security = self.oldread
- if self.oldwrite is not None:
- self.tx.write_security = self.oldwrite
+ self.cnx.ctx_count -= 1
+ if self.oldread is not None:
+ self.cnx.read_security = self.oldread
+ if self.oldwrite is not None:
+ self.cnx.write_security = self.oldwrite
+
+class _session_security_enabled(_security_enabled):
+ """hook security context manager for session
+
+    Necessary to handle some unholy transaction scope logic."""
+
+
+ def __init__(self, session, read=None, write=None):
+ self.session = session
+ super_init = super(_session_security_enabled, self).__init__
+ super_init(session._cnx, read=read, write=write)
+
+ def __exit__(self, exctype, exc, traceback):
+ super_exit = super(_session_security_enabled, self).__exit__
+ ret = super_exit(exctype, exc, traceback)
+ if self.cnx.ctx_count == 0:
+ self.session._close_cnx(self.cnx)
+ return ret
HOOKS_ALLOW_ALL = object()
HOOKS_DENY_ALL = object()
@@ -199,13 +231,13 @@
pass
class CnxSetTracker(object):
- """Keep track of which transaction use which cnxset.
+ """Keep track of which connection use which cnxset.
- There should be one of these object per session (including internal sessions).
+ There should be one of these objects per session (including internal sessions).
- Session objects are responsible of creating their CnxSetTracker object.
+ Session objects are responsible for creating their CnxSetTracker object.
- Transactions should use the :meth:`record` and :meth:`forget` to inform the
+    Connections should use :meth:`record` and :meth:`forget` to inform the
tracker of cnxsets they have acquired.
.. automethod:: cubicweb.server.session.CnxSetTracker.record
@@ -231,13 +263,13 @@
def __exit__(self, *args):
return self._condition.__exit__(*args)
- def record(self, txid, cnxset):
- """Inform the tracker that a txid has acquired a cnxset
+ def record(self, cnxid, cnxset):
+ """Inform the tracker that a cnxid has acquired a cnxset
- This method is to be used by Transaction objects.
+ This method is to be used by Connection objects.
This method fails when:
- - The txid already has a recorded cnxset.
+ - The cnxid already has a recorded cnxset.
- The tracker is not active anymore.
Notes about the caller:
@@ -264,19 +296,19 @@
with self._condition:
if not self._active:
raise SessionClosedError('Closed')
- old = self._record.get(txid)
+ old = self._record.get(cnxid)
if old is not None:
- raise ValueError('transaction "%s" already has a cnx_set (%r)'
- % (txid, old))
- self._record[txid] = cnxset
+ raise ValueError('connection "%s" already has a cnx_set (%r)'
+ % (cnxid, old))
+ self._record[cnxid] = cnxset
- def forget(self, txid, cnxset):
- """Inform the tracker that a txid have release a cnxset
+ def forget(self, cnxid, cnxset):
+ """Inform the tracker that a cnxid have release a cnxset
- This methode is to be used by Transaction object.
+        This method is to be used by Connection objects.
This method fails when:
- - The cnxset for the txid does not match the recorded one.
+ - The cnxset for the cnxid does not match the recorded one.
Notes about the caller:
(1) It is responsible for releasing the cnxset.
@@ -296,11 +328,11 @@
cnxset = repo._free_cnxset(cnxset) # (1)
"""
with self._condition:
- old = self._record.get(txid, None)
+ old = self._record.get(cnxid, None)
if old is not cnxset:
raise ValueError('recorded cnxset for "%s" mismatch: %r != %r'
- % (txid, old, cnxset))
- self._record.pop(txid)
+ % (cnxid, old, cnxset))
+ self._record.pop(cnxid)
self._condition.notify_all()
def close(self):
@@ -318,7 +350,7 @@
This method is to be used by Session objects.
- Returns a tuple of transaction ids that remain open.
+ Returns a tuple of connection ids that remain open.
"""
with self._condition:
if self._active:
@@ -330,10 +362,30 @@
timeout -= time() - start
return tuple(self._record)
-class Transaction(object):
- """Repository Transaction
+
+def _with_cnx_set(func):
+ """decorator for Connection method that ensure they run with a cnxset """
+ @functools.wraps(func)
+ def wrapper(cnx, *args, **kwargs):
+ with cnx.ensure_cnx_set:
+ return func(cnx, *args, **kwargs)
+ return wrapper
- Holds all transaction related data
+def _open_only(func):
+ """decorator for Connection method that check it is open"""
+ @functools.wraps(func)
+ def check_open(cnx, *args, **kwargs):
+ if not cnx._open:
+ raise ProgrammingError('Closed Connection: %s'
+ % cnx.connectionid)
+ return func(cnx, *args, **kwargs)
+ return check_open
+
+
+class Connection(RequestSessionBase):
+ """Repository Connection
+
+ Holds all connection related data
Database connection resources:
@@ -342,11 +394,11 @@
:attr:`cnxset`, the connections set to use to execute queries on sources.
If the transaction is read only, the connection set may be freed between
- actual queries. This allows multiple transactions with a reasonably low
+ actual queries. This allows multiple connections with a reasonably low
connection set pool size. Control mechanism is detailed below.
- .. automethod:: cubicweb.server.session.Transaction.set_cnxset
- .. automethod:: cubicweb.server.session.Transaction.free_cnxset
+ .. automethod:: cubicweb.server.session.Connection.set_cnxset
+ .. automethod:: cubicweb.server.session.Connection.free_cnxset
:attr:`mode`, string telling the connections set handling mode, may be one
of 'read' (connections set may be freed), 'write' (some write was done in
@@ -387,15 +439,40 @@
"""
- def __init__(self, txid, session, rewriter):
- #: transaction unique id
- self.transactionid = txid
+ is_request = False
+
+ def __init__(self, session, cnxid=None, session_handled=False):
+        # using super(Connection, self) confuses some test hack
+        RequestSessionBase.__init__(self, session.vreg)
+        # only the session may provide an explicit connection id
+        if cnxid is not None:
+            assert session_handled # only the session provides an explicit cnxid
+        self._open = None
+        #: connection unique id
+ if cnxid is None:
+ cnxid = '%s-%s' % (session.sessionid, uuid4().hex)
+ self.connectionid = cnxid
+ self.sessionid = session.sessionid
+        #: self._session_handled tells whether the life cycle of this
+        #: Connection is automatically controlled by the Session (the old
+        #: backward compatibility mode)
+ self._session_handled = session_handled
#: reentrance handling
self.ctx_count = 0
+        #: count how many nested contexts currently need a cnxset
+ self._cnxset_count = 0
+        #: Boolean for compat with the older explicit set_cnxset/free_cnxset API.
+        #: Once set_cnxset has been called, no automatic freeing will happen
+        #: until free_cnxset is called.
+ self._auto_free_cnx_set = True
#: server.Repository object
self.repo = session.repo
self.vreg = self.repo.vreg
+ self._execute = self.repo.querier.execute
+
+ # other session utility
+ self._session_timestamp = session._timestamp
#: connection handling mode
self.mode = session.default_mode
@@ -403,11 +480,14 @@
self._cnxset = None
#: CnxSetTracker used to report cnxset usage
self._cnxset_tracker = session._cnxset_tracker
- #: is this transaction from a client or internal to the repo
+ #: is this connection from a client or internal to the repo
self.running_dbapi_query = True
+ # internal (root) session
+ self.is_internal_session = session.is_internal_session
#: dict containing arbitrary data cleared at the end of the transaction
- self.data = {}
+ self.transaction_data = {}
+ self._session_data = session.data
#: ordered list of operations to be processed on commit/rollback
self.pending_operations = []
#: (None, 'precommit', 'postcommit', 'uncommitable')
@@ -432,118 +512,345 @@
self.undo_actions = config['undo-enabled']
# RQLRewriter are not thread safe
- self._rewriter = rewriter
+ self._rewriter = RQLRewriter(self)
+
+ # other session utility
+ if session.user.login == '__internal_manager__':
+ self.user = session.user
+ self.set_language(self.user.prefered_language())
+ else:
+ self._set_user(session.user)
+
+
+    # life cycle handling ####################################################
+
+ def __enter__(self):
+ assert self._open is None # first opening
+ self._open = True
+ return self
+
+ def __exit__(self, exctype=None, excvalue=None, tb=None):
+ assert self._open # actually already open
+ assert self._cnxset_count == 0
+ self._free_cnxset(ignoremode=True)
+ self.clear()
+ self._open = False
+
+
+
+ # shared data handling ###################################################
@property
- def transaction_data(self):
- return self.data
+ def data(self):
+ return self._session_data
+
+ @property
+ def rql_rewriter(self):
+ return self._rewriter
+
+ @_open_only
+ def get_shared_data(self, key, default=None, pop=False, txdata=False):
+ """return value associated to `key` in session data"""
+ if txdata:
+ data = self.transaction_data
+ else:
+ data = self._session_data
+ if pop:
+ return data.pop(key, default)
+ else:
+ return data.get(key, default)
+
+ @_open_only
+ def set_shared_data(self, key, value, txdata=False):
+ """set value associated to `key` in session data"""
+ if txdata:
+ self.transaction_data[key] = value
+ else:
+ self._session_data[key] = value
def clear(self):
"""reset internal data"""
- self.data = {}
+ self.transaction_data = {}
#: ordered list of operations to be processed on commit/rollback
self.pending_operations = []
#: (None, 'precommit', 'postcommit', 'uncommitable')
self.commit_state = None
self.pruned_hooks_cache = {}
+ self.local_perm_cache.clear()
+        self._rewriter = RQLRewriter(self)
# Connection Set Management ###############################################
@property
+ @_open_only
def cnxset(self):
return self._cnxset
@cnxset.setter
+ @_open_only
def cnxset(self, new_cnxset):
with self._cnxset_tracker:
old_cnxset = self._cnxset
if new_cnxset is old_cnxset:
return #nothing to do
if old_cnxset is not None:
+ old_cnxset.rollback()
self._cnxset = None
self.ctx_count -= 1
- self._cnxset_tracker.forget(self.transactionid, old_cnxset)
+ self._cnxset_tracker.forget(self.connectionid, old_cnxset)
if new_cnxset is not None:
- self._cnxset_tracker.record(self.transactionid, new_cnxset)
+ self._cnxset_tracker.record(self.connectionid, new_cnxset)
self._cnxset = new_cnxset
self.ctx_count += 1
- def set_cnxset(self):
- """the transaction need a connections set to execute some queries"""
+ @_open_only
+ def _set_cnxset(self):
+ """the connection need a connections set to execute some queries"""
if self.cnxset is None:
cnxset = self.repo._get_cnxset()
try:
self.cnxset = cnxset
- try:
- cnxset.cnxset_set()
- except:
- self.cnxset = None
- raise
except:
self.repo._free_cnxset(cnxset)
raise
return self.cnxset
- def free_cnxset(self, ignoremode=False):
- """the transaction is no longer using its connections set, at least for some time"""
+ @_open_only
+ def _free_cnxset(self, ignoremode=False):
+ """the connection is no longer using its connections set, at least for some time"""
# cnxset may be none if no operation has been done since last commit
# or rollback
cnxset = self.cnxset
if cnxset is not None and (ignoremode or self.mode == 'read'):
+ assert self._cnxset_count == 0
try:
self.cnxset = None
finally:
cnxset.cnxset_freed()
self.repo._free_cnxset(cnxset)
+ @deprecated('[3.19] cnxset are automatically managed now.'
+ ' stop using explicit set and free.')
+ def set_cnxset(self):
+ self._auto_free_cnx_set = False
+ return self._set_cnxset()
+
+ @deprecated('[3.19] cnxset are automatically managed now.'
+ ' stop using explicit set and free.')
+ def free_cnxset(self, ignoremode=False):
+ self._auto_free_cnx_set = True
+ return self._free_cnxset(ignoremode=ignoremode)
+
+
+ @property
+ @contextmanager
+ @_open_only
+ def ensure_cnx_set(self):
+ assert self._cnxset_count >= 0
+ if self._cnxset_count == 0:
+ self._set_cnxset()
+ try:
+ self._cnxset_count += 1
+ yield
+ finally:
+ self._cnxset_count = max(self._cnxset_count - 1, 0)
+ if self._cnxset_count == 0 and self._auto_free_cnx_set:
+ self._free_cnxset()
+
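A sketch of the intended nesting behaviour of this context manager:

.. sourcecode:: python

    with cnx.ensure_cnx_set:        # count 0 -> 1, cnxset acquired
        cu = cnx.system_sql('SELECT 1')
        with cnx.ensure_cnx_set:    # count 1 -> 2, nothing new acquired
            pass
    # count back to 0: the cnxset is freed, unless a deprecated explicit
    # set_cnxset() call pinned it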
# Entity cache management #################################################
#
- # The transaction entity cache as held in tx.data is removed at the
- # end of the transaction (commit and rollback)
+ # The connection entity cache as held in cnx.transaction_data is removed at the
+    # end of the transaction (commit and rollback)
#
- # XXX transaction level caching may be a pb with multiple repository
+ # XXX connection level caching may be a pb with multiple repository
# instances, but 1. this is probably not the only one :$ and 2. it may be
# an acceptable risk. Anyway we could activate it or not according to a
# configuration option
def set_entity_cache(self, entity):
- """Add `entity` to the transaction entity cache"""
- ecache = self.data.setdefault('ecache', {})
+ """Add `entity` to the connection entity cache"""
+        # XXX not using _open_only because, at creation time, _set_user
+        # calls this function to cache the Connection user.
+ if entity.cw_etype != 'CWUser' and not self._open:
+ raise ProgrammingError('Closed Connection: %s'
+ % self.connectionid)
+ ecache = self.transaction_data.setdefault('ecache', {})
ecache.setdefault(entity.eid, entity)
+ @_open_only
def entity_cache(self, eid):
"""get cache entity for `eid`"""
- return self.data['ecache'][eid]
+ return self.transaction_data['ecache'][eid]
+ @_open_only
def cached_entities(self):
"""return the whole entity cache"""
- return self.data.get('ecache', {}).values()
+ return self.transaction_data.get('ecache', {}).values()
+ @_open_only
def drop_entity_cache(self, eid=None):
"""drop entity from the cache
If eid is None, the whole cache is dropped"""
if eid is None:
- self.data.pop('ecache', None)
+ self.transaction_data.pop('ecache', None)
else:
- del self.data['ecache'][eid]
+ del self.transaction_data['ecache'][eid]
+
+ # relations handling #######################################################
+
+ @_open_only
+ def add_relation(self, fromeid, rtype, toeid):
+ """provide direct access to the repository method to add a relation.
+
+ This is equivalent to the following rql query:
+
+           SET X rtype Y WHERE X eid fromeid, Y eid toeid
+
+        without the read security check and without all the burden of rql execution.
+ You may use this in hooks when you know both eids of the relation you
+ want to add.
+ """
+ self.add_relations([(rtype, [(fromeid, toeid)])])
+
+ @_open_only
+ def add_relations(self, relations):
+        '''set many relations using a shortcut similar to the one in add_relation
+
+        relations is a list of 2-tuples, the first element of each
+        2-tuple is the rtype, and the second is a list of (fromeid,
+        toeid) tuples
+ '''
+ edited_entities = {}
+ relations_dict = {}
+ with self.security_enabled(False, False):
+ for rtype, eids in relations:
+ if self.vreg.schema[rtype].inlined:
+ for fromeid, toeid in eids:
+ if fromeid not in edited_entities:
+ entity = self.entity_from_eid(fromeid)
+ edited = EditedEntity(entity)
+ edited_entities[fromeid] = edited
+ else:
+ edited = edited_entities[fromeid]
+ edited.edited_attribute(rtype, toeid)
+ else:
+ relations_dict[rtype] = eids
+ self.repo.glob_add_relations(self, relations_dict)
+ for edited in edited_entities.itervalues():
+ self.repo.glob_update_entity(self, edited)
+
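A hedged usage sketch (the relation names and eids are hypothetical):

.. sourcecode:: python

    cnx.add_relations([('knows',    [(123, 456), (123, 789)]),
                       ('comments', [(456, 789)])])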
+
+ @_open_only
+ def delete_relation(self, fromeid, rtype, toeid):
+ """provide direct access to the repository method to delete a relation.
+
+ This is equivalent to the following rql query:
+
+           DELETE X rtype Y WHERE X eid fromeid, Y eid toeid
+
+        without the read security check and without all the burden of rql execution.
+ You may use this in hooks when you know both eids of the relation you
+ want to delete.
+ """
+ with self.security_enabled(False, False):
+ if self.vreg.schema[rtype].inlined:
+ entity = self.entity_from_eid(fromeid)
+ entity.cw_attr_cache[rtype] = None
+ self.repo.glob_update_entity(self, entity, set((rtype,)))
+ else:
+ self.repo.glob_delete_relation(self, fromeid, rtype, toeid)
+
+ # relations cache handling #################################################
+
+ @_open_only
+ def update_rel_cache_add(self, subject, rtype, object, symmetric=False):
+ self._update_entity_rel_cache_add(subject, rtype, 'subject', object)
+ if symmetric:
+ self._update_entity_rel_cache_add(object, rtype, 'subject', subject)
+ else:
+ self._update_entity_rel_cache_add(object, rtype, 'object', subject)
+
+ @_open_only
+ def update_rel_cache_del(self, subject, rtype, object, symmetric=False):
+ self._update_entity_rel_cache_del(subject, rtype, 'subject', object)
+ if symmetric:
+            self._update_entity_rel_cache_del(object, rtype, 'subject', subject)
+ else:
+ self._update_entity_rel_cache_del(object, rtype, 'object', subject)
+
+ @_open_only
+ def _update_entity_rel_cache_add(self, eid, rtype, role, targeteid):
+ try:
+ entity = self.entity_cache(eid)
+ except KeyError:
+ return
+ rcache = entity.cw_relation_cached(rtype, role)
+ if rcache is not None:
+ rset, entities = rcache
+ rset = rset.copy()
+ entities = list(entities)
+ rset.rows.append([targeteid])
+ if not isinstance(rset.description, list): # else description not set
+ rset.description = list(rset.description)
+ rset.description.append([self.entity_metas(targeteid)['type']])
+ targetentity = self.entity_from_eid(targeteid)
+ if targetentity.cw_rset is None:
+ targetentity.cw_rset = rset
+ targetentity.cw_row = rset.rowcount
+ targetentity.cw_col = 0
+ rset.rowcount += 1
+ entities.append(targetentity)
+ entity._cw_related_cache['%s_%s' % (rtype, role)] = (
+ rset, tuple(entities))
+
+ @_open_only
+ def _update_entity_rel_cache_del(self, eid, rtype, role, targeteid):
+ try:
+ entity = self.entity_cache(eid)
+ except KeyError:
+ return
+ rcache = entity.cw_relation_cached(rtype, role)
+ if rcache is not None:
+ rset, entities = rcache
+ for idx, row in enumerate(rset.rows):
+ if row[0] == targeteid:
+ break
+ else:
+                # this may occur if the cache has been filled by a hook
+ # after the database update
+ self.debug('cache inconsistency for %s %s %s %s', eid, rtype,
+ role, targeteid)
+ return
+ rset = rset.copy()
+ entities = list(entities)
+ del rset.rows[idx]
+ if isinstance(rset.description, list): # else description not set
+ del rset.description[idx]
+ del entities[idx]
+ rset.rowcount -= 1
+ entity._cw_related_cache['%s_%s' % (rtype, role)] = (
+ rset, tuple(entities))
    # Tracking of entities added or removed in the transaction ##################
+ @_open_only
def deleted_in_transaction(self, eid):
"""return True if the entity of the given eid is being deleted in the
current transaction
"""
- return eid in self.data.get('pendingeids', ())
+ return eid in self.transaction_data.get('pendingeids', ())
+ @_open_only
def added_in_transaction(self, eid):
"""return True if the entity of the given eid is being created in the
current transaction
"""
- return eid in self.data.get('neweids', ())
+ return eid in self.transaction_data.get('neweids', ())
# Operation management ####################################################
+ @_open_only
def add_operation(self, operation, index=None):
"""add an operation to be executed at the end of the transaction"""
if index is None:
@@ -553,6 +860,15 @@
# Hooks control ###########################################################
+ @_open_only
+ def allow_all_hooks_but(self, *categories):
+ return _hooks_control(self, HOOKS_ALLOW_ALL, *categories)
+
+ @_open_only
+ def deny_all_hooks_but(self, *categories):
+ return _hooks_control(self, HOOKS_DENY_ALL, *categories)
+
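These two methods are the public face of _hooks_control; a short sketch (the category name, rql and eid are illustrative):

.. sourcecode:: python

    with cnx.deny_all_hooks_but('metadata'):
        # only hooks of the 'metadata' category may fire here
        cnx.execute('SET X name %(n)s WHERE X eid %(x)s',
                    {'n': u'renamed', 'x': 1234})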
+ @_open_only
def disable_hook_categories(self, *categories):
"""disable the given hook categories:
@@ -572,6 +888,7 @@
disabledcats |= changes # changes is small hence faster
return tuple(changes)
+ @_open_only
def enable_hook_categories(self, *categories):
"""enable the given hook categories:
@@ -591,6 +908,7 @@
disabledcats -= changes # changes is small hence faster
return tuple(changes)
+ @_open_only
def is_hook_category_activated(self, category):
"""return a boolean telling if the given category is currently activated
or not
@@ -599,6 +917,7 @@
return category in self.enabled_hook_cats
return category not in self.disabled_hook_cats
+ @_open_only
def is_hook_activated(self, hook):
"""return a boolean telling if the given hook class is currently
activated or not
@@ -606,11 +925,18 @@
return self.is_hook_category_activated(hook.category)
# Security management #####################################################
+
+ @_open_only
+ def security_enabled(self, read=None, write=None):
+ return _security_enabled(self, read=read, write=write)
+
@property
+ @_open_only
def read_security(self):
return self._read_security
@read_security.setter
+ @_open_only
def read_security(self, activated):
oldmode = self._read_security
self._read_security = activated
@@ -636,53 +962,219 @@
# undo support ############################################################
+ @_open_only
def ertype_supports_undo(self, ertype):
return self.undo_actions and ertype not in NO_UNDO_TYPES
+ @_open_only
def transaction_uuid(self, set=True):
- uuid = self.data.get('tx_uuid')
+ uuid = self.transaction_data.get('tx_uuid')
if set and uuid is None:
- raise KeyError
+ self.transaction_data['tx_uuid'] = uuid = uuid4().hex
+ self.repo.system_source.start_undoable_transaction(self, uuid)
return uuid
+ @_open_only
def transaction_inc_action_counter(self):
- num = self.data.setdefault('tx_action_count', 0) + 1
- self.data['tx_action_count'] = num
+ num = self.transaction_data.setdefault('tx_action_count', 0) + 1
+ self.transaction_data['tx_action_count'] = num
return num
# db-api like interface ###################################################
+ @_open_only
def source_defs(self):
return self.repo.source_defs()
+ @deprecated('[3.19] use .entity_metas(eid) instead')
+ @_with_cnx_set
+ @_open_only
def describe(self, eid, asdict=False):
"""return a tuple (type, sourceuri, extid) for the entity with id <eid>"""
- metas = self.repo.type_and_source_from_eid(eid, self)
+ etype, extid, source = self.repo.type_and_source_from_eid(eid, self)
+ metas = {'type': etype, 'source': source, 'extid': extid}
if asdict:
- return dict(zip(('type', 'source', 'extid', 'asource'), metas))
- # XXX :-1 for cw compat, use asdict=True for full information
- return metas[:-1]
+            metas['asource'] = metas['source'] # XXX pre 3.19 client compat
+            return metas
+ return etype, source, extid
+
+ @_with_cnx_set
+ @_open_only
+ def entity_metas(self, eid):
+ """return a tuple (type, sourceuri, extid) for the entity with id <eid>"""
+ etype, extid, source = self.repo.type_and_source_from_eid(eid, self)
+ return {'type': etype, 'source': source, 'extid': extid}
+
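A sketch of the new accessor (the eid and returned values are illustrative):

.. sourcecode:: python

    metas = cnx.entity_metas(1234)
    # e.g. {'type': u'CWUser', 'source': u'system', 'extid': None}
    etype = metas['type']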
+ # core method #############################################################
+
+ @_with_cnx_set
+ @_open_only
+ def execute(self, rql, kwargs=None, eid_key=None, build_descr=True):
+ """db-api like method directly linked to the querier execute method.
+
+ See :meth:`cubicweb.dbapi.Cursor.execute` documentation.
+ """
+ self._session_timestamp.touch()
+ if eid_key is not None:
+ warn('[3.8] eid_key is deprecated, you can safely remove this argument',
+ DeprecationWarning, stacklevel=2)
+ rset = self._execute(self, rql, kwargs, build_descr)
+ rset.req = self
+ self._session_timestamp.touch()
+ return rset
+
+ @_open_only
+ def rollback(self, free_cnxset=True, reset_pool=None):
+ """rollback the current transaction"""
+ if reset_pool is not None:
+ warn('[3.13] use free_cnxset argument instead for reset_pool',
+ DeprecationWarning, stacklevel=2)
+ free_cnxset = reset_pool
+ if self._cnxset_count != 0:
+ # we are inside ensure_cnx_set, don't lose it
+ free_cnxset = False
+ cnxset = self.cnxset
+ if cnxset is None:
+ self.clear()
+ self._session_timestamp.touch()
+ self.debug('rollback transaction %s done (no db activity)', self.connectionid)
+ return
+ try:
+ # by default, operations are executed with security turned off
+ with self.security_enabled(False, False):
+ while self.pending_operations:
+ try:
+ operation = self.pending_operations.pop(0)
+ operation.handle_event('rollback_event')
+ except BaseException:
+ self.critical('rollback error', exc_info=sys.exc_info())
+ continue
+ cnxset.rollback()
+ self.debug('rollback for transaction %s done', self.connectionid)
+ finally:
+ self._session_timestamp.touch()
+ if free_cnxset:
+ self._free_cnxset(ignoremode=True)
+ self.clear()
- def source_from_eid(self, eid):
- """return the source where the entity with id <eid> is located"""
- return self.repo.source_from_eid(eid, self)
+ @_open_only
+ def commit(self, free_cnxset=True, reset_pool=None):
+ """commit the current session's transaction"""
+ if reset_pool is not None:
+ warn('[3.13] use free_cnxset argument instead for reset_pool',
+ DeprecationWarning, stacklevel=2)
+ free_cnxset = reset_pool
+ if self.cnxset is None:
+ assert not self.pending_operations
+ self.clear()
+ self._session_timestamp.touch()
+ self.debug('commit transaction %s done (no db activity)', self.connectionid)
+ return
+ if self._cnxset_count != 0:
+ # we are inside ensure_cnx_set, don't lose it
+ free_cnxset = False
+ cstate = self.commit_state
+ if cstate == 'uncommitable':
+ raise QueryError('transaction must be rolled back')
+ if cstate is not None:
+ return
+ # on rollback, an operation should have the following state
+ # information:
+ # - processed by the precommit/commit event or not
+ # - if processed, is it the failed operation
+ debug = server.DEBUG & server.DBG_OPS
+ try:
+ # by default, operations are executed with security turned off
+ with self.security_enabled(False, False):
+ processed = []
+ self.commit_state = 'precommit'
+ if debug:
+ print self.commit_state, '*' * 20
+ try:
+ while self.pending_operations:
+ operation = self.pending_operations.pop(0)
+ operation.processed = 'precommit'
+ processed.append(operation)
+ if debug:
+ print operation
+ operation.handle_event('precommit_event')
+ self.pending_operations[:] = processed
+ self.debug('precommit transaction %s done', self.connectionid)
+ except BaseException:
+ # if error on [pre]commit:
+ #
+ # * set .failed = True on the operation causing the failure
+ # * call revert<event>_event on processed operations
+ # * call rollback_event on *all* operations
+ #
+ # that seems more natural than not calling rollback_event
+                # for processed operations, and allows generic rollback
+                # instead of having to implement rollback, revertprecommit
+                # and revertcommit; that will be enough in most cases.
+ operation.failed = True
+ if debug:
+ print self.commit_state, '*' * 20
+ for operation in reversed(processed):
+ if debug:
+ print operation
+ try:
+ operation.handle_event('revertprecommit_event')
+ except BaseException:
+ self.critical('error while reverting precommit',
+ exc_info=True)
+ # XXX use slice notation since self.pending_operations is a
+ # read-only property.
+ self.pending_operations[:] = processed + self.pending_operations
+ self.rollback(free_cnxset)
+ raise
+ self.cnxset.commit()
+ self.commit_state = 'postcommit'
+ if debug:
+ print self.commit_state, '*' * 20
+ while self.pending_operations:
+ operation = self.pending_operations.pop(0)
+ if debug:
+ print operation
+ operation.processed = 'postcommit'
+ try:
+ operation.handle_event('postcommit_event')
+ except BaseException:
+ self.critical('error while postcommit',
+ exc_info=sys.exc_info())
+ self.debug('postcommit transaction %s done', self.connectionid)
+ return self.transaction_uuid(set=False)
+ finally:
+ self._session_timestamp.touch()
+ if free_cnxset:
+ self._free_cnxset(ignoremode=True)
+ self.clear()
# resource accessors ######################################################
+ @_with_cnx_set
+ @_open_only
+ def call_service(self, regid, **kwargs):
+ self.debug('calling service %s', regid)
+ service = self.vreg['services'].select(regid, self, **kwargs)
+ return service.call(**kwargs)
+
+ @_with_cnx_set
+ @_open_only
def system_sql(self, sql, args=None, rollback_on_failure=True):
"""return a sql cursor on the system database"""
if sql.split(None, 1)[0].upper() != 'SELECT':
self.mode = 'write'
- source = self.cnxset.source('system')
+ source = self.repo.system_source
try:
return source.doexec(self, sql, args, rollback=rollback_on_failure)
except (source.OperationalError, source.InterfaceError):
if not rollback_on_failure:
raise
source.warning("trying to reconnect")
- self.cnxset.reconnect(source)
+ self.cnxset.reconnect()
return source.doexec(self, sql, args, rollback=rollback_on_failure)
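A minimal sketch (the table and type names are only illustrative):

.. sourcecode:: python

    cu = cnx.system_sql('SELECT COUNT(*) FROM entities WHERE type=%(t)s',
                        {'t': 'CWUser'})
    count = cu.fetchone()[0]
    # any non-SELECT statement flips the connection mode to 'write'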
+ @_open_only
def rtype_eids_rdef(self, rtype, eidfrom, eidto):
# use type_and_source_from_eid instead of type_from_eid for optimization
# (avoid two extra methods call)
@@ -691,31 +1183,49 @@
return self.vreg.schema.rschema(rtype).rdefs[(subjtype, objtype)]
-def tx_attr(attr_name, writable=False):
- """return a property to forward attribute access to transaction.
+def cnx_attr(attr_name, writable=False):
+ """return a property to forward attribute access to connection.
This is to be used by session"""
args = {}
- def attr_from_tx(session):
- return getattr(session._tx, attr_name)
- args['fget'] = attr_from_tx
+ @deprecated('[3.19] use a Connection object instead')
+ def attr_from_cnx(session):
+ return getattr(session._cnx, attr_name)
+ args['fget'] = attr_from_cnx
if writable:
+ @deprecated('[3.19] use a Connection object instead')
def write_attr(session, value):
- return setattr(session._tx, attr_name, value)
+ return setattr(session._cnx, attr_name, value)
args['fset'] = write_attr
return property(**args)
-def tx_meth(meth_name):
- """return a function forwarding calls to transaction.
+def cnx_meth(meth_name):
+ """return a function forwarding calls to connection.
This is to be used by session"""
- def meth_from_tx(session, *args, **kwargs):
- return getattr(session._tx, meth_name)(*args, **kwargs)
- meth_from_tx.__doc__ = getattr(Transaction, meth_name).__doc__
- return meth_from_tx
+ @deprecated('[3.19] use a Connection object instead')
+ def meth_from_cnx(session, *args, **kwargs):
+ result = getattr(session._cnx, meth_name)(*args, **kwargs)
+ if getattr(result, '_cw', None) is not None:
+ result._cw = session
+ return result
+ meth_from_cnx.__doc__ = getattr(Connection, meth_name).__doc__
+ return meth_from_cnx
+
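A hedged sketch of how Session is expected to use these forwarding helpers (the forwarded member names below are illustrative):

.. sourcecode:: python

    class Session(RequestSessionBase):
        # deprecated forwards to the thread's default Connection
        hooks_mode = cnx_attr('hooks_mode')
        commit = cnx_meth('commit')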
+class Timestamp(object):
+
+ def __init__(self):
+ self.value = time()
+
+ def touch(self):
+ self.value = time()
+
+ def __float__(self):
+ return float(self.value)
-class Session(RequestSessionBase):
+class Session(RequestSessionBase): # XXX repoapi: stop being a
+ # RequestSessionBase at some point
"""Repository user session
This ties all together:
@@ -733,23 +1243,23 @@
:attr:`data` is a dictionary containing shared data, used to communicate
extra information between the client and the repository
- :attr:`_txs` is a dictionary of :class:`TransactionData` instance, one
- for each running transaction. The key is the transaction id. By default
- the transaction id is the thread name but it can be otherwise (per dbapi
+    :attr:`_cnxs` is a dictionary of :class:`Connection` instances, one
+ for each running connection. The key is the connection id. By default
+ the connection id is the thread name but it can be otherwise (per dbapi
cursor for instance, or per thread name *from another process*).
- :attr:`__threaddata` is a thread local storage whose `tx` attribute
- refers to the proper instance of :class:`Transaction` according to the
- transaction.
+ :attr:`__threaddata` is a thread local storage whose `cnx` attribute
+ refers to the proper instance of :class:`Connection` according to the
+ connection.
- You should not have to use neither :attr:`_tx` nor :attr:`__threaddata`,
- simply access transaction data transparently through the :attr:`_tx`
+    You should not have to use either :attr:`_cnx` or :attr:`__threaddata`;
+ simply access connection data transparently through the :attr:`_cnx`
property. Also, you usually don't have to access it directly since current
- transaction's data may be accessed/modified through properties / methods:
+ connection's data may be accessed/modified through properties / methods:
- :attr:`transaction_data`, similarly to :attr:`data`, is a dictionary
+ :attr:`connection_data`, similarly to :attr:`data`, is a dictionary
containing some shared data that should be cleared at the end of the
- transaction. Hooks and operations may put arbitrary data in there, and
+ connection. Hooks and operations may put arbitrary data in there, and
this may also be used as a communication channel between the client and
the repository.
@@ -758,7 +1268,7 @@
.. automethod:: cubicweb.server.session.Session.added_in_transaction
.. automethod:: cubicweb.server.session.Session.deleted_in_transaction
- Transaction state information:
+ Connection state information:
:attr:`running_dbapi_query`, boolean flag telling if the executing query
is coming from a dbapi connection or is a query from within the repository
@@ -831,10 +1341,10 @@
def __init__(self, user, repo, cnxprops=None, _id=None):
super(Session, self).__init__(repo.vreg)
- self.id = _id or make_uid(unormalize(user.login).encode('UTF8'))
- self.user = user
+ self.sessionid = _id or make_uid(unormalize(user.login).encode('UTF8'))
+ self.user = user # XXX repoapi: deprecated and store only a login.
self.repo = repo
- self.timestamp = time()
+ self._timestamp = Timestamp()
self.default_mode = 'read'
# short cut to querier .execute method
self._execute = repo.querier.execute
@@ -844,53 +1354,96 @@
# i18n initialization
self.set_language(user.prefered_language())
### internals
- # Transaction of this section
- self._txs = {}
+        # Connections of this session
+        self._cnxs = {} # XXX repoapi: remove this when nobody uses the session
+ # as a Connection
# Data local to the thread
- self.__threaddata = threading.local()
+ self.__threaddata = threading.local() # XXX repoapi: remove this when
+                                               # nobody uses the session as a Connection
self._cnxset_tracker = CnxSetTracker()
self._closed = False
self._lock = threading.RLock()
def __unicode__(self):
return '<session %s (%s 0x%x)>' % (
- unicode(self.user.login), self.id, id(self))
+ unicode(self.user.login), self.sessionid, id(self))
+ @property
+ def timestamp(self):
+ return float(self._timestamp)
+
+ @property
+ @deprecated('[3.19] session.id is deprecated, use session.sessionid')
+ def id(self):
+ return self.sessionid
- def get_tx(self, txid):
- """return the <txid> transaction attached to this session
+ @property
+ def login(self):
+ return self.user.login
+
+ def new_cnx(self):
+ """Return a new Connection object linked to the session
- Transaction is created if necessary"""
- with self._lock: # no transaction exist with the same id
+ The returned Connection will *not* be managed by the Session.
+ """
+ return Connection(self)
+
+ def _get_cnx(self, cnxid):
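A sketch of the new-style, explicitly managed connection (the rql and values are illustrative):

.. sourcecode:: python

    cnx = session.new_cnx()
    with cnx:
        cnx.execute('SET X name %(n)s WHERE X is CWGroup, X name "guests"',
                    {'n': u'visitors'})
        cnx.commit()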
+ """return the <cnxid> connection attached to this session
+
+ Connection is created if necessary"""
+ with self._lock: # no connection exist with the same id
try:
if self.closed:
- raise SessionClosedError('try to access connections set on a closed session %s' % self.id)
- tx = self._txs[txid]
+ raise SessionClosedError('try to access connections set on'
+ ' a closed session %s' % self.id)
+ cnx = self._cnxs[cnxid]
+ assert cnx._session_handled
except KeyError:
- rewriter = RQLRewriter(self)
- tx = Transaction(txid, self, rewriter)
- self._txs[txid] = tx
- return tx
+ cnx = Connection(self, cnxid=cnxid, session_handled=True)
+ self._cnxs[cnxid] = cnx
+ cnx.__enter__()
+ return cnx
- def set_tx(self, txid=None):
- """set the default transaction of the current thread to <txid>
+ def _close_cnx(self, cnx):
+ """Close a Connection related to a session"""
+ assert cnx._session_handled
+ cnx.__exit__()
+ self._cnxs.pop(cnx.connectionid, None)
+ try:
+ if self.__threaddata.cnx is cnx:
+ del self.__threaddata.cnx
+ except AttributeError:
+ pass
- Transaction is created if necessary"""
- if txid is None:
- txid = threading.currentThread().getName()
- self.__threaddata.tx = self.get_tx(txid)
+ def set_cnx(self, cnxid=None):
+ # XXX repoapi: remove this when nobody uses the session as a Connection
+ """set the default connection of the current thread to <cnxid>
+
+ Connection is created if necessary"""
+ if cnxid is None:
+ cnxid = threading.currentThread().getName()
+ cnx = self._get_cnx(cnxid)
+ # New-style connections should not be accessed through the session.
+ assert cnx._session_handled
+ self.__threaddata.cnx = cnx
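
Since the default cnxid is the thread name, two threads calling `set_cnx()` without an argument end up on distinct Connections; a sketch using the private `_cnx` property documented above:

    import threading

    session.set_cnx()  # no cnxid given: keyed on the current thread's name
    assert session._cnx.connectionid == threading.currentThread().getName()
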
@property
- def _tx(self):
- """default transaction for current session in current thread"""
+ def _cnx(self):
+ """default connection for current session in current thread"""
try:
- return self.__threaddata.tx
+ return self.__threaddata.cnx
except AttributeError:
- self.set_tx()
- return self.__threaddata.tx
+ self.set_cnx()
+ return self.__threaddata.cnx
+ @deprecated('[3.19] use a Connection object instead')
def get_option_value(self, option, foreid=None):
- return self.repo.get_option_value(option, foreid)
+ if foreid is not None:
+ warn('[3.19] foreid argument is deprecated', DeprecationWarning,
+ stacklevel=2)
+ return self.repo.get_option_value(option)
+ @deprecated('[3.19] use a Connection object instead')
def transaction(self, free_cnxset=True):
"""return context manager to enter a transaction for the session: when
exiting the `with` block on exception, call `session.rollback()`, else
@@ -901,184 +1454,55 @@
"""
return transaction(self, free_cnxset)
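
Typical (now deprecated) usage of this context manager, as a sketch:

    with session.transaction():
        session.execute('SET X in_group G WHERE X login %(l)s, G name "users"',
                        {'l': u'babar'})
    # normal exit commits; an exception inside the block triggers rollback()
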
-
- @deprecated('[3.17] do not use hijack_user. create new Session object')
- def hijack_user(self, user):
- """return a fake request/session using specified user"""
- session = Session(user, self.repo)
- tx = session._tx
- tx.cnxset = self.cnxset
- # share pending_operations, else operation added in the hi-jacked
- # session such as SendMailOp won't ever be processed
- tx.pending_operations = self.pending_operations
- # everything in tx.data should be copied back but the entity
- # type cache we don't want to avoid security pb
- tx.data = self._tx.data.copy()
- tx.data.pop('ecache', None)
- return session
-
- def add_relation(self, fromeid, rtype, toeid):
- """provide direct access to the repository method to add a relation.
-
- This is equivalent to the following rql query:
-
- SET X rtype Y WHERE X eid fromeid, T eid toeid
-
- without read security check but also all the burden of rql execution.
- You may use this in hooks when you know both eids of the relation you
- want to add.
- """
- self.add_relations([(rtype, [(fromeid, toeid)])])
-
- def add_relations(self, relations):
- '''set many relation using a shortcut similar to the one in add_relation
-
- relations is a list of 2-uples, the first element of each
- 2-uple is the rtype, and the second is a list of (fromeid,
- toeid) tuples
- '''
- edited_entities = {}
- relations_dict = {}
- with self.security_enabled(False, False):
- for rtype, eids in relations:
- if self.vreg.schema[rtype].inlined:
- for fromeid, toeid in eids:
- if fromeid not in edited_entities:
- entity = self.entity_from_eid(fromeid)
- edited = EditedEntity(entity)
- edited_entities[fromeid] = edited
- else:
- edited = edited_entities[fromeid]
- edited.edited_attribute(rtype, toeid)
- else:
- relations_dict[rtype] = eids
- self.repo.glob_add_relations(self, relations_dict)
- for edited in edited_entities.itervalues():
- self.repo.glob_update_entity(self, edited)
-
-
- def delete_relation(self, fromeid, rtype, toeid):
- """provide direct access to the repository method to delete a relation.
-
- This is equivalent to the following rql query:
-
- DELETE X rtype Y WHERE X eid fromeid, T eid toeid
-
- without read security check but also all the burden of rql execution.
- You may use this in hooks when you know both eids of the relation you
- want to delete.
- """
- with self.security_enabled(False, False):
- if self.vreg.schema[rtype].inlined:
- entity = self.entity_from_eid(fromeid)
- entity.cw_attr_cache[rtype] = None
- self.repo.glob_update_entity(self, entity, set((rtype,)))
- else:
- self.repo.glob_delete_relation(self, fromeid, rtype, toeid)
+ add_relation = cnx_meth('add_relation')
+ add_relations = cnx_meth('add_relations')
+ delete_relation = cnx_meth('delete_relation')
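
Most of the removed bodies survive on the Connection class, and the Session now merely forwards to it. The `cnx_meth` helper is defined earlier in this module; an illustrative reimplementation of the forwarding idea (not the actual helper from this changeset):

    def cnx_meth(name):
        """return a method forwarding calls to the thread's default Connection"""
        def proxy(session, *args, **kwargs):
            return getattr(session._cnx, name)(*args, **kwargs)
        proxy.__name__ = name
        return proxy
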
# relations cache handling #################################################
- def update_rel_cache_add(self, subject, rtype, object, symmetric=False):
- self._update_entity_rel_cache_add(subject, rtype, 'subject', object)
- if symmetric:
- self._update_entity_rel_cache_add(object, rtype, 'subject', subject)
- else:
- self._update_entity_rel_cache_add(object, rtype, 'object', subject)
-
- def update_rel_cache_del(self, subject, rtype, object, symmetric=False):
- self._update_entity_rel_cache_del(subject, rtype, 'subject', object)
- if symmetric:
- self._update_entity_rel_cache_del(object, rtype, 'object', object)
- else:
- self._update_entity_rel_cache_del(object, rtype, 'object', subject)
-
- def _update_entity_rel_cache_add(self, eid, rtype, role, targeteid):
- try:
- entity = self.entity_cache(eid)
- except KeyError:
- return
- rcache = entity.cw_relation_cached(rtype, role)
- if rcache is not None:
- rset, entities = rcache
- rset = rset.copy()
- entities = list(entities)
- rset.rows.append([targeteid])
- if not isinstance(rset.description, list): # else description not set
- rset.description = list(rset.description)
- rset.description.append([self.describe(targeteid)[0]])
- targetentity = self.entity_from_eid(targeteid)
- if targetentity.cw_rset is None:
- targetentity.cw_rset = rset
- targetentity.cw_row = rset.rowcount
- targetentity.cw_col = 0
- rset.rowcount += 1
- entities.append(targetentity)
- entity._cw_related_cache['%s_%s' % (rtype, role)] = (
- rset, tuple(entities))
-
- def _update_entity_rel_cache_del(self, eid, rtype, role, targeteid):
- try:
- entity = self.entity_cache(eid)
- except KeyError:
- return
- rcache = entity.cw_relation_cached(rtype, role)
- if rcache is not None:
- rset, entities = rcache
- for idx, row in enumerate(rset.rows):
- if row[0] == targeteid:
- break
- else:
- # this may occurs if the cache has been filed by a hook
- # after the database update
- self.debug('cache inconsistency for %s %s %s %s', eid, rtype,
- role, targeteid)
- return
- rset = rset.copy()
- entities = list(entities)
- del rset.rows[idx]
- if isinstance(rset.description, list): # else description not set
- del rset.description[idx]
- del entities[idx]
- rset.rowcount -= 1
- entity._cw_related_cache['%s_%s' % (rtype, role)] = (
- rset, tuple(entities))
+ update_rel_cache_add = cnx_meth('update_rel_cache_add')
+ update_rel_cache_del = cnx_meth('update_rel_cache_del')
# resource accessors ######################################################
- system_sql = tx_meth('system_sql')
- deleted_in_transaction = tx_meth('deleted_in_transaction')
- added_in_transaction = tx_meth('added_in_transaction')
- rtype_eids_rdef = tx_meth('rtype_eids_rdef')
+ system_sql = cnx_meth('system_sql')
+ deleted_in_transaction = cnx_meth('deleted_in_transaction')
+ added_in_transaction = cnx_meth('added_in_transaction')
+ rtype_eids_rdef = cnx_meth('rtype_eids_rdef')
# security control #########################################################
-
+ @deprecated('[3.19] use a Connection object instead')
def security_enabled(self, read=None, write=None):
- return _security_enabled(self, read=read, write=write)
+ return _session_security_enabled(self, read=read, write=write)
- read_security = tx_attr('read_security', writable=True)
- write_security = tx_attr('write_security', writable=True)
- running_dbapi_query = tx_attr('running_dbapi_query')
+ read_security = cnx_attr('read_security', writable=True)
+ write_security = cnx_attr('write_security', writable=True)
+ running_dbapi_query = cnx_attr('running_dbapi_query')
# hooks activation control #################################################
# all hooks should be activated during normal execution
+
+ @deprecated('[3.19] use a Connection object instead')
def allow_all_hooks_but(self, *categories):
- return _hooks_control(self, HOOKS_ALLOW_ALL, *categories)
+ return _session_hooks_control(self, HOOKS_ALLOW_ALL, *categories)
+ @deprecated('[3.19] use a Connection object instead')
def deny_all_hooks_but(self, *categories):
- return _hooks_control(self, HOOKS_DENY_ALL, *categories)
-
- hooks_mode = tx_attr('hooks_mode')
+ return _session_hooks_control(self, HOOKS_DENY_ALL, *categories)
- disabled_hook_categories = tx_attr('disabled_hook_cats')
- enabled_hook_categories = tx_attr('enabled_hook_cats')
- disable_hook_categories = tx_meth('disable_hook_categories')
- enable_hook_categories = tx_meth('enable_hook_categories')
- is_hook_category_activated = tx_meth('is_hook_category_activated')
- is_hook_activated = tx_meth('is_hook_activated')
+ hooks_mode = cnx_attr('hooks_mode')
+
+ disabled_hook_categories = cnx_attr('disabled_hook_cats')
+ enabled_hook_categories = cnx_attr('enabled_hook_cats')
+ disable_hook_categories = cnx_meth('disable_hook_categories')
+ enable_hook_categories = cnx_meth('enable_hook_categories')
+ is_hook_category_activated = cnx_meth('is_hook_category_activated')
+ is_hook_activated = cnx_meth('is_hook_activated')
# connection management ###################################################
+ @deprecated('[3.19] use a Connection object instead')
def keep_cnxset_mode(self, mode):
"""set `mode`, e.g. how the session will keep its connections set:
@@ -1099,16 +1523,17 @@
else: # mode == 'write'
self.default_mode = 'read'
- mode = tx_attr('mode', writable=True)
- commit_state = tx_attr('commit_state', writable=True)
+ mode = cnx_attr('mode', writable=True)
+ commit_state = cnx_attr('commit_state', writable=True)
@property
+ @deprecated('[3.19] use a Connection object instead')
def cnxset(self):
"""connections set, set according to transaction mode for each query"""
if self._closed:
self.free_cnxset(True)
raise SessionClosedError('try to access connections set on a closed session %s' % self.id)
- return self._tx.cnxset
+ return self._cnx.cnxset
def set_cnxset(self):
"""the session need a connections set to execute some queries"""
@@ -1116,20 +1541,27 @@
if self._closed:
self.free_cnxset(True)
raise SessionClosedError('try to set connections set on a closed session %s' % self.id)
- return self._tx.set_cnxset()
- free_cnxset = tx_meth('free_cnxset')
+ return self._cnx.set_cnxset()
+ free_cnxset = cnx_meth('free_cnxset')
+ ensure_cnx_set = cnx_attr('ensure_cnx_set')
def _touch(self):
"""update latest session usage timestamp and reset mode to read"""
- self.timestamp = time()
- self.local_perm_cache.clear() # XXX simply move in tx.data, no?
+ self._timestamp.touch()
+
+ local_perm_cache = cnx_attr('local_perm_cache')
+ @local_perm_cache.setter
+ def local_perm_cache(self, value):
+ # the base class assigns an empty dict :-(
+ assert value == {}
+ pass
# shared data handling ###################################################
def get_shared_data(self, key, default=None, pop=False, txdata=False):
"""return value associated to `key` in session data"""
if txdata:
- data = self._tx.data
+ return self._cnx.get_shared_data(key, default, pop, txdata=True)
else:
data = self.data
if pop:
@@ -1140,47 +1572,39 @@
def set_shared_data(self, key, value, txdata=False):
"""set value associated to `key` in session data"""
if txdata:
- self._tx.data[key] = value
+ return self._cnx.set_shared_data(key, value, txdata=True)
else:
self.data[key] = value
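
The two storage scopes in a sketch (key names illustrative):

    session.set_shared_data('greeting', u'hello')        # session-wide
    session.set_shared_data('counter', 42, txdata=True)  # per connection/transaction
    session.get_shared_data('greeting')                        # -> u'hello'
    session.get_shared_data('counter', pop=True, txdata=True)  # -> 42, then removed
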
# server-side service call #################################################
- def call_service(self, regid, async=False, **kwargs):
- return self.repo._call_service_with_session(self, regid, async,
- **kwargs)
-
+ call_service = cnx_meth('call_service')
# request interface #######################################################
@property
+ @deprecated('[3.19] use a Connection object instead')
def cursor(self):
"""return a rql cursor"""
return self
- set_entity_cache = tx_meth('set_entity_cache')
- entity_cache = tx_meth('entity_cache')
- cache_entities = tx_meth('cached_entities')
- drop_entity_cache = tx_meth('drop_entity_cache')
+ set_entity_cache = cnx_meth('set_entity_cache')
+ entity_cache = cnx_meth('entity_cache')
+ cache_entities = cnx_meth('cached_entities')
+ drop_entity_cache = cnx_meth('drop_entity_cache')
- def from_controller(self):
- """return the id (string) of the controller issuing the request (no
- sense here, always return 'view')
- """
- return 'view'
-
- source_defs = tx_meth('source_defs')
- describe = tx_meth('describe')
- source_from_eid = tx_meth('source_from_eid')
+ source_defs = cnx_meth('source_defs')
+ entity_metas = cnx_meth('entity_metas')
+ describe = cnx_meth('describe') # XXX deprecated in 3.19
- def execute(self, rql, kwargs=None, build_descr=True):
+ @deprecated('[3.19] use a Connection object instead')
+ def execute(self, *args, **kwargs):
"""db-api like method directly linked to the querier execute method.
See :meth:`cubicweb.dbapi.Cursor.execute` documentation.
"""
- self.timestamp = time() # update timestamp
- rset = self._execute(self, rql, kwargs, build_descr)
+ rset = self._cnx.execute(*args, **kwargs)
rset.req = self
return rset
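
After the delegation the result set still advertises the session as its request object; a sketch:

    rset = session.execute('Any X WHERE X is CWGroup')
    assert rset.req is session   # kept for backward compatibility
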
@@ -1190,150 +1614,39 @@
by _touch
"""
try:
- tx = self.__threaddata.tx
+ cnx = self.__threaddata.cnx
except AttributeError:
pass
else:
if free_cnxset:
- self.free_cnxset()
- if tx.ctx_count == 0:
- self._clear_thread_storage(tx)
+ cnx._free_cnxset()
+ if cnx.ctx_count == 0:
+ self._close_cnx(cnx)
else:
- self._clear_tx_storage(tx)
+ cnx.clear()
else:
- self._clear_tx_storage(tx)
+ cnx.clear()
- def _clear_thread_storage(self, tx):
- self._txs.pop(tx.transactionid, None)
- try:
- del self.__threaddata.tx
- except AttributeError:
- pass
-
- def _clear_tx_storage(self, tx):
- tx.clear()
- tx._rewriter = RQLRewriter(self)
-
+ @deprecated('[3.19] use a Connection object instead')
def commit(self, free_cnxset=True, reset_pool=None):
"""commit the current session's transaction"""
- if reset_pool is not None:
- warn('[3.13] use free_cnxset argument instead for reset_pool',
- DeprecationWarning, stacklevel=2)
- free_cnxset = reset_pool
- if self.cnxset is None:
- assert not self.pending_operations
- self._clear_thread_data()
- self._touch()
- self.debug('commit session %s done (no db activity)', self.id)
- return
- cstate = self.commit_state
+ cstate = self._cnx.commit_state
if cstate == 'uncommitable':
raise QueryError('transaction must be rolled back')
- if cstate is not None:
- return
- # on rollback, an operation should have the following state
- # information:
- # - processed by the precommit/commit event or not
- # - if processed, is it the failed operation
- debug = server.DEBUG & server.DBG_OPS
try:
- # by default, operations are executed with security turned off
- with self.security_enabled(False, False):
- processed = []
- self.commit_state = 'precommit'
- if debug:
- print self.commit_state, '*' * 20
- try:
- while self.pending_operations:
- operation = self.pending_operations.pop(0)
- operation.processed = 'precommit'
- processed.append(operation)
- if debug:
- print operation
- operation.handle_event('precommit_event')
- self.pending_operations[:] = processed
- self.debug('precommit session %s done', self.id)
- except BaseException:
- # save exception context, it may be clutered below by
- # exception in revert_* event
- exc_info = sys.exc_info()
- # if error on [pre]commit:
- #
- # * set .failed = True on the operation causing the failure
- # * call revert<event>_event on processed operations
- # * call rollback_event on *all* operations
- #
- # that seems more natural than not calling rollback_event
- # for processed operations, and allow generic rollback
- # instead of having to implements rollback, revertprecommit
- # and revertcommit, that will be enough in mont case.
- operation.failed = True
- if debug:
- print self.commit_state, '*' * 20
- for operation in reversed(processed):
- if debug:
- print operation
- try:
- operation.handle_event('revertprecommit_event')
- except BaseException:
- self.critical('error while reverting precommit',
- exc_info=True)
- # XXX use slice notation since self.pending_operations is a
- # read-only property.
- self.pending_operations[:] = processed + self.pending_operations
- self.rollback(free_cnxset)
- raise exc_info[0], exc_info[1], exc_info[2]
- self.cnxset.commit()
- self.commit_state = 'postcommit'
- if debug:
- print self.commit_state, '*' * 20
- while self.pending_operations:
- operation = self.pending_operations.pop(0)
- if debug:
- print operation
- operation.processed = 'postcommit'
- try:
- operation.handle_event('postcommit_event')
- except BaseException:
- self.critical('error while postcommit',
- exc_info=sys.exc_info())
- self.debug('postcommit session %s done', self.id)
- return self.transaction_uuid(set=False)
+ return self._cnx.commit(free_cnxset, reset_pool)
finally:
- self._touch()
- if free_cnxset:
- self.free_cnxset(ignoremode=True)
self._clear_thread_data(free_cnxset)
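
The session-level commit is now only a guard plus delegation; a sketch of the failure path it preserves:

    from cubicweb import QueryError

    try:
        session.commit()     # forwards to session._cnx.commit()
    except QueryError:
        # commit_state was 'uncommitable': only a rollback clears it
        session.rollback()
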
- def rollback(self, free_cnxset=True, reset_pool=None):
+ @deprecated('[3.19] use a Connection object instead')
+ def rollback(self, *args, **kwargs):
"""rollback the current session's transaction"""
- if reset_pool is not None:
- warn('[3.13] use free_cnxset argument instead for reset_pool',
- DeprecationWarning, stacklevel=2)
- free_cnxset = reset_pool
- # don't use self.cnxset, rollback may be called with _closed == True
- cnxset = self._tx.cnxset
- if cnxset is None:
- self._clear_thread_data()
- self._touch()
- self.debug('rollback session %s done (no db activity)', self.id)
- return
+ return self._rollback(*args, **kwargs)
+
+ def _rollback(self, free_cnxset=True, **kwargs):
try:
- # by default, operations are executed with security turned off
- with self.security_enabled(False, False):
- while self.pending_operations:
- try:
- operation = self.pending_operations.pop(0)
- operation.handle_event('rollback_event')
- except BaseException:
- self.critical('rollback error', exc_info=sys.exc_info())
- continue
- cnxset.rollback()
- self.debug('rollback for session %s done', self.id)
+ return self._cnx.rollback(free_cnxset, **kwargs)
finally:
- self._touch()
- if free_cnxset:
- self.free_cnxset(ignoremode=True)
self._clear_thread_data(free_cnxset)
def close(self):
@@ -1342,63 +1655,64 @@
with self._lock:
self._closed = True
tracker.close()
- self.rollback()
- self.debug('waiting for open transaction of session: %s', self)
+ if self._cnx._session_handled:
+ self._rollback()
+ self.debug('waiting for open connection of session: %s', self)
timeout = 10
pendings = tracker.wait(timeout)
if pendings:
- self.error('%i transaction still alive after 10 seconds, will close '
+ self.error('%i connections still alive after 10 seconds, will close '
'session anyway', len(pendings))
- for txid in pendings:
- tx = self._txs.get(txid)
- if tx is not None:
- # drop tx.cnxset
+ for cnxid in pendings:
+ cnx = self._cnxs.get(cnxid)
+ if cnx is not None:
+ # drop cnx.cnxset
with tracker:
try:
- cnxset = tx.cnxset
+ cnxset = cnx.cnxset
if cnxset is None:
continue
- tx.cnxset = None
+ cnx.cnxset = None
except RuntimeError:
msg = 'issue while force free of cnxset in %s'
- self.error(msg, tx)
+ self.error(msg, cnx)
# cnxset.reconnect() does a hard reset of the cnxset,
# forcing it to be freed
cnxset.reconnect()
self.repo._free_cnxset(cnxset)
del self.__threaddata
- del self._txs
+ del self._cnxs
@property
def closed(self):
- return not hasattr(self, '_txs')
+ return not hasattr(self, '_cnxs')
# transaction data/operations management ##################################
- transaction_data = tx_attr('data')
- pending_operations = tx_attr('pending_operations')
- pruned_hooks_cache = tx_attr('pruned_hooks_cache')
- add_operation = tx_meth('add_operation')
+ transaction_data = cnx_attr('transaction_data')
+ pending_operations = cnx_attr('pending_operations')
+ pruned_hooks_cache = cnx_attr('pruned_hooks_cache')
+ add_operation = cnx_meth('add_operation')
# undo support ############################################################
- ertype_supports_undo = tx_meth('ertype_supports_undo')
- transaction_inc_action_counter = tx_meth('transaction_inc_action_counter')
-
- def transaction_uuid(self, set=True):
- try:
- return self._tx.transaction_uuid(set=set)
- except KeyError:
- self._tx.data['tx_uuid'] = uuid = uuid4().hex
- self.repo.system_source.start_undoable_transaction(self, uuid)
- return uuid
+ ertype_supports_undo = cnx_meth('ertype_supports_undo')
+ transaction_inc_action_counter = cnx_meth('transaction_inc_action_counter')
+ transaction_uuid = cnx_meth('transaction_uuid')
# querier helpers #########################################################
- rql_rewriter = tx_attr('_rewriter')
+ rql_rewriter = cnx_attr('_rewriter')
# deprecated ###############################################################
+ @property
+ def anonymous_session(self):
+ # XXX for now, anonymous-user is a web side option.
+ # It will only be present inside an all-in-one instance.
+ # There is a plan to move it down to the global config.
+ return self.user.login == self.repo.config.get('anonymous-user')
+
@deprecated('[3.13] use getattr(session.rtype_eids_rdef(rtype, eidfrom, eidto), prop)')
def schema_rproperty(self, rtype, eidfrom, eidto, rprop):
return getattr(self.rtype_eids_rdef(rtype, eidfrom, eidto), rprop)
@@ -1435,10 +1749,6 @@
super(InternalSession, self).__init__(InternalManager(), repo, cnxprops,
_id='internal')
self.user._cw = self # XXX remove when "vreg = user._cw.vreg" hack in entity.py is gone
- if not safe:
- self.disable_hook_categories('integrity')
- self.disable_hook_categories('security')
- self._tx.ctx_count += 1
def __enter__(self):
return self
@@ -1452,7 +1762,7 @@
if self.repo.shutting_down:
self.free_cnxset(True)
raise ShuttingDown('repository is shutting down')
- return self._tx.cnxset
+ return self._cnx.cnxset
class InternalManager(object):
@@ -1460,10 +1770,12 @@
bootstrapping the repository or creating regular users according to
repository content
"""
- def __init__(self):
+ def __init__(self, lang='en'):
self.eid = -1
self.login = u'__internal_manager__'
self.properties = {}
+ self.groups = set(['managers'])
+ self.lang = lang
def matching_groups(self, groups):
return 1
@@ -1476,7 +1788,7 @@
def property_value(self, key):
if key == 'ui.language':
- return 'en'
+ return self.lang
return None
def prefered_language(self, language=None):
@@ -1501,3 +1813,4 @@
from logging import getLogger
from cubicweb import set_log_methods
set_log_methods(Session, getLogger('cubicweb.session'))
+set_log_methods(Connection, getLogger('cubicweb.session'))
--- a/server/sources/__init__.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/sources/__init__.py Tue Jun 10 09:49:45 2014 +0200
@@ -61,32 +61,9 @@
# return true so it can be used as assertion (and so be killed by python -O)
return True
-class TimedCache(dict):
- def __init__(self, ttl):
- # time to live in seconds
- if ttl <= 0:
- raise ValueError('TimedCache initialized with a ttl of %ss' % ttl.seconds)
- self.ttl = timedelta(seconds=ttl)
-
- def __setitem__(self, key, value):
- dict.__setitem__(self, key, (datetime.utcnow(), value))
-
- def __getitem__(self, key):
- return dict.__getitem__(self, key)[1]
-
- def clear_expired(self):
- now_ = datetime.utcnow()
- ttl = self.ttl
- for key, (timestamp, value) in self.items():
- if now_ - timestamp > ttl:
- del self[key]
-
class AbstractSource(object):
"""an abstract class for sources"""
- # does the source copy data into the system source, or is it a *true* source
- # (i.e. entities are not stored physically here)
- copy_based_source = False
# boolean telling if modification hooks should be called when something is
# modified in this source
@@ -108,10 +85,6 @@
# a reference to the instance'schema (may differs from the source'schema)
schema = None
- # multi-sources planning control
- dont_cross_relations = ()
- cross_relations = ()
-
# force deactivation (configuration error for instance)
disabled = False
@@ -259,29 +232,15 @@
"""open and return a connection to the source"""
raise NotImplementedError(self)
- def check_connection(self, cnx):
- """Check connection validity, return None if the connection is still
- valid else a new connection (called when the connections set using the
- given connection is being attached to a session). Do nothing by default.
- """
- pass
-
def close_source_connections(self):
for cnxset in self.repo.cnxsets:
- cnxset._cursors.pop(self.uri, None)
- cnxset.source_cnxs[self.uri][1].close()
+ cnxset.cu = None
+ cnxset.cnx.close()
def open_source_connections(self):
for cnxset in self.repo.cnxsets:
- cnxset.source_cnxs[self.uri] = (self, self.get_connection())
-
- def cnxset_freed(self, cnx):
- """the connections set holding the given connection is being reseted
- from its current attached session.
-
- do nothing by default
- """
- pass
+ cnxset.cnx = self.get_connection()
+ cnxset.cu = cnxset.cnx.cursor()
# cache handling ###########################################################
@@ -333,23 +292,7 @@
return wsupport
return True
- def may_cross_relation(self, rtype):
- """return True if the relation may be crossed among sources. Rules are:
-
- * if this source support the relation, can't be crossed unless explicitly
- specified in .cross_relations
-
- * if this source doesn't support the relation, can be crossed unless
- explicitly specified in .dont_cross_relations
- """
- # XXX find a way to have relation such as state_of in dont cross
- # relation (eg composite relation without both end type available?
- # card 1 relation? ...)
- if self.support_relation(rtype):
- return rtype in self.cross_relations
- return rtype not in self.dont_cross_relations
-
- def before_entity_insertion(self, session, lid, etype, eid, sourceparams):
+ def before_entity_insertion(self, cnx, lid, etype, eid, sourceparams):
"""called by the repository when an eid has been attributed for an
entity stored here but the entity has not been inserted in the system
table yet.
@@ -357,12 +300,12 @@
This method must return the an Entity instance representation of this
entity.
"""
- entity = self.repo.vreg['etypes'].etype_class(etype)(session)
+ entity = self.repo.vreg['etypes'].etype_class(etype)(cnx)
entity.eid = eid
entity.cw_edited = EditedEntity(entity)
return entity
- def after_entity_insertion(self, session, lid, entity, sourceparams):
+ def after_entity_insertion(self, cnx, lid, entity, sourceparams):
"""called by the repository after an entity stored here has been
inserted in the system table.
"""
@@ -403,7 +346,7 @@
# user authentication api ##################################################
- def authenticate(self, session, login, **kwargs):
+ def authenticate(self, cnx, login, **kwargs):
"""if the source support CWUser entity type, it should implement
this method which should return CWUser eid for the given login/password
if this account is defined in this source and valid login / password is
@@ -413,7 +356,7 @@
# RQL query api ############################################################
- def syntax_tree_search(self, session, union,
+ def syntax_tree_search(self, cnx, union,
args=None, cachekey=None, varmap=None, debug=0):
"""return result from this source for a rql query (actually from a rql
syntax tree and a solution dictionary mapping each used variable to a
@@ -422,15 +365,6 @@
"""
raise NotImplementedError(self)
- def flying_insert(self, table, session, union, args=None, varmap=None):
- """similar as .syntax_tree_search, but inserts data in the temporary
- table (on-the-fly if possible, eg for the system source whose the given
- cursor come from). If not possible, inserts all data by calling
- .executemany().
- """
- res = self.syntax_tree_search(session, union, args, varmap=varmap)
- session.cnxset.source('system').manual_insert(res, table, session)
-
# write modification api ###################################################
# read-only sources don't have to implement methods below
@@ -487,22 +421,6 @@
"""mark entity as being modified, fulltext reindex if needed"""
raise NotImplementedError(self)
- def delete_info_multi(self, session, entities, uri):
- """delete system information on deletion of a list of entities with the
- same etype and belinging to the same source
- """
- raise NotImplementedError(self)
-
- def modified_entities(self, session, etypes, mtime):
- """return a 2-uple:
- * list of (etype, eid) of entities of the given types which have been
- modified since the given timestamp (actually entities whose full text
- index content has changed)
- * list of (etype, eid) of entities of the given types which have been
- deleted since the given timestamp
- """
- raise NotImplementedError(self)
-
def index_entity(self, session, entity):
"""create an operation to [re]index textual content of the given entity
on commit
@@ -525,90 +443,18 @@
"""execute the query and return its result"""
raise NotImplementedError(self)
- def temp_table_def(self, selection, solution, table, basemap):
- raise NotImplementedError(self)
-
def create_index(self, session, table, column, unique=False):
raise NotImplementedError(self)
def drop_index(self, session, table, column, unique=False):
raise NotImplementedError(self)
- def create_temp_table(self, session, table, schema):
- raise NotImplementedError(self)
-
- def clean_temp_data(self, session, temptables):
- """remove temporary data, usually associated to temporary tables"""
- pass
-
-
- @deprecated('[3.13] use repo.eid2extid(source, eid, session)')
- def eid2extid(self, eid, session=None):
- return self.repo.eid2extid(self, eid, session)
@deprecated('[3.13] use extid2eid(source, value, etype, session, **kwargs)')
- def extid2eid(self, value, etype, session=None, **kwargs):
+ def extid2eid(self, value, etype, session, **kwargs):
return self.repo.extid2eid(self, value, etype, session, **kwargs)
-class TrFunc(object):
- """lower, upper"""
- def __init__(self, trname, index, attrname=None):
- self._tr = trname.lower()
- self.index = index
- self.attrname = attrname
-
- def apply(self, resdict):
- value = resdict.get(self.attrname)
- if value is not None:
- return getattr(value, self._tr)()
- return None
-
-
-class GlobTrFunc(TrFunc):
- """count, sum, max, min, avg"""
- funcs = {
- 'count': len,
- 'sum': sum,
- 'max': max,
- 'min': min,
- # XXX avg
- }
- def apply(self, result):
- """have to 'groupby' manually. For instance, if we 'count' for index 1:
- >>> self.apply([(1, 2), (3, 4), (1, 5)])
- [(1, 7), (3, 4)]
- """
- keys, values = [], {}
- for row in result:
- key = tuple(v for i, v in enumerate(row) if i != self.index)
- value = row[self.index]
- try:
- values[key].append(value)
- except KeyError:
- keys.append(key)
- values[key] = [value]
- result = []
- trfunc = self.funcs[self._tr]
- for key in keys:
- row = list(key)
- row.insert(self.index, trfunc(values[key]))
- result.append(row)
- return result
-
-
-class ConnectionWrapper(object):
- def __init__(self, cnx=None):
- self.cnx = cnx
- def commit(self):
- pass
- def rollback(self):
- pass
- def cursor(self):
- return None # no actual cursor support
- def close(self):
- if hasattr(self.cnx, 'close'):
- self.cnx.close()
from cubicweb.server import SOURCE_TYPES
--- a/server/sources/datafeed.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/sources/datafeed.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2010-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2010-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -35,7 +35,6 @@
class DataFeedSource(AbstractSource):
- copy_based_source = True
use_cwuri_as_url = True
options = (
@@ -144,67 +143,64 @@
return False
return datetime.utcnow() < (self.latest_retrieval + self.synchro_interval)
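
The freshness test is plain datetime arithmetic; a sketch with illustrative values:

    from datetime import datetime, timedelta

    latest_retrieval = datetime(2014, 6, 10, 9, 0)
    synchro_interval = timedelta(minutes=5)   # from the synchronization-interval option
    now = datetime(2014, 6, 10, 9, 3)
    assert now < latest_retrieval + synchro_interval   # still fresh: skip the pull
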
- def update_latest_retrieval(self, session):
+ def update_latest_retrieval(self, cnx):
self.latest_retrieval = datetime.utcnow()
- session.set_cnxset()
- session.execute('SET X latest_retrieval %(date)s WHERE X eid %(x)s',
+ cnx.execute('SET X latest_retrieval %(date)s WHERE X eid %(x)s',
{'x': self.eid, 'date': self.latest_retrieval})
- session.commit()
+ cnx.commit()
- def acquire_synchronization_lock(self, session):
+ def acquire_synchronization_lock(self, cnx):
# XXX race condition until WHERE of SET queries is executed using
# 'SELECT FOR UPDATE'
now = datetime.utcnow()
- session.set_cnxset()
- if not session.execute(
+ if not cnx.execute(
'SET X in_synchronization %(now)s WHERE X eid %(x)s, '
'X in_synchronization NULL OR X in_synchronization < %(maxdt)s',
{'x': self.eid, 'now': now, 'maxdt': now - self.max_lock_lifetime}):
self.error('concurrent synchronization detected, skip pull')
- session.commit()
+ cnx.commit()
return False
- session.commit()
+ cnx.commit()
return True
- def release_synchronization_lock(self, session):
- session.set_cnxset()
- session.execute('SET X in_synchronization NULL WHERE X eid %(x)s',
+ def release_synchronization_lock(self, cnx):
+ cnx.execute('SET X in_synchronization NULL WHERE X eid %(x)s',
{'x': self.eid})
- session.commit()
+ cnx.commit()
- def pull_data(self, session, force=False, raise_on_error=False):
+ def pull_data(self, cnx, force=False, raise_on_error=False):
"""Launch synchronization of the source if needed.
This method is responsible to handle commit/rollback on the given
- session.
+ connection.
"""
if not force and self.fresh():
return {}
- if not self.acquire_synchronization_lock(session):
+ if not self.acquire_synchronization_lock(cnx):
return {}
try:
- with session.transaction(free_cnxset=False):
- return self._pull_data(session, force, raise_on_error)
+ return self._pull_data(cnx, force, raise_on_error)
finally:
- self.release_synchronization_lock(session)
+ cnx.rollback() # rollback first in case there is some dirty
+ # transaction remaining
+ self.release_synchronization_lock(cnx)
- def _pull_data(self, session, force=False, raise_on_error=False):
- importlog = self.init_import_log(session)
- myuris = self.source_cwuris(session)
- parser = self._get_parser(session, sourceuris=myuris, import_log=importlog)
+ def _pull_data(self, cnx, force=False, raise_on_error=False):
+ importlog = self.init_import_log(cnx)
+ myuris = self.source_cwuris(cnx)
+ parser = self._get_parser(cnx, sourceuris=myuris, import_log=importlog)
if self.process_urls(parser, self.urls, raise_on_error):
self.warning("some error occurred, don't attempt to delete entities")
else:
- parser.handle_deletion(self.config, session, myuris)
- self.update_latest_retrieval(session)
+ parser.handle_deletion(self.config, cnx, myuris)
+ self.update_latest_retrieval(cnx)
stats = parser.stats
if stats.get('created'):
importlog.record_info('added %s entities' % len(stats['created']))
if stats.get('updated'):
importlog.record_info('updated %s entities' % len(stats['updated']))
- session.set_cnxset()
- importlog.write_log(session, end_timestamp=self.latest_retrieval)
- session.commit()
+ importlog.write_log(cnx, end_timestamp=self.latest_retrieval)
+ cnx.commit()
return stats
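
Driving a pull with the new signature might look like this sketch, assuming the 3.19-era `repo.internal_cnx()` helper and an illustrative source named 'myfeed':

    with repo.internal_cnx() as cnx:          # repo: a running Repository
        source = repo.sources_by_uri['myfeed']
        stats = source.pull_data(cnx, force=True)
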
def process_urls(self, parser, urls, raise_on_error=False):
@@ -323,7 +319,6 @@
uri = uri.encode('utf-8')
try:
eid = session.repo.extid2eid(source, str(uri), etype, session,
- complete=False, commit=False,
sourceparams=sourceparams)
except ValidationError as ex:
# XXX use critical so they are seen during tests. Should consider
@@ -418,11 +413,9 @@
# Check whether self._cw is a session or a connection
if getattr(self._cw, 'commit', None) is not None:
commit = self._cw.commit
- set_cnxset = self._cw.set_cnxset
rollback = self._cw.rollback
else:
commit = self._cw.cnx.commit
- set_cnxset = lambda: None
rollback = self._cw.cnx.rollback
for args in parsed:
try:
@@ -430,14 +423,12 @@
# commit+set_cnxset instead of commit(free_cnxset=False) to give
# others a chance to get our connections set
commit()
- set_cnxset()
except ValidationError as exc:
if raise_on_error:
raise
self.source.error('Skipping %s because of validation error %s'
% (args, exc))
rollback()
- set_cnxset()
error = True
return error
--- a/server/sources/extlite.py Tue Jun 10 09:35:26 2014 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,302 +0,0 @@
-# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
-# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
-#
-# This file is part of CubicWeb.
-#
-# CubicWeb is free software: you can redistribute it and/or modify it under the
-# terms of the GNU Lesser General Public License as published by the Free
-# Software Foundation, either version 2.1 of the License, or (at your option)
-# any later version.
-#
-# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
-# details.
-#
-# You should have received a copy of the GNU Lesser General Public License along
-# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""provide an abstract class for external sources using a sqlite database helper
-"""
-
-__docformat__ = "restructuredtext en"
-
-
-from os.path import join, exists
-
-from cubicweb import server
-from cubicweb.server.sqlutils import SQL_PREFIX, SQLAdapterMixIn, sqlexec
-from cubicweb.server.sources import native, rql2sql
-from cubicweb.server.sources import AbstractSource, dbg_st_search, dbg_results
-
-class ConnectionWrapper(object):
- def __init__(self, source=None):
- self.source = source
- self._cnx = None
-
- def cursor(self):
- if self._cnx is None:
- self._cnx = self.source._sqlcnx
- if server.DEBUG & server.DBG_SQL:
- print 'sql cnx OPEN', self._cnx
- return self._cnx.cursor()
-
- def commit(self):
- if self._cnx is not None:
- if server.DEBUG & (server.DBG_SQL | server.DBG_RQL):
- print 'sql cnx COMMIT', self._cnx
- self._cnx.commit()
-
- def rollback(self):
- if self._cnx is not None:
- if server.DEBUG & (server.DBG_SQL | server.DBG_RQL):
- print 'sql cnx ROLLBACK', self._cnx
- self._cnx.rollback()
-
- def close(self):
- if self._cnx is not None:
- if server.DEBUG & server.DBG_SQL:
- print 'sql cnx CLOSE', self._cnx
- self._cnx.close()
- self._cnx = None
-
-
-class SQLiteAbstractSource(AbstractSource):
- """an abstract class for external sources using a sqlite database helper
- """
- sqlgen_class = rql2sql.SQLGenerator
- @classmethod
- def set_nonsystem_types(cls):
- # those entities are only in this source, we don't want them in the
- # system source
- for etype in cls.support_entities:
- native.NONSYSTEM_ETYPES.add(etype)
- for rtype in cls.support_relations:
- native.NONSYSTEM_RELATIONS.add(rtype)
-
- options = (
- ('helper-db-path',
- {'type' : 'string',
- 'default': None,
- 'help': 'path to the sqlite database file used to do queries on the \
-repository.',
- 'level': 2,
- }),
- )
-
- def __init__(self, repo, appschema, source_config, *args, **kwargs):
- # the helper db is used to easy querying and will store everything but
- # actual file content
- dbpath = source_config.get('helper-db-path')
- if dbpath is None:
- dbpath = join(repo.config.appdatahome,
- '%(uri)s.sqlite' % source_config)
- self.dbpath = dbpath
- self.sqladapter = SQLAdapterMixIn({'db-driver': 'sqlite',
- 'db-name': dbpath})
- # those attributes have to be initialized before ancestor's __init__
- # which will call set_schema
- self._need_sql_create = not exists(dbpath)
- self._need_full_import = self._need_sql_create
- AbstractSource.__init__(self, repo, appschema, source_config,
- *args, **kwargs)
-
- def backup(self, backupfile, confirm):
- """method called to create a backup of the source's data"""
- self.close_source_connections()
- try:
- self.sqladapter.backup_to_file(backupfile, confirm)
- finally:
- self.open_source_connections()
-
- def restore(self, backupfile, confirm, drop):
- """method called to restore a backup of source's data"""
- self.close_source_connections()
- try:
- self.sqladapter.restore_from_file(backupfile, confirm, drop)
- finally:
- self.open_source_connections()
-
- @property
- def _sqlcnx(self):
- # XXX: sqlite connections can only be used in the same thread, so
- # create a new one each time necessary. If it appears to be time
- # consuming, find another way
- return self.sqladapter.get_connection()
-
- def _is_schema_complete(self):
- for etype in self.support_entities:
- if not etype in self.schema:
- self.warning('not ready to generate %s database, %s support missing from schema',
- self.uri, etype)
- return False
- for rtype in self.support_relations:
- if not rtype in self.schema:
- self.warning('not ready to generate %s database, %s support missing from schema',
- self.uri, rtype)
- return False
- return True
-
- def _create_database(self):
- from yams.schema2sql import eschema2sql, rschema2sql
- from cubicweb.toolsutils import restrict_perms_to_user
- self.warning('initializing sqlite database for %s source' % self.uri)
- cnx = self._sqlcnx
- cu = cnx.cursor()
- schema = self.schema
- for etype in self.support_entities:
- eschema = schema.eschema(etype)
- createsqls = eschema2sql(self.sqladapter.dbhelper, eschema,
- skip_relations=('data',), prefix=SQL_PREFIX)
- sqlexec(createsqls, cu, withpb=False)
- for rtype in self.support_relations:
- rschema = schema.rschema(rtype)
- if not rschema.inlined:
- sqlexec(rschema2sql(rschema), cu, withpb=False)
- cnx.commit()
- cnx.close()
- self._need_sql_create = False
- if self.repo.config['uid']:
- from logilab.common.shellutils import chown
- # database file must be owned by the uid of the server process
- self.warning('set %s as owner of the database file',
- self.repo.config['uid'])
- chown(self.dbpath, self.repo.config['uid'])
- restrict_perms_to_user(self.dbpath, self.info)
-
- def set_schema(self, schema):
- super(SQLiteAbstractSource, self).set_schema(schema)
- if self._need_sql_create and self._is_schema_complete() and self.dbpath:
- self._create_database()
- self.rqlsqlgen = self.sqlgen_class(schema, self.sqladapter.dbhelper)
-
- def get_connection(self):
- return ConnectionWrapper(self)
-
- def check_connection(self, cnx):
- """check connection validity, return None if the connection is still valid
- else a new connection (called when the connections set holding the given connection is
- being attached to a session)
-
- always return the connection to reset eventually cached cursor
- """
- return cnx
-
- def cnxset_freed(self, cnx):
- """the connections set holding the given connection is being freed from its current
- attached session: release the connection lock if the connection wrapper
- has a connection set
- """
- # reset _cnx to ensure next thread using cnx will get a new
- # connection
- cnx.close()
-
- def syntax_tree_search(self, session, union, args=None, cachekey=None,
- varmap=None):
- """return result from this source for a rql query (actually from a rql
- syntax tree and a solution dictionary mapping each used variable to a
- possible type). If cachekey is given, the query necessary to fetch the
- results (but not the results themselves) may be cached using this key.
- """
- if self._need_sql_create:
- return []
- assert dbg_st_search(self.uri, union, varmap, args, cachekey)
- sql, qargs, cbs = self.rqlsqlgen.generate(union, args)
- args = self.sqladapter.merge_args(args, qargs)
- cursor = self.doexec(session, sql, args)
- results = self.sqladapter.process_result(cursor, cbs)
- assert dbg_results(results)
- return results
-
- def local_add_entity(self, session, entity):
- """insert the entity in the local database.
-
- This is not provided as add_entity implementation since usually source
- don't want to simply do this, so let raise NotImplementedError and the
- source implementor may use this method if necessary
- """
- attrs = self.sqladapter.preprocess_entity(entity)
- sql = self.sqladapter.sqlgen.insert(SQL_PREFIX + str(entity.e_schema), attrs)
- self.doexec(session, sql, attrs)
-
- def add_entity(self, session, entity):
- """add a new entity to the source"""
- raise NotImplementedError()
-
- def local_update_entity(self, session, entity, attrs=None):
- """update an entity in the source
-
- This is not provided as update_entity implementation since usually
- source don't want to simply do this, so let raise NotImplementedError
- and the source implementor may use this method if necessary
- """
- if attrs is None:
- attrs = self.sqladapter.preprocess_entity(entity)
- sql = self.sqladapter.sqlgen.update(SQL_PREFIX + str(entity.e_schema),
- attrs, [SQL_PREFIX + 'eid'])
- self.doexec(session, sql, attrs)
-
- def update_entity(self, session, entity):
- """update an entity in the source"""
- raise NotImplementedError()
-
- def delete_entity(self, session, entity):
- """delete an entity from the source
-
- this is not deleting a file in the svn but deleting entities from the
- source. Main usage is to delete repository content when a Repository
- entity is deleted.
- """
- attrs = {'cw_eid': entity.eid}
- sql = self.sqladapter.sqlgen.delete(SQL_PREFIX + entity.cw_etype, attrs)
- self.doexec(session, sql, attrs)
-
- def local_add_relation(self, session, subject, rtype, object):
- """add a relation to the source
-
- This is not provided as add_relation implementation since usually
- source don't want to simply do this, so let raise NotImplementedError
- and the source implementor may use this method if necessary
- """
- attrs = {'eid_from': subject, 'eid_to': object}
- sql = self.sqladapter.sqlgen.insert('%s_relation' % rtype, attrs)
- self.doexec(session, sql, attrs)
-
- def add_relation(self, session, subject, rtype, object):
- """add a relation to the source"""
- raise NotImplementedError()
-
- def delete_relation(self, session, subject, rtype, object):
- """delete a relation from the source"""
- rschema = self.schema.rschema(rtype)
- if rschema.inlined:
- if subject in session.transaction_data.get('pendingeids', ()):
- return
- table = SQL_PREFIX + session.describe(subject)[0]
- column = SQL_PREFIX + rtype
- sql = 'UPDATE %s SET %s=NULL WHERE %seid=%%(eid)s' % (table, column, SQL_PREFIX)
- attrs = {'eid' : subject}
- else:
- attrs = {'eid_from': subject, 'eid_to': object}
- sql = self.sqladapter.sqlgen.delete('%s_relation' % rtype, attrs)
- self.doexec(session, sql, attrs)
-
- def doexec(self, session, query, args=None):
- """Execute a query.
- it's a function just so that it shows up in profiling
- """
- if server.DEBUG:
- print 'exec', query, args
- cursor = session.cnxset[self.uri]
- try:
- # str(query) to avoid error if it's a unicode string
- cursor.execute(str(query), args)
- except Exception as ex:
- self.critical("sql: %r\n args: %s\ndbms message: %r",
- query, args, ex.args[0])
- try:
- session.cnxset.connection(self.uri).rollback()
- self.critical('transaction has been rolled back')
- except Exception:
- pass
- raise
- return cursor
--- a/server/sources/ldapfeed.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/sources/ldapfeed.py Tue Jun 10 09:49:45 2014 +0200
@@ -17,24 +17,39 @@
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""cubicweb ldap feed source"""
+from __future__ import division # XXX why?
+
+from datetime import datetime
+
import ldap
+from ldap.ldapobject import ReconnectLDAPObject
from ldap.filter import filter_format
+from ldapurl import LDAPUrl
from logilab.common.configuration import merge_options
+from cubicweb import ValidationError, AuthenticationError, Binary
+from cubicweb.server import utils
from cubicweb.server.sources import datafeed
-from cubicweb.server import ldaputils, utils
-from cubicweb import Binary
_ = unicode
# search scopes
-ldapscope = {'BASE': ldap.SCOPE_BASE,
- 'ONELEVEL': ldap.SCOPE_ONELEVEL,
- 'SUBTREE': ldap.SCOPE_SUBTREE}
+BASE = ldap.SCOPE_BASE
+ONELEVEL = ldap.SCOPE_ONELEVEL
+SUBTREE = ldap.SCOPE_SUBTREE
+LDAP_SCOPES = {'BASE': ldap.SCOPE_BASE,
+ 'ONELEVEL': ldap.SCOPE_ONELEVEL,
+ 'SUBTREE': ldap.SCOPE_SUBTREE}
-class LDAPFeedSource(ldaputils.LDAPSourceMixIn,
- datafeed.DataFeedSource):
+# map ldap protocol to their standard port
+PROTO_PORT = {'ldap': 389,
+ 'ldaps': 636,
+ 'ldapi': None,
+ }
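
The map is used to default the port when the configured URL omits it, mirroring `connection_info` below; a sketch:

    url = 'ldap://ldap.example.org'      # illustrative
    protocol, hostport = url.split('://')
    if protocol != 'ldapi' and ':' not in hostport:
        hostport = '%s:%s' % (hostport, PROTO_PORT[protocol])
    # hostport is now 'ldap.example.org:389'
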
+
+
+class LDAPFeedSource(datafeed.DataFeedSource):
"""LDAP feed source: unlike ldapuser source, this source is copy based and
will import ldap content (beside passwords for authentication) into the
system source.
@@ -42,7 +57,79 @@
support_entities = {'CWUser': False}
use_cwuri_as_url = False
- options_group = (
+ options = (
+ ('auth-mode',
+ {'type' : 'choice',
+ 'default': 'simple',
+ 'choices': ('simple', 'cram_md5', 'digest_md5', 'gssapi'),
+ 'help': 'authentication mode used to authenticate user to the ldap.',
+ 'group': 'ldap-source', 'level': 3,
+ }),
+ ('auth-realm',
+ {'type' : 'string',
+ 'default': None,
+ 'help': 'realm to use when using gssapi/kerberos authentication.',
+ 'group': 'ldap-source', 'level': 3,
+ }),
+
+ ('data-cnx-dn',
+ {'type' : 'string',
+ 'default': '',
+ 'help': 'user dn to use to open data connection to the ldap (eg used \
+to respond to rql queries). Leave empty for anonymous bind',
+ 'group': 'ldap-source', 'level': 1,
+ }),
+ ('data-cnx-password',
+ {'type' : 'string',
+ 'default': '',
+ 'help': 'password to use to open data connection to the ldap (eg used to respond to rql queries). Leave empty for anonymous bind.',
+ 'group': 'ldap-source', 'level': 1,
+ }),
+
+ ('user-base-dn',
+ {'type' : 'string',
+ 'default': '',
+ 'help': 'base DN to lookup for users; disable user importation mechanism if unset',
+ 'group': 'ldap-source', 'level': 1,
+ }),
+ ('user-scope',
+ {'type' : 'choice',
+ 'default': 'ONELEVEL',
+ 'choices': ('BASE', 'ONELEVEL', 'SUBTREE'),
+ 'help': 'user search scope (valid values: "BASE", "ONELEVEL", "SUBTREE")',
+ 'group': 'ldap-source', 'level': 1,
+ }),
+ ('user-classes',
+ {'type' : 'csv',
+ 'default': ('top', 'posixAccount'),
+ 'help': 'classes of user (with Active Directory, you want to say "user" here)',
+ 'group': 'ldap-source', 'level': 1,
+ }),
+ ('user-filter',
+ {'type': 'string',
+ 'default': '',
+ 'help': 'additional filters to be set in the ldap query to find valid users',
+ 'group': 'ldap-source', 'level': 2,
+ }),
+ ('user-login-attr',
+ {'type' : 'string',
+ 'default': 'uid',
+ 'help': 'attribute used as login on authentication (with Active Directory, you want to use "sAMAccountName" here)',
+ 'group': 'ldap-source', 'level': 1,
+ }),
+ ('user-default-group',
+ {'type' : 'csv',
+ 'default': ('users',),
+ 'help': 'name of a group in which ldap users will be by default. \
+You can set multiple groups by separating them by a comma.',
+ 'group': 'ldap-source', 'level': 1,
+ }),
+ ('user-attrs-map',
+ {'type' : 'named',
+ 'default': {'uid': 'login', 'gecos': 'email', 'userPassword': 'upassword'},
+ 'help': 'map from ldap user attributes to cubicweb attributes (with Active Directory, you want to use sAMAccountName:login,mail:email,givenName:firstname,sn:surname)',
+ 'group': 'ldap-source', 'level': 1,
+ }),
('group-base-dn',
{'type' : 'string',
'default': '',
@@ -76,18 +163,33 @@
}),
)
- options = merge_options(datafeed.DataFeedSource.options
- + ldaputils.LDAPSourceMixIn.options
- + options_group,
+ options = merge_options(datafeed.DataFeedSource.options + options,
optgroup='ldap-source',)
+ _conn = None
+
def update_config(self, source_entity, typedconfig):
"""update configuration from source entity. `typedconfig` is config
properly typed with defaults set
"""
super(LDAPFeedSource, self).update_config(source_entity, typedconfig)
+ self.authmode = typedconfig['auth-mode']
+ self._authenticate = getattr(self, '_auth_%s' % self.authmode)
+ self.cnx_dn = typedconfig['data-cnx-dn']
+ self.cnx_pwd = typedconfig['data-cnx-password']
+ self.user_base_dn = str(typedconfig['user-base-dn'])
+ self.user_base_scope = globals()[typedconfig['user-scope']]
+ self.user_login_attr = typedconfig['user-login-attr']
+ self.user_default_groups = typedconfig['user-default-group']
+ self.user_attrs = {'dn': 'eid', 'modifyTimestamp': 'modification_date'}
+ self.user_attrs.update(typedconfig['user-attrs-map'])
+ self.user_rev_attrs = dict((v, k) for k, v in self.user_attrs.iteritems())
+ self.base_filters = [filter_format('(%s=%s)', ('objectClass', o))
+ for o in typedconfig['user-classes']]
+ if typedconfig['user-filter']:
+ self.base_filters.append(typedconfig['user-filter'])
self.group_base_dn = str(typedconfig['group-base-dn'])
- self.group_base_scope = ldapscope[typedconfig['group-scope']]
+ self.group_base_scope = LDAP_SCOPES[typedconfig['group-scope']]
self.group_attrs = typedconfig['group-attrs-map']
self.group_attrs = {'dn': 'eid', 'modifyTimestamp': 'modification_date'}
self.group_attrs.update(typedconfig['group-attrs-map'])
@@ -96,11 +198,191 @@
for o in typedconfig['group-classes']]
if typedconfig['group-filter']:
self.group_base_filters.append(typedconfig['group-filter'])
+ self._conn = None
+
+ def _entity_update(self, source_entity):
+ super(LDAPFeedSource, self)._entity_update(source_entity)
+ if self.urls:
+ if len(self.urls) > 1:
+ raise ValidationError(source_entity.eid, {'url': _('can only have one url')})
+ try:
+ protocol, hostport = self.urls[0].split('://')
+ except ValueError:
+ raise ValidationError(source_entity.eid, {'url': _('badly formatted url')})
+ if protocol not in PROTO_PORT:
+ raise ValidationError(source_entity.eid, {'url': _('unsupported protocol')})
+
+ def connection_info(self):
+ assert len(self.urls) == 1, self.urls
+ protocol, hostport = self.urls[0].split('://')
+ if protocol != 'ldapi' and ':' not in hostport:
+ hostport = '%s:%s' % (hostport, PROTO_PORT[protocol])
+ return protocol, hostport
+
+ def authenticate(self, cnx, login, password=None, **kwargs):
+ """return CWUser eid for the given login/password if this account is
+ defined in this source, else raise `AuthenticationError`
+
+ two queries are needed since passwords are stored crypted, so we have
+ to fetch the salt first
+ """
+ self.info('ldap authenticate %s', login)
+ if not password:
+ # On Windows + ADAM this would have succeeded (!!!)
+ # You get Authenticated as: 'NT AUTHORITY\ANONYMOUS LOGON'.
+ # we really really don't want that
+ raise AuthenticationError()
+ searchfilter = [filter_format('(%s=%s)', (self.user_login_attr, login))]
+ searchfilter.extend(self.base_filters)
+ searchstr = '(&%s)' % ''.join(searchfilter)
+ # first search the user
+ try:
+ user = self._search(cnx, self.user_base_dn,
+ self.user_base_scope, searchstr)[0]
+ except (IndexError, ldap.SERVER_DOWN):
+ # no such user
+ raise AuthenticationError()
+ # check password by establishing a (unused) connection
+ try:
+ self._connect(user, password)
+ except ldap.LDAPError as ex:
+ # Something went wrong, most likely bad credentials
+ self.info('while trying to authenticate %s: %s', user, ex)
+ raise AuthenticationError()
+ except Exception:
+ self.error('while trying to authenticate %s', user, exc_info=True)
+ raise AuthenticationError()
+ eid = self.repo.extid2eid(self, user['dn'], 'CWUser', cnx, insert=False)
+ if eid < 0:
+ # user has been moved away from this source
+ raise AuthenticationError()
+ return eid
+
+ def _connect(self, user=None, userpwd=None):
+ protocol, hostport = self.connection_info()
+ self.info('connecting %s://%s as %s', protocol, hostport,
+ user and user['dn'] or 'anonymous')
+ # don't require server certificate when using ldaps (will
+ # enable self signed certs)
+ ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
+ url = LDAPUrl(urlscheme=protocol, hostport=hostport)
+ conn = ReconnectLDAPObject(url.initializeUrl())
+ # Set the protocol version - version 3 is preferred
+ try:
+ conn.set_option(ldap.OPT_PROTOCOL_VERSION, ldap.VERSION3)
+ except ldap.LDAPError: # Invalid protocol version, fall back safely
+ conn.set_option(ldap.OPT_PROTOCOL_VERSION, ldap.VERSION2)
+ # Deny auto-chasing of referrals to be safe, we handle them instead
+ # Required for AD
+ try:
+ conn.set_option(ldap.OPT_REFERRALS, 0)
+ except ldap.LDAPError: # Cannot set referrals, so do nothing
+ pass
+ #conn.set_option(ldap.OPT_NETWORK_TIMEOUT, conn_timeout)
+ #conn.timeout = op_timeout
+ # Now bind with the credentials given. Let exceptions propagate out.
+ if user is None:
+ # XXX always use simple bind for data connection
+ if not self.cnx_dn:
+ conn.simple_bind_s(self.cnx_dn, self.cnx_pwd)
+ else:
+ self._authenticate(conn, {'dn': self.cnx_dn}, self.cnx_pwd)
+ else:
+ # user specified, we want to check user/password, no need to return
+ # the connection which will be thrown out
+ self._authenticate(conn, user, userpwd)
+ return conn
+
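
# The url handling above relies on python-ldap's companion ldapurl module.
# A condensed, standalone sketch of the same connection setup (host and
# credentials invented):
#
#   import ldap
#   from ldapurl import LDAPUrl
#   from ldap.ldapobject import ReconnectLDAPObject
#
#   url = LDAPUrl(urlscheme='ldap', hostport='directory.example.com:389')
#   conn = ReconnectLDAPObject(url.initializeUrl())  # 'ldap://directory.example.com:389'
#   conn.set_option(ldap.OPT_PROTOCOL_VERSION, ldap.VERSION3)
#   conn.simple_bind_s('cn=admin,dc=example,dc=com', 'secret')
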
+ def _auth_simple(self, conn, user, userpwd):
+ conn.simple_bind_s(user['dn'], userpwd)
+
+ def _auth_cram_md5(self, conn, user, userpwd):
+ from ldap import sasl
+ auth_token = sasl.cram_md5(user['dn'], userpwd)
+ conn.sasl_interactive_bind_s('', auth_token)
+
+ def _auth_digest_md5(self, conn, user, userpwd):
+ from ldap import sasl
+ auth_token = sasl.digest_md5(user['dn'], userpwd)
+ conn.sasl_interactive_bind_s('', auth_token)
+
+ def _auth_gssapi(self, conn, user, userpwd):
+ # XXX not a proper sasl/gssapi bind
+ import kerberos
+ if not kerberos.checkPassword(user[self.user_login_attr], userpwd):
+ raise Exception('bad login / password')
+ #from ldap import sasl
+ #conn.sasl_interactive_bind_s('', sasl.gssapi())
+
+ def _search(self, cnx, base, scope,
+ searchstr='(objectClass=*)', attrs=()):
+ """make an ldap query"""
+ self.debug('ldap search %s %s %s %s %s', self.uri, base, scope,
+ searchstr, list(attrs))
+ if self._conn is None:
+ self._conn = self._connect()
+ ldapcnx = self._conn
+ try:
+ res = ldapcnx.search_s(base, scope, searchstr, attrs)
+ except ldap.PARTIAL_RESULTS:
+ res = ldapcnx.result(all=0)[1]
+ except ldap.NO_SUCH_OBJECT:
+ self.info('ldap NO SUCH OBJECT %s %s %s', base, scope, searchstr)
+ self._process_no_such_object(cnx, base)
+ return []
+ # except ldap.REFERRAL as e:
+ # ldapcnx = self.handle_referral(e)
+ # try:
+ # res = ldapcnx.search_s(base, scope, searchstr, attrs)
+ # except ldap.PARTIAL_RESULTS:
+ # res_type, res = ldapcnx.result(all=0)
+ result = []
+ for rec_dn, rec_dict in res:
+ # When used against Active Directory, "rec_dict" may not
+ # be a dictionary in some cases (instead, it can be a list)
+ #
+ # An example of a useless "res" entry that can be ignored
+ # from AD is
+ # (None, ['ldap://ForestDnsZones.PORTAL.LOCAL/DC=ForestDnsZones,DC=PORTAL,DC=LOCAL'])
+ # This appears to be some sort of internal referral, but
+ # we can't handle it, so we need to skip over it.
+ try:
+ items = rec_dict.iteritems()
+ except AttributeError:
+ continue
+ else:
+ itemdict = self._process_ldap_item(rec_dn, items)
+ result.append(itemdict)
+ self.debug('ldap built results %s', len(result))
+ return result
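
# To make the Active Directory quirk concrete, here is the shape of a raw
# search_s() result mixing a real entry with one of those internal
# referrals (data invented, referral taken from the comment above); only
# the first item survives the loop:
#
#   res = [('uid=jdoe,ou=people,dc=example,dc=com', {'uid': ['jdoe']}),
#          (None, ['ldap://ForestDnsZones.PORTAL.LOCAL/DC=ForestDnsZones,DC=PORTAL,DC=LOCAL'])]
#   # the second item is a list: no .iteritems(), AttributeError, skipped
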
def _process_ldap_item(self, dn, iterator):
- itemdict = super(LDAPFeedSource, self)._process_ldap_item(dn, iterator)
+ """Turn an ldap received item into a proper dict."""
+ itemdict = {'dn': dn}
+ for key, value in iterator:
+ if self.user_attrs.get(key) == 'upassword': # XXX better password detection
+ value = value[0].encode('utf-8')
+ # we only support ldap_salted_sha1 for ldap sources, see: server/utils.py
+ if not value.startswith('{SSHA}'):
+ value = utils.crypt_password(value)
+ itemdict[key] = Binary(value)
+ elif self.user_attrs.get(key) == 'modification_date':
+ itemdict[key] = datetime.strptime(value[0], '%Y%m%d%H%M%SZ')
+ else:
+ value = [unicode(val, 'utf-8', 'replace') for val in value]
+ if len(value) == 1:
+ itemdict[key] = value = value[0]
+ else:
+ itemdict[key] = value
# we expect memberUid to be a list of user ids, make sure of it
member = self.group_rev_attrs['member']
if isinstance(itemdict.get(member), basestring):
itemdict[member] = [itemdict[member]]
return itemdict
+
+ def _process_no_such_object(self, cnx, dn):
+ """Some search return NO_SUCH_OBJECT error, handle this (usually because
+ an object whose dn is no more existent in ldap as been encountered).
+
+ Do nothing by default, let sub-classes handle that.
+ """
--- a/server/sources/native.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/sources/native.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -35,7 +35,7 @@
from datetime import datetime
from base64 import b64decode, b64encode
from contextlib import contextmanager
-from os.path import abspath, basename
+from os.path import basename
import re
import itertools
import zipfile
@@ -52,7 +52,7 @@
from yams.schema import role_name
from cubicweb import (UnknownEid, AuthenticationError, ValidationError, Binary,
- UniqueTogetherError, QueryError, UndoTransactionException)
+ UniqueTogetherError, UndoTransactionException)
from cubicweb import transaction as tx, server, neg_role
from cubicweb.utils import QueryCache
from cubicweb.schema import VIRTUAL_RTYPES
@@ -95,37 +95,6 @@
return self.cu.fetchone()
-def make_schema(selected, solution, table, typemap):
- """return a sql schema to store RQL query result"""
- sql = []
- varmap = {}
- for i, term in enumerate(selected):
- name = 'C%s' % i
- key = term.as_string()
- varmap[key] = '%s.%s' % (table, name)
- ttype = term.get_type(solution)
- try:
- sql.append('%s %s' % (name, typemap[ttype]))
- except KeyError:
- # assert not schema(ttype).final
- sql.append('%s %s' % (name, typemap['Int']))
- return ','.join(sql), varmap
-
-
-def _modified_sql(table, etypes):
- # XXX protect against sql injection
- if len(etypes) > 1:
- restr = 'type IN (%s)' % ','.join("'%s'" % etype for etype in etypes)
- else:
- restr = "type='%s'" % etypes[0]
- if table == 'entities':
- attr = 'mtime'
- else:
- attr = 'dtime'
- return 'SELECT type, eid FROM %s WHERE %s AND %s > %%(time)s' % (
- table, restr, attr)
-
-
def sql_or_clauses(sql, clauses):
select, restr = sql.split(' WHERE ', 1)
restrclauses = restr.split(' AND ')
@@ -138,6 +107,7 @@
restr = '(%s)' % ' OR '.join(clauses)
return '%s WHERE %s' % (select, restr)
+
def rdef_table_column(rdef):
"""return table and column used to store the given relation definition in
the database
@@ -145,6 +115,7 @@
return (SQL_PREFIX + str(rdef.subject),
SQL_PREFIX + str(rdef.rtype))
+
def rdef_physical_info(dbhelper, rdef):
"""return backend type and a boolean flag if NULL values should be allowed
for a given relation definition
@@ -182,34 +153,34 @@
'rtype': rdef.rtype,
'eid': tentity.eid})
-def _undo_rel_info(session, subj, rtype, obj):
+def _undo_rel_info(cnx, subj, rtype, obj):
entities = []
for role, eid in (('subject', subj), ('object', obj)):
try:
- entities.append(session.entity_from_eid(eid))
+ entities.append(cnx.entity_from_eid(eid))
except UnknownEid:
- raise _UndoException(session._(
+ raise _UndoException(cnx._(
"Can't restore relation %(rtype)s, %(role)s entity %(eid)s"
" doesn't exist anymore.")
- % {'role': session._(role),
- 'rtype': session._(rtype),
+ % {'role': cnx._(role),
+ 'rtype': cnx._(rtype),
'eid': eid})
sentity, oentity = entities
try:
- rschema = session.vreg.schema.rschema(rtype)
+ rschema = cnx.vreg.schema.rschema(rtype)
rdef = rschema.rdefs[(sentity.cw_etype, oentity.cw_etype)]
except KeyError:
- raise _UndoException(session._(
+ raise _UndoException(cnx._(
"Can't restore relation %(rtype)s between %(subj)s and "
"%(obj)s, that relation does not exists anymore in the "
"schema.")
- % {'rtype': session._(rtype),
+ % {'rtype': cnx._(rtype),
'subj': subj,
'obj': obj})
return sentity, oentity, rdef
-def _undo_has_later_transaction(session, eid):
- return session.system_sql('''\
+def _undo_has_later_transaction(cnx, eid):
+ return cnx.system_sql('''\
SELECT T.tx_uuid FROM transactions AS TREF, transactions AS T
WHERE TREF.tx_uuid='%(txuuid)s' AND T.tx_uuid!='%(txuuid)s'
AND T.tx_time>=TREF.tx_time
@@ -218,10 +189,85 @@
OR EXISTS(SELECT 1 FROM tx_relation_actions as TRA
WHERE TRA.tx_uuid=T.tx_uuid AND (
TRA.eid_from=%(eid)s OR TRA.eid_to=%(eid)s))
- )''' % {'txuuid': session.transaction_data['undoing_uuid'],
+ )''' % {'txuuid': cnx.transaction_data['undoing_uuid'],
'eid': eid}).fetchone()
+class DefaultEidGenerator(object):
+ __slots__ = ('source', 'cnx', 'lock')
+
+ def __init__(self, source):
+ self.source = source
+ self.cnx = None
+ self.lock = Lock()
+
+ def close(self):
+ if self.cnx:
+ self.cnx.close()
+ self.cnx = None
+
+ def create_eid(self, _cnx, count=1):
+ # lock needed to prevent 'Connection is busy with results for another
+ # command (0)' errors with SQLServer
+ assert count > 0
+ with self.lock:
+ return self._create_eid(count)
+
+ def _create_eid(self, count):
+ # internal function doing the eid creation without locking.
+ # needed for the recursive handling of disconnections (otherwise we
+ # would deadlock on self.lock)
+ source = self.source
+ if self.cnx is None:
+ self.cnx = source.get_connection()
+ cnx = self.cnx
+ try:
+ cursor = cnx.cursor()
+ for sql in source.dbhelper.sqls_increment_numrange('entities_id_seq', count):
+ cursor.execute(sql)
+ eid = cursor.fetchone()[0]
+ except (source.OperationalError, source.InterfaceError):
+ # FIXME: better detection of disconnection problems
+ source.warning("trying to reconnect create eid connection")
+ self.cnx = None
+ return self._create_eid(count)
+ except source.DbapiError as exc:
+ # We get this one with pyodbc and SQL Server when connection was reset
+ if exc.args[0] == '08S01':
+ source.warning("trying to reconnect create eid connection")
+ self.cnx = None
+ return self._create_eid(count)
+ else:
+ raise
+ except Exception: # WTF?
+ cnx.rollback()
+ self.cnx = None
+ source.exception('create eid failed in an unforeseen way on SQL statement %s', sql)
+ raise
+ else:
+ cnx.commit()
+ return eid
+
+
+class SQLITEEidGenerator(object):
+ __slots__ = ('source', 'lock')
+
+ def __init__(self, source):
+ self.source = source
+ self.lock = Lock()
+
+ def close(self):
+ pass
+
+ def create_eid(self, cnx, count=1):
+ assert count > 0
+ source = self.source
+ with self.lock:
+ for sql in source.dbhelper.sqls_increment_numrange('entities_id_seq', count):
+ cursor = source.doexec(cnx, sql)
+ return cursor.fetchone()[0]
+
+
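
# Both generator classes expose the same close()/create_eid() interface, so
# the source picks one per backend and binds self.create_eid to it (see
# __init__ below). A usage sketch, assuming an initialized source:
#
#   gen = source.eid_generator
#   eid = gen.create_eid(cnx)             # reserve a single eid
#   last = gen.create_eid(cnx, count=10)  # bump the numrange by 10 in one round-trip
#   gen.close()                           # release the dedicated connection, if any
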
class NativeSQLSource(SQLAdapterMixIn, AbstractSource):
"""adapter for source using the native cubicweb schema (see below)
"""
@@ -292,38 +338,14 @@
self.do_fti = not repo.config['delay-full-text-indexation']
# sql queries cache
self._cache = QueryCache(repo.config['rql-cache-size'])
- self._temp_table_data = {}
- # we need a lock to protect eid attribution function (XXX, really?
- # explain)
- self._eid_cnx_lock = Lock()
- self._eid_creation_cnx = None
# (etype, attr) / storage mapping
self._storages = {}
- # entity types that may be used by other multi-sources instances
- self.multisources_etypes = set(repo.config['multi-sources-etypes'])
- # XXX no_sqlite_wrap trick since we've a sqlite locking pb when
- # running unittest_multisources with the wrapping below
- if self.dbdriver == 'sqlite' and \
- not getattr(repo.config, 'no_sqlite_wrap', False):
- from cubicweb.server.sources.extlite import ConnectionWrapper
- self.dbhelper.dbname = abspath(self.dbhelper.dbname)
- self.get_connection = lambda: ConnectionWrapper(self)
- self.check_connection = lambda cnx: cnx
- def cnxset_freed(cnx):
- cnx.close()
- self.cnxset_freed = cnxset_freed
+ self.binary_to_str = self.dbhelper.dbapi_module.binary_to_str
if self.dbdriver == 'sqlite':
- self._create_eid = None
- self.create_eid = self._create_eid_sqlite
- self.binary_to_str = self.dbhelper.dbapi_module.binary_to_str
-
-
- @property
- def _sqlcnx(self):
- # XXX: sqlite connections can only be used in the same thread, so
- # create a new one each time necessary. If it appears to be time
- # consuming, find another way
- return SQLAdapterMixIn.get_connection(self)
+ self.eid_generator = SQLITEEidGenerator(self)
+ else:
+ self.eid_generator = DefaultEidGenerator(self)
+ self.create_eid = self.eid_generator.create_eid
def check_config(self, source_entity):
"""check configuration of source entity"""
@@ -347,19 +369,18 @@
self._cache.pop('Any X WHERE X eid %s' % eid, None)
self._cache.pop('Any %s' % eid, None)
- def sqlexec(self, session, sql, args=None):
+ def sqlexec(self, cnx, sql, args=None):
"""execute the query and return its result"""
- return self.process_result(self.doexec(session, sql, args))
+ return self.process_result(self.doexec(cnx, sql, args))
def init_creating(self, cnxset=None):
# check full text index availibility
if self.do_fti:
if cnxset is None:
_cnxset = self.repo._get_cnxset()
- _cnxset.cnxset_set()
else:
_cnxset = cnxset
- if not self.dbhelper.has_fti_table(_cnxset['system']):
+ if not self.dbhelper.has_fti_table(_cnxset.cu):
if not self.repo.config.creating:
self.critical('no text index table')
self.do_fti = False
@@ -420,9 +441,7 @@
self.init_creating(source_entity._cw.cnxset)
def shutdown(self):
- if self._eid_creation_cnx:
- self._eid_creation_cnx.close()
- self._eid_creation_cnx = None
+ self.eid_generator.close()
# XXX deprecates [un]map_attribute?
def map_attribute(self, etype, attr, cb, sourcedb=True):
@@ -491,21 +510,18 @@
# can't claim not supporting a relation
return True #not rtype == 'content_for'
- def may_cross_relation(self, rtype):
- return True
-
- def authenticate(self, session, login, **kwargs):
+ def authenticate(self, cnx, login, **kwargs):
"""return CWUser eid for the given login and other authentication
information found in kwargs, else raise `AuthenticationError`
"""
for authentifier in self.authentifiers:
try:
- return authentifier.authenticate(session, login, **kwargs)
+ return authentifier.authenticate(cnx, login, **kwargs)
except AuthenticationError:
continue
raise AuthenticationError()
- def syntax_tree_search(self, session, union, args=None, cachekey=None,
+ def syntax_tree_search(self, cnx, union, args=None, cachekey=None,
varmap=None):
"""return result from this source for a rql query (actually from
a rql syntax tree and a solution dictionary mapping each used
@@ -531,74 +547,28 @@
args = self.merge_args(args, qargs)
assert isinstance(sql, basestring), repr(sql)
try:
- cursor = self.doexec(session, sql, args)
+ cursor = self.doexec(cnx, sql, args)
except (self.OperationalError, self.InterfaceError):
- if session.mode == 'write':
+ if cnx.mode == 'write':
# do not attempt to reconnect if there has been some write
# during the transaction
raise
# FIXME: better detection of disconnection problems
self.warning("trying to reconnect")
- session.cnxset.reconnect(self)
- cursor = self.doexec(session, sql, args)
+ cnx.cnxset.reconnect()
+ cursor = self.doexec(cnx, sql, args)
except self.DbapiError as exc:
# We get this one with pyodbc and SQL Server when connection was reset
- if exc.args[0] == '08S01' and session.mode != 'write':
+ if exc.args[0] == '08S01' and cnx.mode != 'write':
self.warning("trying to reconnect")
- session.cnxset.reconnect(self)
- cursor = self.doexec(session, sql, args)
+ cnx.cnxset.reconnect()
+ cursor = self.doexec(cnx, sql, args)
else:
raise
- results = self.process_result(cursor, cbs, session=session)
+ results = self.process_result(cursor, cbs, session=cnx)
assert dbg_results(results)
return results
- def flying_insert(self, table, session, union, args=None, varmap=None):
- """similar as .syntax_tree_search, but inserts data in the
- temporary table (on-the-fly if possible, eg for the system
- source whose the given cursor come from). If not possible,
- inserts all data by calling .executemany().
- """
- assert dbg_st_search(
- self.uri, union, varmap, args,
- prefix='ON THE FLY temp data insertion into %s from' % table)
- # generate sql queries if we are able to do so
- sql, qargs, cbs = self._rql_sqlgen.generate(union, args, varmap)
- query = 'INSERT INTO %s %s' % (table, sql.encode(self._dbencoding))
- self.doexec(session, query, self.merge_args(args, qargs))
-
- def manual_insert(self, results, table, session):
- """insert given result into a temporary table on the system source"""
- if server.DEBUG & server.DBG_RQL:
- print ' manual insertion of', len(results), 'results into', table
- if not results:
- return
- query_args = ['%%(%s)s' % i for i in xrange(len(results[0]))]
- query = 'INSERT INTO %s VALUES(%s)' % (table, ','.join(query_args))
- kwargs_list = []
- for row in results:
- kwargs = {}
- row = tuple(row)
- for index, cell in enumerate(row):
- if isinstance(cell, Binary):
- cell = self._binary(cell.getvalue())
- kwargs[str(index)] = cell
- kwargs_list.append(kwargs)
- self.doexecmany(session, query, kwargs_list)
-
- def clean_temp_data(self, session, temptables):
- """remove temporary data, usually associated to temporary tables"""
- if temptables:
- for table in temptables:
- try:
- self.doexec(session,'DROP TABLE %s' % table)
- except Exception:
- pass
- try:
- del self._temp_table_data[table]
- except KeyError:
- continue
-
@contextmanager
def _storage_handler(self, entity, event):
# 1/ memorize values as they are before the storage is called.
@@ -630,60 +600,60 @@
for entity, attr, value in restore_values:
entity.cw_edited.edited_attribute(attr, value)
- def add_entity(self, session, entity):
+ def add_entity(self, cnx, entity):
"""add a new entity to the source"""
with self._storage_handler(entity, 'added'):
attrs = self.preprocess_entity(entity)
sql = self.sqlgen.insert(SQL_PREFIX + entity.cw_etype, attrs)
- self.doexec(session, sql, attrs)
- if session.ertype_supports_undo(entity.cw_etype):
- self._record_tx_action(session, 'tx_entity_actions', 'C',
+ self.doexec(cnx, sql, attrs)
+ if cnx.ertype_supports_undo(entity.cw_etype):
+ self._record_tx_action(cnx, 'tx_entity_actions', 'C',
etype=entity.cw_etype, eid=entity.eid)
- def update_entity(self, session, entity):
+ def update_entity(self, cnx, entity):
"""replace an entity in the source"""
with self._storage_handler(entity, 'updated'):
attrs = self.preprocess_entity(entity)
- if session.ertype_supports_undo(entity.cw_etype):
- changes = self._save_attrs(session, entity, attrs)
- self._record_tx_action(session, 'tx_entity_actions', 'U',
+ if cnx.ertype_supports_undo(entity.cw_etype):
+ changes = self._save_attrs(cnx, entity, attrs)
+ self._record_tx_action(cnx, 'tx_entity_actions', 'U',
etype=entity.cw_etype, eid=entity.eid,
changes=self._binary(dumps(changes)))
sql = self.sqlgen.update(SQL_PREFIX + entity.cw_etype, attrs,
['cw_eid'])
- self.doexec(session, sql, attrs)
+ self.doexec(cnx, sql, attrs)
- def delete_entity(self, session, entity):
+ def delete_entity(self, cnx, entity):
"""delete an entity from the source"""
with self._storage_handler(entity, 'deleted'):
- if session.ertype_supports_undo(entity.cw_etype):
+ if cnx.ertype_supports_undo(entity.cw_etype):
attrs = [SQL_PREFIX + r.type
for r in entity.e_schema.subject_relations()
if (r.final or r.inlined) and not r in VIRTUAL_RTYPES]
- changes = self._save_attrs(session, entity, attrs)
- self._record_tx_action(session, 'tx_entity_actions', 'D',
+ changes = self._save_attrs(cnx, entity, attrs)
+ self._record_tx_action(cnx, 'tx_entity_actions', 'D',
etype=entity.cw_etype, eid=entity.eid,
changes=self._binary(dumps(changes)))
attrs = {'cw_eid': entity.eid}
sql = self.sqlgen.delete(SQL_PREFIX + entity.cw_etype, attrs)
- self.doexec(session, sql, attrs)
+ self.doexec(cnx, sql, attrs)
- def add_relation(self, session, subject, rtype, object, inlined=False):
+ def add_relation(self, cnx, subject, rtype, object, inlined=False):
"""add a relation to the source"""
- self._add_relations(session, rtype, [(subject, object)], inlined)
- if session.ertype_supports_undo(rtype):
- self._record_tx_action(session, 'tx_relation_actions', 'A',
+ self._add_relations(cnx, rtype, [(subject, object)], inlined)
+ if cnx.ertype_supports_undo(rtype):
+ self._record_tx_action(cnx, 'tx_relation_actions', 'A',
eid_from=subject, rtype=rtype, eid_to=object)
- def add_relations(self, session, rtype, subj_obj_list, inlined=False):
+ def add_relations(self, cnx, rtype, subj_obj_list, inlined=False):
"""add a relations to the source"""
- self._add_relations(session, rtype, subj_obj_list, inlined)
- if session.ertype_supports_undo(rtype):
+ self._add_relations(cnx, rtype, subj_obj_list, inlined)
+ if cnx.ertype_supports_undo(rtype):
for subject, object in subj_obj_list:
- self._record_tx_action(session, 'tx_relation_actions', 'A',
+ self._record_tx_action(cnx, 'tx_relation_actions', 'A',
eid_from=subject, rtype=rtype, eid_to=object)
- def _add_relations(self, session, rtype, subj_obj_list, inlined=False):
+ def _add_relations(self, cnx, rtype, subj_obj_list, inlined=False):
"""add a relation to the source"""
sql = []
if inlined is False:
@@ -693,7 +663,7 @@
else: # used by data import
etypes = {}
for subject, object in subj_obj_list:
- etype = session.describe(subject)[0]
+ etype = cnx.entity_metas(subject)['type']
if etype in etypes:
etypes[etype].append((subject, object))
else:
@@ -705,20 +675,20 @@
['cw_eid']),
attrs))
for statement, attrs in sql:
- self.doexecmany(session, statement, attrs)
+ self.doexecmany(cnx, statement, attrs)
- def delete_relation(self, session, subject, rtype, object):
+ def delete_relation(self, cnx, subject, rtype, object):
"""delete a relation from the source"""
rschema = self.schema.rschema(rtype)
- self._delete_relation(session, subject, rtype, object, rschema.inlined)
- if session.ertype_supports_undo(rtype):
- self._record_tx_action(session, 'tx_relation_actions', 'R',
+ self._delete_relation(cnx, subject, rtype, object, rschema.inlined)
+ if cnx.ertype_supports_undo(rtype):
+ self._record_tx_action(cnx, 'tx_relation_actions', 'R',
eid_from=subject, rtype=rtype, eid_to=object)
- def _delete_relation(self, session, subject, rtype, object, inlined=False):
+ def _delete_relation(self, cnx, subject, rtype, object, inlined=False):
"""delete a relation from the source"""
if inlined:
- table = SQL_PREFIX + session.describe(subject)[0]
+ table = SQL_PREFIX + cnx.entity_metas(subject)['type']
column = SQL_PREFIX + rtype
sql = 'UPDATE %s SET %s=NULL WHERE %seid=%%(eid)s' % (table, column,
SQL_PREFIX)
@@ -726,16 +696,16 @@
else:
attrs = {'eid_from': subject, 'eid_to': object}
sql = self.sqlgen.delete('%s_relation' % rtype, attrs)
- self.doexec(session, sql, attrs)
+ self.doexec(cnx, sql, attrs)
- def doexec(self, session, query, args=None, rollback=True):
+ def doexec(self, cnx, query, args=None, rollback=True):
"""Execute a query.
it's a function just so that it shows up in profiling
"""
- cursor = session.cnxset[self.uri]
+ cursor = cnx.cnxset.cu
if server.DEBUG & server.DBG_SQL:
- cnx = session.cnxset.connection(self.uri)
- # getattr to get the actual connection if cnx is a ConnectionWrapper
+ cnx = cnx.cnxset.cnx
+ # getattr to get the actual connection if cnx is a CnxLoggingWrapper
# instance
print 'exec', query, args, getattr(cnx, '_cnx', cnx)
try:
@@ -749,7 +719,7 @@
query, args, ex.args[0])
if rollback:
try:
- session.cnxset.connection(self.uri).rollback()
+ cnx.cnxset.rollback()
if self.repo.config.mode != 'test':
self.critical('transaction has been rolled back')
except Exception as ex:
@@ -760,24 +730,31 @@
# postgres, sqlserver
mo = re.search("unique_[a-z0-9]{32}", arg)
if mo is not None:
- raise UniqueTogetherError(session, cstrname=mo.group(0))
- # sqlite
+ raise UniqueTogetherError(cnx, cstrname=mo.group(0))
+ # old sqlite
mo = re.search('columns (.*) are not unique', arg)
if mo is not None: # sqlite in use
# we left chop the 'cw_' prefix of attribute names
rtypes = [c.strip()[3:]
for c in mo.group(1).split(',')]
- raise UniqueTogetherError(session, rtypes=rtypes)
+ raise UniqueTogetherError(cnx, rtypes=rtypes)
+ # sqlite after http://www.sqlite.org/cgi/src/info/c80e229dd9c1230a
+ if arg.startswith('UNIQUE constraint failed:'):
+ # message looks like: "UNIQUE constraint failed: foo.cw_bar, foo.cw_baz"
+ # so drop the prefix, split on comma, drop the tablenames, and drop "cw_"
+ columns = arg.split(':', 1)[1].split(',')
+ rtypes = [c.split('.', 1)[1].strip()[3:] for c in columns]
+ raise UniqueTogetherError(cnx, rtypes=rtypes)
raise
return cursor
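
# Worked example of the new sqlite branch, using the message shape quoted
# in the comment above:
#
#   arg = 'UNIQUE constraint failed: foo.cw_bar, foo.cw_baz'
#   columns = arg.split(':', 1)[1].split(',')   # [' foo.cw_bar', ' foo.cw_baz']
#   rtypes = [c.split('.', 1)[1].strip()[3:] for c in columns]
#   # rtypes == ['bar', 'baz']
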
- def doexecmany(self, session, query, args):
+ def doexecmany(self, cnx, query, args):
"""Execute a query.
it's a function just so that it shows up in profiling
"""
if server.DEBUG & server.DBG_SQL:
print 'execmany', query, 'with', len(args), 'arguments'
- cursor = session.cnxset[self.uri]
+ cursor = cnx.cnxset.cu
try:
# str(query) to avoid error if it's a unicode string
cursor.executemany(str(query), args)
@@ -788,7 +765,7 @@
self.critical("sql many: %r\n args: %s\ndbms message: %r",
query, args, ex.args[0])
try:
- session.cnxset.connection(self.uri).rollback()
+ cnx.cnxset.rollback()
if self.repo.config.mode != 'test':
self.critical('transaction has been rolled back')
except Exception:
@@ -797,7 +774,7 @@
# short cut to method requiring advanced db helper usage ##################
- def update_rdef_column(self, session, rdef):
+ def update_rdef_column(self, cnx, rdef):
"""update physical column for a relation definition (final or inlined)
"""
table, column = rdef_table_column(rdef)
@@ -806,12 +783,12 @@
self.error("backend can't alter %s.%s to %s%s", table, column, coltype,
not allownull and 'NOT NULL' or '')
return
- self.dbhelper.change_col_type(LogCursor(session.cnxset[self.uri]),
+ self.dbhelper.change_col_type(LogCursor(cnx.cnxset.cu),
table, column, coltype, allownull)
self.info('altered %s.%s: now %s%s', table, column, coltype,
not allownull and 'NOT NULL' or '')
- def update_rdef_null_allowed(self, session, rdef):
+ def update_rdef_null_allowed(self, cnx, rdef):
"""update NULL / NOT NULL of physical column for a relation definition
(final or inlined)
"""
@@ -821,62 +798,62 @@
return
table, column = rdef_table_column(rdef)
coltype, allownull = rdef_physical_info(self.dbhelper, rdef)
- self.dbhelper.set_null_allowed(LogCursor(session.cnxset[self.uri]),
+ self.dbhelper.set_null_allowed(LogCursor(cnx.cnxset.cu),
table, column, coltype, allownull)
- def update_rdef_indexed(self, session, rdef):
+ def update_rdef_indexed(self, cnx, rdef):
table, column = rdef_table_column(rdef)
if rdef.indexed:
- self.create_index(session, table, column)
+ self.create_index(cnx, table, column)
else:
- self.drop_index(session, table, column)
+ self.drop_index(cnx, table, column)
- def update_rdef_unique(self, session, rdef):
+ def update_rdef_unique(self, cnx, rdef):
table, column = rdef_table_column(rdef)
if rdef.constraint_by_type('UniqueConstraint'):
- self.create_index(session, table, column, unique=True)
+ self.create_index(cnx, table, column, unique=True)
else:
- self.drop_index(session, table, column, unique=True)
+ self.drop_index(cnx, table, column, unique=True)
- def create_index(self, session, table, column, unique=False):
- cursor = LogCursor(session.cnxset[self.uri])
+ def create_index(self, cnx, table, column, unique=False):
+ cursor = LogCursor(cnx.cnxset.cu)
self.dbhelper.create_index(cursor, table, column, unique)
- def drop_index(self, session, table, column, unique=False):
- cursor = LogCursor(session.cnxset[self.uri])
+ def drop_index(self, cnx, table, column, unique=False):
+ cursor = LogCursor(cnx.cnxset.cu)
self.dbhelper.drop_index(cursor, table, column, unique)
# system source interface #################################################
- def _eid_type_source(self, session, eid, sql, _retry=True):
+ def _eid_type_source(self, cnx, eid, sql, _retry=True):
try:
- res = self.doexec(session, sql).fetchone()
+ res = self.doexec(cnx, sql).fetchone()
if res is not None:
return res
except (self.OperationalError, self.InterfaceError):
- if session.mode == 'read' and _retry:
+ if cnx.mode == 'read' and _retry:
self.warning("trying to reconnect (eid_type_source())")
- session.cnxset.reconnect(self)
- return self._eid_type_source(session, eid, sql, _retry=False)
+ cnx.cnxset.reconnect()
+ return self._eid_type_source(cnx, eid, sql, _retry=False)
except Exception:
- assert session.cnxset, 'session has no connections set'
+ assert cnx.cnxset, 'connection has no connections set'
self.exception('failed to query entities table for eid %s', eid)
raise UnknownEid(eid)
- def eid_type_source(self, session, eid): # pylint: disable=E0202
+ def eid_type_source(self, cnx, eid): # pylint: disable=E0202
"""return a tuple (type, source, extid) for the entity with id <eid>"""
- sql = 'SELECT type, source, extid, asource FROM entities WHERE eid=%s' % eid
- res = self._eid_type_source(session, eid, sql)
+ sql = 'SELECT type, extid, asource FROM entities WHERE eid=%s' % eid
+ res = self._eid_type_source(cnx, eid, sql)
if res[-2] is not None:
if not isinstance(res, list):
res = list(res)
res[-2] = b64decode(res[-2])
return res
- def eid_type_source_pre_131(self, session, eid):
+ def eid_type_source_pre_131(self, cnx, eid):
"""return a tuple (type, source, extid) for the entity with id <eid>"""
- sql = 'SELECT type, source, extid FROM entities WHERE eid=%s' % eid
- res = self._eid_type_source(session, eid, sql)
+ sql = 'SELECT type, extid FROM entities WHERE eid=%s' % eid
+ res = self._eid_type_source(cnx, eid, sql)
if not isinstance(res, list):
res = list(res)
if res[-1] is not None:
@@ -884,13 +861,12 @@
res.append(res[1])
return res
- def extid2eid(self, session, source_uri, extid):
+ def extid2eid(self, cnx, extid):
"""get eid from an external id. Return None if no record found."""
assert isinstance(extid, str)
- cursor = self.doexec(session,
- 'SELECT eid FROM entities '
- 'WHERE extid=%(x)s AND source=%(s)s',
- {'x': b64encode(extid), 's': source_uri})
+ cursor = self.doexec(cnx,
+ 'SELECT eid FROM entities WHERE extid=%(x)s',
+ {'x': b64encode(extid)})
# XXX testing rowcount causes a strange bug with sqlite: results are there
# but rowcount is 0
#if cursor.rowcount > 0:
@@ -902,167 +878,68 @@
pass
return None
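
# Since the source column is gone from the entities table, the lookup now
# keys on the base64-encoded extid alone. Usage sketch (extid invented):
#
#   eid = source.extid2eid(cnx, 'uid=jdoe,ou=people,dc=example,dc=com')
#   # -> the integer eid, or None when no entity has that external id
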
- def make_temp_table_name(self, table):
- return self.dbhelper.temporary_table_name(table)
-
- def temp_table_def(self, selected, sol, table):
- return make_schema(selected, sol, table, self.dbhelper.TYPE_MAPPING)
-
- def create_temp_table(self, session, table, schema):
- # we don't want on commit drop, this may cause problem when
- # running with an ldap source, and table will be deleted manually any way
- # on commit
- sql = self.dbhelper.sql_temporary_table(table, schema, False)
- self.doexec(session, sql)
-
- def _create_eid_sqlite(self, session):
- with self._eid_cnx_lock:
- for sql in self.dbhelper.sqls_increment_sequence('entities_id_seq'):
- cursor = self.doexec(session, sql)
- return cursor.fetchone()[0]
-
-
- def create_eid(self, session): # pylint: disable=E0202
- # lock needed to prevent 'Connection is busy with results for another
- # command (0)' errors with SQLServer
- with self._eid_cnx_lock:
- return self._create_eid() # pylint: disable=E1102
-
- def _create_eid(self): # pylint: disable=E0202
- # internal function doing the eid creation without locking.
- # needed for the recursive handling of disconnections (otherwise we
- # deadlock on self._eid_cnx_lock
- if self._eid_creation_cnx is None:
- self._eid_creation_cnx = self.get_connection()
- cnx = self._eid_creation_cnx
- try:
- cursor = cnx.cursor()
- for sql in self.dbhelper.sqls_increment_sequence('entities_id_seq'):
- cursor.execute(sql)
- eid = cursor.fetchone()[0]
- except (self.OperationalError, self.InterfaceError):
- # FIXME: better detection of deconnection pb
- self.warning("trying to reconnect create eid connection")
- self._eid_creation_cnx = None
- return self._create_eid() # pylint: disable=E1102
- except self.DbapiError as exc:
- # We get this one with pyodbc and SQL Server when connection was reset
- if exc.args[0] == '08S01':
- self.warning("trying to reconnect create eid connection")
- self._eid_creation_cnx = None
- return self._create_eid() # pylint: disable=E1102
- else:
- raise
- except Exception: # WTF?
- cnx.rollback()
- self._eid_creation_cnx = None
- self.exception('create eid failed in an unforeseen way on SQL statement %s', sql)
- raise
- else:
- cnx.commit()
- return eid
-
- def _handle_is_relation_sql(self, session, sql, attrs):
+ def _handle_is_relation_sql(self, cnx, sql, attrs):
""" Handler for specific is_relation sql that may be
overwritten in some stores"""
- self.doexec(session, sql % attrs)
+ self.doexec(cnx, sql % attrs)
_handle_insert_entity_sql = doexec
_handle_is_instance_of_sql = _handle_source_relation_sql = _handle_is_relation_sql
- def add_info(self, session, entity, source, extid, complete):
+ def add_info(self, cnx, entity, source, extid):
"""add type and source info for an eid into the system table"""
- # begin by inserting eid/type/source/extid into the entities table
- if extid is not None:
- assert isinstance(extid, str)
- extid = b64encode(extid)
- uri = 'system' if source.copy_based_source else source.uri
- attrs = {'type': entity.cw_etype, 'eid': entity.eid, 'extid': extid,
- 'source': uri, 'asource': source.uri, 'mtime': datetime.utcnow()}
- self._handle_insert_entity_sql(session, self.sqlgen.insert('entities', attrs), attrs)
- # insert core relations: is, is_instance_of and cw_source
- try:
- self._handle_is_relation_sql(session, 'INSERT INTO is_relation(eid_from,eid_to) VALUES (%s,%s)',
- (entity.eid, eschema_eid(session, entity.e_schema)))
- except IndexError:
- # during schema serialization, skip
- pass
- else:
- for eschema in entity.e_schema.ancestors() + [entity.e_schema]:
- self._handle_is_relation_sql(session,
- 'INSERT INTO is_instance_of_relation(eid_from,eid_to) VALUES (%s,%s)',
- (entity.eid, eschema_eid(session, eschema)))
- if 'CWSource' in self.schema and source.eid is not None: # else, cw < 3.10
- self._handle_is_relation_sql(session, 'INSERT INTO cw_source_relation(eid_from,eid_to) VALUES (%s,%s)',
- (entity.eid, source.eid))
- # now we can update the full text index
- if self.do_fti and self.need_fti_indexation(entity.cw_etype):
- if complete:
- entity.complete(entity.e_schema.indexable_attributes())
- self.index_entity(session, entity=entity)
+ with cnx.ensure_cnx_set:
+ # begin by inserting eid/type/source/extid into the entities table
+ if extid is not None:
+ assert isinstance(extid, str)
+ extid = b64encode(extid)
+ attrs = {'type': entity.cw_etype, 'eid': entity.eid, 'extid': extid,
+ 'asource': source.uri}
+ self._handle_insert_entity_sql(cnx, self.sqlgen.insert('entities', attrs), attrs)
+ # insert core relations: is, is_instance_of and cw_source
+ try:
+ self._handle_is_relation_sql(cnx, 'INSERT INTO is_relation(eid_from,eid_to) VALUES (%s,%s)',
+ (entity.eid, eschema_eid(cnx, entity.e_schema)))
+ except IndexError:
+ # during schema serialization, skip
+ pass
+ else:
+ for eschema in entity.e_schema.ancestors() + [entity.e_schema]:
+ self._handle_is_relation_sql(cnx,
+ 'INSERT INTO is_instance_of_relation(eid_from,eid_to) VALUES (%s,%s)',
+ (entity.eid, eschema_eid(cnx, eschema)))
+ if 'CWSource' in self.schema and source.eid is not None: # else, cw < 3.10
+ self._handle_is_relation_sql(cnx, 'INSERT INTO cw_source_relation(eid_from,eid_to) VALUES (%s,%s)',
+ (entity.eid, source.eid))
+ # now we can update the full text index
+ if self.do_fti and self.need_fti_indexation(entity.cw_etype):
+ self.index_entity(cnx, entity=entity)
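
# With the source and mtime columns dropped, the row inserted into the
# entities table boils down to (values invented):
#
#   attrs = {'type': 'Person', 'eid': 1234, 'extid': None, 'asource': 'ldap'}
#   # one INSERT INTO entities per entity, no mtime bookkeeping anymore
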
- def update_info(self, session, entity, need_fti_update):
+ def update_info(self, cnx, entity, need_fti_update):
"""mark entity as being modified, fulltext reindex if needed"""
if self.do_fti and need_fti_update:
# reindex the entity only if this query is updating at least
# one indexable attribute
- self.index_entity(session, entity=entity)
- # update entities.mtime.
- # XXX Only if entity.cw_etype in self.multisources_etypes?
- attrs = {'eid': entity.eid, 'mtime': datetime.utcnow()}
- self.doexec(session, self.sqlgen.update('entities', attrs, ['eid']), attrs)
+ self.index_entity(cnx, entity=entity)
- def delete_info_multi(self, session, entities, uri):
+ def delete_info_multi(self, cnx, entities):
"""delete system information on deletion of a list of entities with the
same etype and belonging to the same source
* update the fti
* remove record from the `entities` table
- * transfer it to the `deleted_entities`
"""
- self.fti_unindex_entities(session, entities)
+ self.fti_unindex_entities(cnx, entities)
attrs = {'eid': '(%s)' % ','.join([str(_e.eid) for _e in entities])}
- self.doexec(session, self.sqlgen.delete_many('entities', attrs), attrs)
- if entities[0].__regid__ not in self.multisources_etypes:
- return
- attrs = {'type': entities[0].__regid__,
- 'source': uri, 'dtime': datetime.utcnow()}
- for entity in entities:
- extid = entity.cw_metainformation()['extid']
- if extid is not None:
- assert isinstance(extid, str), type(extid)
- extid = b64encode(extid)
- attrs.update({'eid': entity.eid, 'extid': extid})
- self.doexec(session, self.sqlgen.insert('deleted_entities', attrs), attrs)
-
- def modified_entities(self, session, etypes, mtime):
- """return a 2-uple:
- * list of (etype, eid) of entities of the given types which have been
- modified since the given timestamp (actually entities whose full text
- index content has changed)
- * list of (etype, eid) of entities of the given types which have been
- deleted since the given timestamp
- """
- for etype in etypes:
- if not etype in self.multisources_etypes:
- self.error('%s not listed as a multi-sources entity types. '
- 'Modify your configuration' % etype)
- self.multisources_etypes.add(etype)
- modsql = _modified_sql('entities', etypes)
- cursor = self.doexec(session, modsql, {'time': mtime})
- modentities = cursor.fetchall()
- delsql = _modified_sql('deleted_entities', etypes)
- cursor = self.doexec(session, delsql, {'time': mtime})
- delentities = cursor.fetchall()
- return modentities, delentities
+ self.doexec(cnx, self.sqlgen.delete_many('entities', attrs), attrs)
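
# The single statement built by sqlgen.delete_many presumably expands the
# pre-joined eid string into an IN restriction covering the whole batch,
# e.g. (eids invented):
#
#   attrs = {'eid': '(1234,1235,1236)'}
#   # -> DELETE FROM entities WHERE eid IN (1234,1235,1236)
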
# undo support #############################################################
- def undoable_transactions(self, session, ueid=None, **actionfilters):
- """See :class:`cubicweb.dbapi.Connection.undoable_transactions`"""
- # force filtering to session's user if not a manager
- if not session.user.is_in_group('managers'):
- ueid = session.user.eid
+ def undoable_transactions(self, cnx, ueid=None, **actionfilters):
+ """See :class:`cubicweb.repoapi.ClientConnection.undoable_transactions`"""
+ # force filtering to connection's user if not a manager
+ if not cnx.user.is_in_group('managers'):
+ ueid = cnx.user.eid
restr = {}
if ueid is not None:
restr['tx_user'] = ueid
@@ -1126,17 +1003,18 @@
restr.update(tearestr)
# we want results ordered by transaction's time descendant
sql += ' ORDER BY tx_time DESC'
- cu = self.doexec(session, sql, restr)
- # turn results into transaction objects
- return [tx.Transaction(*args) for args in cu.fetchall()]
+ with cnx.ensure_cnx_set:
+ cu = self.doexec(cnx, sql, restr)
+ # turn results into transaction objects
+ return [tx.Transaction(*args) for args in cu.fetchall()]
- def tx_info(self, session, txuuid):
- """See :class:`cubicweb.dbapi.Connection.transaction_info`"""
- return tx.Transaction(txuuid, *self._tx_info(session, txuuid))
+ def tx_info(self, cnx, txuuid):
+ """See :class:`cubicweb.repoapi.ClientConnection.transaction_info`"""
+ return tx.Transaction(txuuid, *self._tx_info(cnx, txuuid))
- def tx_actions(self, session, txuuid, public):
- """See :class:`cubicweb.dbapi.Connection.transaction_actions`"""
- self._tx_info(session, txuuid)
+ def tx_actions(self, cnx, txuuid, public):
+ """See :class:`cubicweb.repoapi.ClientConnection.transaction_actions`"""
+ self._tx_info(cnx, txuuid)
restr = {'tx_uuid': txuuid}
if public:
restr['txa_public'] = True
@@ -1144,54 +1022,54 @@
sql = self.sqlgen.select('tx_entity_actions', restr,
('txa_action', 'txa_public', 'txa_order',
'etype', 'eid', 'changes'))
- cu = self.doexec(session, sql, restr)
+ cu = self.doexec(cnx, sql, restr)
actions = [tx.EntityAction(a,p,o,et,e,c and loads(self.binary_to_str(c)))
for a,p,o,et,e,c in cu.fetchall()]
sql = self.sqlgen.select('tx_relation_actions', restr,
('txa_action', 'txa_public', 'txa_order',
'rtype', 'eid_from', 'eid_to'))
- cu = self.doexec(session, sql, restr)
+ cu = self.doexec(cnx, sql, restr)
actions += [tx.RelationAction(*args) for args in cu.fetchall()]
return sorted(actions, key=lambda x: x.order)
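
# The two SELECTs return entity and relation actions separately; sorting on
# txa_order re-interleaves them into execution order. A hedged sketch of
# the merged result for a transaction that created an entity and then
# linked it (eids and rtype invented):
#
#   [tx.EntityAction('C', True, 0, 'Person', 1234, None),   # txa_order 0
#    tx.RelationAction('A', True, 1, 'knows', 1234, 42)]    # txa_order 1
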
- def undo_transaction(self, session, txuuid):
- """See :class:`cubicweb.dbapi.Connection.undo_transaction`
+ def undo_transaction(self, cnx, txuuid):
+ """See :class:`cubicweb.repoapi.ClientConnection.undo_transaction`
important note: while undoing of a transaction, only hooks in the
'integrity', 'activeintegrity' and 'undo' categories are called.
"""
# set mode so the connections set isn't released subsequently until commit/rollback
- session.mode = 'write'
+ cnx.mode = 'write'
errors = []
- session.transaction_data['undoing_uuid'] = txuuid
- with session.deny_all_hooks_but('integrity', 'activeintegrity', 'undo'):
- with session.security_enabled(read=False):
- for action in reversed(self.tx_actions(session, txuuid, False)):
+ cnx.transaction_data['undoing_uuid'] = txuuid
+ with cnx.deny_all_hooks_but('integrity', 'activeintegrity', 'undo'):
+ with cnx.security_enabled(read=False):
+ for action in reversed(self.tx_actions(cnx, txuuid, False)):
undomethod = getattr(self, '_undo_%s' % action.action.lower())
- errors += undomethod(session, action)
+ errors += undomethod(cnx, action)
# remove the transactions record
- self.doexec(session,
+ self.doexec(cnx,
"DELETE FROM transactions WHERE tx_uuid='%s'" % txuuid)
if errors:
raise UndoTransactionException(txuuid, errors)
else:
return
- def start_undoable_transaction(self, session, uuid):
- """session callback to insert a transaction record in the transactions
+ def start_undoable_transaction(self, cnx, uuid):
+ """connection callback to insert a transaction record in the transactions
table when some undoable transaction is started
"""
- ueid = session.user.eid
+ ueid = cnx.user.eid
attrs = {'tx_uuid': uuid, 'tx_user': ueid, 'tx_time': datetime.utcnow()}
- self.doexec(session, self.sqlgen.insert('transactions', attrs), attrs)
+ self.doexec(cnx, self.sqlgen.insert('transactions', attrs), attrs)
- def _save_attrs(self, session, entity, attrs):
+ def _save_attrs(self, cnx, entity, attrs):
"""return a pickleable dictionary containing current values for given
attributes of the entity
"""
restr = {'cw_eid': entity.eid}
sql = self.sqlgen.select(SQL_PREFIX + entity.cw_etype, restr, attrs)
- cu = self.doexec(session, sql, restr)
+ cu = self.doexec(cnx, sql, restr)
values = dict(zip(attrs, cu.fetchone()))
# ensure backend specific binary are converted back to string
eschema = entity.e_schema
@@ -1206,36 +1084,38 @@
values[column] = self.binary_to_str(value)
return values
- def _record_tx_action(self, session, table, action, **kwargs):
+ def _record_tx_action(self, cnx, table, action, **kwargs):
"""record a transaction action in the given table (either
'tx_entity_actions' or 'tx_relation_action')
"""
- kwargs['tx_uuid'] = session.transaction_uuid()
+ kwargs['tx_uuid'] = cnx.transaction_uuid()
kwargs['txa_action'] = action
- kwargs['txa_order'] = session.transaction_inc_action_counter()
- kwargs['txa_public'] = session.running_dbapi_query
- self.doexec(session, self.sqlgen.insert(table, kwargs), kwargs)
+ kwargs['txa_order'] = cnx.transaction_inc_action_counter()
+ kwargs['txa_public'] = cnx.running_dbapi_query
+ self.doexec(cnx, self.sqlgen.insert(table, kwargs), kwargs)
- def _tx_info(self, session, txuuid):
+ def _tx_info(self, cnx, txuuid):
"""return transaction's time and user of the transaction with the given uuid.
raise `NoSuchTransaction` if there is no such transaction of if the
- session's user isn't allowed to see it.
+ connection's user isn't allowed to see it.
"""
- restr = {'tx_uuid': txuuid}
- sql = self.sqlgen.select('transactions', restr, ('tx_time', 'tx_user'))
- cu = self.doexec(session, sql, restr)
- try:
- time, ueid = cu.fetchone()
- except TypeError:
- raise tx.NoSuchTransaction(txuuid)
- if not (session.user.is_in_group('managers')
- or session.user.eid == ueid):
- raise tx.NoSuchTransaction(txuuid)
- return time, ueid
+ with cnx.ensure_cnx_set:
+ restr = {'tx_uuid': txuuid}
+ sql = self.sqlgen.select('transactions', restr,
+ ('tx_time', 'tx_user'))
+ cu = self.doexec(cnx, sql, restr)
+ try:
+ time, ueid = cu.fetchone()
+ except TypeError:
+ raise tx.NoSuchTransaction(txuuid)
+ if not (cnx.user.is_in_group('managers')
+ or cnx.user.eid == ueid):
+ raise tx.NoSuchTransaction(txuuid)
+ return time, ueid
def _reedit_entity(self, entity, changes, err):
- session = entity._cw
+ cnx = entity._cw
eid = entity.eid
entity.cw_edited = edited = EditedEntity(entity)
# check for schema changes, entities linked through inlined relation
@@ -1249,7 +1129,7 @@
try:
rschema = getrschema[rtype]
except KeyError:
- err(session._("can't restore relation %(rtype)s of entity %(eid)s, "
+ err(cnx._("can't restore relation %(rtype)s of entity %(eid)s, "
"this relation does not exist in the schema anymore.")
% {'rtype': rtype, 'eid': eid})
if not rschema.final:
@@ -1262,57 +1142,53 @@
entity._cw.entity_from_eid(value) # check target exists
edited[rtype] = value
except UnknownEid:
- err(session._("can't restore entity %(eid)s of type %(eschema)s, "
+ err(cnx._("can't restore entity %(eid)s of type %(eschema)s, "
"target of %(rtype)s (eid %(value)s) does not exist any longer")
% locals())
elif eschema.destination(rtype) in ('Bytes', 'Password'):
changes[column] = self._binary(value)
edited[rtype] = Binary(value)
elif isinstance(value, str):
- edited[rtype] = unicode(value, session.encoding, 'replace')
+ edited[rtype] = unicode(value, cnx.encoding, 'replace')
else:
edited[rtype] = value
# This must only be done after init_entity_caches: deferred in calling functions
# edited.check()
- def _undo_d(self, session, action):
+ def _undo_d(self, cnx, action):
"""undo an entity deletion"""
errors = []
err = errors.append
eid = action.eid
etype = action.etype
- _ = session._
+ _ = cnx._
# get an entity instance
try:
- entity = self.repo.vreg['etypes'].etype_class(etype)(session)
+ entity = self.repo.vreg['etypes'].etype_class(etype)(cnx)
except Exception:
err("can't restore entity %s of type %s, type no more supported"
% (eid, etype))
return errors
self._reedit_entity(entity, action.changes, err)
entity.eid = eid
- session.repo.init_entity_caches(session, entity, self)
+ cnx.repo.init_entity_caches(cnx, entity, self)
entity.cw_edited.check()
- self.repo.hm.call_hooks('before_add_entity', session, entity=entity)
+ self.repo.hm.call_hooks('before_add_entity', cnx, entity=entity)
# restore the entity
action.changes['cw_eid'] = eid
sql = self.sqlgen.insert(SQL_PREFIX + etype, action.changes)
- self.doexec(session, sql, action.changes)
+ self.doexec(cnx, sql, action.changes)
# restore record in entities (will update fti if needed)
- self.add_info(session, entity, self, None, True)
- # remove record from deleted_entities if entity's type is multi-sources
- if entity.cw_etype in self.multisources_etypes:
- self.doexec(session,
- 'DELETE FROM deleted_entities WHERE eid=%s' % eid)
- self.repo.hm.call_hooks('after_add_entity', session, entity=entity)
+ self.add_info(cnx, entity, self, None)
+ self.repo.hm.call_hooks('after_add_entity', cnx, entity=entity)
return errors
- def _undo_r(self, session, action):
+ def _undo_r(self, cnx, action):
"""undo a relation removal"""
errors = []
subj, rtype, obj = action.eid_from, action.rtype, action.eid_to
try:
- sentity, oentity, rdef = _undo_rel_info(session, subj, rtype, obj)
+ sentity, oentity, rdef = _undo_rel_info(cnx, subj, rtype, obj)
except _UndoException as ex:
errors.append(unicode(ex))
else:
@@ -1324,78 +1200,78 @@
errors.append(unicode(ex))
continue
if not errors:
- self.repo.hm.call_hooks('before_add_relation', session,
+ self.repo.hm.call_hooks('before_add_relation', cnx,
eidfrom=subj, rtype=rtype, eidto=obj)
# add relation in the database
- self._add_relations(session, rtype, [(subj, obj)], rdef.rtype.inlined)
+ self._add_relations(cnx, rtype, [(subj, obj)], rdef.rtype.inlined)
# set related cache
- session.update_rel_cache_add(subj, rtype, obj, rdef.rtype.symmetric)
- self.repo.hm.call_hooks('after_add_relation', session,
+ cnx.update_rel_cache_add(subj, rtype, obj, rdef.rtype.symmetric)
+ self.repo.hm.call_hooks('after_add_relation', cnx,
eidfrom=subj, rtype=rtype, eidto=obj)
return errors
- def _undo_c(self, session, action):
+ def _undo_c(self, cnx, action):
"""undo an entity creation"""
eid = action.eid
# XXX done to avoid fetching all remaining relations for the entity;
# we should find an efficient way to do this (keeping the current
# massive deletion performance)
- if _undo_has_later_transaction(session, eid):
- msg = session._('some later transaction(s) touch entity, undo them '
+ if _undo_has_later_transaction(cnx, eid):
+ msg = cnx._('some later transaction(s) touch entity, undo them '
'first')
raise ValidationError(eid, {None: msg})
etype = action.etype
# get an entity instance
try:
- entity = self.repo.vreg['etypes'].etype_class(etype)(session)
+ entity = self.repo.vreg['etypes'].etype_class(etype)(cnx)
except Exception:
- return [session._(
+ return [cnx._(
"Can't undo creation of entity %(eid)s of type %(etype)s, type "
"no more supported" % {'eid': eid, 'etype': etype})]
entity.eid = eid
# for proper eid/type cache update
- CleanupDeletedEidsCacheOp.get_instance(session).add_data(eid)
- self.repo.hm.call_hooks('before_delete_entity', session, entity=entity)
+ CleanupDeletedEidsCacheOp.get_instance(cnx).add_data(eid)
+ self.repo.hm.call_hooks('before_delete_entity', cnx, entity=entity)
# remove is / is_instance_of which are added using sql by hooks, hence
# invisible as transaction actions
- self.doexec(session, 'DELETE FROM is_relation WHERE eid_from=%s' % eid)
- self.doexec(session, 'DELETE FROM is_instance_of_relation WHERE eid_from=%s' % eid)
- self.doexec(session, 'DELETE FROM cw_source_relation WHERE eid_from=%s' % self.eid)
+ self.doexec(cnx, 'DELETE FROM is_relation WHERE eid_from=%s' % eid)
+ self.doexec(cnx, 'DELETE FROM is_instance_of_relation WHERE eid_from=%s' % eid)
+ self.doexec(cnx, 'DELETE FROM cw_source_relation WHERE eid_from=%s' % self.eid)
# XXX check removal of inlined relation?
# delete the entity
attrs = {'cw_eid': eid}
sql = self.sqlgen.delete(SQL_PREFIX + entity.cw_etype, attrs)
- self.doexec(session, sql, attrs)
+ self.doexec(cnx, sql, attrs)
# remove record from entities (will update fti if needed)
- self.delete_info_multi(session, [entity], self.uri)
- self.repo.hm.call_hooks('after_delete_entity', session, entity=entity)
+ self.delete_info_multi(cnx, [entity])
+ self.repo.hm.call_hooks('after_delete_entity', cnx, entity=entity)
return ()
- def _undo_u(self, session, action):
+ def _undo_u(self, cnx, action):
"""undo an entity update"""
errors = []
err = errors.append
try:
- entity = session.entity_from_eid(action.eid)
+ entity = cnx.entity_from_eid(action.eid)
except UnknownEid:
- err(session._("can't restore state of entity %s, it has been "
+ err(cnx._("can't restore state of entity %s, it has been "
"deleted inbetween") % action.eid)
return errors
self._reedit_entity(entity, action.changes, err)
entity.cw_edited.check()
- self.repo.hm.call_hooks('before_update_entity', session, entity=entity)
+ self.repo.hm.call_hooks('before_update_entity', cnx, entity=entity)
sql = self.sqlgen.update(SQL_PREFIX + entity.cw_etype, action.changes,
['cw_eid'])
- self.doexec(session, sql, action.changes)
- self.repo.hm.call_hooks('after_update_entity', session, entity=entity)
+ self.doexec(cnx, sql, action.changes)
+ self.repo.hm.call_hooks('after_update_entity', cnx, entity=entity)
return errors
- def _undo_a(self, session, action):
+ def _undo_a(self, cnx, action):
"""undo a relation addition"""
errors = []
subj, rtype, obj = action.eid_from, action.rtype, action.eid_to
try:
- sentity, oentity, rdef = _undo_rel_info(session, subj, rtype, obj)
+ sentity, oentity, rdef = _undo_rel_info(cnx, subj, rtype, obj)
except _UndoException as ex:
errors.append(unicode(ex))
else:
@@ -1406,19 +1282,19 @@
else:
sql = 'SELECT 1 FROM %s_relation WHERE eid_from=%s and eid_to=%s'\
% (rtype, subj, obj)
- cu = self.doexec(session, sql)
+ cu = self.doexec(cnx, sql)
if cu.fetchone() is None:
- errors.append(session._(
+ errors.append(cnx._(
"Can't undo addition of relation %(rtype)s from %(subj)s to"
" %(obj)s, doesn't exist anymore" % locals()))
if not errors:
- self.repo.hm.call_hooks('before_delete_relation', session,
+ self.repo.hm.call_hooks('before_delete_relation', cnx,
eidfrom=subj, rtype=rtype, eidto=obj)
# delete relation from the database
- self._delete_relation(session, subj, rtype, obj, rschema.inlined)
+ self._delete_relation(cnx, subj, rtype, obj, rschema.inlined)
# set related cache
- session.update_rel_cache_del(subj, rtype, obj, rschema.symmetric)
- self.repo.hm.call_hooks('after_delete_relation', session,
+ cnx.update_rel_cache_del(subj, rtype, obj, rschema.symmetric)
+ self.repo.hm.call_hooks('after_delete_relation', cnx,
eidfrom=subj, rtype=rtype, eidto=obj)
return errors
@@ -1433,16 +1309,16 @@
return True
return False
- def index_entity(self, session, entity):
+ def index_entity(self, cnx, entity):
"""create an operation to [re]index textual content of the given entity
on commit
"""
- FTIndexEntityOp.get_instance(session).add_data(entity.eid)
+ FTIndexEntityOp.get_instance(cnx).add_data(entity.eid)
- def fti_unindex_entities(self, session, entities):
+ def fti_unindex_entities(self, cnx, entities):
"""remove text content for entities from the full text index
"""
- cursor = session.cnxset['system']
+ cursor = cnx.cnxset.cu
cursor_unindex_object = self.dbhelper.cursor_unindex_object
try:
for entity in entities:
@@ -1451,11 +1327,11 @@
self.exception('error while unindexing %s', entity)
- def fti_index_entities(self, session, entities):
+ def fti_index_entities(self, cnx, entities):
"""add text content of created/modified entities to the full text index
"""
cursor_index_object = self.dbhelper.cursor_index_object
- cursor = session.cnxset['system']
+ cursor = cnx.cnxset.cu
try:
# use cursor_index_object, not cursor_reindex_object since
# unindexing done in the FTIndexEntityOp
@@ -1476,10 +1352,10 @@
"""
def precommit_event(self):
- session = self.session
- source = session.repo.system_source
- pendingeids = session.transaction_data.get('pendingeids', ())
- done = session.transaction_data.setdefault('indexedeids', set())
+ cnx = self.cnx
+ source = cnx.repo.system_source
+ pendingeids = cnx.transaction_data.get('pendingeids', ())
+ done = cnx.transaction_data.setdefault('indexedeids', set())
to_reindex = set()
for eid in self.get_data():
if eid in pendingeids or eid in done:
@@ -1487,10 +1363,10 @@
# processed
continue
done.add(eid)
- iftindexable = session.entity_from_eid(eid).cw_adapt_to('IFTIndexable')
+ iftindexable = cnx.entity_from_eid(eid).cw_adapt_to('IFTIndexable')
to_reindex |= set(iftindexable.fti_containers())
- source.fti_unindex_entities(session, to_reindex)
- source.fti_index_entities(session, to_reindex)
+ source.fti_unindex_entities(cnx, to_reindex)
+ source.fti_index_entities(cnx, to_reindex)
def sql_schema(driver):
helper = get_db_helper(driver)
@@ -1503,26 +1379,12 @@
CREATE TABLE entities (
eid INTEGER PRIMARY KEY NOT NULL,
type VARCHAR(64) NOT NULL,
- source VARCHAR(128) NOT NULL,
asource VARCHAR(128) NOT NULL,
- mtime %s NOT NULL,
extid VARCHAR(256)
);;
CREATE INDEX entities_type_idx ON entities(type);;
-CREATE INDEX entities_mtime_idx ON entities(mtime);;
CREATE INDEX entities_extid_idx ON entities(extid);;
-CREATE TABLE deleted_entities (
- eid INTEGER PRIMARY KEY NOT NULL,
- type VARCHAR(64) NOT NULL,
- source VARCHAR(128) NOT NULL,
- dtime %s NOT NULL,
- extid VARCHAR(256)
-);;
-CREATE INDEX deleted_entities_type_idx ON deleted_entities(type);;
-CREATE INDEX deleted_entities_dtime_idx ON deleted_entities(dtime);;
-CREATE INDEX deleted_entities_extid_idx ON deleted_entities(extid);;
-
CREATE TABLE transactions (
tx_uuid CHAR(32) PRIMARY KEY NOT NULL,
tx_user INTEGER NOT NULL,
@@ -1560,8 +1422,8 @@
CREATE INDEX tx_relation_actions_eid_from_idx ON tx_relation_actions(eid_from);;
CREATE INDEX tx_relation_actions_eid_to_idx ON tx_relation_actions(eid_to);;
CREATE INDEX tx_relation_actions_tx_uuid_idx ON tx_relation_actions(tx_uuid);;
-""" % (helper.sql_create_sequence('entities_id_seq').replace(';', ';;'),
- typemap['Datetime'], typemap['Datetime'], typemap['Datetime'],
+""" % (helper.sql_create_numrange('entities_id_seq').replace(';', ';;'),
+ typemap['Datetime'],
typemap['Boolean'], typemap['Bytes'], typemap['Boolean'])
if helper.backend_name == 'sqlite':
# sqlite supports the ON DELETE CASCADE syntax but does nothing
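
# The sql_create_numrange helper mirrors the old sequence-based one; on a
# backend with native sequences the generated DDL is presumably equivalent,
# e.g. (assumption, postgres-style):
#
#   helper.sql_create_numrange('entities_id_seq').replace(';', ';;')
#   # -> something like 'CREATE SEQUENCE entities_id_seq;;'
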
@@ -1581,16 +1443,15 @@
return """
%s
DROP TABLE entities;
-DROP TABLE deleted_entities;
DROP TABLE tx_entity_actions;
DROP TABLE tx_relation_actions;
DROP TABLE transactions;
-""" % helper.sql_drop_sequence('entities_id_seq')
+""" % helper.sql_drop_numrange('entities_id_seq')
def grant_schema(user, set_owner=True):
result = ''
- for table in ('entities', 'deleted_entities', 'entities_id_seq',
+ for table in ('entities', 'entities_id_seq',
'transactions', 'tx_entity_actions', 'tx_relation_actions'):
if set_owner:
result = 'ALTER TABLE %s OWNER TO %s;\n' % (table, user)
@@ -1620,7 +1481,7 @@
self._passwd_rqlst = self.source.compile_rql(self.passwd_rql, self._sols)
self._auth_rqlst = self.source.compile_rql(self.auth_rql, self._sols)
- def authenticate(self, session, login, password=None, **kwargs):
+ def authenticate(self, cnx, login, password=None, **kwargs):
"""return CWUser eid for the given login/password if this account is
defined in this source, else raise `AuthenticationError`
@@ -1629,7 +1490,7 @@
"""
args = {'login': login, 'pwd' : None}
if password is not None:
- rset = self.source.syntax_tree_search(session, self._passwd_rqlst, args)
+ rset = self.source.syntax_tree_search(cnx, self._passwd_rqlst, args)
try:
pwd = rset[0][0]
except IndexError:
@@ -1640,7 +1501,7 @@
# passwords are stored using the Bytes type, so we get a StringIO
args['pwd'] = Binary(crypt_password(password, pwd.getvalue()))
# get eid from login and (crypted) password
- rset = self.source.syntax_tree_search(session, self._auth_rqlst, args)
+ rset = self.source.syntax_tree_search(cnx, self._auth_rqlst, args)
try:
user = rset[0][0]
# If the stored hash uses a deprecated scheme (e.g. DES or MD5 used
@@ -1650,32 +1511,33 @@
if not verify: # should not happen, but...
raise AuthenticationError('bad password')
if newhash:
- session.system_sql("UPDATE %s SET %s=%%(newhash)s WHERE %s=%%(login)s" % (
+ cnx.system_sql("UPDATE %s SET %s=%%(newhash)s WHERE %s=%%(login)s" % (
SQL_PREFIX + 'CWUser',
SQL_PREFIX + 'upassword',
SQL_PREFIX + 'login'),
{'newhash': self.source._binary(newhash),
'login': login})
- session.commit(free_cnxset=False)
+ cnx.commit(free_cnxset=False)
return user
except IndexError:
raise AuthenticationError('bad password')
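
The verify/newhash pair is passlib's transparent hash-upgrade idiom: a password that checks out against a deprecated hash simultaneously yields a replacement hash under the preferred scheme, which the UPDATE above then persists. A minimal standalone sketch, assuming passlib >= 1.7 and illustrative scheme names:

    from passlib.context import CryptContext

    ctx = CryptContext(schemes=['sha512_crypt', 'md5_crypt'],
                       deprecated=['md5_crypt'])
    stored = ctx.hash('secret', scheme='md5_crypt')  # legacy hash on file
    verified, newhash = ctx.verify_and_update('secret', stored)
    assert verified and newhash is not None
    # newhash now uses sha512_crypt; store it, as the UPDATE above does
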
class EmailPasswordAuthentifier(BaseAuthentifier):
- def authenticate(self, session, login, **authinfo):
+ def authenticate(self, cnx, login, **authinfo):
# the email_auth flag prevents infinite recursion (the call to
# repo.check_auth_info at the end of this method may lead us here again)
if not '@' in login or authinfo.pop('email_auth', None):
raise AuthenticationError('not an email')
- rset = session.execute('Any L WHERE U login L, U primary_email M, '
+ rset = cnx.execute('Any L WHERE U login L, U primary_email M, '
'M address %(login)s', {'login': login},
build_descr=False)
if rset.rowcount != 1:
raise AuthenticationError('nonexistent email')
login = rset.rows[0][0]
authinfo['email_auth'] = True
- return self.source.repo.check_auth_info(session, login, authinfo)
+ return self.source.repo.check_auth_info(cnx, login, authinfo)
+
class DatabaseIndependentBackupRestore(object):
"""Helper class to perform db backend agnostic backup and restore
@@ -1721,7 +1583,7 @@
self.cnx = self.get_connection()
try:
self.cursor = self.cnx.cursor()
- self.cursor.arraysize=100
+ self.cursor.arraysize = 100
self.logger.info('writing metadata')
self.write_metadata(archive)
for seq in self.get_sequences():
@@ -1737,7 +1599,6 @@
def get_tables(self):
non_entity_tables = ['entities',
- 'deleted_entities',
'transactions',
'tx_entity_actions',
'tx_relation_actions',
@@ -1765,8 +1626,8 @@
archive.writestr('tables.txt', '\n'.join(self.get_tables()))
archive.writestr('sequences.txt', '\n'.join(self.get_sequences()))
versions = self._get_versions()
- versions_str = '\n'.join('%s %s' % (k,v)
- for k,v in versions)
+ versions_str = '\n'.join('%s %s' % (k, v)
+ for k, v in versions)
archive.writestr('versions.txt', versions_str)
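
write_metadata stores three plain-text manifests; given the writestr calls, the archive is presumably a zipfile.ZipFile. A hedged sketch of reading them back at restore time ('backup.zip' is illustrative):

    import zipfile

    archive = zipfile.ZipFile('backup.zip')
    tables = archive.read('tables.txt').splitlines()
    sequences = archive.read('sequences.txt').splitlines()
    versions = dict(line.split(' ', 1)
                    for line in archive.read('versions.txt').splitlines())
    archive.close()
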
def write_sequence(self, archive, seq):
--- a/server/sources/pyrorql.py Tue Jun 10 09:35:26 2014 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,63 +0,0 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
-# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
-#
-# This file is part of CubicWeb.
-#
-# CubicWeb is free software: you can redistribute it and/or modify it under the
-# terms of the GNU Lesser General Public License as published by the Free
-# Software Foundation, either version 2.1 of the License, or (at your option)
-# any later version.
-#
-# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
-# details.
-#
-# You should have received a copy of the GNU Lesser General Public License along
-# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""Source to query another RQL repository using pyro"""
-
-__docformat__ = "restructuredtext en"
-_ = unicode
-
-# module is lazily imported
-import warnings
-warnings.warn('Imminent drop of pyrorql source. Switch to datafeed now!',
- DeprecationWarning)
-
-import threading
-from Pyro.errors import PyroError, ConnectionClosedError
-
-from cubicweb import ConnectionError
-from cubicweb.server.sources import ConnectionWrapper
-
-from cubicweb.server.sources.remoterql import RemoteSource
-
-class PyroRQLSource(RemoteSource):
- """External repository source, using Pyro connection"""
-
- def get_connection(self):
- try:
- return self._get_connection()
- except (ConnectionError, PyroError) as ex:
- self.critical("can't get connection to source %s: %s", self.uri, ex)
- return ConnectionWrapper()
-
- def check_connection(self, cnx):
- """check connection validity, return None if the connection is still valid
- else a new connection
- """
- # we have to transfer manually thread ownership. This can be done safely
- # since the connections set holding the connection is affected to one
- # session/thread and can't be called simultaneously
- try:
- cnx._repo._transferThread(threading.currentThread())
- except AttributeError:
- # inmemory connection
- pass
- try:
- return super(PyroRQLSource, self).check_connection(cnx)
- except ConnectionClosedError:
- # try to reconnect
- return self.get_connection()
-
--- a/server/sources/remoterql.py Tue Jun 10 09:35:26 2014 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,670 +0,0 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
-# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
-#
-# This file is part of CubicWeb.
-#
-# CubicWeb is free software: you can redistribute it and/or modify it under the
-# terms of the GNU Lesser General Public License as published by the Free
-# Software Foundation, either version 2.1 of the License, or (at your option)
-# any later version.
-#
-# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
-# details.
-#
-# You should have received a copy of the GNU Lesser General Public License along
-# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""Source to query another RQL remote repository"""
-
-__docformat__ = "restructuredtext en"
-_ = unicode
-
-from os.path import join
-from base64 import b64decode
-
-from logilab.common.configuration import REQUIRED
-
-from yams.schema import role_name
-
-from rql.nodes import Constant
-from rql.utils import rqlvar_maker
-
-from cubicweb import dbapi, server
-from cubicweb import ValidationError, BadConnectionId, UnknownEid
-from cubicweb.schema import VIRTUAL_RTYPES
-from cubicweb.server.sources import (AbstractSource, ConnectionWrapper,
- TimedCache, dbg_st_search, dbg_results)
-from cubicweb.server.msplanner import neged_relation
-
-def uidtype(union, col, etype, args):
- select, col = union.locate_subquery(col, etype, args)
- return getattr(select.selection[col], 'uidtype', None)
-
-
-class ReplaceByInOperator(Exception):
- def __init__(self, eids):
- self.eids = eids
-
-class RemoteSource(AbstractSource):
- """Generic external repository source"""
-
- # boolean telling if modification hooks should be called when something is
- # modified in this source
- should_call_hooks = False
- # boolean telling if the repository should connect to this source during
- # migration
- connect_for_migration = False
-
- options = (
-
- ('cubicweb-user',
- {'type' : 'string',
- 'default': REQUIRED,
- 'help': 'user to use for connection on the distant repository',
- 'group': 'remote-source', 'level': 0,
- }),
- ('cubicweb-password',
- {'type' : 'password',
- 'default': '',
- 'help': 'user to use for connection on the distant repository',
- 'group': 'remote-source', 'level': 0,
- }),
- ('base-url',
- {'type' : 'string',
- 'default': '',
- 'help': 'url of the web site for the distant repository, if you want '
- 'to generate external link to entities from this repository',
- 'group': 'remote-source', 'level': 1,
- }),
- ('skip-external-entities',
- {'type' : 'yn',
- 'default': False,
- 'help': 'should entities not local to the source be considered or not',
- 'group': 'remote-source', 'level': 0,
- }),
- ('synchronization-interval',
- {'type' : 'time',
- 'default': '5min',
- 'help': 'interval between synchronization with the external \
-repository (default to 5 minutes).',
- 'group': 'remote-source', 'level': 2,
- }))
-
- PUBLIC_KEYS = AbstractSource.PUBLIC_KEYS + ('base-url',)
-
- _conn = None
-
- def __init__(self, repo, source_config, eid=None):
- super(RemoteSource, self).__init__(repo, source_config, eid)
- self._query_cache = TimedCache(1800)
-
- def update_config(self, source_entity, processed_config):
- """update configuration from source entity"""
- super(RemoteSource, self).update_config(source_entity, processed_config)
- baseurl = processed_config.get('base-url')
- if baseurl and not baseurl.endswith('/'):
- processed_config['base-url'] += '/'
- self.config = processed_config
- self._skip_externals = processed_config['skip-external-entities']
- if source_entity is not None:
- self.latest_retrieval = source_entity.latest_retrieval
-
- def _entity_update(self, source_entity):
- super(RemoteSource, self)._entity_update(source_entity)
- if self.urls and len(self.urls) > 1:
- raise ValidationError(source_entity.eid, {'url': _('can only have one url')})
-
- def get_connection(self):
- try:
- return self._get_connection()
- except ConnectionError as ex:
- self.critical("can't get connection to source %s: %s", self.uri, ex)
- return ConnectionWrapper()
-
- def _get_connection(self):
- """open and return a connection to the source"""
- self.info('connecting to source %s as user %s',
- self.urls[0], self.config['cubicweb-user'])
- # XXX check protocol according to source type (zmq / pyro)
- return dbapi.connect(self.urls[0], login=self.config['cubicweb-user'],
- password=self.config['cubicweb-password'])
-
- def reset_caches(self):
- """method called during test to reset potential source caches"""
- self._query_cache = TimedCache(1800)
-
- def init(self, activated, source_entity):
- """method called by the repository once ready to handle request"""
- super(RemoteSource, self).init(activated, source_entity)
- self.load_mapping(source_entity._cw)
- if activated:
- interval = self.config['synchronization-interval']
- self.repo.looping_task(interval, self.synchronize)
- self.repo.looping_task(self._query_cache.ttl.seconds/10,
- self._query_cache.clear_expired)
- self.latest_retrieval = source_entity.latest_retrieval
-
- def load_mapping(self, session=None):
- self.support_entities = {}
- self.support_relations = {}
- self.dont_cross_relations = set(('owned_by', 'created_by'))
- self.cross_relations = set()
- assert self.eid is not None
- self._schemacfg_idx = {}
- self._load_mapping(session)
-
- etype_options = set(('write',))
- rtype_options = set(('maycross', 'dontcross', 'write',))
-
- def _check_options(self, schemacfg, allowedoptions):
- if schemacfg.options:
- options = set(w.strip() for w in schemacfg.options.split(':'))
- else:
- options = set()
- if options - allowedoptions:
- options = ', '.join(sorted(options - allowedoptions))
- msg = _('unknown option(s): %s' % options)
- raise ValidationError(schemacfg.eid, {role_name('options', 'subject'): msg})
- return options
-
- def add_schema_config(self, schemacfg, checkonly=False):
- """added CWSourceSchemaConfig, modify mapping accordingly"""
- try:
- ertype = schemacfg.schema.name
- except AttributeError:
- msg = schemacfg._cw._("attribute/relation can't be mapped, only "
- "entity and relation types")
- raise ValidationError(schemacfg.eid, {role_name('cw_for_schema', 'subject'): msg})
- if schemacfg.schema.__regid__ == 'CWEType':
- options = self._check_options(schemacfg, self.etype_options)
- if not checkonly:
- self.support_entities[ertype] = 'write' in options
- else: # CWRType
- if ertype in ('is', 'is_instance_of', 'cw_source') or ertype in VIRTUAL_RTYPES:
- msg = schemacfg._cw._('%s relation should not be in mapped') % ertype
- raise ValidationError(schemacfg.eid, {role_name('cw_for_schema', 'subject'): msg})
- options = self._check_options(schemacfg, self.rtype_options)
- if 'dontcross' in options:
- if 'maycross' in options:
- msg = schemacfg._("can't mix dontcross and maycross options")
- raise ValidationError(schemacfg.eid, {role_name('options', 'subject'): msg})
- if 'write' in options:
- msg = schemacfg._("can't mix dontcross and write options")
- raise ValidationError(schemacfg.eid, {role_name('options', 'subject'): msg})
- if not checkonly:
- self.dont_cross_relations.add(ertype)
- elif not checkonly:
- self.support_relations[ertype] = 'write' in options
- if 'maycross' in options:
- self.cross_relations.add(ertype)
- if not checkonly:
- # add to an index to ease deletion handling
- self._schemacfg_idx[schemacfg.eid] = ertype
-
- def del_schema_config(self, schemacfg, checkonly=False):
- """deleted CWSourceSchemaConfig, modify mapping accordingly"""
- if checkonly:
- return
- try:
- ertype = self._schemacfg_idx[schemacfg.eid]
- if ertype[0].isupper():
- del self.support_entities[ertype]
- else:
- if ertype in self.support_relations:
- del self.support_relations[ertype]
- if ertype in self.cross_relations:
- self.cross_relations.remove(ertype)
- else:
- self.dont_cross_relations.remove(ertype)
- except Exception:
- self.error('while updating mapping consequently to removal of %s',
- schemacfg)
-
- def local_eid(self, cnx, extid, session):
- etype, dexturi, dextid = cnx.describe(extid)
- if dexturi == 'system' or not (
- dexturi in self.repo.sources_by_uri or self._skip_externals):
- assert etype in self.support_entities, etype
- eid = self.repo.extid2eid(self, str(extid), etype, session)
- if eid > 0:
- return eid, True
- elif dexturi in self.repo.sources_by_uri:
- source = self.repo.sources_by_uri[dexturi]
- cnx = session.cnxset.connection(source.uri)
- eid = source.local_eid(cnx, dextid, session)[0]
- return eid, False
- return None, None
-
- def synchronize(self, mtime=None):
- """synchronize content known by this repository with content in the
- external repository
- """
- self.info('synchronizing remote source %s', self.uri)
- cnx = self.get_connection()
- try:
- extrepo = cnx._repo
- except AttributeError:
- # fake connection wrapper returned when we can't connect to the
- # external source (hence we've no chance to synchronize...)
- return
- etypes = list(self.support_entities)
- if mtime is None:
- mtime = self.latest_retrieval
- updatetime, modified, deleted = extrepo.entities_modified_since(etypes, mtime)
- self._query_cache.clear()
- repo = self.repo
- session = repo.internal_session()
- source = repo.system_source
- try:
- for etype, extid in modified:
- try:
- eid = self.local_eid(cnx, extid, session)[0]
- if eid is not None:
- rset = session.eid_rset(eid, etype)
- entity = rset.get_entity(0, 0)
- entity.complete(entity.e_schema.indexable_attributes())
- source.index_entity(session, entity)
- except Exception:
- self.exception('while updating %s with external id %s of source %s',
- etype, extid, self.uri)
- continue
- for etype, extid in deleted:
- try:
- eid = self.repo.extid2eid(self, str(extid), etype, session,
- insert=False)
- # entity has been deleted from external repository but is not known here
- if eid is not None:
- entity = session.entity_from_eid(eid, etype)
- repo.delete_info(session, entity, self.uri,
- scleanup=self.eid)
- except Exception:
- if self.repo.config.mode == 'test':
- raise
- self.exception('while updating %s with external id %s of source %s',
- etype, extid, self.uri)
- continue
- self.latest_retrieval = updatetime
- session.execute('SET X latest_retrieval %(date)s WHERE X eid %(x)s',
- {'x': self.eid, 'date': self.latest_retrieval})
- session.commit()
- finally:
- session.close()
-
- def get_connection(self):
- raise NotImplementedError()
-
- def check_connection(self, cnx):
- """check connection validity, return None if the connection is still valid
- else a new connection
- """
- if not isinstance(cnx, ConnectionWrapper):
- try:
- cnx.check()
- return # ok
- except BadConnectionId:
- pass
- # try to reconnect
- return self.get_connection()
-
- def syntax_tree_search(self, session, union, args=None, cachekey=None,
- varmap=None):
- assert dbg_st_search(self.uri, union, varmap, args, cachekey)
- rqlkey = union.as_string(kwargs=args)
- try:
- results = self._query_cache[rqlkey]
- except KeyError:
- results = self._syntax_tree_search(session, union, args)
- self._query_cache[rqlkey] = results
- assert dbg_results(results)
- return results
-
- def _syntax_tree_search(self, session, union, args):
- """return result from this source for a rql query (actually from a rql
- syntax tree and a solution dictionary mapping each used variable to a
- possible type). If cachekey is given, the query necessary to fetch the
- results (but not the results themselves) may be cached using this key.
- """
- if not args is None:
- args = args.copy()
- # get cached cursor anyway
- cu = session.cnxset[self.uri]
- if cu is None:
- # this is a ConnectionWrapper instance
- msg = session._("can't connect to source %s, some data may be missing")
- session.set_shared_data('sources_error', msg % self.uri, txdata=True)
- return []
- translator = RQL2RQL(self)
- try:
- rql = translator.generate(session, union, args)
- except UnknownEid as ex:
- if server.DEBUG:
- print ' unknown eid', ex, 'no results'
- return []
- if server.DEBUG & server.DBG_RQL:
- print ' translated rql', rql
- try:
- rset = cu.execute(rql, args)
- except Exception as ex:
- self.exception(str(ex))
- msg = session._("error while querying source %s, some data may be missing")
- session.set_shared_data('sources_error', msg % self.uri, txdata=True)
- return []
- descr = rset.description
- if rset:
- needtranslation = []
- rows = rset.rows
- for i, etype in enumerate(descr[0]):
- if (etype is None or not self.schema.eschema(etype).final
- or uidtype(union, i, etype, args)):
- needtranslation.append(i)
- if needtranslation:
- cnx = session.cnxset.connection(self.uri)
- for rowindex in xrange(rset.rowcount - 1, -1, -1):
- row = rows[rowindex]
- localrow = False
- for colindex in needtranslation:
- if row[colindex] is not None: # optional variable
- eid, local = self.local_eid(cnx, row[colindex], session)
- if local:
- localrow = True
- if eid is not None:
- row[colindex] = eid
- else:
- # skip this row
- del rows[rowindex]
- del descr[rowindex]
- break
- else:
- # skip row if it only contains eids of entities which
- # are actually from a source we also know locally,
- # except if some args specified (XXX should actually
- # check if there are some args local to the source)
- if not (translator.has_local_eid or localrow):
- del rows[rowindex]
- del descr[rowindex]
- results = rows
- else:
- results = []
- return results
-
- def _entity_relations_and_kwargs(self, session, entity):
- relations = []
- kwargs = {'x': self.repo.eid2extid(self, entity.eid, session)}
- for key, val in entity.cw_attr_cache.iteritems():
- relations.append('X %s %%(%s)s' % (key, key))
- kwargs[key] = val
- return relations, kwargs
-
- def add_entity(self, session, entity):
- """add a new entity to the source"""
- raise NotImplementedError()
-
- def update_entity(self, session, entity):
- """update an entity in the source"""
- relations, kwargs = self._entity_relations_and_kwargs(session, entity)
- cu = session.cnxset[self.uri]
- cu.execute('SET %s WHERE X eid %%(x)s' % ','.join(relations), kwargs)
- self._query_cache.clear()
- entity.cw_clear_all_caches()
-
- def delete_entity(self, session, entity):
- """delete an entity from the source"""
- if session.deleted_in_transaction(self.eid):
- # source is being deleted, don't propagate
- self._query_cache.clear()
- return
- cu = session.cnxset[self.uri]
- cu.execute('DELETE %s X WHERE X eid %%(x)s' % entity.cw_etype,
- {'x': self.repo.eid2extid(self, entity.eid, session)})
- self._query_cache.clear()
-
- def add_relation(self, session, subject, rtype, object):
- """add a relation to the source"""
- cu = session.cnxset[self.uri]
- cu.execute('SET X %s Y WHERE X eid %%(x)s, Y eid %%(y)s' % rtype,
- {'x': self.repo.eid2extid(self, subject, session),
- 'y': self.repo.eid2extid(self, object, session)})
- self._query_cache.clear()
- session.entity_from_eid(subject).cw_clear_all_caches()
- session.entity_from_eid(object).cw_clear_all_caches()
-
- def delete_relation(self, session, subject, rtype, object):
- """delete a relation from the source"""
- if session.deleted_in_transaction(self.eid):
- # source is being deleted, don't propagate
- self._query_cache.clear()
- return
- cu = session.cnxset[self.uri]
- cu.execute('DELETE X %s Y WHERE X eid %%(x)s, Y eid %%(y)s' % rtype,
- {'x': self.repo.eid2extid(self, subject, session),
- 'y': self.repo.eid2extid(self, object, session)})
- self._query_cache.clear()
- session.entity_from_eid(subject).cw_clear_all_caches()
- session.entity_from_eid(object).cw_clear_all_caches()
-
-
-class RQL2RQL(object):
- """translate a local rql query to be executed on a distant repository"""
- def __init__(self, source):
- self.source = source
- self.repo = source.repo
- self.current_operator = None
-
- def _accept_children(self, node):
- res = []
- for child in node.children:
- rql = child.accept(self)
- if rql is not None:
- res.append(rql)
- return res
-
- def generate(self, session, rqlst, args):
- self._session = session
- self.kwargs = args
- self.need_translation = False
- self.has_local_eid = False
- return self.visit_union(rqlst)
-
- def visit_union(self, node):
- s = self._accept_children(node)
- if len(s) > 1:
- return ' UNION '.join('(%s)' % q for q in s)
- return s[0]
-
- def visit_select(self, node):
- """return the tree as an encoded rql string"""
- self._varmaker = rqlvar_maker(defined=node.defined_vars.copy())
- self._const_var = {}
- if node.distinct:
- base = 'DISTINCT Any'
- else:
- base = 'Any'
- s = ['%s %s' % (base, ','.join(v.accept(self) for v in node.selection))]
- if node.groupby:
- s.append('GROUPBY %s' % ', '.join(group.accept(self)
- for group in node.groupby))
- if node.orderby:
- s.append('ORDERBY %s' % ', '.join(self.visit_sortterm(term)
- for term in node.orderby))
- if node.limit is not None:
- s.append('LIMIT %s' % node.limit)
- if node.offset:
- s.append('OFFSET %s' % node.offset)
- restrictions = []
- if node.where is not None:
- nr = node.where.accept(self)
- if nr is not None:
- restrictions.append(nr)
- if restrictions:
- s.append('WHERE %s' % ','.join(restrictions))
-
- if node.having:
- s.append('HAVING %s' % ', '.join(term.accept(self)
- for term in node.having))
- subqueries = []
- for subquery in node.with_:
- subqueries.append('%s BEING (%s)' % (','.join(ca.name for ca in subquery.aliases),
- self.visit_union(subquery.query)))
- if subqueries:
- s.append('WITH %s' % (','.join(subqueries)))
- return ' '.join(s)
-
- def visit_and(self, node):
- res = self._accept_children(node)
- if res:
- return ', '.join(res)
- return
-
- def visit_or(self, node):
- res = self._accept_children(node)
- if len(res) > 1:
- return ' OR '.join('(%s)' % rql for rql in res)
- elif res:
- return res[0]
- return
-
- def visit_not(self, node):
- rql = node.children[0].accept(self)
- if rql:
- return 'NOT (%s)' % rql
- return
-
- def visit_exists(self, node):
- rql = node.children[0].accept(self)
- if rql:
- return 'EXISTS(%s)' % rql
- return
-
- def visit_relation(self, node):
- try:
- if isinstance(node.children[0], Constant):
- # simplified rqlst, reintroduce eid relation
- try:
- restr, lhs = self.process_eid_const(node.children[0])
- except UnknownEid:
- # can safely skip not relation with an unsupported eid
- if neged_relation(node):
- return
- raise
- else:
- lhs = node.children[0].accept(self)
- restr = None
- except UnknownEid:
- # can safely skip not relation with an unsupported eid
- if neged_relation(node):
- return
- # XXX what about optional relation or outer NOT EXISTS()
- raise
- if node.optional in ('left', 'both'):
- lhs += '?'
- if node.r_type == 'eid' or not self.source.schema.rschema(node.r_type).final:
- self.need_translation = True
- self.current_operator = node.operator()
- if isinstance(node.children[0], Constant):
- self.current_etypes = (node.children[0].uidtype,)
- else:
- self.current_etypes = node.children[0].variable.stinfo['possibletypes']
- try:
- rhs = node.children[1].accept(self)
- except UnknownEid:
- # can safely skip not relation with an unsupported eid
- if neged_relation(node):
- return
- # XXX what about optional relation or outer NOT EXISTS()
- raise
- except ReplaceByInOperator as ex:
- rhs = 'IN (%s)' % ','.join(eid for eid in ex.eids)
- self.need_translation = False
- self.current_operator = None
- if node.optional in ('right', 'both'):
- rhs += '?'
- if restr is not None:
- return '%s %s %s, %s' % (lhs, node.r_type, rhs, restr)
- return '%s %s %s' % (lhs, node.r_type, rhs)
-
- def visit_comparison(self, node):
- if node.operator in ('=', 'IS'):
- return node.children[0].accept(self)
- return '%s %s' % (node.operator.encode(),
- node.children[0].accept(self))
-
- def visit_mathexpression(self, node):
- return '(%s %s %s)' % (node.children[0].accept(self),
- node.operator.encode(),
- node.children[1].accept(self))
-
- def visit_function(self, node):
- #if node.name == 'IN':
- res = []
- for child in node.children:
- try:
- rql = child.accept(self)
- except UnknownEid as ex:
- continue
- res.append(rql)
- if not res:
- raise ex
- return '%s(%s)' % (node.name, ', '.join(res))
-
- def visit_constant(self, node):
- if self.need_translation or node.uidtype:
- if node.type == 'Int':
- self.has_local_eid = True
- return str(self.eid2extid(node.value))
- if node.type == 'Substitute':
- key = node.value
- # ensure we have not yet translated the value...
- if not key in self._const_var:
- self.kwargs[key] = self.eid2extid(self.kwargs[key])
- self._const_var[key] = None
- self.has_local_eid = True
- return node.as_string()
-
- def visit_variableref(self, node):
- """get the sql name for a variable reference"""
- return node.name
-
- def visit_sortterm(self, node):
- if node.asc:
- return node.term.accept(self)
- return '%s DESC' % node.term.accept(self)
-
- def process_eid_const(self, const):
- value = const.eval(self.kwargs)
- try:
- return None, self._const_var[value]
- except Exception:
- var = self._varmaker.next()
- self.need_translation = True
- restr = '%s eid %s' % (var, self.visit_constant(const))
- self.need_translation = False
- self._const_var[value] = var
- return restr, var
-
- def eid2extid(self, eid):
- try:
- return self.repo.eid2extid(self.source, eid, self._session)
- except UnknownEid:
- operator = self.current_operator
- if operator is not None and operator != '=':
- # deal with query like "X eid > 12"
- #
- # The problem is that eid order in the external source may
- # differ from the local source
- #
- # So search for all eids from this source matching the condition
- # locally and then to replace the "> 12" branch by "IN (eids)"
- #
- # XXX we may have to insert a huge number of eids...)
- sql = "SELECT extid FROM entities WHERE source='%s' AND type IN (%s) AND eid%s%s"
- etypes = ','.join("'%s'" % etype for etype in self.current_etypes)
- cu = self._session.system_sql(sql % (self.source.uri, etypes,
- operator, eid))
- # XXX buggy cu.rowcount which may be zero while there are some
- # results
- rows = cu.fetchall()
- if rows:
- raise ReplaceByInOperator((b64decode(r[0]) for r in rows))
- raise
-
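
Before its removal, the translator handled non-equality eid comparisons such as "X eid > 12" by collecting the locally matching eids and substituting an IN clause, since eid ordering is not comparable across repositories. A hedged standalone sketch of that rewrite idea ('local_extids' is an illustrative mapping of local eid to external id):

    import operator as op_mod

    def rewrite_eid_comparison(op, eid, local_extids):
        cmp_fn = {'>': op_mod.gt, '>=': op_mod.ge,
                  '<': op_mod.lt, '<=': op_mod.le}[op]
        matching = [extid for leid, extid in sorted(local_extids.items())
                    if cmp_fn(leid, eid)]
        return 'IN (%s)' % ','.join(matching)

    print rewrite_eid_comparison('>', 12, {10: 'a', 13: 'b', 25: 'c'})
    # -> IN (b,c)
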
--- a/server/sources/rql2sql.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/sources/rql2sql.py Tue Jun 10 09:49:45 2014 +0200
@@ -58,8 +58,8 @@
from rql import BadRQLQuery, CoercionError
from rql.utils import common_parent
from rql.stmts import Union, Select
-from rql.nodes import (SortTerm, VariableRef, Constant, Function, Variable, Or,
- Not, Comparison, ColumnAlias, Relation, SubQuery, Exists)
+from rql.nodes import (VariableRef, Constant, Function, Variable, Or,
+ Not, Comparison, ColumnAlias, Relation, SubQuery)
from cubicweb import QueryError
from cubicweb.rqlrewrite import cleanup_solutions
--- a/server/sources/storages.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/sources/storages.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -236,7 +236,7 @@
"""return the current fs_path of the attribute, or None is the attr is
not stored yet.
"""
- sysource = entity._cw.cnxset.source('system')
+ sysource = entity._cw.repo.system_source
cu = sysource.doexec(entity._cw,
'SELECT cw_%s FROM cw_%s WHERE cw_eid=%s' % (
attr, entity.cw_etype, entity.eid))
--- a/server/sources/zmqrql.py Tue Jun 10 09:35:26 2014 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,26 +0,0 @@
-# copyright 2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
-# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
-#
-# This file is part of CubicWeb.
-#
-# CubicWeb is free software: you can redistribute it and/or modify it under the
-# terms of the GNU Lesser General Public License as published by the Free
-# Software Foundation, either version 2.1 of the License, or (at your option)
-# any later version.
-#
-# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
-# details.
-#
-# You should have received a copy of the GNU Lesser General Public License along
-# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""Source to query another RQL repository using pyro"""
-
-__docformat__ = "restructuredtext en"
-_ = unicode
-
-from cubicweb.server.sources.remoterql import RemoteSource
-
-class ZMQRQLSource(RemoteSource):
- """External repository source, using ZMQ sockets"""
--- a/server/sqlutils.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/sqlutils.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -19,15 +19,18 @@
__docformat__ = "restructuredtext en"
+import sys
import os
import re
import subprocess
-from datetime import datetime, date
+from os.path import abspath
from itertools import ifilter
+from logging import getLogger
from logilab import database as db, common as lgc
from logilab.common.shellutils import ProgressBar
-from logilab.common.date import todate, todatetime, utcdatetime, utctime
+from logilab.common.deprecation import deprecated
+from logilab.common.logging_ext import set_log_methods
from logilab.database.sqlgen import SQLGenerator
from cubicweb import Binary, ConfigurationError
@@ -35,7 +38,6 @@
from cubicweb.schema import PURE_VIRTUAL_RTYPES
from cubicweb.server import SQL_CONNECT_HOOKS
from cubicweb.server.utils import crypt_password
-from rql.utils import RQL_FUNCTIONS_REGISTRY
lgc.USE_MX_DATETIME = False
SQL_PREFIX = 'cw_'
@@ -177,10 +179,125 @@
for name in ifilter(_SQL_DROP_ALL_USER_TABLES_FILTER_FUNCTION, dbhelper.list_tables(sqlcursor))]
return '\n'.join(cmds)
+
+class ConnectionWrapper(object):
+ """handle connection to the system source, at some point associated to a
+ :class:`Session`
+ """
+
+ # since 3.19, we only have to manage the system source connection
+ def __init__(self, system_source):
+ # keep the system source, the only source left since 3.19
+ self._source = system_source
+ self.cnx = system_source.get_connection()
+ self.cu = self.cnx.cursor()
+
+ def commit(self):
+ """commit the current transaction for this user"""
+ # let exceptions propagate
+ self.cnx.commit()
+
+ def rollback(self):
+ """rollback the current transaction for this user"""
+ # catch exceptions so a broken connection can be replaced
+ try:
+ self.cnx.rollback()
+ except Exception:
+ self._source.critical('rollback error', exc_info=sys.exc_info())
+ # on rollback error the connection is most probably in a bad
+ # state; replace it with a new one.
+ self.reconnect()
+
+ def close(self, i_know_what_i_do=False):
+ """close all connections in the set"""
+ if i_know_what_i_do is not True: # unexpected closing safety belt
+ raise RuntimeError('connections set shouldn\'t be closed')
+ try:
+ self.cu.close()
+ self.cu = None
+ except Exception:
+ pass
+ try:
+ self.cnx.close()
+ self.cnx = None
+ except Exception:
+ pass
+
+ # internals ###############################################################
+
+ def cnxset_freed(self):
+ """connections set is being freed from a session"""
+ pass # do nothing by default
+
+ def reconnect(self):
+ """reopen a connection for this source or all sources if none specified
+ """
+ try:
+ # properly close existing connection if any
+ self.cnx.close()
+ except Exception:
+ pass
+ self._source.info('trying to reconnect')
+ self.cnx = self._source.get_connection()
+ self.cu = self.cnx.cursor()
+
+ @deprecated('[3.19] use .cu instead')
+ def __getitem__(self, uri):
+ assert uri == 'system'
+ return self.cu
+
+ @deprecated('[3.19] use repo.system_source instead')
+ def source(self, uid):
+ assert uid == 'system'
+ return self._source
+
+ @deprecated('[3.19] use .cnx instead')
+ def connection(self, uid):
+ assert uid == 'system'
+ return self.cnx
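
The three deprecated shims keep pre-3.19 call sites working while steering them to the flat attributes. A hedged before/after sketch ('cnxset' is a ConnectionWrapper instance, 'repo' the repository):

    # pre-3.19 style, still working but emitting DeprecationWarning:
    cu = cnxset['system']                 # now: cnxset.cu
    src = cnxset.source('system')         # now: repo.system_source
    rawcnx = cnxset.connection('system')  # now: cnxset.cnx

    # 3.19 style:
    cnxset.cu.execute('SELECT 1')
    cnxset.commit()
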
+
+
+class SqliteConnectionWrapper(ConnectionWrapper):
+ """Sqlite specific connection wrapper: close the connection each time it's
+ freed (and reopen it later when needed)
+ """
+ def __init__(self, system_source):
+ # don't call the parent's __init__: the connection is opened lazily
+ self._source = system_source
+
+ _cnx = None
+
+ def cnxset_freed(self):
+ self.cu.close()
+ self.cnx.close()
+ self.cnx = self.cu = None
+
+ @property
+ def cnx(self):
+ if self._cnx is None:
+ self._cnx = self._source.get_connection()
+ self._cu = self._cnx.cursor()
+ return self._cnx
+ @cnx.setter
+ def cnx(self, value):
+ self._cnx = value
+
+ @property
+ def cu(self):
+ if self._cnx is None:
+ self._cnx = self._source.get_connection()
+ self._cu = self._cnx.cursor()
+ return self._cu
+ @cu.setter
+ def cu(self, value):
+ self._cu = value
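
The property pair implements lazy reconnection: cnxset_freed drops the sqlite handle and the next access to cnx or cu transparently reopens it. A generic hedged sketch of the same reopen-on-access pattern:

    class LazyHandle(object):
        # hedged sketch of the lazy reopen-on-access pattern used above
        _handle = None

        def __init__(self, factory):
            self._factory = factory     # e.g. system_source.get_connection

        def release(self):              # analogous to cnxset_freed
            if self._handle is not None:
                self._handle.close()
                self._handle = None

        @property
        def handle(self):               # analogous to the cnx/cu properties
            if self._handle is None:
                self._handle = self._factory()
            return self._handle
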
+
+
class SQLAdapterMixIn(object):
"""Mixin for SQL data sources, getting a connection from a configuration
dictionary and handling connection locking
"""
+ cnx_wrap = ConnectionWrapper
def __init__(self, source_config):
try:
@@ -208,6 +325,15 @@
self._binary = self.dbhelper.binary_value
self._process_value = dbapi_module.process_value
self._dbencoding = dbencoding
+ if self.dbdriver == 'sqlite':
+ self.cnx_wrap = SqliteConnectionWrapper
+ self.dbhelper.dbname = abspath(self.dbhelper.dbname)
+
+ def wrapped_connection(self):
+ """open and return a connection to the database, wrapped into a class
+ handling reconnection and all
+ """
+ return self.cnx_wrap(self)
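
A hedged usage sketch of wrapped_connection; 'source' stands for any SQLAdapterMixIn-based system source and the query is illustrative:

    wrapped = source.wrapped_connection()  # ConnectionWrapper, or the
                                           # sqlite variant picked in __init__
    wrapped.cu.execute('SELECT eid, type FROM entities LIMIT 5')
    print wrapped.cu.fetchall()
    wrapped.commit()
    wrapped.close(i_know_what_i_do=True)   # the safety belt requires this flag
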
def get_connection(self):
"""open and return a connection to the database"""
@@ -319,10 +445,11 @@
# only defining here to prevent pylint from complaining
info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
-from logging import getLogger
-from cubicweb import set_log_methods
set_log_methods(SQLAdapterMixIn, getLogger('cubicweb.sqladapter'))
+
+# connection initialization functions ##########################################
+
def init_sqlite_connexion(cnx):
class group_concat(object):
--- a/server/ssplanner.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/ssplanner.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -68,13 +68,13 @@
"""return a dict mapping rqlst variable object to their eid if specified in
the syntax tree
"""
- session = plan.session
+ cnx = plan.cnx
if rqlst.where is None:
return {}
eidconsts = {}
- neweids = session.transaction_data.get('neweids', ())
- checkread = session.read_security
- eschema = session.vreg.schema.eschema
+ neweids = cnx.transaction_data.get('neweids', ())
+ checkread = cnx.read_security
+ eschema = cnx.vreg.schema.eschema
for rel in rqlst.where.get_nodes(Relation):
# only care for 'eid' relations ...
if (rel.r_type == 'eid'
@@ -89,9 +89,9 @@
# the generated select substep if not emited (eg nothing
# to be selected)
if checkread and eid not in neweids:
- with session.security_enabled(read=False):
- eschema(session.describe(eid)[0]).check_perm(
- session, 'read', eid=eid)
+ with cnx.security_enabled(read=False):
+ eschema(cnx.entity_metas(eid)['type']).check_perm(
+ cnx, 'read', eid=eid)
eidconsts[lhs.variable] = eid
return eidconsts
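
entity_metas replaces the old describe() tuple with a dict; combined with the security_enabled context manager, it lets the planner check read permission on a literal eid without triggering recursive read checks. A hedged sketch of the pattern:

    # hedged sketch: permission check on a literal eid, as above
    with cnx.security_enabled(read=False):     # avoid recursive checks
        etype = cnx.entity_metas(eid)['type']  # e.g. 'CWUser'
        cnx.vreg.schema.eschema(etype).check_perm(cnx, 'read', eid=eid)
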
@@ -145,17 +145,17 @@
the rqlst should not be tagged at this point.
"""
plan.preprocess(rqlst)
- return (OneFetchStep(plan, rqlst, plan.session.repo.sources),)
+ return (OneFetchStep(plan, rqlst),)
def build_insert_plan(self, plan, rqlst):
"""get an execution plan from an INSERT RQL query"""
# each variable in main variables is a new entity to insert
to_build = {}
- session = plan.session
- etype_class = session.vreg['etypes'].etype_class
+ cnx = plan.cnx
+ etype_class = cnx.vreg['etypes'].etype_class
for etype, var in rqlst.main_variables:
# need to do this since entity class is shared w. web client code !
- to_build[var.name] = EditedEntity(etype_class(etype)(session))
+ to_build[var.name] = EditedEntity(etype_class(etype)(cnx))
plan.add_entity_def(to_build[var.name])
# add constant values to entity def, mark variables to be selected
to_select = _extract_const_attributes(plan, rqlst, to_build)
@@ -311,24 +311,6 @@
maprepr[var] = '%s.%s' % (tablesinorder[table], col)
return maprepr
-def offset_result(offset, result):
- offset -= len(result)
- if offset < 0:
- result = result[offset:]
- offset = None
- elif offset == 0:
- offset = None
- result = ()
- return offset, result
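
The removed offset_result helper did cross-source OFFSET bookkeeping: each source's partial result consumed part of the remaining offset. With single-source execution this is now delegated to SQL. A hedged walk-through of its main case, three rows left to skip against a five-row chunk:

    offset, rows = 3, ['r1', 'r2', 'r3', 'r4', 'r5']
    offset -= len(rows)       # -> -2: the chunk overshoots the offset
    rows = rows[offset:]      # keep the last two rows: ['r4', 'r5']
    offset = None             # offset fully consumed for later chunks
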
-
-
-class LimitOffsetMixIn(object):
- limit = offset = None
- def set_limit_offset(self, limit, offset):
- self.limit = limit
- self.offset = offset or None
-
-
class Step(object):
"""base abstract class for execution step"""
def __init__(self, plan):
@@ -357,29 +339,21 @@
[step.test_repr() for step in self.children],)
-class OneFetchStep(LimitOffsetMixIn, Step):
+class OneFetchStep(Step):
"""step consisting in fetching data from sources and directly returning
results
"""
- def __init__(self, plan, union, sources, inputmap=None):
+ def __init__(self, plan, union, inputmap=None):
Step.__init__(self, plan)
self.union = union
- self.sources = sources
self.inputmap = inputmap
- self.set_limit_offset(union.children[-1].limit, union.children[-1].offset)
-
- def set_limit_offset(self, limit, offset):
- LimitOffsetMixIn.set_limit_offset(self, limit, offset)
- for select in self.union.children:
- select.limit = limit
- select.offset = offset
def execute(self):
"""call .syntax_tree_search with the given syntax tree on each
source for each solution
"""
self.execute_children()
- session = self.plan.session
+ cnx = self.plan.cnx
args = self.plan.args
inputmap = self.inputmap
union = self.union
@@ -395,31 +369,9 @@
cachekey = tuple(cachekey)
else:
cachekey = union.as_string()
- result = []
- # limit / offset processing
- limit = self.limit
- offset = self.offset
- if offset is not None:
- if len(self.sources) > 1:
- # we'll have to deal with limit/offset by ourself
- if union.children[-1].limit:
- union.children[-1].limit = limit + offset
- union.children[-1].offset = None
- else:
- offset, limit = None, None
- for source in self.sources:
- if offset is None and limit is not None:
- # modifying the sample rqlst is enough since sql generation
- # will pick it here as well
- union.children[-1].limit = limit - len(result)
- result_ = source.syntax_tree_search(session, union, args, cachekey,
- inputmap)
- if offset is not None:
- offset, result_ = offset_result(offset, result_)
- result += result_
- if limit is not None:
- if len(result) >= limit:
- return result[:limit]
+ # get results for query
+ source = cnx.repo.system_source
+ result = source.syntax_tree_search(cnx, union, args, cachekey, inputmap)
#print 'ONEFETCH RESULT %s' % (result)
return result
@@ -432,8 +384,7 @@
return (self.__class__.__name__,
sorted((r.as_string(kwargs=self.plan.args), r.solutions)
for r in self.union.children),
- self.limit, self.offset,
- sorted(self.sources), inputmap)
+ inputmap)
# UPDATE/INSERT/DELETE steps ##################################################
@@ -515,8 +466,8 @@
results = self.execute_child()
if results:
todelete = frozenset(int(eid) for eid, in results)
- session = self.plan.session
- session.repo.glob_delete_entities(session, todelete)
+ cnx = self.plan.cnx
+ cnx.repo.glob_delete_entities(cnx, todelete)
return results
class DeleteRelationsStep(Step):
@@ -528,10 +479,10 @@
def execute(self):
"""execute this step"""
- session = self.plan.session
- delete = session.repo.glob_delete_relation
+ cnx = self.plan.cnx
+ delete = cnx.repo.glob_delete_relation
for subj, obj in self.execute_child():
- delete(session, subj, self.rtype, obj)
+ delete(cnx, subj, self.rtype, obj)
class UpdateStep(Step):
@@ -545,8 +496,8 @@
def execute(self):
"""execute this step"""
- session = self.plan.session
- repo = session.repo
+ cnx = self.plan.cnx
+ repo = cnx.repo
edefs = {}
relations = {}
# insert relations
@@ -564,7 +515,7 @@
try:
edited = edefs[eid]
except KeyError:
- edef = session.entity_from_eid(eid)
+ edef = cnx.entity_from_eid(eid)
edefs[eid] = edited = EditedEntity(edef)
edited.edited_attribute(str(rschema), rhsval)
else:
@@ -575,9 +526,9 @@
relations[str_rschema] = [(lhsval, rhsval)]
result[i] = newrow
# update entities
- repo.glob_add_relations(session, relations)
+ repo.glob_add_relations(cnx, relations)
for eid, edited in edefs.iteritems():
- repo.glob_update_entity(session, edited)
+ repo.glob_update_entity(cnx, edited)
return result
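
UpdateStep batches attribute writes: one EditedEntity accumulates every SET attribute for a given eid, then a single glob_update_entity flushes it. A hedged sketch of that accumulation ('pending_changes' is an illustrative iterable of (eid, rschema, value) triples):

    edefs = {}
    for eid, rschema, value in pending_changes:
        if eid not in edefs:
            edefs[eid] = EditedEntity(cnx.entity_from_eid(eid))
        edefs[eid].edited_attribute(str(rschema), value)
    for eid, edited in edefs.iteritems():
        cnx.repo.glob_update_entity(cnx, edited)
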
def _handle_relterm(info, row, newrow):
--- a/server/test/data-schemaserial/bootstrap_cubes Tue Jun 10 09:35:26 2014 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
-card,comment,folder,tag,basket,email,file,localperms
--- a/server/test/data-schemaserial/schema.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/test/data-schemaserial/schema.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -16,241 +16,17 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-from yams.buildobjs import (EntityType, RelationType, RelationDefinition,
- SubjectRelation, RichString, String, Int, Float,
- Boolean, Datetime, TZDatetime, Bytes)
-from yams.constraints import SizeConstraint
-from cubicweb.schema import (WorkflowableEntityType,
- RQLConstraint, RQLUniqueConstraint,
- ERQLExpression, RRQLExpression)
-
-from yams.buildobjs import make_type
+from yams.buildobjs import EntityType, SubjectRelation, String, make_type
BabarTestType = make_type('BabarTestType')
-
-class Affaire(WorkflowableEntityType):
- __permissions__ = {
- 'read': ('managers',
- ERQLExpression('X owned_by U'), ERQLExpression('X concerne S?, S owned_by U')),
- 'add': ('managers', ERQLExpression('X concerne S, S owned_by U')),
- 'update': ('managers', 'owners', ERQLExpression('X in_state S, S name in ("pitetre", "en cours")')),
- 'delete': ('managers', 'owners', ERQLExpression('X concerne S, S owned_by U')),
- }
-
- ref = String(fulltextindexed=True, indexed=True,
- constraints=[SizeConstraint(16)])
- sujet = String(fulltextindexed=True,
- constraints=[SizeConstraint(256)])
- descr = RichString(fulltextindexed=True,
- description=_('more detailed description'))
-
- duration = Int()
- invoiced = Float()
- opt_attr = Bytes()
-
- depends_on = SubjectRelation('Affaire')
- require_permission = SubjectRelation('CWPermission')
- concerne = SubjectRelation(('Societe', 'Note'))
- todo_by = SubjectRelation('Personne', cardinality='?*')
- documented_by = SubjectRelation('Card')
-
-
-class Societe(EntityType):
- __unique_together__ = [('nom', 'type', 'cp')]
- __permissions__ = {
- 'read': ('managers', 'users', 'guests'),
- 'update': ('managers', 'owners', ERQLExpression('U login L, X nom L')),
- 'delete': ('managers', 'owners', ERQLExpression('U login L, X nom L')),
- 'add': ('managers', 'users',)
- }
-
- nom = String(maxsize=64, fulltextindexed=True)
- web = String(maxsize=128)
- type = String(maxsize=128) # attribute in common with Note
- tel = Int()
- fax = Int()
- rncs = String(maxsize=128)
- ad1 = String(maxsize=128)
- ad2 = String(maxsize=128)
- ad3 = String(maxsize=128)
- cp = String(maxsize=12)
- ville= String(maxsize=32)
-
-
-class Division(Societe):
- __specializes_schema__ = True
-
-class SubDivision(Division):
- __specializes_schema__ = True
-
-class travaille_subdivision(RelationDefinition):
- subject = 'Personne'
- object = 'SubDivision'
-
-from cubicweb.schemas.base import CWUser
-CWUser.get_relations('login').next().fulltextindexed = True
-
-class Note(WorkflowableEntityType):
- date = String(maxsize=10)
- type = String(maxsize=6)
- para = String(maxsize=512,
- __permissions__ = {
- 'read': ('managers', 'users', 'guests'),
- 'update': ('managers', ERQLExpression('X in_state S, S name "todo"')),
- })
-
- migrated_from = SubjectRelation('Note')
- attachment = SubjectRelation('File')
- inline1 = SubjectRelation('Affaire', inlined=True, cardinality='?*',
- constraints=[RQLUniqueConstraint('S type T, S inline1 A1, A1 todo_by C, '
- 'Y type T, Y inline1 A2, A2 todo_by C',
- 'S,Y')])
- todo_by = SubjectRelation('CWUser')
+class Affaire(EntityType):
+ nom = String(unique=True, maxsize=64)
class Personne(EntityType):
__unique_together__ = [('nom', 'prenom', 'inline2')]
nom = String(fulltextindexed=True, required=True, maxsize=64)
prenom = String(fulltextindexed=True, maxsize=64)
- sexe = String(maxsize=1, default='M', fulltextindexed=True)
- promo = String(vocabulary=('bon','pasbon'))
- titre = String(fulltextindexed=True, maxsize=128)
- adel = String(maxsize=128)
- ass = String(maxsize=128)
- web = String(maxsize=128)
- tel = Int()
- fax = Int()
- datenaiss = Datetime()
- tzdatenaiss = TZDatetime()
- test = Boolean(__permissions__={
- 'read': ('managers', 'users', 'guests'),
- 'update': ('managers',),
- })
- description = String()
- firstname = String(fulltextindexed=True, maxsize=64)
-
- concerne = SubjectRelation('Affaire')
- connait = SubjectRelation('Personne')
inline2 = SubjectRelation('Affaire', inlined=True, cardinality='?*')
custom_field_of_jungle = BabarTestType(jungle_speed=42)
-
-class Old(EntityType):
- name = String()
-
-
-class connait(RelationType):
- symmetric = True
-
-class concerne(RelationType):
- __permissions__ = {
- 'read': ('managers', 'users', 'guests'),
- 'add': ('managers', RRQLExpression('U has_update_permission S')),
- 'delete': ('managers', RRQLExpression('O owned_by U')),
- }
-
-class travaille(RelationDefinition):
- __permissions__ = {
- 'read': ('managers', 'users', 'guests'),
- 'add': ('managers', RRQLExpression('U has_update_permission S')),
- 'delete': ('managers', RRQLExpression('O owned_by U')),
- }
- subject = 'Personne'
- object = 'Societe'
-
-class comments(RelationDefinition):
- subject = 'Comment'
- object = 'Personne'
-
-class fiche(RelationDefinition):
- inlined = True
- subject = 'Personne'
- object = 'Card'
- cardinality = '??'
-
-class multisource_inlined_rel(RelationDefinition):
- inlined = True
- cardinality = '?*'
- subject = ('Card', 'Note')
- object = ('Affaire', 'Note')
-
-class multisource_rel(RelationDefinition):
- subject = ('Card', 'Note')
- object = 'Note'
-
-class multisource_crossed_rel(RelationDefinition):
- subject = ('Card', 'Note')
- object = 'Note'
-
-
-class see_also_1(RelationDefinition):
- name = 'see_also'
- subject = object = 'Folder'
-
-class see_also_2(RelationDefinition):
- name = 'see_also'
- subject = ('Bookmark', 'Note')
- object = ('Bookmark', 'Note')
-
-class evaluee(RelationDefinition):
- subject = ('Personne', 'CWUser', 'Societe')
- object = ('Note')
-
-class ecrit_par(RelationType):
- inlined = True
-
-class ecrit_par_1(RelationDefinition):
- name = 'ecrit_par'
- subject = 'Note'
- object ='Personne'
- constraints = [RQLConstraint('E concerns P, S version_of P')]
- cardinality = '?*'
-
-class ecrit_par_2(RelationDefinition):
- name = 'ecrit_par'
- subject = 'Note'
- object ='CWUser'
- cardinality='?*'
-
-
-class copain(RelationDefinition):
- subject = object = 'CWUser'
-
-class tags(RelationDefinition):
- subject = 'Tag'
- object = ('CWUser', 'CWGroup', 'State', 'Note', 'Card', 'Affaire')
-
-class filed_under(RelationDefinition):
- subject = ('Note', 'Affaire')
- object = 'Folder'
-
-class require_permission(RelationDefinition):
- subject = ('Card', 'Note', 'Personne')
- object = 'CWPermission'
-
-class require_state(RelationDefinition):
- subject = 'CWPermission'
- object = 'State'
-
-class personne_composite(RelationDefinition):
- subject='Personne'
- object='Personne'
- composite='subject'
-
-class personne_inlined(RelationDefinition):
- subject='Personne'
- object='Personne'
- cardinality='?*'
- inlined=True
-
-
-class login_user(RelationDefinition):
- subject = 'Personne'
- object = 'CWUser'
- cardinality = '??'
-
-class ambiguous_inlined(RelationDefinition):
- subject = ('Affaire', 'Note')
- object = 'CWUser'
- inlined = True
- cardinality = '?*'
--- a/server/test/data/migratedapp/schema.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/test/data/migratedapp/schema.py Tue Jun 10 09:49:45 2014 +0200
@@ -127,6 +127,7 @@
class evaluee(RelationDefinition):
subject = ('Personne', 'CWUser', 'Societe')
object = ('Note')
+ constraints = [RQLVocabularyConstraint('S owned_by U')]
class ecrit_par(RelationType):
__permissions__ = {'read': ('managers', 'users', 'guests',),
--- a/server/test/data/schema.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/test/data/schema.py Tue Jun 10 09:49:45 2014 +0200
@@ -177,14 +177,6 @@
subject = ('Card', 'Note')
object = ('Affaire', 'Note')
-class multisource_rel(RelationDefinition):
- subject = ('Card', 'Note')
- object = 'Note'
-
-class multisource_crossed_rel(RelationDefinition):
- subject = ('Card', 'Note')
- object = 'Note'
-
class see_also_1(RelationDefinition):
name = 'see_also'
--- a/server/test/data/sources_postgres Tue Jun 10 09:35:26 2014 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,14 +0,0 @@
-[system]
-
-db-driver = postgres
-db-host = localhost
-db-port = 5433
-adapter = native
-db-name = cw_fti_test
-db-encoding = UTF-8
-db-user = syt
-db-password = syt
-
-[admin]
-login = admin
-password = gingkow
--- a/server/test/unittest_checkintegrity.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/test/unittest_checkintegrity.py Tue Jun 10 09:49:45 2014 +0200
@@ -41,8 +41,9 @@
self.repo.shutdown()
def test_checks(self):
- check(self.repo, self.cnx, ('entities', 'relations', 'text_index', 'metadata'),
- reindex=False, fix=True, withpb=False)
+ with self.cnx:
+ check(self.repo, self.cnx, ('entities', 'relations', 'text_index', 'metadata'),
+ reindex=False, fix=True, withpb=False)
def test_reindex_all(self):
self.execute('INSERT Personne X: X nom "toto", X prenom "tutu"')
--- a/server/test/unittest_datafeed.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/test/unittest_datafeed.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2011-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2011-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -31,7 +31,6 @@
def test(self):
self.assertIn('myfeed', self.repo.sources_by_uri)
dfsource = self.repo.sources_by_uri['myfeed']
- self.assertNotIn(dfsource, self.repo.sources)
self.assertEqual(dfsource.latest_retrieval, None)
self.assertEqual(dfsource.synchro_interval, timedelta(seconds=60))
self.assertFalse(dfsource.fresh())
@@ -71,8 +70,8 @@
self.assertEqual(entity.absolute_url(), 'http://www.cubicweb.org/')
# test repo cache keys
self.assertEqual(self.repo._type_source_cache[entity.eid],
- ('Card', 'system', 'http://www.cubicweb.org/', 'myfeed'))
- self.assertEqual(self.repo._extid_cache[('http://www.cubicweb.org/', 'system')],
+ ('Card', 'http://www.cubicweb.org/', 'myfeed'))
+ self.assertEqual(self.repo._extid_cache['http://www.cubicweb.org/'],
entity.eid)
# test repull
session.set_cnxset()
@@ -87,8 +86,8 @@
self.assertEqual(stats['created'], set())
self.assertEqual(stats['updated'], set((entity.eid,)))
self.assertEqual(self.repo._type_source_cache[entity.eid],
- ('Card', 'system', 'http://www.cubicweb.org/', 'myfeed'))
- self.assertEqual(self.repo._extid_cache[('http://www.cubicweb.org/', 'system')],
+ ('Card', 'http://www.cubicweb.org/', 'myfeed'))
+ self.assertEqual(self.repo._extid_cache['http://www.cubicweb.org/'],
entity.eid)
self.assertEqual(dfsource.source_cwuris(self.session),
@@ -110,8 +109,8 @@
'extid': 'http://www.cubicweb.org/'}
)
self.assertEqual(self.repo._type_source_cache[entity.eid],
- ('Card', 'system', 'http://www.cubicweb.org/', 'myrenamedfeed'))
- self.assertEqual(self.repo._extid_cache[('http://www.cubicweb.org/', 'system')],
+ ('Card', 'http://www.cubicweb.org/', 'myrenamedfeed'))
+ self.assertEqual(self.repo._extid_cache['http://www.cubicweb.org/'],
entity.eid)
# test_delete_source
--- a/server/test/unittest_hook.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/test/unittest_hook.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -25,51 +25,35 @@
from cubicweb.server import hook
from cubicweb.hooks import integrity, syncschema
-def clean_session_ops(func):
- def wrapper(self, *args, **kwargs):
- try:
- return func(self, *args, **kwargs)
- finally:
- self.session.pending_operations[:] = []
- return wrapper
-
class OperationsTC(CubicWebTC):
def setUp(self):
CubicWebTC.setUp(self)
self.hm = self.repo.hm
- @clean_session_ops
def test_late_operation(self):
- session = self.session
- l1 = hook.LateOperation(session)
- l2 = hook.LateOperation(session)
- l3 = hook.Operation(session)
- self.assertEqual(session.pending_operations, [l3, l1, l2])
+ with self.admin_access.repo_cnx() as cnx:
+ l1 = hook.LateOperation(cnx)
+ l2 = hook.LateOperation(cnx)
+ l3 = hook.Operation(cnx)
+ self.assertEqual(cnx.pending_operations, [l3, l1, l2])
- @clean_session_ops
def test_single_last_operation(self):
- session = self.session
- l0 = hook.SingleLastOperation(session)
- l1 = hook.LateOperation(session)
- l2 = hook.LateOperation(session)
- l3 = hook.Operation(session)
- self.assertEqual(session.pending_operations, [l3, l1, l2, l0])
- l4 = hook.SingleLastOperation(session)
- self.assertEqual(session.pending_operations, [l3, l1, l2, l4])
+ with self.admin_access.repo_cnx() as cnx:
+ l0 = hook.SingleLastOperation(cnx)
+ l1 = hook.LateOperation(cnx)
+ l2 = hook.LateOperation(cnx)
+ l3 = hook.Operation(cnx)
+ self.assertEqual(cnx.pending_operations, [l3, l1, l2, l0])
+ l4 = hook.SingleLastOperation(cnx)
+ self.assertEqual(cnx.pending_operations, [l3, l1, l2, l4])
- @clean_session_ops
def test_global_operation_order(self):
- session = self.session
- op1 = integrity._DelayedDeleteOp(session)
- op2 = syncschema.RDefDelOp(session)
- # equivalent operation generated by op2 but replace it here by op3 so we
- # can check the result...
- op3 = syncschema.MemSchemaNotifyChanges(session)
- op4 = integrity._DelayedDeleteOp(session)
- op5 = integrity._CheckORelationOp(session)
- self.assertEqual(session.pending_operations, [op1, op2, op4, op5, op3])
-
+ with self.admin_access.repo_cnx() as cnx:
+ op1 = syncschema.RDefDelOp(cnx)
+ op2 = integrity._CheckORelationOp(cnx)
+ op3 = syncschema.MemSchemaNotifyChanges(cnx)
+ self.assertEqual([op1, op2, op3], cnx.pending_operations)
class HookCalled(Exception): pass
@@ -144,9 +128,10 @@
def test_session_open_close(self):
import hooks # cubicweb/server/test/data/hooks.py
- cnx = self.login('anon')
- self.assertEqual(hooks.CALLED_EVENTS['session_open'], 'anon')
- cnx.close()
+ anonaccess = self.new_access('anon')
+ with anonaccess.repo_cnx() as cnx:
+ self.assertEqual(hooks.CALLED_EVENTS['session_open'], 'anon')
+ anonaccess.close()
self.assertEqual(hooks.CALLED_EVENTS['session_close'], 'anon')
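The rewritten hook tests above show the 3.19 testing idiom: instead of poking `self.session` and cleaning `pending_operations` by hand with a decorator, each test opens an explicit connection whose operation queue is discarded when the context exits. A minimal sketch of the pattern, assuming a `CubicWebTC` subclass as in the diff:

    from cubicweb.devtools.testlib import CubicWebTC
    from cubicweb.server import hook

    class OperationOrderTC(CubicWebTC):
        def test_operation_order(self):
            # repo_cnx() yields a fresh connection; its pending operations
            # vanish with the context, so no manual cleanup is needed
            with self.admin_access.repo_cnx() as cnx:
                late = hook.LateOperation(cnx)
                plain = hook.Operation(cnx)
                # plain operations sort before late ones, as above
                self.assertEqual(cnx.pending_operations, [plain, late])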
--- a/server/test/unittest_ldapsource.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/test/unittest_ldapsource.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -31,7 +31,6 @@
from cubicweb.devtools.testlib import CubicWebTC
from cubicweb.devtools.repotest import RQLGeneratorTC
from cubicweb.devtools.httptest import get_available_port
-from cubicweb.devtools import get_test_db_handler
CONFIG_LDAPFEED = u'''
@@ -241,7 +240,7 @@
self.assertMetadata(e)
self.assertEqual(e.firstname, None)
self.assertEqual(e.surname, None)
- self.assertTrue('users' in [g.name for g in e.in_group])
+ self.assertIn('users', set(g.name for g in e.in_group))
self.assertEqual(e.owned_by[0].login, 'syt')
self.assertEqual(e.created_by, ())
addresses = [pe.address for pe in e.use_email]
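The `assertIn`/`assertNotIn` substitutions running through these test diffs are not purely cosmetic: unlike `assertTrue(x in y)`, which fails with a bare "False is not true", `assertIn` reports both the missing member and the container. A standalone illustration:

    import unittest

    class AssertInDemoTC(unittest.TestCase):
        def test_membership(self):
            groups = set(['guests', 'managers'])
            # on failure this prints both operands, e.g.
            #   AssertionError: 'users' not found in set(['guests', 'managers'])
            self.assertIn('users', groups)

    if __name__ == '__main__':
        unittest.main()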
--- a/server/test/unittest_migractions.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/test/unittest_migractions.py Tue Jun 10 09:49:45 2014 +0200
@@ -45,19 +45,18 @@
tags = CubicWebTC.tags | Tags(('server', 'migration', 'migractions'))
- @classmethod
- def _init_repo(cls):
- super(MigrationCommandsTC, cls)._init_repo()
+ def _init_repo(self):
+ super(MigrationCommandsTC, self)._init_repo()
        # we have to read the schema from the database to get eids for schema entities
- cls.repo.set_schema(cls.repo.deserialize_schema(), resetvreg=False)
+ self.repo.set_schema(self.repo.deserialize_schema(), resetvreg=False)
# hack to read the schema from data/migrschema
- config = cls.config
+ config = self.config
config.appid = join('data', 'migratedapp')
- config._apphome = cls.datapath('migratedapp')
+ config._apphome = self.datapath('migratedapp')
global migrschema
migrschema = config.load_schema()
config.appid = 'data'
- config._apphome = cls.datadir
+ config._apphome = self.datadir
assert 'Folder' in migrschema
def setUp(self):
@@ -65,7 +64,7 @@
self.mh = ServerMigrationHelper(self.repo.config, migrschema,
repo=self.repo, cnx=self.cnx,
interactive=False)
- assert self.cnx is self.mh._cnx
+ assert self.cnx is self.mh.cnx
assert self.session is self.mh.session, (self.session.id, self.mh.session.id)
def tearDown(self):
@@ -73,11 +72,11 @@
self.repo.vreg['etypes'].clear_caches()
def test_add_attribute_bool(self):
- self.assertFalse('yesno' in self.schema)
+ self.assertNotIn('yesno', self.schema)
self.session.create_entity('Note')
self.commit()
self.mh.cmd_add_attribute('Note', 'yesno')
- self.assertTrue('yesno' in self.schema)
+ self.assertIn('yesno', self.schema)
self.assertEqual(self.schema['yesno'].subjects(), ('Note',))
self.assertEqual(self.schema['yesno'].objects(), ('Boolean',))
self.assertEqual(self.schema['Note'].default('yesno'), False)
@@ -89,13 +88,13 @@
self.mh.rollback()
def test_add_attribute_int(self):
- self.assertFalse('whatever' in self.schema)
+ self.assertNotIn('whatever', self.schema)
self.session.create_entity('Note')
self.session.commit(free_cnxset=False)
orderdict = dict(self.mh.rqlexec('Any RTN, O WHERE X name "Note", RDEF from_entity X, '
'RDEF relation_type RT, RDEF ordernum O, RT name RTN'))
self.mh.cmd_add_attribute('Note', 'whatever')
- self.assertTrue('whatever' in self.schema)
+ self.assertIn('whatever', self.schema)
self.assertEqual(self.schema['whatever'].subjects(), ('Note',))
self.assertEqual(self.schema['whatever'].objects(), ('Int',))
self.assertEqual(self.schema['Note'].default('whatever'), 0)
@@ -124,12 +123,12 @@
self.mh.rollback()
def test_add_attribute_varchar(self):
- self.assertFalse('whatever' in self.schema)
+ self.assertNotIn('whatever', self.schema)
self.session.create_entity('Note')
self.session.commit(free_cnxset=False)
- self.assertFalse('shortpara' in self.schema)
+ self.assertNotIn('shortpara', self.schema)
self.mh.cmd_add_attribute('Note', 'shortpara')
- self.assertTrue('shortpara' in self.schema)
+ self.assertIn('shortpara', self.schema)
self.assertEqual(self.schema['shortpara'].subjects(), ('Note', ))
self.assertEqual(self.schema['shortpara'].objects(), ('String', ))
# test created column is actually a varchar(64)
@@ -143,15 +142,15 @@
self.mh.rollback()
def test_add_datetime_with_default_value_attribute(self):
- self.assertFalse('mydate' in self.schema)
- self.assertFalse('oldstyledefaultdate' in self.schema)
- self.assertFalse('newstyledefaultdate' in self.schema)
+ self.assertNotIn('mydate', self.schema)
+ self.assertNotIn('oldstyledefaultdate', self.schema)
+ self.assertNotIn('newstyledefaultdate', self.schema)
self.mh.cmd_add_attribute('Note', 'mydate')
self.mh.cmd_add_attribute('Note', 'oldstyledefaultdate')
self.mh.cmd_add_attribute('Note', 'newstyledefaultdate')
- self.assertTrue('mydate' in self.schema)
- self.assertTrue('oldstyledefaultdate' in self.schema)
- self.assertTrue('newstyledefaultdate' in self.schema)
+ self.assertIn('mydate', self.schema)
+ self.assertIn('oldstyledefaultdate', self.schema)
+ self.assertIn('newstyledefaultdate', self.schema)
self.assertEqual(self.schema['mydate'].subjects(), ('Note', ))
self.assertEqual(self.schema['mydate'].objects(), ('Date', ))
testdate = date(2005, 12, 13)
@@ -192,12 +191,12 @@
self.mh.rollback()
def test_rename_attribute(self):
- self.assertFalse('civility' in self.schema)
+ self.assertNotIn('civility', self.schema)
eid1 = self.mh.rqlexec('INSERT Personne X: X nom "lui", X sexe "M"')[0][0]
eid2 = self.mh.rqlexec('INSERT Personne X: X nom "l\'autre", X sexe NULL')[0][0]
self.mh.cmd_rename_attribute('Personne', 'sexe', 'civility')
- self.assertFalse('sexe' in self.schema)
- self.assertTrue('civility' in self.schema)
+ self.assertNotIn('sexe', self.schema)
+ self.assertIn('civility', self.schema)
# test data has been backported
c1 = self.mh.rqlexec('Any C WHERE X eid %s, X civility C' % eid1)[0][0]
self.assertEqual(c1, 'M')
@@ -217,13 +216,13 @@
self.assertEqual(s1, "foo")
def test_add_entity_type(self):
- self.assertFalse('Folder2' in self.schema)
- self.assertFalse('filed_under2' in self.schema)
+ self.assertNotIn('Folder2', self.schema)
+ self.assertNotIn('filed_under2', self.schema)
self.mh.cmd_add_entity_type('Folder2')
- self.assertTrue('Folder2' in self.schema)
- self.assertTrue('Old' in self.schema)
+ self.assertIn('Folder2', self.schema)
+ self.assertIn('Old', self.schema)
self.assertTrue(self.session.execute('CWEType X WHERE X name "Folder2"'))
- self.assertTrue('filed_under2' in self.schema)
+ self.assertIn('filed_under2', self.schema)
self.assertTrue(self.session.execute('CWRType X WHERE X name "filed_under2"'))
self.assertEqual(sorted(str(rs) for rs in self.schema['Folder2'].subject_relations()),
['created_by', 'creation_date', 'cw_source', 'cwuri',
@@ -254,7 +253,7 @@
self.session.commit(free_cnxset=False)
eschema = self.schema.eschema('Folder2')
self.mh.cmd_drop_entity_type('Folder2')
- self.assertFalse('Folder2' in self.schema)
+ self.assertNotIn('Folder2', self.schema)
self.assertFalse(self.session.execute('CWEType X WHERE X name "Folder2"'))
# test automatic workflow deletion
self.assertFalse(self.session.execute('Workflow X WHERE NOT X workflow_of ET'))
@@ -263,14 +262,14 @@
def test_rename_entity_type(self):
entity = self.mh.create_entity('Old', name=u'old')
- self.repo.type_and_source_from_eid(entity.eid)
+ self.repo.type_and_source_from_eid(entity.eid, entity._cw)
self.mh.cmd_rename_entity_type('Old', 'New')
self.mh.cmd_rename_attribute('New', 'name', 'new_name')
def test_add_drop_relation_type(self):
self.mh.cmd_add_entity_type('Folder2', auto=False)
self.mh.cmd_add_relation_type('filed_under2')
- self.assertTrue('filed_under2' in self.schema)
+ self.assertIn('filed_under2', self.schema)
        # Old will be missing as it has been renamed to 'New' in the migrated
        # schema, while New hasn't been added here.
self.assertEqual(sorted(str(e) for e in self.schema['filed_under2'].subjects()),
@@ -278,7 +277,7 @@
if not e.final and e != 'Old'))
self.assertEqual(self.schema['filed_under2'].objects(), ('Folder2',))
self.mh.cmd_drop_relation_type('filed_under2')
- self.assertFalse('filed_under2' in self.schema)
+ self.assertNotIn('filed_under2', self.schema)
def test_add_relation_definition_nortype(self):
self.mh.cmd_add_relation_definition('Personne', 'concerne2', 'Affaire')
@@ -295,9 +294,9 @@
self.mh.rqlexec('SET X concerne2 Y WHERE X is Personne, Y is Affaire')
self.session.commit(free_cnxset=False)
self.mh.cmd_drop_relation_definition('Personne', 'concerne2', 'Affaire')
- self.assertTrue('concerne2' in self.schema)
+ self.assertIn('concerne2', self.schema)
self.mh.cmd_drop_relation_definition('Personne', 'concerne2', 'Note')
- self.assertFalse('concerne2' in self.schema)
+ self.assertNotIn('concerne2', self.schema)
def test_drop_relation_definition_existant_rtype(self):
self.assertEqual(sorted(str(e) for e in self.schema['concerne'].subjects()),
@@ -524,10 +523,10 @@
try:
self.mh.cmd_remove_cube('email', removedeps=True)
            # file was there because it's an email dependency, should have been removed
- self.assertFalse('email' in self.config.cubes())
- self.assertFalse(self.config.cube_dir('email') in self.config.cubes_path())
- self.assertFalse('file' in self.config.cubes())
- self.assertFalse(self.config.cube_dir('file') in self.config.cubes_path())
+ self.assertNotIn('email', self.config.cubes())
+ self.assertNotIn(self.config.cube_dir('email'), self.config.cubes_path())
+ self.assertNotIn('file', self.config.cubes())
+ self.assertNotIn(self.config.cube_dir('file'), self.config.cubes_path())
for ertype in ('Email', 'EmailThread', 'EmailPart', 'File',
'sender', 'in_thread', 'reply_to', 'data_format'):
self.assertFalse(ertype in schema, ertype)
@@ -547,10 +546,10 @@
raise
finally:
self.mh.cmd_add_cube('email')
- self.assertTrue('email' in self.config.cubes())
- self.assertTrue(self.config.cube_dir('email') in self.config.cubes_path())
- self.assertTrue('file' in self.config.cubes())
- self.assertTrue(self.config.cube_dir('file') in self.config.cubes_path())
+ self.assertIn('email', self.config.cubes())
+ self.assertIn(self.config.cube_dir('email'), self.config.cubes_path())
+ self.assertIn('file', self.config.cubes())
+ self.assertIn(self.config.cube_dir('file'), self.config.cubes_path())
for ertype in ('Email', 'EmailThread', 'EmailPart', 'File',
'sender', 'in_thread', 'reply_to', 'data_format'):
self.assertTrue(ertype in schema, ertype)
@@ -584,8 +583,8 @@
try:
self.mh.cmd_remove_cube('email')
cubes.remove('email')
- self.assertFalse('email' in self.config.cubes())
- self.assertTrue('file' in self.config.cubes())
+ self.assertNotIn('email', self.config.cubes())
+ self.assertIn('file', self.config.cubes())
for ertype in ('Email', 'EmailThread', 'EmailPart',
'sender', 'in_thread', 'reply_to'):
self.assertFalse(ertype in schema, ertype)
@@ -595,7 +594,7 @@
raise
finally:
self.mh.cmd_add_cube('email')
- self.assertTrue('email' in self.config.cubes())
+ self.assertIn('email', self.config.cubes())
# trick: overwrite self.maxeid to avoid deletion of just reintroduced
# types (and their associated tables!)
self.maxeid = self.session.execute('Any MAX(X)')[0][0]
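Beyond the assertion cleanups, the migractions diff records two real API shifts: `_init_repo` becomes a plain instance method, and `repo.type_and_source_from_eid` now takes the calling session or connection explicitly (`entity._cw` above) instead of relying on internal state. A hedged sketch of the new call, assuming a repository and an open connection `cnx`; per the datafeed tests earlier in this changeset, the cached value is an `(etype, extid, source_name)` triple:

    def describe(repo, cnx, eid):
        # 3.19 signature: the session/connection is passed explicitly
        etype, extid, source = repo.type_and_source_from_eid(eid, cnx)
        return etype, source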
--- a/server/test/unittest_msplanner.py Tue Jun 10 09:35:26 2014 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,2809 +0,0 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
-# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
-#
-# This file is part of CubicWeb.
-#
-# CubicWeb is free software: you can redistribute it and/or modify it under the
-# terms of the GNU Lesser General Public License as published by the Free
-# Software Foundation, either version 2.1 of the License, or (at your option)
-# any later version.
-#
-# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
-# details.
-#
-# You should have received a copy of the GNU Lesser General Public License along
-# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""unit tests for module cubicweb.server.msplanner"""
-
-from logilab.common.decorators import clear_cache
-from yams.buildobjs import RelationDefinition
-from rql import BadRQLQuery
-
-from cubicweb.devtools import get_test_db_handler, TestServerConfiguration
-from cubicweb.devtools.repotest import BasePlannerTC, test_plan
-
-class _SetGenerator(object):
- """singleton to easily create set using "s[0]" or "s[0,1,2]" for instance
- """
- def __getitem__(self, key):
- try:
- it = iter(key)
- except TypeError:
- it = (key,)
- return set(it)
-s = _SetGenerator()
-
-from cubicweb.schema import ERQLExpression
-from cubicweb.server.sources import AbstractSource
-from cubicweb.server.msplanner import MSPlanner, PartPlanInformation
-
-class FakeUserROSource(AbstractSource):
- support_entities = {'CWUser': False}
- support_relations = {}
- def syntax_tree_search(self, *args, **kwargs):
- return []
-
-
-class FakeCardSource(AbstractSource):
- support_entities = {'Card': True, 'Note': True, 'State': True}
- support_relations = {'in_state': True, 'multisource_rel': True, 'multisource_inlined_rel': True,
- 'multisource_crossed_rel': True,}
- dont_cross_relations = set(('fiche', 'state_of'))
- cross_relations = set(('multisource_crossed_rel',))
-
- def syntax_tree_search(self, *args, **kwargs):
- return []
-
-
-class FakeDataFeedSource(FakeCardSource):
- copy_based_source = True
-
-X_ALL_SOLS = sorted([{'X': 'Affaire'}, {'X': 'BaseTransition'}, {'X': 'Basket'},
- {'X': 'Bookmark'}, {'X': 'CWAttribute'}, {'X': 'CWCache'},
- {'X': 'CWConstraint'}, {'X': 'CWConstraintType'}, {'X': 'CWDataImport'}, {'X': 'CWEType'},
- {'X': 'CWGroup'}, {'X': 'CWPermission'}, {'X': 'CWProperty'},
- {'X': 'CWRType'}, {'X': 'CWRelation'},
- {'X': 'CWSource'}, {'X': 'CWSourceHostConfig'}, {'X': 'CWSourceSchemaConfig'},
- {'X': 'CWUser'}, {'X': 'CWUniqueTogetherConstraint'},
- {'X': 'Card'}, {'X': 'Comment'}, {'X': 'Division'},
- {'X': 'Email'}, {'X': 'EmailAddress'}, {'X': 'EmailPart'},
- {'X': 'EmailThread'}, {'X': 'ExternalUri'}, {'X': 'File'},
- {'X': 'Folder'}, {'X': 'Note'}, {'X': 'Old'},
- {'X': 'Personne'}, {'X': 'RQLExpression'}, {'X': 'Societe'},
- {'X': 'State'}, {'X': 'SubDivision'}, {'X': 'SubWorkflowExitPoint'},
- {'X': 'Tag'}, {'X': 'TrInfo'}, {'X': 'Transition'},
- {'X': 'Workflow'}, {'X': 'WorkflowTransition'}])
-
-
-# keep cnx so it's not garbage collected and the associated session is closed
-def setUpModule(*args):
- global repo, cnx
- handler = get_test_db_handler(TestServerConfiguration(apphome=BaseMSPlannerTC.datadir))
- handler.build_db_cache()
- repo, cnx = handler.get_repo_and_cnx()
-
-def tearDownModule(*args):
- global repo, cnx
- del repo, cnx
-
-
-class BaseMSPlannerTC(BasePlannerTC):
- """test planner related feature on a 3-sources repository:
-
- * system source supporting everything
- * ldap source supporting CWUser
- * rql source supporting Card
- """
-
- def setUp(self):
- self.__class__.repo = repo
- #_QuerierTC.setUp(self)
- self.setup()
- # hijack Affaire security
- affreadperms = list(self.schema['Affaire'].permissions['read'])
- self.prevrqlexpr_affaire = affreadperms[-1]
- # add access to type attribute so S can't be invariant
- affreadperms[-1] = ERQLExpression('X concerne S?, S owned_by U, S type "X"')
- self.schema['Affaire'].set_action_permissions('read', affreadperms)
- # hijack CWUser security
- userreadperms = list(self.schema['CWUser'].permissions['read'])
- self.prevrqlexpr_user = userreadperms[-1]
- userreadperms[-1] = ERQLExpression('X owned_by U')
- self.schema['CWUser'].set_action_permissions('read', userreadperms)
- self.add_source(FakeUserROSource, 'ldap')
- self.add_source(FakeCardSource, 'cards')
- self.add_source(FakeDataFeedSource, 'datafeed')
-
- def tearDown(self):
- # restore hijacked security
- self.restore_orig_affaire_security()
- self.restore_orig_cwuser_security()
- super(BaseMSPlannerTC, self).tearDown()
-
- def restore_orig_affaire_security(self):
- affreadperms = list(self.schema['Affaire'].permissions['read'])
- affreadperms[-1] = self.prevrqlexpr_affaire
- self.schema['Affaire'].set_action_permissions('read', affreadperms)
-
- def restore_orig_cwuser_security(self):
- if hasattr(self, '_orig_cwuser_security_restored'):
- return
- self._orig_cwuser_security_restored = True
- userreadperms = list(self.schema['CWUser'].permissions['read'])
- userreadperms[-1] = self.prevrqlexpr_user
- self.schema['CWUser'].set_action_permissions('read', userreadperms)
-
-
-class PartPlanInformationTC(BaseMSPlannerTC):
-
- def _test(self, rql, *args):
- if len(args) == 3:
- kwargs, sourcesterms, needsplit = args
- else:
- sourcesterms, needsplit = args
- kwargs = None
- plan = self._prepare_plan(rql, kwargs)
- union = plan.rqlst
- plan.preprocess(union)
- ppi = PartPlanInformation(plan, union.children[0])
- for sourcevars in ppi._sourcesterms.itervalues():
- for var in list(sourcevars):
- solindices = sourcevars.pop(var)
- sourcevars[var._ms_table_key()] = solindices
- self.assertEqual(ppi._sourcesterms, sourcesterms)
- self.assertEqual(ppi.needsplit, needsplit)
-
-
- def test_simple_system_only(self):
- """retrieve entities only supported by the system source"""
- self._test('CWGroup X',
- {self.system: {'X': s[0]}}, False)
-
- def test_simple_system_ldap(self):
- """retrieve CWUser X from both sources and return concatenation of results
- """
- self._test('CWUser X',
- {self.system: {'X': s[0]}, self.ldap: {'X': s[0]}}, False)
-
- def test_simple_system_rql(self):
- """retrieve Card X from both sources and return concatenation of results
- """
- self._test('Any X, XT WHERE X is Card, X title XT',
- {self.system: {'X': s[0]}, self.cards: {'X': s[0]}}, False)
-
- def test_simple_eid_specified(self):
- """retrieve CWUser X from system source (eid is specified, can locate the entity)
- """
- ueid = self.session.user.eid
- self._test('Any X,L WHERE X eid %(x)s, X login L', {'x': ueid},
- {self.system: {'X': s[0]}}, False)
-
- def test_simple_eid_invariant(self):
- """retrieve CWUser X from system source (eid is specified, can locate the entity)
- """
- ueid = self.session.user.eid
- self._test('Any X WHERE X eid %(x)s', {'x': ueid},
- {self.system: {'x': s[0]}}, False)
-
- def test_simple_invariant(self):
- """retrieve CWUser X from system source only (X is invariant and in_group not supported by ldap source)
- """
- self._test('Any X WHERE X is CWUser, X in_group G, G name "users"',
- {self.system: {'X': s[0], 'G': s[0], 'in_group': s[0]}}, False)
-
- def test_security_has_text(self):
- """retrieve CWUser X from system source only (has_text not supported by ldap source)
- """
- # specify CWUser instead of any since the way this test is written we aren't well dealing
- # with ambigous query (eg only considering the first solution)
- self._test('CWUser X WHERE X has_text "bla"',
- {self.system: {'X': s[0]}}, False)
-
- def test_complex_base(self):
- """
- 1. retrieve Any X, L WHERE X is CWUser, X login L from system and ldap sources, store
- concatenation of results into a temporary table
- 2. return the result of Any X, L WHERE X is TMP, X login L, X in_group G,
- G name 'users' on the system source
- """
- self._test('Any X,L WHERE X is CWUser, X in_group G, X login L, G name "users"',
- {self.system: {'X': s[0], 'G': s[0], 'in_group': s[0]},
- self.ldap : {'X': s[0]}}, True)
-
- def test_complex_invariant_ordered(self):
- """
- 1. retrieve Any X,AA WHERE X modification_date AA from system and ldap sources, store
- concatenation of results into a temporary table
- 2. return the result of Any X,AA ORDERBY AA WHERE %s owned_by X, X modification_date AA
- on the system source
- """
- ueid = self.session.user.eid
- self._test('Any X,AA ORDERBY AA WHERE E eid %(x)s, E owned_by X, X modification_date AA', {'x': ueid},
- {self.system: {'x': s[0], 'X': s[0], 'owned_by': s[0]},
- self.ldap : {'X': s[0]}}, True)
-
- def test_complex_invariant(self):
- """
- 1. retrieve Any X,L,AA WHERE X login L, X modification_date AA from system and ldap sources, store
- concatenation of results into a temporary table
- 2. return the result of Any X,L,AA WHERE %s owned_by X, X login L, X modification_date AA
- on the system source
- """
- ueid = self.session.user.eid
- self._test('Any X,L,AA WHERE E eid %(x)s, E owned_by X, X login L, X modification_date AA', {'x': ueid},
- {self.system: {'x': s[0], 'X': s[0], 'owned_by': s[0]},
- self.ldap : {'X': s[0]}}, True)
-
- def test_complex_ambigous(self):
- """retrieve CWUser X from system and ldap sources, Person X from system source only
- """
- self._test('Any X,F WHERE X firstname F',
- {self.system: {'X': s[0, 1]},
- self.ldap: {'X': s[0]}}, True)
-
- def test_complex_multiple(self):
- """
- 1. retrieve Any X,A,Y,B WHERE X login A, Y login B from system and ldap sources, store
- cartesian product of results into a temporary table
- 2. return the result of Any X,Y WHERE X login 'syt', Y login 'adim'
- on the system source
- """
- ueid = self.session.user.eid
- self._test('Any X,Y WHERE X login "syt", Y login "adim"', {'x': ueid},
- {self.system: {'Y': s[0], 'X': s[0]},
- self.ldap: {'Y': s[0], 'X': s[0]}}, True)
-
- def test_complex_aggregat(self):
- solindexes = set(range(len([e for e in self.schema.entities() if not e.final])))
- self._test('Any MAX(X)',
- {self.system: {'X': solindexes}}, False)
-
- def test_complex_optional(self):
- ueid = self.session.user.eid
- self._test('Any U WHERE WF wf_info_for X, X eid %(x)s, WF owned_by U?, WF from_state FS', {'x': ueid},
- {self.system: {'WF': s[0], 'FS': s[0], 'U': s[0],
- 'from_state': s[0], 'owned_by': s[0], 'wf_info_for': s[0],
- 'x': s[0]}},
- False)
-
- def test_exists4(self):
- """
- State S could come from both rql source and system source,
- but since X cannot come from the rql source, the solution
- {self.cards : 'S'} must be removed
- """
- self._test('Any G,L WHERE X in_group G, X login L, G name "managers", '
- 'EXISTS(X copain T, T login L, T login in ("comme", "cochon")) OR '
- 'EXISTS(X in_state S, S name "pascontent", NOT X copain T2, T2 login "billy")',
- {self.system: {'X': s[0], 'S': s[0], 'T2': s[0], 'T': s[0], 'G': s[0], 'copain': s[0], 'in_group': s[0]},
- self.ldap: {'X': s[0], 'T2': s[0], 'T': s[0]}},
- True)
-
- def test_relation_need_split(self):
- self._test('Any X, S WHERE X in_state S',
- {self.system: {'X': s[0, 1, 2], 'S': s[0, 1, 2]},
- self.cards: {'X': s[2], 'S': s[2]}},
- True)
-
- def test_not_relation_need_split(self):
- self._test('Any SN WHERE NOT X in_state S, S name SN',
- {self.cards: {'X': s[2], 'S': s[0, 1, 2]},
- self.system: {'X': s[0, 1, 2], 'S': s[0, 1, 2]}},
- True)
-
- def test_not_relation_no_split_external(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- # similar to the above test but with an eid coming from the external source.
- # the same plan may be used, since we won't find any record in the system source
- # linking 9999999 to a state
- self._test('Any SN WHERE NOT X in_state S, X eid %(x)s, S name SN',
- {'x': 999999},
- {self.cards: {'x': s[0], 'S': s[0]},
- self.system: {'x': s[0], 'S': s[0]}},
- False)
-
- def test_relation_restriction_ambigous_need_split(self):
- self._test('Any X,T WHERE X in_state S, S name "pending", T tags X',
- {self.system: {'X': s[0, 1, 2], 'S': s[0, 1, 2], 'T': s[0, 1, 2], 'tags': s[0, 1, 2]},
- self.cards: {'X': s[2], 'S': s[2]}},
- True)
-
- def test_simplified_var(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- # need access to source since X table has to be accessed because of the outer join
- self._test('Any U WHERE U in_group G, (G name IN ("managers", "logilab") OR (X require_permission P?, P name "bla", P require_group G)), X eid %(x)s, U eid %(u)s',
- {'x': 999999, 'u': self.session.user.eid},
- {self.system: {'P': s[0], 'G': s[0],
- 'require_permission': s[0], 'in_group': s[0], 'P': s[0], 'require_group': s[0],
- 'u': s[0]},
- self.cards: {'X': s[0]}},
- True)
-
- def test_delete_relation1(self):
- ueid = self.session.user.eid
- self._test('Any X, Y WHERE X created_by Y, X eid %(x)s, NOT Y eid %(y)s',
- {'x': ueid, 'y': ueid},
- {self.system: {'Y': s[0], 'created_by': s[0], 'x': s[0]}},
- False)
-
- def test_crossed_relation_eid_1_needattr(self):
- repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
- ueid = self.session.user.eid
- self._test('Any Y,T WHERE X eid %(x)s, X multisource_crossed_rel Y, Y type T',
- {'x': 999999,},
- {self.cards: {'Y': s[0]}, self.system: {'Y': s[0], 'x': s[0]}},
- True)
-
- def test_crossed_relation_eid_1_invariant(self):
- repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
- self._test('Any Y WHERE X eid %(x)s, X multisource_crossed_rel Y',
- {'x': 999999},
- {self.system: {'Y': s[0], 'x': s[0]}},
- False)
-
- def test_crossed_relation_eid_2_invariant(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self._test('Any Y WHERE X eid %(x)s, X multisource_crossed_rel Y',
- {'x': 999999,},
- {self.cards: {'Y': s[0], 'multisource_crossed_rel': s[0], 'x': s[0]},
- self.system: {'Y': s[0], 'multisource_crossed_rel': s[0], 'x': s[0]}},
- False)
-
- def test_version_crossed_depends_on_1(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self._test('Any X,AD,AE WHERE E eid %(x)s, E multisource_crossed_rel X, X in_state AD, AD name AE',
- {'x': 999999},
- {self.cards: {'X': s[0], 'AD': s[0], 'multisource_crossed_rel': s[0], 'x': s[0]},
- self.system: {'X': s[0], 'AD': s[0], 'multisource_crossed_rel': s[0], 'x': s[0]}},
- True)
-
- def test_version_crossed_depends_on_2(self):
- repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
- self._test('Any X,AD,AE WHERE E eid %(x)s, E multisource_crossed_rel X, X in_state AD, AD name AE',
- {'x': 999999},
- {self.cards: {'X': s[0], 'AD': s[0]},
- self.system: {'X': s[0], 'AD': s[0], 'x': s[0]}},
- True)
-
- def test_simplified_var_3(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- repo._type_source_cache[999998] = ('State', 'cards', 999998, 'cards')
- self._test('Any S,T WHERE S eid %(s)s, N eid %(n)s, N type T, N is Note, S is State',
- {'n': 999999, 's': 999998},
- {self.cards: {'s': s[0], 'N': s[0]}}, False)
-
-
-
-class MSPlannerTC(BaseMSPlannerTC):
-
- def setUp(self):
- BaseMSPlannerTC.setUp(self)
- self.planner = MSPlanner(self.o.schema, self.repo.vreg.rqlhelper)
- for cached in ('rel_type_sources', 'can_cross_relation', 'is_multi_sources_relation'):
- clear_cache(self.repo, cached)
-
- _test = test_plan
-
- def test_simple_system_only(self):
- """retrieve entities only supported by the system source
- """
- self._test('CWGroup X',
- [('OneFetchStep', [('Any X WHERE X is CWGroup', [{'X': 'CWGroup'}])],
- None, None, [self.system], {}, [])])
-
- def test_simple_system_only_limit(self):
- """retrieve entities only supported by the system source
- """
- self._test('CWGroup X LIMIT 10',
- [('OneFetchStep', [('Any X LIMIT 10 WHERE X is CWGroup', [{'X': 'CWGroup'}])],
- 10, None, [self.system], {}, [])])
-
- def test_simple_system_only_limit_offset(self):
- """retrieve entities only supported by the system source
- """
- self._test('CWGroup X LIMIT 10 OFFSET 10',
- [('OneFetchStep', [('Any X LIMIT 10 OFFSET 10 WHERE X is CWGroup', [{'X': 'CWGroup'}])],
- 10, 10, [self.system], {}, [])])
-
- def test_simple_system_ldap(self):
- """retrieve CWUser X from both sources and return concatenation of results
- """
- self._test('CWUser X',
- [('OneFetchStep', [('Any X WHERE X is CWUser', [{'X': 'CWUser'}])],
- None, None, [self.ldap, self.system], {}, [])])
-
- def test_simple_system_ldap_limit(self):
- """retrieve CWUser X from both sources and return concatenation of results
- """
- self._test('CWUser X LIMIT 10',
- [('OneFetchStep', [('Any X LIMIT 10 WHERE X is CWUser', [{'X': 'CWUser'}])],
- 10, None, [self.ldap, self.system], {}, [])])
-
- def test_simple_system_ldap_limit_offset(self):
- """retrieve CWUser X from both sources and return concatenation of results
- """
- self._test('CWUser X LIMIT 10 OFFSET 10',
- [('OneFetchStep', [('Any X LIMIT 10 OFFSET 10 WHERE X is CWUser', [{'X': 'CWUser'}])],
- 10, 10, [self.ldap, self.system], {}, [])])
-
- def test_simple_system_ldap_ordered_limit_offset(self):
- """retrieve CWUser X from both sources and return concatenation of results
- """
- self._test('CWUser X ORDERBY X LIMIT 10 OFFSET 10',
- [('AggrStep', 'SELECT table0.C0 FROM table0\nORDER BY table0.C0\nLIMIT 10\nOFFSET 10', None, [
- ('FetchStep', [('Any X WHERE X is CWUser', [{'X': 'CWUser'}])],
- [self.ldap, self.system], {}, {'X': 'table0.C0'}, []),
- ]),
- ])
- def test_simple_system_ldap_aggregat(self):
- """retrieve CWUser X from both sources and return concatenation of results
- """
- # COUNT(X) is kept in sub-step and transformed into SUM(X) in the AggrStep
- self._test('Any COUNT(X) WHERE X is CWUser',
- [('AggrStep', 'SELECT SUM(table0.C0) FROM table0', None, [
- ('FetchStep', [('Any COUNT(X) WHERE X is CWUser', [{'X': 'CWUser'}])],
- [self.ldap, self.system], {}, {'COUNT(X)': 'table0.C0'}, []),
- ]),
- ])
-
- def test_simple_system_rql(self):
- """retrieve Card X from both sources and return concatenation of results
- """
- self._test('Any X, XT WHERE X is Card, X title XT',
- [('OneFetchStep', [('Any X,XT WHERE X is Card, X title XT', [{'X': 'Card', 'XT': 'String'}])],
- None, None, [self.cards, self.system], {}, [])])
-
- def test_simple_eid_specified(self):
- """retrieve CWUser X from system source (eid is specified, can locate the entity)
- """
- ueid = self.session.user.eid
- self._test('Any X,L WHERE X eid %(x)s, X login L',
- [('OneFetchStep', [('Any X,L WHERE X eid %s, X login L'%ueid, [{'X': 'CWUser', 'L': 'String'}])],
- None, None, [self.system], {}, [])],
- {'x': ueid})
-
- def test_simple_eid_invariant(self):
- """retrieve CWUser X from system source (eid is specified, can locate the entity)
- """
- ueid = self.session.user.eid
- self._test('Any X WHERE X eid %(x)s',
- [('OneFetchStep', [('Any %s'%ueid, [{}])],
- None, None, [self.system], {}, [])],
- {'x': ueid})
-
- def test_simple_invariant(self):
- """retrieve CWUser X from system source only (X is invariant and in_group not supported by ldap source)
- """
- self._test('Any X WHERE X is CWUser, X in_group G, G name "users"',
- [('OneFetchStep', [('Any X WHERE X is CWUser, X in_group G, G name "users"',
- [{'X': 'CWUser', 'G': 'CWGroup'}])],
- None, None, [self.system], {}, [])])
-
- def test_complex_base(self):
- """
- 1. retrieve Any X, L WHERE X is CWUser, X login L from system and ldap sources, store
- concatenation of results into a temporary table
- 2. return the result of Any X, L WHERE X is TMP, X login LX in_group G,
- G name 'users' on the system source
- """
- self._test('Any X,L WHERE X is CWUser, X in_group G, X login L, G name "users"',
- [('FetchStep', [('Any X,L WHERE X login L, X is CWUser', [{'X': 'CWUser', 'L': 'String'}])],
- [self.ldap, self.system], None,
- {'X': 'table0.C0', 'X.login': 'table0.C1', 'L': 'table0.C1'}, []),
- ('OneFetchStep', [('Any X,L WHERE X in_group G, X login L, G name "users", G is CWGroup, X is CWUser',
- [{'X': 'CWUser', 'L': 'String', 'G': 'CWGroup'}])],
- None, None, [self.system],
- {'X': 'table0.C0', 'X.login': 'table0.C1', 'L': 'table0.C1'}, [])
- ])
-
- def test_complex_base_limit_offset(self):
- """
- 1. retrieve Any X, L WHERE X is CWUser, X login L from system and ldap sources, store
- concatenation of results into a temporary table
- 2. return the result of Any X, L WHERE X is TMP, X login LX in_group G,
- G name 'users' on the system source
- """
- self._test('Any X,L LIMIT 10 OFFSET 10 WHERE X is CWUser, X in_group G, X login L, G name "users"',
- [('FetchStep', [('Any X,L WHERE X login L, X is CWUser', [{'X': 'CWUser', 'L': 'String'}])],
- [self.ldap, self.system], None,
- {'X': 'table0.C0', 'X.login': 'table0.C1', 'L': 'table0.C1'}, []),
- ('OneFetchStep', [('Any X,L LIMIT 10 OFFSET 10 WHERE X in_group G, X login L, G name "users", G is CWGroup, X is CWUser',
- [{'X': 'CWUser', 'L': 'String', 'G': 'CWGroup'}])],
- 10, 10,
- [self.system], {'X': 'table0.C0', 'X.login': 'table0.C1', 'L': 'table0.C1'}, [])
- ])
-
- def test_complex_ordered(self):
- self._test('Any L ORDERBY L WHERE X login L',
- [('AggrStep', 'SELECT table0.C0 FROM table0\nORDER BY table0.C0', None,
- [('FetchStep', [('Any L WHERE X login L, X is CWUser',
- [{'X': 'CWUser', 'L': 'String'}])],
- [self.ldap, self.system], {}, {'X.login': 'table0.C0', 'L': 'table0.C0'}, []),
- ])
- ])
-
- def test_complex_ordered_limit_offset(self):
- self._test('Any L ORDERBY L LIMIT 10 OFFSET 10 WHERE X login L',
- [('AggrStep', 'SELECT table0.C0 FROM table0\nORDER BY table0.C0\nLIMIT 10\nOFFSET 10', None,
- [('FetchStep', [('Any L WHERE X login L, X is CWUser',
- [{'X': 'CWUser', 'L': 'String'}])],
- [self.ldap, self.system], {}, {'X.login': 'table0.C0', 'L': 'table0.C0'}, []),
- ])
- ])
-
- def test_complex_invariant_ordered(self):
- """
- 1. retrieve Any X,AA WHERE X modification_date AA from system and ldap sources, store
- concatenation of results into a temporary table
- 2. return the result of Any X,AA ORDERBY AA WHERE %s owned_by X, X modification_date AA
- on the system source
-
- herrr, this is what is expected by the XXX :(, not the actual result (which is correct anyway)
- """
- ueid = self.session.user.eid
- self._test('Any X,AA ORDERBY AA WHERE E eid %(x)s, E owned_by X, X modification_date AA',
- [('FetchStep',
- [('Any X,AA WHERE X modification_date AA, X is CWUser',
- [{'AA': 'Datetime', 'X': 'CWUser'}])],
- [self.ldap, self.system], None,
- {'AA': 'table0.C1', 'X': 'table0.C0', 'X.modification_date': 'table0.C1'}, []),
- ('OneFetchStep',
- [('Any X,AA ORDERBY AA WHERE %s owned_by X, X modification_date AA, X is CWUser' % ueid,
- [{'AA': 'Datetime', 'X': 'CWUser'}])],
- None, None, [self.system],
- {'AA': 'table0.C1', 'X': 'table0.C0', 'X.modification_date': 'table0.C1'}, []),
- ],
- {'x': ueid})
-
- def test_complex_invariant(self):
- """
- 1. retrieve Any X,L,AA WHERE X login L, X modification_date AA from system and ldap sources, store
- concatenation of results into a temporary table
- 2. return the result of Any X,L,AA WHERE %s owned_by X, X login L, X modification_date AA
- on the system source
- """
- ueid = self.session.user.eid
- self._test('Any X,L,AA WHERE E eid %(x)s, E owned_by X, X login L, X modification_date AA',
- [('FetchStep', [('Any X,L,AA WHERE X login L, X modification_date AA, X is CWUser',
- [{'AA': 'Datetime', 'X': 'CWUser', 'L': 'String'}])],
- [self.ldap, self.system], None,
- {'AA': 'table0.C2', 'X': 'table0.C0', 'X.login': 'table0.C1', 'X.modification_date': 'table0.C2', 'L': 'table0.C1'}, []),
- ('OneFetchStep', [('Any X,L,AA WHERE %s owned_by X, X login L, X modification_date AA, X is CWUser'%ueid,
- [{'AA': 'Datetime', 'X': 'CWUser', 'L': 'String'}])],
- None, None, [self.system],
- {'AA': 'table0.C2', 'X': 'table0.C0', 'X.login': 'table0.C1', 'X.modification_date': 'table0.C2', 'L': 'table0.C1'}, [])],
- {'x': ueid})
-
- def test_complex_ambigous(self):
- """retrieve CWUser X from system and ldap sources, Person X from system source only
- """
- self._test('Any X,F WHERE X firstname F',
- [('UnionStep', None, None, [
- ('OneFetchStep', [('Any X,F WHERE X firstname F, X is CWUser',
- [{'X': 'CWUser', 'F': 'String'}])],
- None, None, [self.ldap, self.system], {}, []),
- ('OneFetchStep', [('Any X,F WHERE X firstname F, X is Personne',
- [{'X': 'Personne', 'F': 'String'}])],
- None, None, [self.system], {}, []),
- ]),
- ])
-
- def test_complex_ambigous_limit_offset(self):
- """retrieve CWUser X from system and ldap sources, Person X from system source only
- """
- self._test('Any X,F LIMIT 10 OFFSET 10 WHERE X firstname F',
- [('UnionStep', 10, 10, [
- ('OneFetchStep', [('Any X,F WHERE X firstname F, X is CWUser',
- [{'X': 'CWUser', 'F': 'String'}])],
- None, None,
- [self.ldap, self.system], {}, []),
- ('OneFetchStep', [('Any X,F WHERE X firstname F, X is Personne',
- [{'X': 'Personne', 'F': 'String'}])],
- None, None, [self.system], {}, []),
- ]),
- ])
-
- def test_complex_ambigous_ordered(self):
- """
- 1. retrieve CWUser X from system and ldap sources, Person X from system source only, store
- each result in the same temp table
- 2. return content of the table sorted
- """
- self._test('Any X,F ORDERBY F WHERE X firstname F',
- [('AggrStep', 'SELECT table0.C0, table0.C1 FROM table0\nORDER BY table0.C1', None,
- [('FetchStep', [('Any X,F WHERE X firstname F, X is CWUser',
- [{'X': 'CWUser', 'F': 'String'}])],
- [self.ldap, self.system], {},
- {'X': 'table0.C0', 'X.firstname': 'table0.C1', 'F': 'table0.C1'}, []),
- ('FetchStep', [('Any X,F WHERE X firstname F, X is Personne',
- [{'X': 'Personne', 'F': 'String'}])],
- [self.system], {},
- {'X': 'table0.C0', 'X.firstname': 'table0.C1', 'F': 'table0.C1'}, []),
- ]),
- ])
-
- def test_complex_multiple(self):
- """
- 1. retrieve Any X,A,Y,B WHERE X login A, Y login B from system and ldap sources, store
- cartesian product of results into a temporary table
- 2. return the result of Any X,Y WHERE X login 'syt', Y login 'adim'
- on the system source
- """
- ueid = self.session.user.eid
- self._test('Any X,Y WHERE X login "syt", Y login "adim"',
- [('FetchStep',
- [('Any X WHERE X login "syt", X is CWUser', [{'X': 'CWUser'}])],
- [self.ldap, self.system], None,
- {'X': 'table0.C0'}, []),
- ('FetchStep',
- [('Any Y WHERE Y login "adim", Y is CWUser', [{'Y': 'CWUser'}])],
- [self.ldap, self.system], None,
- {'Y': 'table1.C0'}, []),
- ('OneFetchStep',
- [('Any X,Y WHERE X is CWUser, Y is CWUser', [{'X': 'CWUser', 'Y': 'CWUser'}])],
- None, None, [self.system],
- {'X': 'table0.C0', 'Y': 'table1.C0'}, [])
- ], {'x': ueid})
-
- def test_complex_multiple_limit_offset(self):
- """
- 1. retrieve Any X,A,Y,B WHERE X login A, Y login B from system and ldap sources, store
- cartesian product of results into a temporary table
- 2. return the result of Any X,Y WHERE X login 'syt', Y login 'adim'
- on the system source
- """
- self._test('Any X,Y LIMIT 10 OFFSET 10 WHERE X login "syt", Y login "adim"',
- [('FetchStep',
- [('Any X WHERE X login "syt", X is CWUser', [{'X': 'CWUser'}])],
- [self.ldap, self.system], None, {'X': 'table0.C0'}, []),
- ('FetchStep',
- [('Any Y WHERE Y login "adim", Y is CWUser', [{'Y': 'CWUser'}])],
- [self.ldap, self.system], None, {'Y': 'table1.C0'}, []),
- ('OneFetchStep',
- [('Any X,Y LIMIT 10 OFFSET 10 WHERE X is CWUser, Y is CWUser', [{'X': 'CWUser', 'Y': 'CWUser'}])],
- 10, 10, [self.system],
- {'X': 'table0.C0', 'Y': 'table1.C0'}, [])
- ])
-
- def test_complex_aggregat(self):
- self._test('Any MAX(X)',
- [('OneFetchStep',
- [('Any MAX(X)', X_ALL_SOLS)],
- None, None, [self.system], {}, [])
- ])
-
- def test_complex_typed_aggregat(self):
- self._test('Any MAX(X) WHERE X is Card',
- [('AggrStep', 'SELECT MAX(table0.C0) FROM table0', None,
- [('FetchStep',
- [('Any MAX(X) WHERE X is Card', [{'X': 'Card'}])],
- [self.cards, self.system], {}, {'MAX(X)': 'table0.C0'}, [])
- ])
- ])
-
- def test_complex_greater_eid(self):
- self._test('Any X WHERE X eid > 12',
- [('OneFetchStep',
- [('Any X WHERE X eid > 12', X_ALL_SOLS)],
- None, None, [self.system], {}, [])
- ])
-
- def test_complex_greater_typed_eid(self):
- self._test('Any X WHERE X eid > 12, X is Card',
- [('OneFetchStep',
- [('Any X WHERE X eid > 12, X is Card', [{'X': 'Card'}])],
- None, None, [self.system], {}, [])
- ])
-
- def test_complex_optional(self):
- ueid = self.session.user.eid
- self._test('Any U WHERE WF wf_info_for X, X eid %(x)s, WF owned_by U?, WF from_state FS',
- [('OneFetchStep', [('Any U WHERE WF wf_info_for %s, WF owned_by U?, WF from_state FS' % ueid,
- [{'WF': 'TrInfo', 'FS': 'State', 'U': 'CWUser'}])],
- None, None, [self.system], {}, [])],
- {'x': ueid})
-
-
- def test_3sources_ambigous(self):
- self._test('Any X,T WHERE X owned_by U, U login "syt", X title T, X is IN(Bookmark, Card, EmailThread)',
- [('FetchStep', [('Any X,T WHERE X title T, X is Card', [{'X': 'Card', 'T': 'String'}])],
- [self.cards, self.system], None,
- {'T': 'table0.C1', 'X': 'table0.C0', 'X.title': 'table0.C1'}, []),
- ('FetchStep', [('Any U WHERE U login "syt", U is CWUser', [{'U': 'CWUser'}])],
- [self.ldap, self.system], None,
- {'U': 'table1.C0'}, []),
- ('UnionStep', None, None, [
- ('OneFetchStep', [('Any X,T WHERE X owned_by U, X title T, U is CWUser, X is IN(Bookmark, EmailThread)',
- [{'T': 'String', 'U': 'CWUser', 'X': 'Bookmark'},
- {'T': 'String', 'U': 'CWUser', 'X': 'EmailThread'}])],
- None, None, [self.system], {'U': 'table1.C0'}, []),
- ('OneFetchStep', [('Any X,T WHERE X owned_by U, X title T, U is CWUser, X is Card',
- [{'X': 'Card', 'U': 'CWUser', 'T': 'String'}])],
- None, None, [self.system],
- {'X': 'table0.C0', 'X.title': 'table0.C1', 'T': 'table0.C1', 'U': 'table1.C0'}, []),
- ]),
- ])
-
- def test_restricted_max(self):
- # dumb query to emulate the one generated by svnfile.entities.rql_revision_content
- self._test('Any V, MAX(VR) WHERE V is Card, V creation_date VR, '
- '(V creation_date TODAY OR (V creation_date < TODAY AND NOT EXISTS('
- 'X is Card, X creation_date < TODAY, X creation_date >= VR)))',
- [('FetchStep', [('Any VR WHERE X creation_date < TODAY, X creation_date VR, X is Card',
- [{'X': 'Card', 'VR': 'Datetime'}])],
- [self.cards, self.system], None,
- {'VR': 'table0.C0', 'X.creation_date': 'table0.C0'}, []),
- ('FetchStep', [('Any V,VR WHERE V creation_date VR, V is Card',
- [{'VR': 'Datetime', 'V': 'Card'}])],
- [self.cards, self.system], None,
- {'VR': 'table1.C1', 'V': 'table1.C0', 'V.creation_date': 'table1.C1'}, []),
- ('OneFetchStep', [('Any V,MAX(VR) WHERE V creation_date VR, (V creation_date TODAY) OR (V creation_date < TODAY, NOT EXISTS(X creation_date >= VR, X is Card)), V is Card',
- [{'X': 'Card', 'VR': 'Datetime', 'V': 'Card'}])],
- None, None, [self.system],
- {'VR': 'table1.C1', 'V': 'table1.C0', 'V.creation_date': 'table1.C1', 'X.creation_date': 'table0.C0'}, [])
- ])
-
- def test_outer_supported_rel1(self):
- # both system and rql support all variables, can be
- self._test('Any X, R WHERE X is Note, X in_state S, X type R, '
- 'NOT EXISTS(Y is Note, Y in_state S, Y type R, X identity Y)',
- [('OneFetchStep', [('Any X,R WHERE X is Note, X in_state S, X type R, NOT EXISTS(Y is Note, Y in_state S, Y type R, X identity Y), S is State',
- [{'Y': 'Note', 'X': 'Note', 'S': 'State', 'R': 'String'}])],
- None, None,
- [self.cards, self.system], {}, [])
- ])
-
- def test_not_identity(self):
- ueid = self.session.user.eid
- self._test('Any X WHERE NOT X identity U, U eid %s, X is CWUser' % ueid,
- [('OneFetchStep',
- [('Any X WHERE NOT X identity %s, X is CWUser' % ueid, [{'X': 'CWUser'}])],
- None, None,
- [self.ldap, self.system], {}, [])
- ])
-
- def test_outer_supported_rel2(self):
- self._test('Any X, MAX(R) GROUPBY X WHERE X in_state S, X login R, '
- 'NOT EXISTS(Y is Note, Y in_state S, Y type R)',
- [('FetchStep', [('Any A,R WHERE Y in_state A, Y type R, A is State, Y is Note',
- [{'Y': 'Note', 'A': 'State', 'R': 'String'}])],
- [self.cards, self.system], None,
- {'A': 'table0.C0', 'R': 'table0.C1', 'Y.type': 'table0.C1'}, []),
- ('FetchStep', [('Any X,R WHERE X login R, X is CWUser', [{'X': 'CWUser', 'R': 'String'}])],
- [self.ldap, self.system], None,
- {'X': 'table1.C0', 'X.login': 'table1.C1', 'R': 'table1.C1'}, []),
- ('OneFetchStep', [('Any X,MAX(R) GROUPBY X WHERE X in_state S, X login R, NOT EXISTS(Y type R, S identity A, A is State, Y is Note), S is State, X is CWUser',
- [{'Y': 'Note', 'X': 'CWUser', 'S': 'State', 'R': 'String', 'A': 'State'}])],
- None, None, [self.system],
- {'A': 'table0.C0', 'X': 'table1.C0', 'X.login': 'table1.C1', 'R': 'table1.C1', 'Y.type': 'table0.C1'}, [])
- ])
-
- def test_security_has_text(self):
- # use a guest user
- self.session = self.user_groups_session('guests')
- ueid = self.session.user.eid
- self._test('Any X WHERE X has_text "bla"',
- [('FetchStep', [('Any E WHERE E type "X", E is Note', [{'E': 'Note'}])],
- [self.cards, self.system], None, {'E': 'table0.C0'}, []),
- ('UnionStep', None, None,
- [('OneFetchStep',
- [(u'Any X WHERE X has_text "bla", (EXISTS(X owned_by %(ueid)s)) OR ((((EXISTS(D concerne C?, C owned_by %(ueid)s, C type "X", X identity D, C is Division, D is Affaire)) OR (EXISTS(H concerne G?, G owned_by %(ueid)s, G type "X", X identity H, G is SubDivision, H is Affaire))) OR (EXISTS(I concerne F?, F owned_by %(ueid)s, F type "X", X identity I, F is Societe, I is Affaire))) OR (EXISTS(J concerne E?, E owned_by %(ueid)s, X identity J, E is Note, J is Affaire))), X is Affaire' % {'ueid': ueid},
- [{'C': 'Division', 'E': 'Note', 'D': 'Affaire', 'G': 'SubDivision', 'F': 'Societe', 'I': 'Affaire', 'H': 'Affaire', 'J': 'Affaire', 'X': 'Affaire'}])],
- None, None, [self.system], {'E': 'table0.C0'}, []),
- ('OneFetchStep',
- [('Any X WHERE X has_text "bla", EXISTS(X owned_by %s), X is IN(Basket, CWUser)' % ueid,
- [{'X': 'Basket'}, {'X': 'CWUser'}]),
- ('Any X WHERE X has_text "bla", X is IN(Card, Comment, Division, Email, EmailThread, File, Folder, Note, Personne, Societe, SubDivision, Tag)',
- [{'X': 'Card'}, {'X': 'Comment'},
- {'X': 'Division'}, {'X': 'Email'}, {'X': 'EmailThread'},
- {'X': 'File'}, {'X': 'Folder'},
- {'X': 'Note'}, {'X': 'Personne'}, {'X': 'Societe'},
- {'X': 'SubDivision'}, {'X': 'Tag'}]),],
- None, None, [self.system], {}, []),
- ])
- ])
-
- def test_security_has_text_limit_offset(self):
- # use a guest user
- self.session = self.user_groups_session('guests')
- ueid = self.session.user.eid
- # note: same as the above query but because of the subquery usage, the
- # display differs (not printing solutions for each union)
- self._test('Any X LIMIT 10 OFFSET 10 WHERE X has_text "bla"',
- [('FetchStep', [('Any E WHERE E type "X", E is Note', [{'E': 'Note'}])],
- [self.cards, self.system], None, {'E': 'table1.C0'}, []),
- ('UnionFetchStep', [
- ('FetchStep', [('Any X WHERE X has_text "bla", (EXISTS(X owned_by %(ueid)s)) OR ((((EXISTS(D concerne C?, C owned_by %(ueid)s, C type "X", X identity D, C is Division, D is Affaire)) OR (EXISTS(H concerne G?, G owned_by %(ueid)s, G type "X", X identity H, G is SubDivision, H is Affaire))) OR (EXISTS(I concerne F?, F owned_by %(ueid)s, F type "X", X identity I, F is Societe, I is Affaire))) OR (EXISTS(J concerne E?, E owned_by %(ueid)s, X identity J, E is Note, J is Affaire))), X is Affaire' % {'ueid': ueid},
- [{'C': 'Division', 'E': 'Note', 'D': 'Affaire', 'G': 'SubDivision', 'F': 'Societe', 'I': 'Affaire', 'H': 'Affaire', 'J': 'Affaire', 'X': 'Affaire'}])],
- [self.system], {'E': 'table1.C0'}, {'X': 'table0.C0'}, []),
- ('FetchStep',
- [('Any X WHERE X has_text "bla", EXISTS(X owned_by %s), X is IN(Basket, CWUser)' % ueid,
- [{'X': 'Basket'}, {'X': 'CWUser'}]),
- ('Any X WHERE X has_text "bla", X is IN(Card, Comment, Division, Email, EmailThread, File, Folder, Note, Personne, Societe, SubDivision, Tag)',
- [{'X': 'Card'}, {'X': 'Comment'},
- {'X': 'Division'}, {'X': 'Email'}, {'X': 'EmailThread'},
- {'X': 'File'}, {'X': 'Folder'},
- {'X': 'Note'}, {'X': 'Personne'}, {'X': 'Societe'},
- {'X': 'SubDivision'}, {'X': 'Tag'}])],
- [self.system], {}, {'X': 'table0.C0'}, []),
- ]),
- ('OneFetchStep',
- [('Any X LIMIT 10 OFFSET 10',
- [{'X': 'Affaire'}, {'X': 'Basket'},
- {'X': 'CWUser'}, {'X': 'Card'}, {'X': 'Comment'},
- {'X': 'Division'}, {'X': 'Email'}, {'X': 'EmailThread'},
- {'X': 'File'}, {'X': 'Folder'},
- {'X': 'Note'}, {'X': 'Personne'}, {'X': 'Societe'},
- {'X': 'SubDivision'}, {'X': 'Tag'}])],
- 10, 10, [self.system], {'X': 'table0.C0'}, [])
- ])
-
- def test_security_user(self):
- """a guest user trying to see another user: EXISTS(X owned_by U) is automatically inserted"""
- # use a guest user
- self.session = self.user_groups_session('guests')
- ueid = self.session.user.eid
- self._test('Any X WHERE X login "bla"',
- [('FetchStep',
- [('Any X WHERE X login "bla", X is CWUser', [{'X': 'CWUser'}])],
- [self.ldap, self.system], None, {'X': 'table0.C0'}, []),
- ('OneFetchStep',
- [('Any X WHERE EXISTS(X owned_by %s), X is CWUser' % ueid, [{'X': 'CWUser'}])],
- None, None, [self.system], {'X': 'table0.C0'}, [])])
-
- def test_security_complex_has_text(self):
- # use a guest user
- self.session = self.user_groups_session('guests')
- ueid = self.session.user.eid
- self._test('Any X WHERE X has_text "bla", X firstname "bla"',
- [('FetchStep', [('Any X WHERE X firstname "bla", X is CWUser', [{'X': 'CWUser'}])],
- [self.ldap, self.system], None, {'X': 'table0.C0'}, []),
- ('UnionStep', None, None, [
- ('OneFetchStep', [('Any X WHERE X has_text "bla", EXISTS(X owned_by %s), X is CWUser' % ueid, [{'X': 'CWUser'}])],
- None, None, [self.system], {'X': 'table0.C0'}, []),
- ('OneFetchStep', [('Any X WHERE X has_text "bla", X firstname "bla", X is Personne', [{'X': 'Personne'}])],
- None, None, [self.system], {}, []),
- ]),
- ])
-
- def test_security_complex_has_text_limit_offset(self):
- # use a guest user
- self.session = self.user_groups_session('guests')
- ueid = self.session.user.eid
- self._test('Any X LIMIT 10 OFFSET 10 WHERE X has_text "bla", X firstname "bla"',
- [('FetchStep', [('Any X WHERE X firstname "bla", X is CWUser', [{'X': 'CWUser'}])],
- [self.ldap, self.system], None, {'X': 'table1.C0'}, []),
- ('UnionFetchStep', [
- ('FetchStep', [('Any X WHERE X has_text "bla", EXISTS(X owned_by %s), X is CWUser' % ueid, [{'X': 'CWUser'}])],
- [self.system], {'X': 'table1.C0'}, {'X': 'table0.C0'}, []),
- ('FetchStep', [('Any X WHERE X has_text "bla", X firstname "bla", X is Personne', [{'X': 'Personne'}])],
- [self.system], {}, {'X': 'table0.C0'}, []),
- ]),
- ('OneFetchStep',
- [('Any X LIMIT 10 OFFSET 10', [{'X': 'CWUser'}, {'X': 'Personne'}])],
- 10, 10, [self.system], {'X': 'table0.C0'}, [])
- ])
-
- def test_security_complex_aggregat(self):
- # use a guest user
- self.session = self.user_groups_session('guests')
- ueid = self.session.user.eid
- ALL_SOLS = X_ALL_SOLS[:]
- ALL_SOLS.remove({'X': 'CWSourceHostConfig'}) # not authorized
- ALL_SOLS.remove({'X': 'CWSourceSchemaConfig'}) # not authorized
- ALL_SOLS.remove({'X': 'CWDataImport'}) # not authorized
- self._test('Any MAX(X)',
- [('FetchStep', [('Any E WHERE E type "X", E is Note', [{'E': 'Note'}])],
- [self.cards, self.system], None, {'E': 'table1.C0'}, []),
- ('FetchStep', [('Any X WHERE X is IN(CWUser)', [{'X': 'CWUser'}])],
- [self.ldap, self.system], None, {'X': 'table2.C0'}, []),
- ('UnionFetchStep', [
- ('FetchStep', [('Any X WHERE EXISTS(%s use_email X), X is EmailAddress' % ueid,
- [{'X': 'EmailAddress'}])],
- [self.system], {}, {'X': 'table0.C0'}, []),
- ('UnionFetchStep',
- [('FetchStep', [('Any X WHERE X is IN(Card, Note, State)',
- [{'X': 'Card'}, {'X': 'Note'}, {'X': 'State'}])],
- [self.cards, self.system], {}, {'X': 'table0.C0'}, []),
- ('FetchStep',
- [('Any X WHERE X is IN(BaseTransition, Bookmark, CWAttribute, CWCache, CWConstraint, CWConstraintType, CWEType, CWGroup, CWPermission, CWProperty, CWRType, CWRelation, CWSource, CWUniqueTogetherConstraint, Comment, Division, Email, EmailPart, EmailThread, ExternalUri, File, Folder, Old, Personne, RQLExpression, Societe, SubDivision, SubWorkflowExitPoint, Tag, TrInfo, Transition, Workflow, WorkflowTransition)',
- [{'X': 'BaseTransition'}, {'X': 'Bookmark'},
- {'X': 'CWAttribute'}, {'X': 'CWCache'},
- {'X': 'CWConstraint'}, {'X': 'CWConstraintType'},
- {'X': 'CWEType'}, {'X': 'CWGroup'},
- {'X': 'CWPermission'}, {'X': 'CWProperty'},
- {'X': 'CWRType'}, {'X': 'CWRelation'},
- {'X': 'CWSource'},
- {'X': 'CWUniqueTogetherConstraint'},
- {'X': 'Comment'}, {'X': 'Division'},
- {'X': 'Email'},
- {'X': 'EmailPart'}, {'X': 'EmailThread'},
- {'X': 'ExternalUri'}, {'X': 'File'},
- {'X': 'Folder'}, {'X': 'Old'},
- {'X': 'Personne'}, {'X': 'RQLExpression'},
- {'X': 'Societe'}, {'X': 'SubDivision'},
- {'X': 'SubWorkflowExitPoint'}, {'X': 'Tag'},
- {'X': 'TrInfo'}, {'X': 'Transition'},
- {'X': 'Workflow'}, {'X': 'WorkflowTransition'}])],
- [self.system], {}, {'X': 'table0.C0'}, []),
- ]),
- ('FetchStep', [('Any X WHERE (EXISTS(X owned_by %(ueid)s)) OR ((((EXISTS(D concerne C?, C owned_by %(ueid)s, C type "X", X identity D, C is Division, D is Affaire)) OR (EXISTS(H concerne G?, G owned_by %(ueid)s, G type "X", X identity H, G is SubDivision, H is Affaire))) OR (EXISTS(I concerne F?, F owned_by %(ueid)s, F type "X", X identity I, F is Societe, I is Affaire))) OR (EXISTS(J concerne E?, E owned_by %(ueid)s, X identity J, E is Note, J is Affaire))), X is Affaire' % {'ueid': ueid},
- [{'C': 'Division', 'E': 'Note', 'D': 'Affaire', 'G': 'SubDivision', 'F': 'Societe', 'I': 'Affaire', 'H': 'Affaire', 'J': 'Affaire', 'X': 'Affaire'}])],
- [self.system], {'E': 'table1.C0'}, {'X': 'table0.C0'}, []),
- ('UnionFetchStep', [
- ('FetchStep', [('Any X WHERE EXISTS(X owned_by %s), X is Basket' % ueid,
- [{'X': 'Basket'}])],
- [self.system], {}, {'X': 'table0.C0'}, []),
- ('FetchStep', [('Any X WHERE EXISTS(X owned_by %s), X is CWUser' % ueid,
- [{'X': 'CWUser'}])],
- [self.system], {'X': 'table2.C0'}, {'X': 'table0.C0'}, []),
- ]),
- ]),
- ('OneFetchStep', [('Any MAX(X)', ALL_SOLS)],
- None, None, [self.system], {'X': 'table0.C0'}, [])
- ])
-
- def test_security_complex_aggregat2(self):
- # use a guest user
- self.session = self.user_groups_session('guests')
- ueid = self.session.user.eid
- X_ET_ALL_SOLS = []
- for s in X_ALL_SOLS:
- if s in ({'X': 'CWSourceHostConfig'}, {'X': 'CWSourceSchemaConfig'}, {'X': 'CWDataImport'}):
- continue # not authorized
- ets = {'ET': 'CWEType'}
- ets.update(s)
- X_ET_ALL_SOLS.append(ets)
- self._test('Any ET, COUNT(X) GROUPBY ET ORDERBY ET WHERE X is ET',
- [('FetchStep', [('Any X WHERE X is IN(Card, Note, State)',
- [{'X': 'Card'}, {'X': 'Note'}, {'X': 'State'}])],
- [self.cards, self.system], None, {'X': 'table1.C0'}, []),
- ('FetchStep', [('Any E WHERE E type "X", E is Note', [{'E': 'Note'}])],
- [self.cards, self.system], None, {'E': 'table2.C0'}, []),
- ('FetchStep', [('Any X WHERE X is IN(CWUser)', [{'X': 'CWUser'}])],
- [self.ldap, self.system], None, {'X': 'table3.C0'}, []),
- ('UnionFetchStep',
- [('FetchStep', [('Any ET,X WHERE X is ET, EXISTS(%s use_email X), ET is CWEType, X is EmailAddress' % ueid,
- [{'ET': 'CWEType', 'X': 'EmailAddress'}]),
- ],
- [self.system], {}, {'ET': 'table0.C0', 'X': 'table0.C1'}, []),
- # extra UnionFetchStep could be avoided but has no cost, so don't care
- ('UnionFetchStep',
- [('FetchStep', [('Any ET,X WHERE X is ET, ET is CWEType, X is IN(BaseTransition, Bookmark, CWAttribute, CWCache, CWConstraint, CWConstraintType, CWEType, CWGroup, CWPermission, CWProperty, CWRType, CWRelation, CWSource, CWUniqueTogetherConstraint, Comment, Division, Email, EmailPart, EmailThread, ExternalUri, File, Folder, Old, Personne, RQLExpression, Societe, SubDivision, SubWorkflowExitPoint, Tag, TrInfo, Transition, Workflow, WorkflowTransition)',
- [{'X': 'BaseTransition', 'ET': 'CWEType'},
- {'X': 'Bookmark', 'ET': 'CWEType'}, {'X': 'CWAttribute', 'ET': 'CWEType'},
- {'X': 'CWCache', 'ET': 'CWEType'}, {'X': 'CWConstraint', 'ET': 'CWEType'},
- {'X': 'CWConstraintType', 'ET': 'CWEType'},
- {'X': 'CWEType', 'ET': 'CWEType'},
- {'X': 'CWGroup', 'ET': 'CWEType'}, {'X': 'CWPermission', 'ET': 'CWEType'},
- {'X': 'CWProperty', 'ET': 'CWEType'}, {'X': 'CWRType', 'ET': 'CWEType'},
- {'X': 'CWSource', 'ET': 'CWEType'},
- {'X': 'CWRelation', 'ET': 'CWEType'},
- {'X': 'CWUniqueTogetherConstraint', 'ET': 'CWEType'},
- {'X': 'Comment', 'ET': 'CWEType'},
- {'X': 'Division', 'ET': 'CWEType'}, {'X': 'Email', 'ET': 'CWEType'},
- {'X': 'EmailPart', 'ET': 'CWEType'},
- {'X': 'EmailThread', 'ET': 'CWEType'}, {'X': 'ExternalUri', 'ET': 'CWEType'},
- {'X': 'File', 'ET': 'CWEType'}, {'X': 'Folder', 'ET': 'CWEType'},
- {'X': 'Old', 'ET': 'CWEType'}, {'X': 'Personne', 'ET': 'CWEType'},
- {'X': 'RQLExpression', 'ET': 'CWEType'}, {'X': 'Societe', 'ET': 'CWEType'},
- {'X': 'SubDivision', 'ET': 'CWEType'}, {'X': 'SubWorkflowExitPoint', 'ET': 'CWEType'},
- {'X': 'Tag', 'ET': 'CWEType'}, {'X': 'TrInfo', 'ET': 'CWEType'},
- {'X': 'Transition', 'ET': 'CWEType'}, {'X': 'Workflow', 'ET': 'CWEType'},
- {'X': 'WorkflowTransition', 'ET': 'CWEType'}])],
- [self.system], {}, {'ET': 'table0.C0', 'X': 'table0.C1'}, []),
- ('FetchStep',
- [('Any ET,X WHERE X is ET, ET is CWEType, X is IN(Card, Note, State)',
- [{'ET': 'CWEType', 'X': 'Card'},
- {'ET': 'CWEType', 'X': 'Note'},
- {'ET': 'CWEType', 'X': 'State'}])],
- [self.system], {'X': 'table1.C0'}, {'ET': 'table0.C0', 'X': 'table0.C1'}, []),
- ]),
-
- ('FetchStep', [('Any ET,X WHERE X is ET, (EXISTS(X owned_by %(ueid)s)) OR ((((EXISTS(D concerne C?, C owned_by %(ueid)s, C type "X", X identity D, C is Division, D is Affaire)) OR (EXISTS(H concerne G?, G owned_by %(ueid)s, G type "X", X identity H, G is SubDivision, H is Affaire))) OR (EXISTS(I concerne F?, F owned_by %(ueid)s, F type "X", X identity I, F is Societe, I is Affaire))) OR (EXISTS(J concerne E?, E owned_by %(ueid)s, X identity J, E is Note, J is Affaire))), ET is CWEType, X is Affaire' % {'ueid': ueid},
- [{'C': 'Division', 'E': 'Note', 'D': 'Affaire',
- 'G': 'SubDivision', 'F': 'Societe', 'I': 'Affaire',
- 'H': 'Affaire', 'J': 'Affaire', 'X': 'Affaire',
- 'ET': 'CWEType'}])],
- [self.system], {'E': 'table2.C0'}, {'ET': 'table0.C0', 'X': 'table0.C1'},
- []),
- ('UnionFetchStep', [
- ('FetchStep', [('Any ET,X WHERE X is ET, EXISTS(X owned_by %s), ET is CWEType, X is Basket' % ueid,
- [{'ET': 'CWEType', 'X': 'Basket'}])],
- [self.system], {}, {'ET': 'table0.C0', 'X': 'table0.C1'}, []),
- ('FetchStep', [('Any ET,X WHERE X is ET, EXISTS(X owned_by %s), ET is CWEType, X is CWUser' % ueid,
- [{'ET': 'CWEType', 'X': 'CWUser'}])],
- [self.system], {'X': 'table3.C0'}, {'ET': 'table0.C0', 'X': 'table0.C1'}, []),
- ]),
- ]),
- ('OneFetchStep',
- [('Any ET,COUNT(X) GROUPBY ET ORDERBY ET', X_ET_ALL_SOLS)],
- None, None, [self.system], {'ET': 'table0.C0', 'X': 'table0.C1'}, [])
- ])
-
- def test_security_3sources(self):
- # use a guest user
- self.session = self.user_groups_session('guests')
- ueid = self.session.user.eid
- self._test('Any X, XT WHERE X is Card, X owned_by U, X title XT, U login "syt"',
- [('FetchStep',
- [('Any X,XT WHERE X title XT, X is Card', [{'X': 'Card', 'XT': 'String'}])],
- [self.cards, self.system], None, {'X': 'table0.C0', 'X.title': 'table0.C1', 'XT': 'table0.C1'}, []),
- ('FetchStep',
- [('Any U WHERE U login "syt", U is CWUser', [{'U': 'CWUser'}])],
- [self.ldap, self.system], None, {'U': 'table1.C0'}, []),
- ('OneFetchStep',
- [('Any X,XT WHERE X owned_by U, X title XT, EXISTS(U owned_by %s), U is CWUser, X is Card' % ueid,
- [{'X': 'Card', 'U': 'CWUser', 'XT': 'String'}])],
- None, None, [self.system],
- {'X': 'table0.C0', 'X.title': 'table0.C1', 'XT': 'table0.C1', 'U': 'table1.C0'}, [])
- ])
-
- def test_security_3sources_identity(self):
- self.restore_orig_cwuser_security()
- # use a guest user
- self.session = self.user_groups_session('guests')
- ueid = self.session.user.eid
- self._test('Any X, XT WHERE X is Card, X owned_by U, X title XT, U login "syt"',
- [('FetchStep',
- [('Any X,XT WHERE X title XT, X is Card', [{'X': 'Card', 'XT': 'String'}])],
- [self.cards, self.system], None, {'X': 'table0.C0', 'X.title': 'table0.C1', 'XT': 'table0.C1'}, []),
- ('OneFetchStep',
- [('Any X,XT WHERE X owned_by U, X title XT, U login "syt", EXISTS(U identity %s), U is CWUser, X is Card' % ueid,
- [{'U': 'CWUser', 'X': 'Card', 'XT': 'String'}])],
- None, None, [self.system], {'X': 'table0.C0', 'X.title': 'table0.C1', 'XT': 'table0.C1'}, [])
- ])
-
- def test_security_3sources_identity_optional_var(self):
- self.restore_orig_cwuser_security()
- # use a guest user
- self.session = self.user_groups_session('guests')
- ueid = self.session.user.eid
- self._test('Any X,XT,U WHERE X is Card, X owned_by U?, X title XT, U login L',
- [('FetchStep',
- [('Any U,L WHERE U login L, EXISTS(U identity %s), U is CWUser' % ueid,
- [{'L': 'String', u'U': 'CWUser'}])],
- [self.system], {}, {'L': 'table0.C1', 'U': 'table0.C0', 'U.login': 'table0.C1'}, []),
- ('FetchStep',
- [('Any X,XT WHERE X title XT, X is Card', [{'X': 'Card', 'XT': 'String'}])],
- [self.cards, self.system], None, {'X': 'table1.C0', 'X.title': 'table1.C1', 'XT': 'table1.C1'}, []),
- ('OneFetchStep',
- [('Any X,XT,U WHERE X owned_by U?, X title XT, X is Card',
- [{'X': 'Card', 'U': 'CWUser', 'XT': 'String'}])],
- None, None, [self.system], {'L': 'table0.C1',
- 'U': 'table0.C0',
- 'X': 'table1.C0',
- 'X.title': 'table1.C1',
- 'XT': 'table1.C1'}, [])
- ])
-
- def test_security_3sources_limit_offset(self):
- # use a guest user
- self.session = self.user_groups_session('guests')
- ueid = self.session.user.eid
- self._test('Any X, XT LIMIT 10 OFFSET 10 WHERE X is Card, X owned_by U, X title XT, U login "syt"',
- [('FetchStep',
- [('Any X,XT WHERE X title XT, X is Card', [{'X': 'Card', 'XT': 'String'}])],
- [self.cards, self.system], None, {'X': 'table0.C0', 'X.title': 'table0.C1', 'XT': 'table0.C1'}, []),
- ('FetchStep',
- [('Any U WHERE U login "syt", U is CWUser', [{'U': 'CWUser'}])],
- [self.ldap, self.system], None, {'U': 'table1.C0'}, []),
- ('OneFetchStep',
- [('Any X,XT LIMIT 10 OFFSET 10 WHERE X owned_by U, X title XT, EXISTS(U owned_by %s), U is CWUser, X is Card' % ueid,
- [{'X': 'Card', 'U': 'CWUser', 'XT': 'String'}])],
- 10, 10, [self.system],
- {'X': 'table0.C0', 'X.title': 'table0.C1', 'XT': 'table0.C1', 'U': 'table1.C0'}, [])
- ])
-
- def test_exists_base(self):
- self._test('Any X,L,S WHERE X in_state S, X login L, EXISTS(X in_group G, G name "bougloup")',
- [('FetchStep', [('Any X,L WHERE X login L, X is CWUser', [{'X': 'CWUser', 'L': 'String'}])],
- [self.ldap, self.system], None, {'X': 'table0.C0', 'X.login': 'table0.C1', 'L': 'table0.C1'}, []),
- ('OneFetchStep', [("Any X,L,S WHERE X in_state S, X login L, "
- 'EXISTS(X in_group G, G name "bougloup", G is CWGroup), S is State, X is CWUser',
- [{'X': 'CWUser', 'L': 'String', 'S': 'State', 'G': 'CWGroup'}])],
- None, None, [self.system],
- {'X': 'table0.C0', 'X.login': 'table0.C1', 'L': 'table0.C1'}, [])])
-
- def test_exists_complex(self):
- self._test('Any G WHERE X in_group G, G name "managers", EXISTS(X copain T, T login in ("comme", "cochon"))',
- [('FetchStep', [('Any T WHERE T login IN("comme", "cochon"), T is CWUser', [{'T': 'CWUser'}])],
- [self.ldap, self.system], None, {'T': 'table0.C0'}, []),
- ('OneFetchStep',
- [('Any G WHERE X in_group G, G name "managers", EXISTS(X copain T, T is CWUser), G is CWGroup, X is CWUser',
- [{'X': 'CWUser', 'T': 'CWUser', 'G': 'CWGroup'}])],
- None, None, [self.system], {'T': 'table0.C0'}, [])])
-
- def test_exists3(self):
- self._test('Any G,L WHERE X in_group G, X login L, G name "managers", EXISTS(X copain T, T login in ("comme", "cochon"))',
- [('FetchStep',
- [('Any T WHERE T login IN("comme", "cochon"), T is CWUser',
- [{'T': 'CWUser'}])],
- [self.ldap, self.system], None, {'T': 'table0.C0'}, []),
- ('FetchStep',
- [('Any L,X WHERE X login L, X is CWUser', [{'X': 'CWUser', 'L': 'String'}])],
- [self.ldap, self.system], None,
- {'X': 'table1.C1', 'X.login': 'table1.C0', 'L': 'table1.C0'}, []),
- ('OneFetchStep',
- [('Any G,L WHERE X in_group G, X login L, G name "managers", EXISTS(X copain T, T is CWUser), G is CWGroup, X is CWUser',
- [{'G': 'CWGroup', 'L': 'String', 'T': 'CWUser', 'X': 'CWUser'}])],
- None, None,
- [self.system], {'T': 'table0.C0', 'X': 'table1.C1', 'X.login': 'table1.C0', 'L': 'table1.C0'}, [])])
-
- def test_exists4(self):
- self._test('Any G,L WHERE X in_group G, X login L, G name "managers", '
- 'EXISTS(X copain T, T login L, T login in ("comme", "cochon")) OR '
- 'EXISTS(X in_state S, S name "pascontent", NOT X copain T2, T2 login "billy")',
- [('FetchStep',
- [('Any T,L WHERE T login L, T login IN("comme", "cochon"), T is CWUser', [{'T': 'CWUser', 'L': 'String'}])],
- [self.ldap, self.system], None,
- {'T': 'table0.C0', 'T.login': 'table0.C1', 'L': 'table0.C1'}, []),
- ('FetchStep',
- [('Any T2 WHERE T2 login "billy", T2 is CWUser', [{'T2': 'CWUser'}])],
- [self.ldap, self.system], None, {'T2': 'table1.C0'}, []),
- ('FetchStep',
- [('Any L,X WHERE X login L, X is CWUser', [{'X': 'CWUser', 'L': 'String'}])],
- [self.ldap, self.system], None, {'X': 'table2.C1', 'X.login': 'table2.C0', 'L': 'table2.C0'}, []),
- ('OneFetchStep',
- [('Any G,L WHERE X in_group G, X login L, G name "managers", (EXISTS(X copain T, T login L, T is CWUser)) OR (EXISTS(X in_state S, S name "pascontent", NOT EXISTS(X copain T2), S is State)), G is CWGroup, T2 is CWUser, X is CWUser',
- [{'G': 'CWGroup', 'L': 'String', 'S': 'State', 'T': 'CWUser', 'T2': 'CWUser', 'X': 'CWUser'}])],
- None, None, [self.system],
- {'T2': 'table1.C0', 'L': 'table2.C0',
- 'T': 'table0.C0', 'T.login': 'table0.C1', 'X': 'table2.C1', 'X.login': 'table2.C0'}, [])])
-
- def test_exists5(self):
- self._test('Any GN,L WHERE X in_group G, X login L, G name GN, '
- 'EXISTS(X copain T, T login in ("comme", "cochon")) AND '
- 'NOT EXISTS(X copain T2, T2 login "billy")',
- [('FetchStep', [('Any T WHERE T login IN("comme", "cochon"), T is CWUser',
- [{'T': 'CWUser'}])],
- [self.ldap, self.system], None, {'T': 'table0.C0'}, []),
- ('FetchStep', [('Any T2 WHERE T2 login "billy", T2 is CWUser', [{'T2': 'CWUser'}])],
- [self.ldap, self.system], None, {'T2': 'table1.C0'}, []),
- ('FetchStep', [('Any L,X WHERE X login L, X is CWUser', [{'X': 'CWUser', 'L': 'String'}])],
- [self.ldap, self.system], None,
- {'X': 'table2.C1', 'X.login': 'table2.C0', 'L': 'table2.C0'}, []),
- ('OneFetchStep', [('Any GN,L WHERE X in_group G, X login L, G name GN, EXISTS(X copain T, T is CWUser), NOT EXISTS(X copain T2, T2 is CWUser), G is CWGroup, X is CWUser',
- [{'G': 'CWGroup', 'GN': 'String', 'L': 'String', 'T': 'CWUser', 'T2': 'CWUser', 'X': 'CWUser'}])],
- None, None, [self.system],
- {'T': 'table0.C0', 'T2': 'table1.C0',
- 'X': 'table2.C1', 'X.login': 'table2.C0', 'L': 'table2.C0'}, [])])
-
- def test_exists_security_no_invariant(self):
- ueid = self.session.user.eid
- self._test('Any X,AA,AB,AC,AD ORDERBY AA WHERE X is CWUser, X login AA, X firstname AB, X surname AC, X modification_date AD, A eid %(B)s, \
- EXISTS(((X identity A) OR \
- (EXISTS(X in_group C, C name IN("managers", "staff"), C is CWGroup))) OR \
- (EXISTS(X in_group D, A in_group D, NOT D name "users", D is CWGroup)))',
- [('FetchStep', [('Any X,AA,AB,AC,AD WHERE X login AA, X firstname AB, X surname AC, X modification_date AD, X is CWUser',
- [{'AA': 'String', 'AB': 'String', 'AC': 'String', 'AD': 'Datetime',
- 'X': 'CWUser'}])],
- [self.ldap, self.system], None, {'AA': 'table0.C1', 'AB': 'table0.C2',
- 'AC': 'table0.C3', 'AD': 'table0.C4',
- 'X': 'table0.C0',
- 'X.firstname': 'table0.C2',
- 'X.login': 'table0.C1',
- 'X.modification_date': 'table0.C4',
- 'X.surname': 'table0.C3'}, []),
- ('OneFetchStep', [('Any X,AA,AB,AC,AD ORDERBY AA WHERE X login AA, X firstname AB, X surname AC, X modification_date AD, EXISTS(((X identity %(ueid)s) OR (EXISTS(X in_group C, C name IN("managers", "staff"), C is CWGroup))) OR (EXISTS(X in_group D, %(ueid)s in_group D, NOT D name "users", D is CWGroup))), X is CWUser' % {'ueid': ueid},
- [{'AA': 'String', 'AB': 'String', 'AC': 'String', 'AD': 'Datetime',
- 'C': 'CWGroup', 'D': 'CWGroup', 'X': 'CWUser'}])],
- None, None, [self.system],
- {'AA': 'table0.C1', 'AB': 'table0.C2', 'AC': 'table0.C3', 'AD': 'table0.C4',
- 'X': 'table0.C0',
- 'X.firstname': 'table0.C2', 'X.login': 'table0.C1', 'X.modification_date': 'table0.C4', 'X.surname': 'table0.C3'},
- [])],
- {'B': ueid})
-
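-    # "need split" tests: as the plans below suggest, when a relation's
-    # possible subject types are not all handled by the same sources (Note
-    # may come from the cards source, unlike Affaire and CWUser), the planner
-    # splits the query into a UnionStep of per-source OneFetchSteps.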
- def test_relation_need_split(self):
- self._test('Any X, S WHERE X in_state S',
- [('UnionStep', None, None, [
- ('OneFetchStep', [('Any X,S WHERE X in_state S, S is State, X is IN(Affaire, CWUser)',
- [{'X': 'Affaire', 'S': 'State'}, {'X': 'CWUser', 'S': 'State'}])],
- None, None, [self.system], {}, []),
- ('OneFetchStep', [('Any X,S WHERE X in_state S, S is State, X is Note',
- [{'X': 'Note', 'S': 'State'}])],
- None, None, [self.cards, self.system], {}, []),
- ])])
-
- def test_relation_selection_need_split(self):
- self._test('Any X,S,U WHERE X in_state S, X todo_by U',
- [('FetchStep', [('Any X,S WHERE X in_state S, S is State, X is Note',
- [{'X': 'Note', 'S': 'State'}])],
- [self.cards, self.system], None, {'X': 'table0.C0', 'S': 'table0.C1'}, []),
- ('UnionStep', None, None,
- [('OneFetchStep', [('Any X,S,U WHERE X in_state S, X todo_by U, S is State, U is Personne, X is Affaire',
- [{'X': 'Affaire', 'S': 'State', 'U': 'Personne'}])],
- None, None, [self.system], {}, []),
- ('OneFetchStep', [('Any X,S,U WHERE X todo_by U, S is State, U is CWUser, X is Note',
- [{'X': 'Note', 'S': 'State', 'U': 'CWUser'}])],
- None, None, [self.system], {'X': 'table0.C0', 'S': 'table0.C1'}, []),
- ])
- ])
-
- def test_relation_restriction_need_split(self):
- self._test('Any X,U WHERE X in_state S, S name "pending", X todo_by U',
- [('FetchStep', [('Any X WHERE X in_state S, S name "pending", S is State, X is Note',
- [{'X': 'Note', 'S': 'State'}])],
- [self.cards, self.system], None, {'X': 'table0.C0'}, []),
- ('UnionStep', None, None,
- [('OneFetchStep', [('Any X,U WHERE X todo_by U, U is CWUser, X is Note',
- [{'X': 'Note', 'U': 'CWUser'}])],
- None, None, [self.system], {'X': 'table0.C0'}, []),
- ('OneFetchStep', [('Any X,U WHERE X in_state S, S name "pending", X todo_by U, S is State, U is Personne, X is Affaire',
- [{'S': 'State', 'U': 'Personne', 'X': 'Affaire'}])],
- None, None, [self.system], {}, [])
- ])
- ])
-
- def test_relation_restriction_ambigous_need_split(self):
- self._test('Any X,T WHERE X in_state S, S name "pending", T tags X',
- [('FetchStep', [('Any X WHERE X in_state S, S name "pending", S is State, X is Note',
- [{'X': 'Note', 'S': 'State'}])],
- [self.cards, self.system], None, {'X': 'table0.C0'}, []),
- ('UnionStep', None, None, [
- ('OneFetchStep', [('Any X,T WHERE T tags X, T is Tag, X is Note',
- [{'X': 'Note', 'T': 'Tag'}])],
- None, None,
- [self.system], {'X': 'table0.C0'}, []),
- ('OneFetchStep', [('Any X,T WHERE X in_state S, S name "pending", T tags X, S is State, T is Tag, X is IN(Affaire, CWUser)',
- [{'X': 'Affaire', 'S': 'State', 'T': 'Tag'},
- {'X': 'CWUser', 'S': 'State', 'T': 'Tag'}])],
- None, None,
- [self.system], {}, []),
- ])
- ])
-
- def test_not_relation_no_split_internal(self):
- ueid = self.session.user.eid
-        # NOT on a relation supported by the rql and system sources: we want
-        # to get all states (e.g. from both sources) which are not related to
-        # the entity with the given eid. The "NOT X in_state S, X eid %(x)s"
-        # expression is necessarily true in the source %(x)s is not coming
-        # from, so it will be removed during rql generation for the external
-        # source.
- self._test('Any SN WHERE NOT X in_state S, X eid %(x)s, S name SN',
- [('OneFetchStep', [('Any SN WHERE NOT EXISTS(%s in_state S), S name SN, S is State' % ueid,
- [{'S': 'State', 'SN': 'String'}])],
- None, None, [self.cards, self.system], {}, [])],
- {'x': ueid})
-
- def test_not_relation_no_split_external(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
-        # similar to the above test but with an eid coming from the external
-        # source. The same plan may be used, since we won't find any record in
-        # the system source linking 999999 to a state.
- self._test('Any SN WHERE NOT X in_state S, X eid %(x)s, S name SN',
- [('OneFetchStep', [('Any SN WHERE NOT EXISTS(999999 in_state S), S name SN, S is State',
- [{'S': 'State', 'SN': 'String'}])],
- None, None, [self.cards, self.system], {}, [])],
- {'x': 999999})
-
- def test_not_relation_need_split(self):
- self._test('Any SN WHERE NOT X in_state S, S name SN',
- [('FetchStep', [('Any SN,S WHERE S name SN, S is State',
- [{'S': 'State', 'SN': 'String'}])],
- [self.cards, self.system], None, {'S': 'table0.C1', 'S.name': 'table0.C0', 'SN': 'table0.C0'},
- []),
- ('IntersectStep', None, None,
- [('OneFetchStep',
- [('Any SN WHERE NOT EXISTS(X in_state S, X is Note), S name SN, S is State',
- [{'S': 'State', 'SN': 'String', 'X': 'Note'}])],
- None, None, [self.cards, self.system], {},
- []),
- ('OneFetchStep',
- [('Any SN WHERE NOT EXISTS(X in_state S, X is IN(Affaire, CWUser)), S name SN, S is State',
- [{'S': 'State', 'SN': 'String', 'X': 'Affaire'},
- {'S': 'State', 'SN': 'String', 'X': 'CWUser'}])],
- None, None, [self.system], {'S': 'table0.C1', 'S.name': 'table0.C0', 'SN': 'table0.C0'},
- []),]
- )])
-
- def test_external_attributes_and_relation(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self._test('Any A,B,C,D WHERE A eid %(x)s,A creation_date B,A modification_date C, A todo_by D?',
- [('FetchStep', [('Any A,B,C WHERE A eid 999999, A creation_date B, A modification_date C, A is Note',
- [{'A': 'Note', 'C': 'Datetime', 'B': 'Datetime'}])],
- [self.cards], None,
- {'A': 'table0.C0', 'A.creation_date': 'table0.C1', 'A.modification_date': 'table0.C2', 'C': 'table0.C2', 'B': 'table0.C1'}, []),
- #('FetchStep', [('Any D WHERE D is CWUser', [{'D': 'CWUser'}])],
- # [self.ldap, self.system], None, {'D': 'table1.C0'}, []),
- ('OneFetchStep', [('Any A,B,C,D WHERE A creation_date B, A modification_date C, A todo_by D?, A is Note, D is CWUser',
- [{'A': 'Note', 'C': 'Datetime', 'B': 'Datetime', 'D': 'CWUser'}])],
- None, None, [self.system],
- {'A': 'table0.C0', 'A.creation_date': 'table0.C1', 'A.modification_date': 'table0.C2', 'C': 'table0.C2', 'B': 'table0.C1'}, [])],
- {'x': 999999})
-
-
- def test_simplified_var_1(self):
- ueid = self.session.user.eid
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
-        # need access to the cards source since X's table has to be accessed because of the outer join
- self._test('Any U WHERE U in_group G, (G name IN ("managers", "logilab") OR '
- '(X require_permission P?, P name "bla", P require_group G)), X eid %(x)s, U eid %(u)s',
- [('FetchStep',
- [('Any 999999', [{}])], [self.cards],
- None, {u'%(x)s': 'table0.C0'}, []),
- ('OneFetchStep',
-                 [(u'Any %s WHERE %s in_group G, (G name IN("managers", "logilab")) OR '
-                   '(X require_permission P?, P name "bla", P require_group G), '
-                   'G is CWGroup, P is CWPermission, X is Note' % (ueid, ueid),
- [{'G': 'CWGroup', 'P': 'CWPermission', 'X': 'Note'}])],
- None, None, [self.system], {u'%(x)s': 'table0.C0'}, [])],
- {'x': 999999, 'u': ueid})
-
- def test_simplified_var_2(self):
- ueid = self.session.user.eid
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
-        # no need to access the source since X is invariant
- self._test('Any U WHERE U in_group G, (G name IN ("managers", "logilab") OR '
- '(X require_permission P, P name "bla", P require_group G)), X eid %(x)s, U eid %(u)s',
- [('OneFetchStep', [('Any %s WHERE %s in_group G, (G name IN("managers", "logilab")) OR (999999 require_permission P, P name "bla", P require_group G)' % (ueid, ueid),
- [{'G': 'CWGroup', 'P': 'CWPermission'}])],
- None, None, [self.system], {}, [])],
- {'x': 999999, 'u': ueid})
-
- def test_has_text(self):
- self._test('Card X WHERE X has_text "toto"',
- [('OneFetchStep', [('Any X WHERE X has_text "toto", X is Card',
- [{'X': 'Card'}])],
- None, None, [self.system], {}, [])])
-
- def test_has_text_3(self):
- self._test('Any X WHERE X has_text "toto", X title "zoubidou", X is IN (Card, EmailThread)',
- [('FetchStep', [(u'Any X WHERE X title "zoubidou", X is Card',
- [{'X': 'Card'}])],
- [self.cards, self.system], None, {'X': 'table0.C0'}, []),
- ('UnionStep', None, None, [
- ('OneFetchStep', [(u'Any X WHERE X has_text "toto", X is Card',
- [{'X': 'Card'}])],
- None, None, [self.system], {'X': 'table0.C0'}, []),
- ('OneFetchStep', [(u'Any X WHERE X has_text "toto", X title "zoubidou", X is EmailThread',
- [{'X': 'EmailThread'}])],
- None, None, [self.system], {}, []),
- ]),
- ])
-
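-    # FTIRANK tests: as the plans below show, ordering on full-text rank
-    # across sources goes through an AggrStep, which materializes per-source
-    # results into a temporary table and applies the ORDER BY there (hence
-    # the literal 'SELECT ... ORDER BY' strings).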
- def test_has_text_orderby_rank(self):
- self._test('Any X ORDERBY FTIRANK(X) WHERE X has_text "bla", X firstname "bla"',
- [('FetchStep', [('Any X WHERE X firstname "bla", X is CWUser', [{'X': 'CWUser'}])],
- [self.ldap, self.system], None, {'X': 'table0.C0'}, []),
- ('AggrStep', 'SELECT table1.C1 FROM table1\nORDER BY table1.C0', None, [
- ('FetchStep', [('Any FTIRANK(X),X WHERE X has_text "bla", X is CWUser',
- [{'X': 'CWUser'}])],
- [self.system], {'X': 'table0.C0'}, {'FTIRANK(X)': 'table1.C0', 'X': 'table1.C1'}, []),
- ('FetchStep', [('Any FTIRANK(X),X WHERE X has_text "bla", X firstname "bla", X is Personne',
- [{'X': 'Personne'}])],
- [self.system], {}, {'FTIRANK(X)': 'table1.C0', 'X': 'table1.C1'}, []),
- ]),
- ])
-
- def test_security_has_text_orderby_rank(self):
- # use a guest user
- self.session = self.user_groups_session('guests')
- ueid = self.session.user.eid
- self._test('Any X ORDERBY FTIRANK(X) WHERE X has_text "bla", X firstname "bla"',
- [('FetchStep', [('Any X WHERE X firstname "bla", X is CWUser', [{'X': 'CWUser'}])],
- [self.ldap, self.system], None, {'X': 'table1.C0'}, []),
- ('UnionFetchStep',
- [('FetchStep', [('Any X WHERE X firstname "bla", X is Personne', [{'X': 'Personne'}])],
- [self.system], {}, {'X': 'table0.C0'}, []),
- ('FetchStep', [('Any X WHERE EXISTS(X owned_by %s), X is CWUser' % ueid, [{'X': 'CWUser'}])],
- [self.system], {'X': 'table1.C0'}, {'X': 'table0.C0'}, [])]),
- ('OneFetchStep', [('Any X ORDERBY FTIRANK(X) WHERE X has_text "bla"',
- [{'X': 'CWUser'}, {'X': 'Personne'}])],
- None, None, [self.system], {'X': 'table0.C0'}, []),
- ])
-
- def test_has_text_select_rank(self):
- self._test('Any X, FTIRANK(X) WHERE X has_text "bla", X firstname "bla"',
-                   # XXX unnecessary duplicate selection
- [('FetchStep', [('Any X,X WHERE X firstname "bla", X is CWUser', [{'X': 'CWUser'}])],
- [self.ldap, self.system], None, {'X': 'table0.C1'}, []),
- ('UnionStep', None, None, [
- ('OneFetchStep', [('Any X,FTIRANK(X) WHERE X has_text "bla", X is CWUser', [{'X': 'CWUser'}])],
- None, None, [self.system], {'X': 'table0.C1'}, []),
- ('OneFetchStep', [('Any X,FTIRANK(X) WHERE X has_text "bla", X firstname "bla", X is Personne', [{'X': 'Personne'}])],
- None, None, [self.system], {}, []),
- ]),
- ])
-
- def test_security_has_text_select_rank(self):
- # use a guest user
- self.session = self.user_groups_session('guests')
- ueid = self.session.user.eid
- self._test('Any X, FTIRANK(X) WHERE X has_text "bla", X firstname "bla"',
- [('FetchStep', [('Any X,X WHERE X firstname "bla", X is CWUser', [{'X': 'CWUser'}])],
- [self.ldap, self.system], None, {'X': 'table0.C1'}, []),
- ('UnionStep', None, None, [
- ('OneFetchStep', [('Any X,FTIRANK(X) WHERE X has_text "bla", EXISTS(X owned_by %s), X is CWUser' % ueid, [{'X': 'CWUser'}])],
- None, None, [self.system], {'X': 'table0.C1'}, []),
- ('OneFetchStep', [('Any X,FTIRANK(X) WHERE X has_text "bla", X firstname "bla", X is Personne', [{'X': 'Personne'}])],
- None, None, [self.system], {}, []),
- ]),
- ])
-
- def test_sort_func(self):
- self._test('Note X ORDERBY DUMB_SORT(RF) WHERE X type RF',
- [('AggrStep', 'SELECT table0.C0 FROM table0\nORDER BY DUMB_SORT(table0.C1)', None, [
- ('FetchStep', [('Any X,RF WHERE X type RF, X is Note',
- [{'X': 'Note', 'RF': 'String'}])],
- [self.cards, self.system], {}, {'X': 'table0.C0', 'X.type': 'table0.C1', 'RF': 'table0.C1'}, []),
- ])
- ])
-
- def test_ambigous_sort_func(self):
- self._test('Any X ORDERBY DUMB_SORT(RF) WHERE X title RF, X is IN (Bookmark, Card, EmailThread)',
- [('AggrStep', 'SELECT table0.C0 FROM table0\nORDER BY DUMB_SORT(table0.C1)', None,
- [('FetchStep', [('Any X,RF WHERE X title RF, X is Card',
- [{'X': 'Card', 'RF': 'String'}])],
- [self.cards, self.system], {},
- {'X': 'table0.C0', 'X.title': 'table0.C1', 'RF': 'table0.C1'}, []),
- ('FetchStep', [('Any X,RF WHERE X title RF, X is IN(Bookmark, EmailThread)',
- [{'RF': 'String', 'X': 'Bookmark'},
- {'RF': 'String', 'X': 'EmailThread'}])],
- [self.system], {},
- {'X': 'table0.C0', 'X.title': 'table0.C1', 'RF': 'table0.C1'}, []),
- ]),
- ])
-
- def test_attr_unification_1(self):
- self._test('Any X,Y WHERE X is Bookmark, Y is Card, X title T, Y title T',
- [('FetchStep',
- [('Any Y,T WHERE Y title T, Y is Card', [{'T': 'String', 'Y': 'Card'}])],
- [self.cards, self.system], None,
- {'T': 'table0.C1', 'Y': 'table0.C0', 'Y.title': 'table0.C1'}, []),
- ('OneFetchStep',
- [('Any X,Y WHERE X title T, Y title T, X is Bookmark, Y is Card',
- [{'T': 'String', 'X': 'Bookmark', 'Y': 'Card'}])],
- None, None, [self.system],
- {'T': 'table0.C1', 'Y': 'table0.C0', 'Y.title': 'table0.C1'}, [])
- ])
-
- def test_attr_unification_2(self):
- self._test('Any X,Y WHERE X is Note, Y is Card, X type T, Y title T',
- [('FetchStep',
- [('Any X,T WHERE X type T, X is Note', [{'T': 'String', 'X': 'Note'}])],
- [self.cards, self.system], None,
- {'T': 'table0.C1', 'X': 'table0.C0', 'X.type': 'table0.C1'}, []),
- ('FetchStep',
- [('Any Y,T WHERE Y title T, Y is Card', [{'T': 'String', 'Y': 'Card'}])],
- [self.cards, self.system], None,
- {'T': 'table1.C1', 'Y': 'table1.C0', 'Y.title': 'table1.C1'}, []),
- ('OneFetchStep',
- [('Any X,Y WHERE X type T, Y title T, X is Note, Y is Card',
- [{'T': 'String', 'X': 'Note', 'Y': 'Card'}])],
- None, None, [self.system],
- {'T': 'table1.C1',
- 'X': 'table0.C0', 'X.type': 'table0.C1',
- 'Y': 'table1.C0', 'Y.title': 'table1.C1'}, [])
- ])
-
- def test_attr_unification_neq_1(self):
- self._test('Any X,Y WHERE X is Bookmark, Y is Card, X creation_date D, Y creation_date > D',
- [('FetchStep',
- [('Any Y,D WHERE Y creation_date D, Y is Card',
- [{'D': 'Datetime', 'Y': 'Card'}])],
- [self.cards,self.system], None,
- {'D': 'table0.C1', 'Y': 'table0.C0', 'Y.creation_date': 'table0.C1'}, []),
- ('OneFetchStep',
- [('Any X,Y WHERE X creation_date D, Y creation_date > D, X is Bookmark, Y is Card',
- [{'D': 'Datetime', 'X': 'Bookmark', 'Y': 'Card'}])], None, None,
- [self.system],
- {'D': 'table0.C1', 'Y': 'table0.C0', 'Y.creation_date': 'table0.C1'}, [])
- ])
-
- def test_subquery_1(self):
- ueid = self.session.user.eid
- self._test('DISTINCT Any B,C ORDERBY C WHERE A created_by B, B login C, EXISTS(B owned_by D), D eid %(E)s '
- 'WITH A,N BEING ((Any X,N WHERE X is Tag, X name N) UNION (Any X,T WHERE X is Bookmark, X title T))',
- [('FetchStep', [('Any X,N WHERE X is Tag, X name N', [{'N': 'String', 'X': 'Tag'}]),
- ('Any X,T WHERE X is Bookmark, X title T',
- [{'T': 'String', 'X': 'Bookmark'}])],
- [self.system], {}, {'N': 'table0.C1', 'X': 'table0.C0', 'X.name': 'table0.C1'}, []),
- ('FetchStep',
- [('Any B,C WHERE B login C, B is CWUser', [{'B': 'CWUser', 'C': 'String'}])],
- [self.ldap, self.system], None, {'B': 'table1.C0', 'B.login': 'table1.C1', 'C': 'table1.C1'}, []),
- ('OneFetchStep', [('DISTINCT Any B,C ORDERBY C WHERE A created_by B, B login C, EXISTS(B owned_by %s), B is CWUser, A is IN(Bookmark, Tag)' % ueid,
- [{'A': 'Bookmark', 'B': 'CWUser', 'C': 'String'},
- {'A': 'Tag', 'B': 'CWUser', 'C': 'String'}])],
- None, None, [self.system],
- {'A': 'table0.C0',
- 'B': 'table1.C0', 'B.login': 'table1.C1',
- 'C': 'table1.C1',
- 'N': 'table0.C1'},
- [])],
- {'E': ueid})
-
- def test_subquery_2(self):
- ueid = self.session.user.eid
- self._test('DISTINCT Any B,C ORDERBY C WHERE A created_by B, B login C, EXISTS(B owned_by D), D eid %(E)s '
- 'WITH A,N BEING ((Any X,N WHERE X is Tag, X name N) UNION (Any X,T WHERE X is Card, X title T))',
- [('UnionFetchStep',
- [('FetchStep', [('Any X,N WHERE X is Tag, X name N', [{'N': 'String', 'X': 'Tag'}])],
- [self.system], {},
- {'N': 'table0.C1',
- 'T': 'table0.C1',
- 'X': 'table0.C0',
- 'X.name': 'table0.C1',
- 'X.title': 'table0.C1'}, []),
- ('FetchStep', [('Any X,T WHERE X is Card, X title T',
- [{'T': 'String', 'X': 'Card'}])],
- [self.cards, self.system], {},
- {'N': 'table0.C1',
- 'T': 'table0.C1',
- 'X': 'table0.C0',
- 'X.name': 'table0.C1',
- 'X.title': 'table0.C1'}, []),
- ]),
- ('FetchStep',
- [('Any B,C WHERE B login C, B is CWUser', [{'B': 'CWUser', 'C': 'String'}])],
- [self.ldap, self.system], None, {'B': 'table1.C0', 'B.login': 'table1.C1', 'C': 'table1.C1'}, []),
- ('OneFetchStep', [('DISTINCT Any B,C ORDERBY C WHERE A created_by B, B login C, EXISTS(B owned_by %s), B is CWUser, A is IN(Card, Tag)' % ueid,
- [{'A': 'Card', 'B': 'CWUser', 'C': 'String'},
- {'A': 'Tag', 'B': 'CWUser', 'C': 'String'}])],
- None, None, [self.system],
- {'A': 'table0.C0',
- 'B': 'table1.C0', 'B.login': 'table1.C1',
- 'C': 'table1.C1',
- 'N': 'table0.C1'},
- [])],
- {'E': ueid})
-
- def test_eid_dont_cross_relation_1(self):
- repo._type_source_cache[999999] = ('Personne', 'system', 999999, 'system')
- self._test('Any Y,YT WHERE X eid %(x)s, X fiche Y, Y title YT',
- [('OneFetchStep', [('Any Y,YT WHERE X eid 999999, X fiche Y, Y title YT',
- [{'X': 'Personne', 'Y': 'Card', 'YT': 'String'}])],
- None, None, [self.system], {}, [])],
- {'x': 999999})
-
- def test_eid_dont_cross_relation_2(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self.cards.dont_cross_relations.add('concerne')
- try:
- self._test('Any Y,S,YT,X WHERE Y concerne X, Y in_state S, X eid 999999, Y ref YT',
- [('OneFetchStep', [('Any Y,S,YT,999999 WHERE Y concerne 999999, Y in_state S, Y ref YT',
- [{'Y': 'Affaire', 'YT': 'String', 'S': 'State'}])],
- None, None, [self.system], {}, [])],
- {'x': 999999})
- finally:
- self.cards.dont_cross_relations.remove('concerne')
-
-
- # external source w/ .cross_relations == ['multisource_crossed_rel'] ######
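-    # (As the expected plans suggest, a crossed relation may link entities
-    # held by different sources: each end and the relation itself are fetched
-    # into temporary tables, then joined in the system source through
-    # identity constraints.)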
-
- def test_crossed_relation_eid_1_invariant(self):
- repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
- self._test('Any Y WHERE X eid %(x)s, X multisource_crossed_rel Y',
- [('OneFetchStep', [('Any Y WHERE 999999 multisource_crossed_rel Y', [{u'Y': 'Note'}])],
- None, None, [self.system], {}, [])
- ],
- {'x': 999999,})
-
- def test_crossed_relation_eid_1_needattr(self):
- repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
- self._test('Any Y,T WHERE X eid %(x)s, X multisource_crossed_rel Y, Y type T',
- [('FetchStep', [('Any Y,T WHERE Y type T, Y is Note', [{'T': 'String', 'Y': 'Note'}])],
- [self.cards, self.system], None,
- {'T': 'table0.C1', 'Y': 'table0.C0', 'Y.type': 'table0.C1'}, []),
- ('OneFetchStep', [('Any Y,T WHERE 999999 multisource_crossed_rel Y, Y type T, Y is Note',
- [{'T': 'String', 'Y': 'Note'}])],
- None, None, [self.system],
- {'T': 'table0.C1', 'Y': 'table0.C0', 'Y.type': 'table0.C1'}, []),
- ],
- {'x': 999999,})
-
- def test_crossed_relation_eid_2_invariant(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self._test('Any Y WHERE X eid %(x)s, X multisource_crossed_rel Y',
- [('OneFetchStep', [('Any Y WHERE 999999 multisource_crossed_rel Y, Y is Note', [{'Y': 'Note'}])],
- None, None, [self.cards, self.system], {}, [])
- ],
- {'x': 999999,})
-
- def test_crossed_relation_eid_2_needattr(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self._test('Any Y,T WHERE X eid %(x)s, X multisource_crossed_rel Y, Y type T',
- [('OneFetchStep', [('Any Y,T WHERE 999999 multisource_crossed_rel Y, Y type T, Y is Note',
- [{'T': 'String', 'Y': 'Note'}])],
- None, None, [self.cards, self.system], {},
- []),
- ],
- {'x': 999999,})
-
- def test_crossed_relation_eid_not_1(self):
- repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
- self._test('Any Y WHERE X eid %(x)s, NOT X multisource_crossed_rel Y',
- [('FetchStep', [('Any Y WHERE Y is Note', [{'Y': 'Note'}])],
- [self.cards, self.system], None, {'Y': 'table0.C0'}, []),
- ('OneFetchStep', [('Any Y WHERE NOT EXISTS(999999 multisource_crossed_rel Y), Y is Note',
- [{'Y': 'Note'}])],
- None, None, [self.system],
- {'Y': 'table0.C0'}, [])],
- {'x': 999999,})
-
-# def test_crossed_relation_eid_not_2(self):
-# repo._type_source_cache[999999] = ('Note', 'cards', 999999)
-# self._test('Any Y WHERE X eid %(x)s, NOT X multisource_crossed_rel Y',
-# [],
-# {'x': 999999,})
-
- def test_crossed_relation_base_XXXFIXME(self):
- repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
- self._test('Any X,Y,T WHERE X multisource_crossed_rel Y, Y type T, X type T',
- [('FetchStep', [('Any X,T WHERE X type T, X is Note', [{'T': 'String', 'X': 'Note'}])],
- [self.cards, self.system], None,
- {'T': 'table0.C1', 'X': 'table0.C0', 'X.type': 'table0.C1'}, []),
- ('FetchStep', [('Any Y,T WHERE Y type T, Y is Note', [{'T': 'String', 'Y': 'Note'}])],
- [self.cards, self.system], None,
- {'T': 'table1.C1', 'Y': 'table1.C0', 'Y.type': 'table1.C1'}, []),
- ('FetchStep', [('Any X,Y WHERE X multisource_crossed_rel Y, X is Note, Y is Note',
- [{'X': 'Note', 'Y': 'Note'}])],
- [self.cards, self.system], None,
- {'X': 'table2.C0', 'Y': 'table2.C1'},
- []),
- ('OneFetchStep', [('Any X,Y,T WHERE X multisource_crossed_rel Y, Y type T, X type T, '
- 'X is Note, Y is Note, Y identity A, X identity B, A is Note, B is Note',
- [{u'A': 'Note', u'B': 'Note', 'T': 'String', 'X': 'Note', 'Y': 'Note'}])],
- None, None,
- [self.system],
- {'A': 'table1.C0',
- 'B': 'table0.C0',
- 'T': 'table1.C1',
- 'X': 'table2.C0',
- 'X.type': 'table0.C1',
- 'Y': 'table2.C1',
- 'Y.type': 'table1.C1'},
- []),
- ],
- {'x': 999999,})
-
- def test_crossed_relation_noeid_needattr(self):
- # http://www.cubicweb.org/ticket/1382452
- self._test('DISTINCT Any DEP WHERE DEP is Note, P type "cubicweb-foo", P multisource_crossed_rel DEP, DEP type LIKE "cubicweb%"',
- [('FetchStep', [(u'Any DEP WHERE DEP type LIKE "cubicweb%", DEP is Note',
- [{'DEP': 'Note'}])],
- [self.cards, self.system], None,
- {'DEP': 'table0.C0'},
- []),
- ('FetchStep', [(u'Any P WHERE P type "cubicweb-foo", P is Note', [{'P': 'Note'}])],
- [self.cards, self.system], None, {'P': 'table1.C0'},
- []),
- ('FetchStep', [('Any DEP,P WHERE P multisource_crossed_rel DEP, DEP is Note, P is Note',
- [{'DEP': 'Note', 'P': 'Note'}])],
- [self.cards, self.system], None, {'DEP': 'table2.C0', 'P': 'table2.C1'},
- []),
- ('OneFetchStep',
- [('DISTINCT Any DEP WHERE P multisource_crossed_rel DEP, DEP is Note, '
- 'P is Note, DEP identity A, P identity B, A is Note, B is Note',
- [{u'A': 'Note', u'B': 'Note', 'DEP': 'Note', 'P': 'Note'}])],
- None, None, [self.system],
- {'A': 'table0.C0', 'B': 'table1.C0', 'DEP': 'table2.C0', 'P': 'table2.C1'},
- [])])
-
- def test_crossed_relation_noeid_invariant(self):
- # see comment in http://www.cubicweb.org/ticket/1382452
- self.schema.add_relation_def(
- RelationDefinition(subject='Note', name='multisource_crossed_rel', object='Affaire'))
- self.repo.set_schema(self.schema)
- try:
- self._test('DISTINCT Any P,DEP WHERE P type "cubicweb-foo", P multisource_crossed_rel DEP',
- [('FetchStep',
- [('Any DEP WHERE DEP is Note', [{'DEP': 'Note'}])],
- [self.cards, self.system], None, {'DEP': 'table0.C0'}, []),
- ('FetchStep',
- [(u'Any P WHERE P type "cubicweb-foo", P is Note', [{'P': 'Note'}])],
- [self.cards, self.system], None, {'P': 'table1.C0'}, []),
- ('UnionStep', None, None,
- [('OneFetchStep',
- [('DISTINCT Any P,DEP WHERE P multisource_crossed_rel DEP, DEP is Note, P is Note',
- [{'DEP': 'Note', 'P': 'Note'}])],
- None, None, [self.cards], None, []),
- ('OneFetchStep',
- [('DISTINCT Any P,DEP WHERE P multisource_crossed_rel DEP, DEP is Note, P is Note',
- [{'DEP': 'Note', 'P': 'Note'}])],
- None, None, [self.system],
- {'DEP': 'table0.C0', 'P': 'table1.C0'},
- []),
- ('OneFetchStep',
- [('DISTINCT Any P,DEP WHERE P multisource_crossed_rel DEP, DEP is Affaire, P is Note',
- [{'DEP': 'Affaire', 'P': 'Note'}])],
- None, None, [self.system], {'P': 'table1.C0'},
- [])])
- ])
- finally:
- self.schema.del_relation_def('Note', 'multisource_crossed_rel', 'Affaire')
- self.repo.set_schema(self.schema)
-
- # edition queries tests ###################################################
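-    # (Roughly: write queries are planned as an InsertStep, UpdateStep or
-    # Delete*Step wrapping the fetch steps that compute the rows to write.)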
-
- def test_insert_simplified_var_1(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- repo._type_source_cache[999998] = ('State', 'system', None, 'system')
- self._test('INSERT Note X: X in_state S, X type T WHERE S eid %(s)s, N eid %(n)s, N type T',
- [('InsertStep',
- [('InsertRelationsStep',
- [('OneFetchStep', [('Any T WHERE N eid 999999, N type T, N is Note',
- [{'N': 'Note', 'T': 'String'}])],
- None, None, [self.cards], {}, [])])
- ])
- ],
- {'n': 999999, 's': 999998})
-
- def test_insert_simplified_var_2(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- repo._type_source_cache[999998] = ('State', 'system', None, 'system')
- self._test('INSERT Note X: X in_state S, X type T, X migrated_from N WHERE S eid %(s)s, N eid %(n)s, N type T',
- [('InsertStep',
- [('InsertRelationsStep',
- [('OneFetchStep', [('Any T WHERE N eid 999999, N type T, N is Note',
- [{'N': 'Note', 'T': 'String'}])],
- None, None, [self.cards], {}, [])
- ])
- ])
- ],
- {'n': 999999, 's': 999998})
-
- def test_insert_simplified_var_3(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- repo._type_source_cache[999998] = ('State', 'cards', 999998, 'cards')
- self._test('INSERT Note X: X in_state S, X type T WHERE S eid %(s)s, N eid %(n)s, N type T',
- [('InsertStep',
- [('InsertRelationsStep',
- [('OneFetchStep', [('Any T WHERE N eid 999999, N type T, N is Note',
- [{'N': 'Note', 'T': 'String'}])],
- None, None, [self.cards], {}, [])]
- )]
- )],
- {'n': 999999, 's': 999998})
-
- def test_insert_simplified_var_4(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- repo._type_source_cache[999998] = ('State', 'system', None, 'system')
- self._test('INSERT Note X: X in_state S, X type "bla", X migrated_from N WHERE S eid %(s)s, N eid %(n)s',
- [('InsertStep',
- [('InsertRelationsStep',
- [('OneFetchStep',
- [('Any 999999', [{}])],
- None, None,
- [self.system], {},
- [])])]
- )],
- {'n': 999999, 's': 999998})
-
- def test_insert_simplified_var_5(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- repo._type_source_cache[999998] = ('State', 'system', None, 'system')
- self._test('INSERT Note X: X in_state S, X type "bla", X migrated_from N WHERE S eid %(s)s, N eid %(n)s, A concerne N',
- [('InsertStep',
- [('InsertRelationsStep',
- [('OneFetchStep',
- [('Any A WHERE A concerne 999999, A is Affaire',
- [{'A': 'Affaire'}])],
- None, None, [self.system], {}, []),
- ]),
- ])
- ],
- {'n': 999999, 's': 999998})
-
- def test_delete_relation1(self):
- ueid = self.session.user.eid
- self._test('DELETE X created_by Y WHERE X eid %(x)s, NOT Y eid %(y)s',
- [('DeleteRelationsStep', [
- ('OneFetchStep', [('Any %s,Y WHERE %s created_by Y, NOT Y eid %s, Y is CWUser' % (ueid, ueid, ueid),
- [{'Y': 'CWUser'}])],
- None, None, [self.system], {}, []),
- ]),
- ],
- {'x': ueid, 'y': ueid})
-
- def test_delete_relation2(self):
- ueid = self.session.user.eid
- self._test('DELETE X created_by Y WHERE X eid %(x)s, NOT Y login "syt"',
- [('FetchStep', [('Any Y WHERE NOT Y login "syt", Y is CWUser', [{'Y': 'CWUser'}])],
- [self.ldap, self.system], None, {'Y': 'table0.C0'}, []),
- ('DeleteRelationsStep', [
- ('OneFetchStep', [('Any %s,Y WHERE %s created_by Y, Y is CWUser'%(ueid,ueid), [{'Y': 'CWUser'}])],
- None, None, [self.system], {'Y': 'table0.C0'}, []),
- ]),
- ],
- {'x': ueid, 'y': ueid})
-
- def test_delete_relation3(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self.assertRaises(
- BadRQLQuery, self._test,
- 'DELETE Y multisource_inlined_rel X WHERE X eid %(x)s, '
- 'NOT (Y cw_source S, S name %(source)s)', [],
- {'x': 999999, 'source': 'cards'})
-
- def test_delete_relation4(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self.assertRaises(
- BadRQLQuery, self._test,
- 'DELETE X multisource_inlined_rel Y WHERE Y is Note, X eid %(x)s, '
- 'NOT (Y cw_source S, S name %(source)s)', [],
- {'x': 999999, 'source': 'cards'})
-
- def test_delete_entity1(self):
- repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
- self._test('DELETE Note X WHERE X eid %(x)s, NOT Y multisource_rel X',
- [('DeleteEntitiesStep',
- [('OneFetchStep', [('Any 999999 WHERE NOT EXISTS(Y multisource_rel 999999), Y is IN(Card, Note)',
- [{'Y': 'Card'}, {'Y': 'Note'}])],
- None, None, [self.system], {}, [])
- ])
- ],
- {'x': 999999})
-
- def test_delete_entity2(self):
- repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
- self._test('DELETE Note X WHERE X eid %(x)s, NOT X multisource_inlined_rel Y',
- [('DeleteEntitiesStep',
- [('OneFetchStep', [('Any X WHERE X eid 999999, NOT X multisource_inlined_rel Y, X is Note, Y is IN(Affaire, Note)',
- [{'X': 'Note', 'Y': 'Affaire'}, {'X': 'Note', 'Y': 'Note'}])],
- None, None, [self.system], {}, [])
- ])
- ],
- {'x': 999999})
-
- def test_update(self):
- self._test('SET X copain Y WHERE X login "comme", Y login "cochon"',
- [('FetchStep',
- [('Any X WHERE X login "comme", X is CWUser', [{'X': 'CWUser'}])],
- [self.ldap, self.system], None, {'X': 'table0.C0'}, []),
- ('FetchStep',
- [('Any Y WHERE Y login "cochon", Y is CWUser', [{'Y': 'CWUser'}])],
- [self.ldap, self.system], None, {'Y': 'table1.C0'}, []),
- ('UpdateStep',
- [('OneFetchStep',
- [('DISTINCT Any X,Y WHERE X is CWUser, Y is CWUser',
- [{'X': 'CWUser', 'Y': 'CWUser'}])],
- None, None, [self.system], {'X': 'table0.C0', 'Y': 'table1.C0'}, [])
- ])
- ])
-
- def test_update2(self):
- self._test('SET U in_group G WHERE G name ~= "bougloup%", U login "admin"',
- [('FetchStep', [('Any U WHERE U login "admin", U is CWUser', [{'U': 'CWUser'}])],
- [self.ldap, self.system], None, {'U': 'table0.C0'}, []),
- ('UpdateStep', [
- ('OneFetchStep', [('DISTINCT Any U,G WHERE G name ILIKE "bougloup%", G is CWGroup, U is CWUser',
- [{'U': 'CWUser', 'G': 'CWGroup'}])],
- None, None, [self.system], {'U': 'table0.C0'}, []),
- ]),
- ])
-
- def test_update3(self):
- anoneid = self.user_groups_session('guests').user.eid
-        # since we are adding an in_state relation for an entity in the system
-        # source, states should only be searched in the system source as well
- self._test('SET X in_state S WHERE X eid %(x)s, S name "deactivated"',
- [('UpdateStep', [
- ('OneFetchStep', [('DISTINCT Any S WHERE S name "deactivated", S is State',
- [{'S': 'State'}])],
- None, None, [self.system], {}, []),
- ]),
- ],
- {'x': anoneid})
-
-# def test_update4(self):
-#         # since we are adding an in_state relation with a state from the system
-#         # source, CWUser should only be searched in the system source as well
-# rset = self.execute('State X WHERE X name "activated"')
-# assert len(rset) == 1, rset
-# activatedeid = rset[0][0]
-# self._test('SET X in_state S WHERE X is CWUser, S eid %s' % activatedeid,
-# [('UpdateStep', [
-# ('OneFetchStep', [('DISTINCT Any X,%s WHERE X is CWUser' % activatedeid,
-# [{'X': 'CWUser'}])],
-# None, None, [self.system], {}, []),
-# ]),
-# ])
-
- def test_ldap_user_related_to_invariant_and_dont_cross_rel(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self.cards.dont_cross_relations.add('created_by')
- try:
- self._test('Any X,XL WHERE E eid %(x)s, E created_by X, X login XL',
- [('FetchStep', [('Any X,XL WHERE X login XL, X is CWUser',
- [{'X': 'CWUser', 'XL': 'String'}])],
- [self.ldap, self.system], None,
- {'X': 'table0.C0', 'X.login': 'table0.C1', 'XL': 'table0.C1'},
- []),
- ('OneFetchStep',
- [('Any X,XL WHERE 999999 created_by X, X login XL, X is CWUser',
- [{'X': 'CWUser', 'XL': 'String'}])],
- None, None,
- [self.system],
- {'X': 'table0.C0', 'X.login': 'table0.C1', 'XL': 'table0.C1'},
- [])],
- {'x': 999999})
- finally:
- self.cards.dont_cross_relations.remove('created_by')
-
- def test_ambigous_cross_relation(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self.cards.support_relations['see_also'] = True
- self.cards.cross_relations.add('see_also')
- try:
- self._test('Any X,AA ORDERBY AA WHERE E eid %(x)s, E see_also X, X modification_date AA',
- [('AggrStep',
- 'SELECT table0.C0, table0.C1 FROM table0\nORDER BY table0.C1',
- None,
- [('FetchStep',
- [('Any X,AA WHERE 999999 see_also X, X modification_date AA, X is Note',
- [{'AA': 'Datetime', 'X': 'Note'}])], [self.cards, self.system], {},
- {'AA': 'table0.C1', 'X': 'table0.C0',
- 'X.modification_date': 'table0.C1'},
- []),
- ('FetchStep',
- [('Any X,AA WHERE 999999 see_also X, X modification_date AA, X is Bookmark',
- [{'AA': 'Datetime', 'X': 'Bookmark'}])],
- [self.system], {},
- {'AA': 'table0.C1', 'X': 'table0.C0',
- 'X.modification_date': 'table0.C1'},
- [])])],
- {'x': 999999})
- finally:
- del self.cards.support_relations['see_also']
- self.cards.cross_relations.remove('see_also')
-
- def test_state_of_cross(self):
- self._test('DELETE State X WHERE NOT X state_of Y',
- [('DeleteEntitiesStep',
- [('OneFetchStep',
- [('Any X WHERE NOT X state_of Y, X is State, Y is Workflow',
- [{'X': 'State', 'Y': 'Workflow'}])],
- None, None, [self.system], {}, [])])]
- )
-
-
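-    # cw_source tests: as the plans below show, an explicit cw_source
-    # constraint lets the planner restrict the plan to the matching source,
-    # sometimes dropping the cw_source clause from the generated rql entirely.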
- def test_source_specified_0_0(self):
- self._test('Card X WHERE X cw_source S, S eid 1',
- [('OneFetchStep', [('Any X WHERE X cw_source 1, X is Card',
- [{'X': 'Card'}])],
- None, None,
- [self.system],{}, [])
- ])
-
- def test_source_specified_0_1(self):
- self._test('Any X, S WHERE X is Card, X cw_source S, S eid 1',
- [('OneFetchStep', [('Any X,1 WHERE X is Card, X cw_source 1',
- [{'X': 'Card'}])],
- None, None,
- [self.system],{}, [])
- ])
-
- def test_source_specified_1_0(self):
- self._test('Card X WHERE X cw_source S, S name "system"',
- [('OneFetchStep', [('Any X WHERE X cw_source S, S name "system", X is Card',
- [{'X': 'Card', 'S': 'CWSource'}])],
- None, None,
- [self.system],{}, [])
- ])
-
- def test_source_specified_1_1(self):
- self._test('Any X, SN WHERE X is Card, X cw_source S, S name "system", S name SN',
- [('OneFetchStep', [('Any X,SN WHERE X is Card, X cw_source S, S name "system", '
- 'S name SN',
- [{'S': 'CWSource', 'SN': 'String', 'X': 'Card'}])],
- None, None, [self.system], {}, [])
- ])
-
- def test_source_specified_1_2(self):
- self._test('Card X WHERE X cw_source S, S name "datafeed"',
- [('OneFetchStep', [('Any X WHERE X cw_source S, S name "datafeed", X is Card',
- [{'X': 'Card', 'S': 'CWSource'}])],
- None, None,
- [self.system],{}, [])
- ])
-
- def test_source_specified_1_3(self):
- self._test('Any X, SN WHERE X is Card, X cw_source S, S name "datafeed", S name SN',
- [('OneFetchStep', [('Any X,SN WHERE X is Card, X cw_source S, S name "datafeed", '
- 'S name SN',
- [{'S': 'CWSource', 'SN': 'String', 'X': 'Card'}])],
- None, None, [self.system], {}, [])
- ])
-
- def test_source_specified_1_4(self):
- sols = []
- for sol in X_ALL_SOLS:
- sol = sol.copy()
- sol['S'] = 'CWSource'
- sols.append(sol)
- self._test('Any X WHERE X cw_source S, S name "cards"',
- [('OneFetchStep', [('Any X WHERE X cw_source S, S name "cards"',
- sols)],
- None, None,
- [self.system],{}, [])
- ])
-
- def test_source_specified_2_0(self):
- # self._test('Card X WHERE X cw_source S, NOT S eid 1',
- # [('OneFetchStep', [('Any X WHERE X is Card',
- # [{'X': 'Card'}])],
- # None, None,
- # [self.cards],{}, [])
- # ])
- self._test('Card X WHERE NOT X cw_source S, S eid 1',
- [('OneFetchStep', [('Any X WHERE X is Card',
- [{'X': 'Card'}])],
- None, None,
- [self.cards],{}, [])
- ])
-
- def test_source_specified_2_1(self):
- self._test('Card X WHERE X cw_source S, NOT S name "system"',
- [('OneFetchStep', [('Any X WHERE X is Card',
- [{'X': 'Card'}])],
- None, None,
- [self.cards],{}, [])
- ])
- self._test('Card X WHERE NOT X cw_source S, S name "system"',
- [('OneFetchStep', [('Any X WHERE X is Card',
- [{'X': 'Card'}])],
- None, None,
- [self.cards],{}, [])
- ])
-
- def test_source_specified_3_1(self):
- self._test('Any X,XT WHERE X is Card, X title XT, X cw_source S, S name "cards"',
- [('OneFetchStep',
- [('Any X,XT WHERE X is Card, X title XT',
- [{'X': 'Card', 'XT': 'String'}])],
- None, None, [self.cards], {}, [])
- ])
-
- def test_source_specified_3_2(self):
- self._test('Any X,XT WHERE X is Card, X title XT, X cw_source S, S name "datafeed"',
- [('OneFetchStep',
- [('Any X,XT WHERE X is Card, X title XT, X cw_source S, S name "datafeed"',
- [{'X': 'Card', 'XT': 'String', 'S': 'CWSource'}])],
- None, None, [self.system], {}, [])
- ])
-
- def test_source_specified_3_3(self):
- self.skipTest('oops')
- self._test('Any STN WHERE X is Note, X type XT, X in_state ST, ST name STN, X cw_source S, S name "cards"',
- [('OneFetchStep',
- [('Any X,XT WHERE X is Card, X title XT',
- [{'X': 'Card', 'XT': 'String'}])],
- None, None, [self.cards], {}, [])
- ])
-
- def test_source_conflict_1(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- with self.assertRaises(BadRQLQuery) as cm:
- self._test('Any X WHERE X cw_source S, S name "system", X eid %(x)s',
- [], {'x': 999999})
- self.assertEqual(str(cm.exception), 'source conflict for term %(x)s')
-
- def test_source_conflict_2(self):
- with self.assertRaises(BadRQLQuery) as cm:
- self._test('Card X WHERE X cw_source S, S name "systeme"', [])
- self.assertEqual(str(cm.exception), 'source conflict for term X')
-
- def test_source_conflict_3(self):
- self.skipTest('oops')
- self._test('CWSource X WHERE X cw_source S, S name "cards"',
- [('OneFetchStep',
- [(u'Any X WHERE X cw_source S, S name "cards", X is CWSource',
- [{'S': 'CWSource', 'X': 'CWSource'}])],
- None, None,
- [self.system],
- {}, [])])
-
-
- def test_ambigous_cross_relation_source_specified(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self.cards.support_relations['see_also'] = True
- self.cards.cross_relations.add('see_also')
- try:
- self._test('Any X,AA ORDERBY AA WHERE E eid %(x)s, E see_also X, X modification_date AA',
- [('AggrStep',
- 'SELECT table0.C0, table0.C1 FROM table0\nORDER BY table0.C1',
- None,
- [('FetchStep',
- [('Any X,AA WHERE 999999 see_also X, X modification_date AA, X is Note',
- [{'AA': 'Datetime', 'X': 'Note'}])], [self.cards, self.system], {},
- {'AA': 'table0.C1', 'X': 'table0.C0',
- 'X.modification_date': 'table0.C1'},
- []),
- ('FetchStep',
- [('Any X,AA WHERE 999999 see_also X, X modification_date AA, X is Bookmark',
- [{'AA': 'Datetime', 'X': 'Bookmark'}])],
- [self.system], {},
- {'AA': 'table0.C1', 'X': 'table0.C0',
- 'X.modification_date': 'table0.C1'},
- [])])],
- {'x': 999999})
- finally:
- del self.cards.support_relations['see_also']
- self.cards.cross_relations.remove('see_also')
-
-    # non-regression tests ####################################################
-
- def test_nonregr1(self):
- self._test('Any X, Y WHERE X copain Y, X login "syt", Y login "cochon"',
- [('FetchStep',
- [('Any X WHERE X login "syt", X is CWUser', [{'X': 'CWUser'}])],
- [self.ldap, self.system], None, {'X': 'table0.C0'}, []),
- ('FetchStep',
- [('Any Y WHERE Y login "cochon", Y is CWUser', [{'Y': 'CWUser'}])],
- [self.ldap, self.system], None, {'Y': 'table1.C0'}, []),
- ('OneFetchStep',
- [('Any X,Y WHERE X copain Y, X is CWUser, Y is CWUser',
- [{'X': 'CWUser', 'Y': 'CWUser'}])],
- None, None, [self.system], {'X': 'table0.C0', 'Y': 'table1.C0'}, [])
- ])
-
- def test_nonregr2(self):
- iworkflowable = self.session.user.cw_adapt_to('IWorkflowable')
- iworkflowable.fire_transition('deactivate')
- treid = iworkflowable.latest_trinfo().eid
- self._test('Any X ORDERBY D DESC WHERE E eid %(x)s, E wf_info_for X, X modification_date D',
- [('FetchStep', [('Any X,D WHERE X modification_date D, X is Note',
- [{'X': 'Note', 'D': 'Datetime'}])],
- [self.cards, self.system], None, {'X': 'table0.C0', 'X.modification_date': 'table0.C1', 'D': 'table0.C1'}, []),
- ('FetchStep', [('Any X,D WHERE X modification_date D, X is CWUser',
- [{'X': 'CWUser', 'D': 'Datetime'}])],
- [self.ldap, self.system], None, {'X': 'table1.C0', 'X.modification_date': 'table1.C1', 'D': 'table1.C1'}, []),
- ('AggrStep', 'SELECT table2.C0 FROM table2\nORDER BY table2.C1 DESC', None, [
- ('FetchStep', [('Any X,D WHERE E eid %s, E wf_info_for X, X modification_date D, E is TrInfo, X is Affaire'%treid,
- [{'X': 'Affaire', 'E': 'TrInfo', 'D': 'Datetime'}])],
- [self.system],
- {},
- {'X': 'table2.C0', 'X.modification_date': 'table2.C1', 'D': 'table2.C1', 'E.wf_info_for': 'table2.C0'}, []),
- ('FetchStep', [('Any X,D WHERE E eid %s, E wf_info_for X, X modification_date D, E is TrInfo, X is CWUser'%treid,
- [{'X': 'CWUser', 'E': 'TrInfo', 'D': 'Datetime'}])],
- [self.system],
- {'X': 'table1.C0', 'X.modification_date': 'table1.C1', 'D': 'table1.C1'},
- {'X': 'table2.C0', 'X.modification_date': 'table2.C1', 'D': 'table2.C1', 'E.wf_info_for': 'table2.C0'}, []),
- ('FetchStep', [('Any X,D WHERE E eid %s, E wf_info_for X, X modification_date D, E is TrInfo, X is Note'%treid,
- [{'X': 'Note', 'E': 'TrInfo', 'D': 'Datetime'}])],
- [self.system],
- {'X': 'table0.C0', 'X.modification_date': 'table0.C1', 'D': 'table0.C1'},
- {'X': 'table2.C0', 'X.modification_date': 'table2.C1', 'D': 'table2.C1', 'E.wf_info_for': 'table2.C0'}, []),
- ]),
- ],
- {'x': treid})
-
- def test_nonregr3(self):
- # original jpl query:
- # Any X, NOW - CD, P WHERE P is Project, U interested_in P, U is CWUser, U login "sthenault", X concerns P, X creation_date CD ORDERBY CD DESC LIMIT 5
- self._test('Any X, NOW - CD, P ORDERBY CD DESC LIMIT 5 WHERE P bookmarked_by U, U login "admin", P is X, X creation_date CD',
- [('FetchStep', [('Any U WHERE U login "admin", U is CWUser', [{'U': 'CWUser'}])],
- [self.ldap, self.system], None, {'U': 'table0.C0'}, []),
- ('OneFetchStep', [('Any X,(NOW - CD),P ORDERBY CD DESC LIMIT 5 WHERE P bookmarked_by U, P is X, X creation_date CD, P is Bookmark, U is CWUser, X is CWEType',
- [{'P': 'Bookmark', 'U': 'CWUser', 'X': 'CWEType', 'CD': 'Datetime'}])],
- 5, None, [self.system], {'U': 'table0.C0'}, [])]
- )
-
- def test_nonregr4(self):
- ueid = self.session.user.eid
- self._test('Any U ORDERBY D DESC WHERE WF wf_info_for X, WF creation_date D, WF from_state FS, '
- 'WF owned_by U?, X eid %(x)s',
- [#('FetchStep', [('Any U WHERE U is CWUser', [{'U': 'CWUser'}])],
- # [self.ldap, self.system], None, {'U': 'table0.C0'}, []),
- ('OneFetchStep', [('Any U ORDERBY D DESC WHERE WF wf_info_for %s, WF creation_date D, WF from_state FS, WF owned_by U?' % ueid,
- [{'WF': 'TrInfo', 'FS': 'State', 'U': 'CWUser', 'D': 'Datetime'}])],
- None, None,
- [self.system], {}, [])],
- {'x': ueid})
-
- def test_nonregr5(self):
- # original jpl query:
- # DISTINCT Version V WHERE MB done_in MV, MV eid %(x)s,
-        # MB depends_on B, B done_in V, V version_of P, NOT P eid %(p)s
- cardeid = self.execute('INSERT Card X: X title "hop"')[0][0]
- noteeid = self.execute('INSERT Note X')[0][0]
- self._test('DISTINCT Card V WHERE MB documented_by MV, MV eid %(x)s, '
- 'MB depends_on B, B documented_by V, V multisource_rel P, NOT P eid %(p)s',
- [('FetchStep', [('Any V WHERE V multisource_rel P, NOT P eid %s, P is Note, V is Card'%noteeid,
- [{'P': 'Note', 'V': 'Card'}])],
- [self.cards, self.system], None, {'V': 'table0.C0'}, []),
- ('OneFetchStep', [('DISTINCT Any V WHERE MB documented_by %s, MB depends_on B, B documented_by V, B is Affaire, MB is Affaire, V is Card'%cardeid,
- [{'B': 'Affaire', 'MB': 'Affaire', 'V': 'Card'}])],
- None, None, [self.system], {'V': 'table0.C0'}, [])],
- {'x': cardeid, 'p': noteeid})
-
- def test_nonregr6(self):
- self._test('Any X WHERE X concerne Y',
- [('OneFetchStep', [('Any X WHERE X concerne Y',
- [{'Y': 'Division', 'X': 'Affaire'},
- {'Y': 'Note', 'X': 'Affaire'},
- {'Y': 'Societe', 'X': 'Affaire'},
- {'Y': 'SubDivision', 'X': 'Affaire'},
- {'Y': 'Affaire', 'X': 'Personne'}])],
- None, None, [self.system], {}, [])
- ])
- self._test('Any X WHERE X concerne Y, Y is Note',
- [('FetchStep', [('Any Y WHERE Y is Note', [{'Y': 'Note'}])],
- [self.cards, self.system], None, {'Y': 'table0.C0'}, []),
- ('OneFetchStep', [('Any X WHERE X concerne Y, X is Affaire, Y is Note',
- [{'X': 'Affaire', 'Y': 'Note'}])],
- None, None, [self.system], {'Y': 'table0.C0'}, [])
- ])
-
- def test_nonregr7(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self._test('Any S,SUM(DUR),SUM(I),(SUM(I) - SUM(DUR)),MIN(DI),MAX(DI) GROUPBY S ORDERBY S WHERE A is Affaire, A duration DUR, A invoiced I, A modification_date DI, A in_state S, S name SN, (EXISTS(A concerne WP, W multisource_rel WP)) OR (EXISTS(A concerne W)), W eid %(n)s',
- [('FetchStep', [('Any WP WHERE 999999 multisource_rel WP, WP is Note', [{'WP': 'Note'}])],
- [self.cards], None, {'WP': u'table0.C0'}, []),
- ('OneFetchStep', [('Any S,SUM(DUR),SUM(I),(SUM(I) - SUM(DUR)),MIN(DI),MAX(DI) GROUPBY S ORDERBY S WHERE A duration DUR, A invoiced I, A modification_date DI, A in_state S, S name SN, (EXISTS(A concerne WP, WP is Note)) OR (EXISTS(A concerne 999999)), A is Affaire, S is State',
- [{'A': 'Affaire', 'DI': 'Datetime', 'DUR': 'Int', 'I': 'Float', 'S': 'State', 'SN': 'String', 'WP': 'Note'}])],
- None, None, [self.system], {'WP': u'table0.C0'}, [])],
- {'n': 999999})
-
- def test_nonregr8(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self._test('Any X,Z WHERE X eid %(x)s, X multisource_rel Y, Z concerne X',
- [('FetchStep', [('Any 999999 WHERE 999999 multisource_rel Y, Y is Note',
- [{'Y': 'Note'}])],
- [self.cards],
- None, {u'%(x)s': 'table0.C0'},
- []),
- ('OneFetchStep', [('Any 999999,Z WHERE Z concerne 999999, Z is Affaire',
- [{'Z': 'Affaire'}])],
- None, None, [self.system],
- {u'%(x)s': 'table0.C0'}, []),
- ],
- {'x': 999999})
-
- def test_nonregr9(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- repo._type_source_cache[999998] = ('Note', 'cards', 999998, 'cards')
- self._test('SET X migrated_from Y WHERE X eid %(x)s, Y multisource_rel Z, Z eid %(z)s, Y migrated_from Z',
- [('FetchStep', [('Any Y WHERE Y multisource_rel 999998, Y is Note', [{'Y': 'Note'}])],
- [self.cards], None, {'Y': u'table0.C0'}, []),
- ('UpdateStep',
- [('OneFetchStep', [('DISTINCT Any Y WHERE Y migrated_from 999998, Y is Note',
- [{'Y': 'Note'}])],
- None, None, [self.system],
- {'Y': u'table0.C0'}, [])])],
- {'x': 999999, 'z': 999998})
-
- def test_nonregr10(self):
- repo._type_source_cache[999999] = ('CWUser', 'ldap', 999999, 'ldap')
- self._test('Any X,AA,AB ORDERBY AA WHERE E eid %(x)s, E owned_by X, X login AA, X modification_date AB',
- [('FetchStep',
- [('Any X,AA,AB WHERE X login AA, X modification_date AB, X is CWUser',
- [{'AA': 'String', 'AB': 'Datetime', 'X': 'CWUser'}])],
- [self.ldap, self.system], None, {'AA': 'table0.C1', 'AB': 'table0.C2',
- 'X': 'table0.C0', 'X.login': 'table0.C1', 'X.modification_date': 'table0.C2'},
- []),
- ('OneFetchStep',
- [('Any X,AA,AB ORDERBY AA WHERE 999999 owned_by X, X login AA, X modification_date AB, X is CWUser',
- [{'AA': 'String', 'AB': 'Datetime', 'X': 'CWUser'}])],
- None, None, [self.system], {'AA': 'table0.C1', 'AB': 'table0.C2',
- 'X': 'table0.C0', 'X.login': 'table0.C1', 'X.modification_date': 'table0.C2'},
- [])
- ],
- {'x': 999999})
-
- def test_nonregr11(self):
- repo._type_source_cache[999999] = ('Bookmark', 'system', 999999, 'system')
- self._test('SET X bookmarked_by Y WHERE X eid %(x)s, Y login "hop"',
- [('UpdateStep',
- [('OneFetchStep', [('DISTINCT Any Y WHERE Y login "hop", Y is CWUser', [{'Y': 'CWUser'}])],
- None, None, [self.ldap, self.system], {}, [])]
- )],
- {'x': 999999})
-
- def test_nonregr12(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self._test('Any X ORDERBY Z DESC WHERE X modification_date Z, E eid %(x)s, E see_also X',
- [('FetchStep', [('Any X,Z WHERE X modification_date Z, X is Note',
- [{'X': 'Note', 'Z': 'Datetime'}])],
- [self.cards, self.system], None, {'X': 'table0.C0', 'X.modification_date': 'table0.C1', 'Z': 'table0.C1'},
- []),
- ('AggrStep', 'SELECT table1.C0 FROM table1\nORDER BY table1.C1 DESC', None,
- [('FetchStep', [('Any X,Z WHERE X modification_date Z, 999999 see_also X, X is Bookmark',
- [{'X': 'Bookmark', 'Z': 'Datetime'}])],
- [self.system], {}, {'X': 'table1.C0', 'X.modification_date': 'table1.C1',
- 'Z': 'table1.C1'},
- []),
- ('FetchStep', [('Any X,Z WHERE X modification_date Z, 999999 see_also X, X is Note',
- [{'X': 'Note', 'Z': 'Datetime'}])],
- [self.system], {'X': 'table0.C0', 'X.modification_date': 'table0.C1',
- 'Z': 'table0.C1'},
- {'X': 'table1.C0', 'X.modification_date': 'table1.C1',
- 'Z': 'table1.C1'},
- [])]
- )],
- {'x': 999999})
-
- def test_nonregr13_1(self):
- ueid = self.session.user.eid
- # identity wrapped into exists:
- # shouldn't propagate constraint that U is in the same source as ME
- self._test('Any B,U,UL GROUPBY B,U,UL WHERE B created_by U?, B is File '
- 'WITH U,UL BEING (Any U,UL WHERE ME eid %(x)s, (EXISTS(U identity ME) '
- 'OR (EXISTS(U in_group G, G name IN("managers", "staff")))) '
- 'OR (EXISTS(U in_group H, ME in_group H, NOT H name "users")), U login UL, U is CWUser)',
- [('FetchStep', [('Any U,UL WHERE U login UL, U is CWUser',
- [{'U': 'CWUser', 'UL': 'String'}])],
- [self.ldap, self.system], None,
- {'U': 'table0.C0', 'U.login': 'table0.C1', 'UL': 'table0.C1'},
- []),
- ('FetchStep', [('Any U,UL WHERE ((EXISTS(U identity %s)) OR (EXISTS(U in_group G, G name IN("managers", "staff"), G is CWGroup))) OR (EXISTS(U in_group H, %s in_group H, NOT H name "users", H is CWGroup)), U login UL, U is CWUser' % (ueid, ueid),
- [{'G': 'CWGroup', 'H': 'CWGroup', 'U': 'CWUser', 'UL': 'String'}])],
- [self.system],
- {'U': 'table0.C0', 'U.login': 'table0.C1', 'UL': 'table0.C1'},
- {'U': 'table1.C0', 'U.login': 'table1.C1', 'UL': 'table1.C1'},
- []),
- ('OneFetchStep', [('Any B,U,UL GROUPBY B,U,UL WHERE B created_by U?, B is File',
- [{'B': 'File', 'U': 'CWUser', 'UL': 'String'}])],
- None, None, [self.system],
- {'U': 'table1.C0', 'UL': 'table1.C1'},
- [])],
- {'x': ueid})
-
- def test_nonregr13_2(self):
- # identity *not* wrapped into exists.
- #
- # XXX this test fails since in this case, in "U identity 5", U and 5 are
- # in the same scope, so constraints are applied (telling that U should
- # come from the same source as the user with eid 5).
- #
- # IMO this is normal, unless we introduce a special case for the
- # identity relation. BUT I think it's better to leave it as is and to
- # explain the constraint propagation rules, and hence why this should
- # be wrapped in exists() when used in a multi-source query
- self.skipTest('take a look at me if you wish')
- ueid = self.session.user.eid
- self._test('Any B,U,UL GROUPBY B,U,UL WHERE B created_by U?, B is File '
- 'WITH U,UL BEING (Any U,UL WHERE ME eid %(x)s, (U identity ME '
- 'OR (EXISTS(U in_group G, G name IN("managers", "staff")))) '
- 'OR (EXISTS(U in_group H, ME in_group H, NOT H name "users")), U login UL, U is CWUser)',
- [('FetchStep', [('Any U,UL WHERE U login UL, U is CWUser',
- [{'U': 'CWUser', 'UL': 'String'}])],
- [self.ldap, self.system], None,
- {'U': 'table0.C0', 'U.login': 'table0.C1', 'UL': 'table0.C1'},
- []),
- ('FetchStep', [('Any U,UL WHERE ((U identity %s) OR (EXISTS(U in_group G, G name IN("managers", "staff"), G is CWGroup))) OR (EXISTS(U in_group H, %s in_group H, NOT H name "users", H is CWGroup)), U login UL, U is CWUser' % (ueid, ueid),
- [{'G': 'CWGroup', 'H': 'CWGroup', 'U': 'CWUser', 'UL': 'String'}])],
- [self.system],
- {'U': 'table0.C0', 'U.login': 'table0.C1', 'UL': 'table0.C1'},
- {'U': 'table1.C0', 'U.login': 'table1.C1', 'UL': 'table1.C1'},
- []),
- ('OneFetchStep', [('Any B,U,UL GROUPBY B,U,UL WHERE B created_by U?, B is File',
- [{'B': 'File', 'U': 'CWUser', 'UL': 'String'}])],
- None, None, [self.system],
- {'U': 'table1.C0', 'UL': 'table1.C1'},
- [])],
- {'x': self.session.user.eid})
-
- def test_nonregr14_1(self):
- repo._type_source_cache[999999] = ('CWUser', 'ldap', 999999, 'ldap')
- self._test('Any X WHERE X eid %(x)s, X owned_by U, U eid %(u)s',
- [('OneFetchStep', [('Any 999999 WHERE 999999 owned_by 999999', [{}])],
- None, None, [self.system], {}, [])],
- {'x': 999999, 'u': 999999})
-
- def test_nonregr14_2(self):
- repo._type_source_cache[999999] = ('CWUser', 'ldap', 999999, 'ldap')
- repo._type_source_cache[999998] = ('Note', 'system', 999998, 'system')
- self._test('Any X WHERE X eid %(x)s, X owned_by U, U eid %(u)s',
- [('OneFetchStep', [('Any 999998 WHERE 999998 owned_by 999999', [{}])],
- None, None, [self.system], {}, [])],
- {'x': 999998, 'u': 999999})
-
- def test_nonregr14_3(self):
- repo._type_source_cache[999999] = ('CWUser', 'system', 999999, 'system')
- repo._type_source_cache[999998] = ('CWUser', 'ldap', 999998, 'ldap')
- self._test('Any X WHERE X eid %(x)s, X owned_by U, U eid %(u)s',
- [('OneFetchStep', [('Any 999998 WHERE 999998 owned_by 999999', [{}])],
- None, None, [self.system], {}, [])],
- {'x': 999998, 'u': 999999})
-
- def test_nonregr_identity_no_source_access_1(self):
- repo._type_source_cache[999999] = ('CWUser', 'ldap', 999998, 'ldap')
- self._test('Any S WHERE S identity U, S eid %(s)s, U eid %(u)s',
- [('OneFetchStep', [('Any 999999 WHERE 999999 identity 999999', [{}])],
- None, None, [self.system], {}, [])],
- {'s': 999999, 'u': 999999})
-
- def test_nonregr_identity_no_source_access_2(self):
- repo._type_source_cache[999999] = ('EmailAddress', 'system', 999999, 'system')
- repo._type_source_cache[999998] = ('CWUser', 'ldap', 999998, 'ldap')
- self._test('Any X WHERE O use_email X, ((EXISTS(O identity U)) OR (EXISTS(O in_group G, G name IN("managers", "staff")))) OR (EXISTS(O in_group G2, U in_group G2, NOT G2 name "users")), X eid %(x)s, U eid %(u)s',
- [('OneFetchStep', [('Any 999999 WHERE O use_email 999999, ((EXISTS(O identity 999998)) OR (EXISTS(O in_group G, G name IN("managers", "staff")))) OR (EXISTS(O in_group G2, 999998 in_group G2, NOT G2 name "users"))',
- [{'G': 'CWGroup', 'G2': 'CWGroup', 'O': 'CWUser'}])],
- None, None, [self.system], {}, [])],
- {'x': 999999, 'u': 999998})
-
- def test_nonregr_similar_subquery(self):
- repo._type_source_cache[999999] = ('Personne', 'system', 999999, 'system')
- self._test('Any T,TD,U,T,UL WITH T,TD,U,UL BEING ('
- '(Any T,TD,U,UL WHERE X eid %(x)s, T comments X, T content TD, T created_by U?, U login UL)'
- ' UNION '
- '(Any T,TD,U,UL WHERE X eid %(x)s, X connait P, T comments P, T content TD, T created_by U?, U login UL))',
- # XXX optimization: use a OneFetchStep with a UNION of both queries
- [('FetchStep', [('Any U,UL WHERE U login UL, U is CWUser',
- [{'U': 'CWUser', 'UL': 'String'}])],
- [self.ldap, self.system], None,
- {'U': 'table0.C0', 'U.login': 'table0.C1', 'UL': 'table0.C1'},
- []),
- ('UnionFetchStep',
- [('FetchStep',
- [('Any T,TD,U,UL WHERE T comments 999999, T content TD, T created_by U?, U login UL, T is Comment, U is CWUser',
- [{'T': 'Comment', 'TD': 'String', 'U': 'CWUser', 'UL': 'String'}])],
- [self.system],
- {'U': 'table0.C0', 'U.login': 'table0.C1', 'UL': 'table0.C1'},
- {'T': 'table1.C0',
- 'T.content': 'table1.C1',
- 'TD': 'table1.C1',
- 'U': 'table1.C2',
- 'U.login': 'table1.C3',
- 'UL': 'table1.C3'},
- []),
- ('FetchStep',
- [('Any T,TD,U,UL WHERE 999999 connait P, T comments P, T content TD, T created_by U?, U login UL, P is Personne, T is Comment, U is CWUser',
- [{'P': 'Personne',
- 'T': 'Comment',
- 'TD': 'String',
- 'U': 'CWUser',
- 'UL': 'String'}])],
- [self.system],
- {'U': 'table0.C0', 'U.login': 'table0.C1', 'UL': 'table0.C1'},
- {'T': 'table1.C0',
- 'T.content': 'table1.C1',
- 'TD': 'table1.C1',
- 'U': 'table1.C2',
- 'U.login': 'table1.C3',
- 'UL': 'table1.C3'},
- [])]),
- ('OneFetchStep',
- [('Any T,TD,U,T,UL',
- [{'T': 'Comment', 'TD': 'String', 'U': 'CWUser', 'UL': 'String'}])],
- None, None,
- [self.system],
- {'T': 'table1.C0', 'TD': 'table1.C1', 'U': 'table1.C2', 'UL': 'table1.C3'},
- [])],
- {'x': 999999})
-
- def test_nonregr_dont_readd_already_processed_relation(self):
- self._test('Any WO,D,SO WHERE WO is Note, D tags WO, WO in_state SO',
- [('FetchStep',
- [('Any WO,SO WHERE WO in_state SO, SO is State, WO is Note',
- [{'SO': 'State', 'WO': 'Note'}])],
- [self.cards, self.system], None,
- {'SO': 'table0.C1', 'WO': 'table0.C0'},
- []),
- ('OneFetchStep',
- [('Any WO,D,SO WHERE D tags WO, D is Tag, SO is State, WO is Note',
- [{'D': 'Tag', 'SO': 'State', 'WO': 'Note'}])],
- None, None, [self.system],
- {'SO': 'table0.C1', 'WO': 'table0.C0'},
- [])
- ])
-
-class MSPlannerTwoSameExternalSourcesTC(BasePlannerTC):
- """test planner related feature on a 3-sources repository:
-
- * 2 rql sources supporting Card
- """
-
- def setUp(self):
- self.__class__.repo = repo
- self.setup()
- self.add_source(FakeCardSource, 'cards')
- self.add_source(FakeCardSource, 'cards2')
- self.planner = MSPlanner(self.o.schema, self.repo.vreg.rqlhelper)
- assert repo.sources_by_uri['cards2'].support_relation('multisource_crossed_rel')
- assert 'multisource_crossed_rel' in repo.sources_by_uri['cards2'].cross_relations
- assert repo.sources_by_uri['cards'].support_relation('multisource_crossed_rel')
- assert 'multisource_crossed_rel' in repo.sources_by_uri['cards'].cross_relations
- _test = test_plan
-
-
- def test_linked_external_entities(self):
- repo._type_source_cache[999999] = ('Tag', 'system', 999999, 'system')
- self._test('Any X,XT WHERE X is Card, X title XT, T tags X, T eid %(t)s',
- [('FetchStep',
- [('Any X,XT WHERE X title XT, X is Card', [{'X': 'Card', 'XT': 'String'}])],
- [self.cards, self.cards2, self.system],
- None, {'X': 'table0.C0', 'X.title': 'table0.C1', 'XT': 'table0.C1'},
- []),
- ('OneFetchStep',
- [('Any X,XT WHERE X title XT, 999999 tags X, X is Card',
- [{'X': 'Card', 'XT': 'String'}])],
- None, None, [self.system],
- {'X': 'table0.C0', 'X.title': 'table0.C1', 'XT': 'table0.C1'},
- [])],
- {'t': 999999})
-
- def test_version_depends_on(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self._test('Any X,AD,AE WHERE E eid %(x)s, E migrated_from X, X in_state AD, AD name AE',
- [('FetchStep', [('Any X,AD,AE WHERE X in_state AD, AD name AE, AD is State, X is Note',
- [{'AD': 'State', 'AE': 'String', 'X': 'Note'}])],
- [self.cards, self.cards2, self.system],
- None, {'AD': 'table0.C1', 'AD.name': 'table0.C2',
- 'AE': 'table0.C2', 'X': 'table0.C0'},
- []),
- ('OneFetchStep', [('Any X,AD,AE WHERE 999999 migrated_from X, AD name AE, AD is State, X is Note',
- [{'AD': 'State', 'AE': 'String', 'X': 'Note'}])],
- None, None, [self.system],
- {'AD': 'table0.C1', 'AD.name': 'table0.C2', 'AE': 'table0.C2', 'X': 'table0.C0'},
- [])],
- {'x': 999999})
-
- def test_version_crossed_depends_on_1(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self._test('Any X,AD,AE WHERE E eid %(x)s, E multisource_crossed_rel X, X in_state AD, AD name AE',
- [('FetchStep', [('Any X,AD,AE WHERE X in_state AD, AD name AE, AD is State, X is Note',
- [{'AD': 'State', 'AE': 'String', 'X': 'Note'}])],
- [self.cards, self.cards2, self.system],
- None, {'AD': 'table0.C1', 'AD.name': 'table0.C2',
- 'AE': 'table0.C2', 'X': 'table0.C0'},
- []),
- ('UnionStep', None, None,
- [('OneFetchStep', [('Any X,AD,AE WHERE 999999 multisource_crossed_rel X, AD name AE, AD is State, X is Note',
- [{'AD': 'State', 'AE': 'String', 'X': 'Note'}])],
- None, None, [self.cards], None,
- []),
- ('OneFetchStep', [('Any X,AD,AE WHERE 999999 multisource_crossed_rel X, AD name AE, AD is State, X is Note',
- [{'AD': 'State', 'AE': 'String', 'X': 'Note'}])],
- None, None, [self.system],
- {'AD': 'table0.C1', 'AD.name': 'table0.C2',
- 'AE': 'table0.C2', 'X': 'table0.C0'},
- [])]
- )],
- {'x': 999999})
-
- def test_version_crossed_depends_on_2(self):
- self.repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
- self._test('Any X,AD,AE WHERE E eid %(x)s, E multisource_crossed_rel X, X in_state AD, AD name AE',
- [('FetchStep', [('Any X,AD,AE WHERE X in_state AD, AD name AE, AD is State, X is Note',
- [{'AD': 'State', 'AE': 'String', 'X': 'Note'}])],
- [self.cards, self.cards2, self.system],
- None, {'AD': 'table0.C1', 'AD.name': 'table0.C2',
- 'AE': 'table0.C2', 'X': 'table0.C0'},
- []),
- ('OneFetchStep', [('Any X,AD,AE WHERE 999999 multisource_crossed_rel X, AD name AE, AD is State, X is Note',
- [{'AD': 'State', 'AE': 'String', 'X': 'Note'}])],
- None, None, [self.system],
- {'AD': 'table0.C1', 'AD.name': 'table0.C2', 'AE': 'table0.C2', 'X': 'table0.C0'},
- [])],
- {'x': 999999})
-
- def test_version_crossed_depends_on_3(self):
- self._test('Any X,AD,AE WHERE E multisource_crossed_rel X, X in_state AD, AD name AE, E is Note',
- [('FetchStep', [('Any X,AD,AE WHERE X in_state AD, AD name AE, AD is State, X is Note',
- [{'AD': 'State', 'AE': 'String', 'X': 'Note'}])],
- [self.cards, self.cards2, self.system],
- None, {'AD': 'table0.C1', 'AD.name': 'table0.C2',
- 'AE': 'table0.C2', 'X': 'table0.C0'},
- []),
- ('FetchStep', [('Any E WHERE E is Note', [{'E': 'Note'}])],
- [self.cards, self.cards2, self.system],
- None, {'E': 'table1.C0'},
- []),
- ('UnionStep', None, None,
- [('OneFetchStep', [('Any X,AD,AE WHERE E multisource_crossed_rel X, AD name AE, AD is State, E is Note, X is Note',
- [{'AD': 'State', 'AE': 'String', 'E': 'Note', 'X': 'Note'}])],
- None, None, [self.cards, self.cards2], None,
- []),
- ('OneFetchStep', [('Any X,AD,AE WHERE E multisource_crossed_rel X, AD name AE, AD is State, E is Note, X is Note',
- [{'AD': 'State', 'AE': 'String', 'E': 'Note', 'X': 'Note'}])],
- None, None, [self.system],
- {'AD': 'table0.C1',
- 'AD.name': 'table0.C2',
- 'AE': 'table0.C2',
- 'E': 'table1.C0',
- 'X': 'table0.C0'},
- [])]
- )]
- )
-
- def test_version_crossed_depends_on_4(self):
- self._test('Any X,AD,AE WHERE EXISTS(E multisource_crossed_rel X), X in_state AD, AD name AE, E is Note',
- [('FetchStep',
- [('Any X,AD,AE WHERE X in_state AD, AD name AE, AD is State, X is Note',
- [{'X': 'Note', 'AD': 'State', 'AE': 'String'}])],
- [self.cards, self.cards2, self.system], None,
- {'X': 'table0.C0',
- 'AD': 'table0.C1',
- 'AD.name': 'table0.C2',
- 'AE': 'table0.C2'},
- []),
- ('FetchStep',
- [('Any A WHERE E multisource_crossed_rel A, A is Note, E is Note',
- [{'A': 'Note', 'E': 'Note'}])],
- [self.cards, self.cards2, self.system], None,
- {'A': 'table1.C0'},
- []),
- ('OneFetchStep',
- [('Any X,AD,AE WHERE EXISTS(X identity A), AD name AE, A is Note, AD is State, X is Note',
- [{'A': 'Note', 'AD': 'State', 'AE': 'String', 'X': 'Note'}])],
- None, None,
- [self.system],
- {'A': 'table1.C0',
- 'AD': 'table0.C1',
- 'AD.name': 'table0.C2',
- 'AE': 'table0.C2',
- 'X': 'table0.C0'},
- []
- )]
- )
-
- def test_nonregr_dont_cross_rel_source_filtering_1(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self._test('Any S WHERE E eid %(x)s, E in_state S, NOT S name "moved"',
- [('OneFetchStep', [('Any S WHERE 999999 in_state S, NOT S name "moved", S is State',
- [{'S': 'State'}])],
- None, None, [self.cards], {}, []
- )],
- {'x': 999999})
-
- def test_nonregr_dont_cross_rel_source_filtering_2(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self._test('Any X,AA,AB WHERE E eid %(x)s, E in_state X, X name AA, X modification_date AB',
- [('OneFetchStep', [('Any X,AA,AB WHERE 999999 in_state X, X name AA, X modification_date AB, X is State',
- [{'AA': 'String', 'AB': 'Datetime', 'X': 'State'}])],
- None, None, [self.cards], {}, []
- )],
- {'x': 999999})
-
- def test_nonregr_eid_query(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self._test('Any X WHERE X eid 999999',
- [('OneFetchStep', [('Any 999999', [{}])],
- None, None, [self.system], {}, []
- )],
- {'x': 999999})
-
-
- def test_nonregr_not_is(self):
- self._test("Any X WHERE X owned_by U, U login 'anon', NOT X is Comment",
- [('FetchStep', [('Any X WHERE X is IN(Card, Note, State)',
- [{'X': 'Note'}, {'X': 'State'}, {'X': 'Card'}])],
- [self.cards, self.cards2, self.system],
- None, {'X': 'table0.C0'}, []),
- ('UnionStep', None, None,
- [('OneFetchStep',
- [(u'Any X WHERE X owned_by U, U login "anon", U is CWUser, X is IN(Affaire, BaseTransition, Basket, Bookmark, CWAttribute, CWCache, CWConstraint, CWConstraintType, CWDataImport, CWEType, CWGroup, CWPermission, CWProperty, CWRType, CWRelation, CWSource, CWSourceHostConfig, CWSourceSchemaConfig, CWUniqueTogetherConstraint, CWUser, Division, Email, EmailAddress, EmailPart, EmailThread, ExternalUri, File, Folder, Old, Personne, RQLExpression, Societe, SubDivision, SubWorkflowExitPoint, Tag, TrInfo, Transition, Workflow, WorkflowTransition)',
- [{'U': 'CWUser', 'X': 'Affaire'},
- {'U': 'CWUser', 'X': 'BaseTransition'},
- {'U': 'CWUser', 'X': 'Basket'},
- {'U': 'CWUser', 'X': 'Bookmark'},
- {'U': 'CWUser', 'X': 'CWAttribute'},
- {'U': 'CWUser', 'X': 'CWCache'},
- {'U': 'CWUser', 'X': 'CWConstraint'},
- {'U': 'CWUser', 'X': 'CWConstraintType'},
- {'U': 'CWUser', 'X': 'CWDataImport'},
- {'U': 'CWUser', 'X': 'CWEType'},
- {'U': 'CWUser', 'X': 'CWGroup'},
- {'U': 'CWUser', 'X': 'CWPermission'},
- {'U': 'CWUser', 'X': 'CWProperty'},
- {'U': 'CWUser', 'X': 'CWRType'},
- {'U': 'CWUser', 'X': 'CWRelation'},
- {'U': 'CWUser', 'X': 'CWSource'},
- {'U': 'CWUser', 'X': 'CWSourceHostConfig'},
- {'U': 'CWUser', 'X': 'CWSourceSchemaConfig'},
- {'U': 'CWUser', 'X': 'CWUniqueTogetherConstraint'},
- {'U': 'CWUser', 'X': 'CWUser'},
- {'U': 'CWUser', 'X': 'Division'},
- {'U': 'CWUser', 'X': 'Email'},
- {'U': 'CWUser', 'X': 'EmailAddress'},
- {'U': 'CWUser', 'X': 'EmailPart'},
- {'U': 'CWUser', 'X': 'EmailThread'},
- {'U': 'CWUser', 'X': 'ExternalUri'},
- {'U': 'CWUser', 'X': 'File'},
- {'U': 'CWUser', 'X': 'Folder'},
- {'U': 'CWUser', 'X': 'Old'},
- {'U': 'CWUser', 'X': 'Personne'},
- {'U': 'CWUser', 'X': 'RQLExpression'},
- {'U': 'CWUser', 'X': 'Societe'},
- {'U': 'CWUser', 'X': 'SubDivision'},
- {'U': 'CWUser', 'X': 'SubWorkflowExitPoint'},
- {'U': 'CWUser', 'X': 'Tag'},
- {'U': 'CWUser', 'X': 'TrInfo'},
- {'U': 'CWUser', 'X': 'Transition'},
- {'U': 'CWUser', 'X': 'Workflow'},
- {'U': 'CWUser', 'X': 'WorkflowTransition'}])],
- None, None,
- [self.system], {}, []),
- ('OneFetchStep',
- [(u'Any X WHERE X owned_by U, U login "anon", U is CWUser, X is IN(Card, Note, State)',
- [{'U': 'CWUser', 'X': 'Note'},
- {'U': 'CWUser', 'X': 'State'},
- {'U': 'CWUser', 'X': 'Card'}])],
- None, None,
- [self.system], {'X': 'table0.C0'}, [])
- ])
- ])
-
- def test_remove_from_deleted_source_1(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self._test('Note X WHERE X eid 999999, NOT X cw_source Y',
- [('OneFetchStep',
- [('Any 999999 WHERE NOT EXISTS(999999 cw_source Y)',
- [{'Y': 'CWSource'}])],
- None, None, [self.system], {}, [])
- ])
-
- def test_remove_from_deleted_source_2(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self.repo._type_source_cache[999998] = ('Note', 'cards', 999998, 'cards')
- self._test('Note X WHERE X eid IN (999998, 999999), NOT X cw_source Y',
- [('FetchStep',
- [('Any X WHERE X eid IN(999998, 999999), X is Note',
- [{'X': 'Note'}])],
- [self.cards], None, {'X': 'table0.C0'}, []),
- ('OneFetchStep',
- [('Any X WHERE NOT EXISTS(X cw_source Y, Y is CWSource), X is Note',
- [{'X': 'Note', 'Y': 'CWSource'}])],
- None, None, [self.system],{'X': 'table0.C0'}, [])
- ])
-
-
-class FakeVCSSource(AbstractSource):
- uri = 'ccc'
- support_entities = {'Card': True, 'Note': True}
- support_relations = {'multisource_inlined_rel': True,
- 'multisource_rel': True}
-
- def syntax_tree_search(self, *args, **kwargs):
- return []
-
-class MSPlannerVCSSource(BasePlannerTC):
-
- def setUp(self):
- self.__class__.repo = repo
- self.setup()
- self.add_source(FakeVCSSource, 'vcs')
- self.planner = MSPlanner(self.o.schema, self.repo.vreg.rqlhelper)
- _test = test_plan
-
- def test_multisource_inlined_rel_skipped(self):
- self._test('Any MAX(VC) '
- 'WHERE VC multisource_inlined_rel R2, R para %(branch)s, VC in_state S, S name "published", '
- '(EXISTS(R identity R2)) OR (EXISTS(R multisource_rel R2))',
- [('FetchStep', [('Any VC WHERE VC multisource_inlined_rel R2, R para "???", (EXISTS(R identity R2)) OR (EXISTS(R multisource_rel R2)), R is Note, R2 is Note, VC is Note',
- [{'R': 'Note', 'R2': 'Note', 'VC': 'Note'}])],
- [self.vcs, self.system], None,
- {'VC': 'table0.C0'},
- []),
- ('OneFetchStep', [(u'Any MAX(VC) WHERE VC in_state S, S name "published", S is State, VC is Note',
- [{'S': 'State', 'VC': 'Note'}])],
- None, None, [self.system],
- {'VC': 'table0.C0'},
- [])
- ])
-
- def test_fully_simplified_extsource(self):
- self.repo._type_source_cache[999998] = ('Note', 'vcs', 999998, 'vcs')
- self.repo._type_source_cache[999999] = ('Note', 'vcs', 999999, 'vcs')
- self._test('Any X, Y WHERE NOT X multisource_rel Y, X eid 999998, Y eid 999999',
- [('OneFetchStep', [('Any 999998,999999 WHERE NOT EXISTS(999998 multisource_rel 999999)', [{}])],
- None, None, [self.vcs], {}, [])
- ])
-
- def test_nonregr_fully_simplified_extsource(self):
- self.repo._type_source_cache[999998] = ('Note', 'vcs', 999998, 'vcs')
- self.repo._type_source_cache[999999] = ('Note', 'vcs', 999999, 'vcs')
- self.repo._type_source_cache[1000000] = ('Note', 'system', 1000000, 'system')
- self._test('DISTINCT Any T,FALSE,L,M WHERE L eid 1000000, M eid 999999, T eid 999998',
- [('OneFetchStep', [('DISTINCT Any 999998,FALSE,1000000,999999', [{}])],
- None, None, [self.system], {}, [])
- ])
-
-
-if __name__ == '__main__':
- from logilab.common.testlib import unittest_main
- unittest_main()
--- a/server/test/unittest_multisources.py Tue Jun 10 09:35:26 2014 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,394 +0,0 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
-# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
-#
-# This file is part of CubicWeb.
-#
-# CubicWeb is free software: you can redistribute it and/or modify it under the
-# terms of the GNU Lesser General Public License as published by the Free
-# Software Foundation, either version 2.1 of the License, or (at your option)
-# any later version.
-#
-# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
-# details.
-#
-# You should have received a copy of the GNU Lesser General Public License along
-# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-
-from datetime import datetime, timedelta
-from itertools import repeat
-
-from cubicweb.devtools import TestServerConfiguration, init_test_database
-from cubicweb.devtools.testlib import CubicWebTC, Tags
-from cubicweb.devtools.repotest import do_monkey_patch, undo_monkey_patch
-from cubicweb.devtools import get_test_db_handler
-
-class ExternalSource1Configuration(TestServerConfiguration):
- sourcefile = 'sources_extern'
-
-class ExternalSource2Configuration(TestServerConfiguration):
- sourcefile = 'sources_multi'
-
-MTIME = datetime.utcnow() - timedelta(0, 10)
-
-EXTERN_SOURCE_CFG = u'''
-cubicweb-user = admin
-cubicweb-password = gingkow
-base-url=http://extern.org/
-'''
-
-# hi-jacking
-from cubicweb.server.sources.pyrorql import PyroRQLSource
-from cubicweb.dbapi import Connection
-
-PyroRQLSource_get_connection = PyroRQLSource.get_connection
-Connection_close = Connection.close
-
-def add_extern_mapping(source):
- source.init_mapping(zip(('Card', 'Affaire', 'State',
- 'in_state', 'documented_by', 'multisource_inlined_rel'),
- repeat(u'write')))
-
-
-def pre_setup_database_extern(session, config):
- session.execute('INSERT Card X: X title "C3: An external card", X wikiid "aaa"')
- session.execute('INSERT Card X: X title "C4: Ze external card", X wikiid "zzz"')
- session.execute('INSERT Affaire X: X ref "AFFREF"')
- session.commit()
-
-def pre_setup_database_multi(session, config):
- session.create_entity('CWSource', name=u'extern', type=u'pyrorql',
- url=u'pyro:///extern', config=EXTERN_SOURCE_CFG)
- session.commit()
-
-
-class TwoSourcesTC(CubicWebTC):
- """Main repo -> extern-multi -> extern
- \-------------/
- """
- test_db_id= 'cw-server-multisources'
- tags = CubicWebTC.tags | Tags(('multisources'))
-
- @classmethod
- def setUpClass(cls):
- cls._cfg2 = ExternalSource1Configuration('data', apphome=TwoSourcesTC.datadir)
- cls._cfg3 = ExternalSource2Configuration('data', apphome=TwoSourcesTC.datadir)
- TestServerConfiguration.no_sqlite_wrap = True
- # hi-jack PyroRQLSource.get_connection to access existing connection (no
- # pyro connection)
- PyroRQLSource.get_connection = lambda x: x.uri == 'extern-multi' and cls.cnx3 or cls.cnx2
- # also necessary since the repository closes its initial connection
- # pool, though we want to keep cnx2 valid
- Connection.close = lambda x: None
-
- @classmethod
- def tearDowncls(cls):
- PyroRQLSource.get_connection = PyroRQLSource_get_connection
- Connection.close = Connection_close
- cls.cnx2.close()
- cls.cnx3.close()
- TestServerConfiguration.no_sqlite_wrap = False
-
- @classmethod
- def _init_repo(cls):
- repo2_handler = get_test_db_handler(cls._cfg2)
- repo2_handler.build_db_cache('4cards-1affaire',pre_setup_func=pre_setup_database_extern)
- cls.repo2, cls.cnx2 = repo2_handler.get_repo_and_cnx('4cards-1affaire')
-
- repo3_handler = get_test_db_handler(cls._cfg3)
- repo3_handler.build_db_cache('multisource',pre_setup_func=pre_setup_database_multi)
- cls.repo3, cls.cnx3 = repo3_handler.get_repo_and_cnx('multisource')
-
-
- super(TwoSourcesTC, cls)._init_repo()
-
- def setUp(self):
- CubicWebTC.setUp(self)
- self.addCleanup(self.cnx2.close)
- self.addCleanup(self.cnx3.close)
- do_monkey_patch()
-
- def tearDown(self):
- for source in self.repo.sources[1:]:
- self.repo.remove_source(source.uri)
- CubicWebTC.tearDown(self)
- self.cnx2.close()
- self.cnx3.close()
- undo_monkey_patch()
-
- @staticmethod
- def pre_setup_database(session, config):
- for uri, src_config in [('extern', EXTERN_SOURCE_CFG),
- ('extern-multi', '''
-cubicweb-user = admin
-cubicweb-password = gingkow
-''')]:
- source = session.create_entity('CWSource', name=unicode(uri),
- type=u'pyrorql', url=u'pyro:///extern-multi',
- config=unicode(src_config))
- session.commit()
- add_extern_mapping(source)
-
- session.commit()
- # trigger discovery
- session.execute('Card X')
- session.execute('Affaire X')
- session.execute('State X')
-
- def setup_database(self):
- cu2 = self.cnx2.cursor()
- self.ec1 = cu2.execute('Any X WHERE X is Card, X title "C3: An external card", X wikiid "aaa"')[0][0]
- self.aff1 = cu2.execute('Any X WHERE X is Affaire, X ref "AFFREF"')[0][0]
- cu2.close()
- # add some entities
- self.ic1 = self.sexecute('INSERT Card X: X title "C1: An internal card", X wikiid "aaai"')[0][0]
- self.ic2 = self.sexecute('INSERT Card X: X title "C2: Ze internal card", X wikiid "zzzi"')[0][0]
-
- def test_eid_comp(self):
- rset = self.sexecute('Card X WHERE X eid > 1')
- self.assertEqual(len(rset), 4)
- rset = self.sexecute('Any X,T WHERE X title T, X eid > 1')
- self.assertEqual(len(rset), 4)
-
- def test_metainformation(self):
- rset = self.sexecute('Card X ORDERBY T WHERE X title T')
- # 2 added to the system source, 2 added to the external source
- self.assertEqual(len(rset), 4)
- # since they are ordered by eid, we know the first 3 come from the system
- # source and the others from the external source
- self.assertEqual(rset.get_entity(0, 0).cw_metainformation(),
- {'source': {'type': 'native', 'uri': 'system', 'use-cwuri-as-url': False},
- 'type': u'Card', 'extid': None})
- externent = rset.get_entity(3, 0)
- metainf = externent.cw_metainformation()
- self.assertEqual(metainf['source'], {'type': 'pyrorql', 'base-url': 'http://extern.org/', 'uri': 'extern', 'use-cwuri-as-url': False})
- self.assertEqual(metainf['type'], 'Card')
- self.assert_(metainf['extid'])
- etype = self.sexecute('Any ETN WHERE X is ET, ET name ETN, X eid %(x)s',
- {'x': externent.eid})[0][0]
- self.assertEqual(etype, 'Card')
-
- def test_order_limit_offset(self):
- rsetbase = self.sexecute('Any W,X ORDERBY W,X WHERE X wikiid W')
- self.assertEqual(len(rsetbase), 4)
- self.assertEqual(sorted(rsetbase.rows), rsetbase.rows)
- rset = self.sexecute('Any W,X ORDERBY W,X LIMIT 2 OFFSET 2 WHERE X wikiid W')
- self.assertEqual(rset.rows, rsetbase.rows[2:4])
-
- def test_has_text(self):
- self.repo.sources_by_uri['extern'].synchronize(MTIME) # in case fti_update has been run before
- self.assertTrue(self.sexecute('Any X WHERE X has_text "affref"'))
- self.assertTrue(self.sexecute('Affaire X WHERE X has_text "affref"'))
- self.assertTrue(self.sexecute('Any X ORDERBY FTIRANK(X) WHERE X has_text "affref"'))
- self.assertTrue(self.sexecute('Affaire X ORDERBY FTIRANK(X) WHERE X has_text "affref"'))
-
- def test_anon_has_text(self):
- self.repo.sources_by_uri['extern'].synchronize(MTIME) # in case fti_update has been run before
- self.sexecute('INSERT Affaire X: X ref "no readable card"')[0][0]
- aff1 = self.sexecute('INSERT Affaire X: X ref "card"')[0][0]
- # grant read access
- self.sexecute('SET X owned_by U WHERE X eid %(x)s, U login "anon"', {'x': aff1})
- self.commit()
- cnx = self.login('anon')
- cu = cnx.cursor()
- rset = cu.execute('Any X WHERE X has_text "card"')
- # 5: 4 card + 1 readable affaire
- self.assertEqual(len(rset), 5, zip(rset.rows, rset.description))
- rset = cu.execute('Any X ORDERBY FTIRANK(X) WHERE X has_text "card"')
- self.assertEqual(len(rset), 5, zip(rset.rows, rset.description))
- Connection_close(cnx.cnx) # cnx is a TestCaseConnectionProxy
-
- def test_synchronization(self):
- cu = self.cnx2.cursor()
- assert cu.execute('Any X WHERE X eid %(x)s', {'x': self.aff1})
- cu.execute('SET X ref "BLAH" WHERE X eid %(x)s', {'x': self.aff1})
- aff2 = cu.execute('INSERT Affaire X: X ref "AFFREUX"')[0][0]
- self.cnx2.commit()
- try:
- # force sync
- self.repo.sources_by_uri['extern'].synchronize(MTIME)
- self.assertTrue(self.sexecute('Any X WHERE X has_text "blah"'))
- self.assertTrue(self.sexecute('Any X WHERE X has_text "affreux"'))
- cu.execute('DELETE Affaire X WHERE X eid %(x)s', {'x': aff2})
- self.cnx2.commit()
- self.repo.sources_by_uri['extern'].synchronize(MTIME)
- rset = self.sexecute('Any X WHERE X has_text "affreux"')
- self.assertFalse(rset)
- finally:
- # restore state
- cu.execute('SET X ref "AFFREF" WHERE X eid %(x)s', {'x': self.aff1})
- self.cnx2.commit()
-
- def test_simplifiable_var(self):
- affeid = self.sexecute('Affaire X WHERE X ref "AFFREF"')[0][0]
- rset = self.sexecute('Any X,AA,AB WHERE E eid %(x)s, E in_state X, X name AA, X modification_date AB',
- {'x': affeid})
- self.assertEqual(len(rset), 1)
- self.assertEqual(rset[0][1], "pitetre")
-
- def test_simplifiable_var_2(self):
- affeid = self.sexecute('Affaire X WHERE X ref "AFFREF"')[0][0]
- rset = self.sexecute('Any E WHERE E eid %(x)s, E in_state S, NOT S name "moved"',
- {'x': affeid, 'u': self.session.user.eid})
- self.assertEqual(len(rset), 1)
-
- def test_sort_func(self):
- self.sexecute('Affaire X ORDERBY DUMB_SORT(RF) WHERE X ref RF')
-
- def test_sort_func_ambigous(self):
- self.sexecute('Any X ORDERBY DUMB_SORT(RF) WHERE X title RF')
-
- def test_in_eid(self):
- iec1 = self.repo.extid2eid(self.repo.sources_by_uri['extern'], str(self.ec1),
- 'Card', self.session)
- rset = self.sexecute('Any X WHERE X eid IN (%s, %s)' % (iec1, self.ic1))
- self.assertEqual(sorted(r[0] for r in rset.rows), sorted([iec1, self.ic1]))
-
- def test_greater_eid(self):
- rset = self.sexecute('Any X WHERE X eid > %s' % (self.ic1 - 1))
- self.assertEqual(len(rset.rows), 2) # self.ic1 and self.ic2
- cu = self.cnx2.cursor()
- ec2 = cu.execute('INSERT Card X: X title "glup"')[0][0]
- self.cnx2.commit()
- # 'X eid > something' should not trigger discovery
- rset = self.sexecute('Any X WHERE X eid > %s' % (self.ic1 - 1))
- self.assertEqual(len(rset.rows), 2)
- # trigger discovery using another query
- crset = self.sexecute('Card X WHERE X title "glup"')
- self.assertEqual(len(crset.rows), 1)
- rset = self.sexecute('Any X WHERE X eid > %s' % (self.ic1 - 1))
- self.assertEqual(len(rset.rows), 3)
- rset = self.sexecute('Any MAX(X)')
- self.assertEqual(len(rset.rows), 1)
- self.assertEqual(rset.rows[0][0], crset[0][0])
-
- def test_attr_unification_1(self):
- n1 = self.sexecute('INSERT Note X: X type "AFFREF"')[0][0]
- n2 = self.sexecute('INSERT Note X: X type "AFFREU"')[0][0]
- rset = self.sexecute('Any X,Y WHERE X is Note, Y is Affaire, X type T, Y ref T')
- self.assertEqual(len(rset), 1, rset.rows)
-
- def test_attr_unification_2(self):
- cu = self.cnx2.cursor()
- ec2 = cu.execute('INSERT Card X: X title "AFFREF"')[0][0]
- self.cnx2.commit()
- try:
- c1 = self.sexecute('INSERT Card C: C title "AFFREF"')[0][0]
- rset = self.sexecute('Any X,Y WHERE X is Card, Y is Affaire, X title T, Y ref T')
- self.assertEqual(len(rset), 2, rset.rows)
- finally:
- cu.execute('DELETE Card X WHERE X eid %(x)s', {'x': ec2})
- self.cnx2.commit()
-
- def test_attr_unification_neq_1(self):
- # XXX complete
- self.sexecute('Any X,Y WHERE X is Note, Y is Affaire, X creation_date D, Y creation_date > D')
-
- def test_attr_unification_neq_2(self):
- # XXX complete
- self.sexecute('Any X,Y WHERE X is Card, Y is Affaire, X creation_date D, Y creation_date > D')
-
- def test_union(self):
- afeids = self.sexecute('Affaire X')
- ueids = self.sexecute('CWUser X')
- rset = self.sexecute('(Any X WHERE X is Affaire) UNION (Any X WHERE X is CWUser)')
- self.assertEqual(sorted(r[0] for r in rset.rows),
- sorted(r[0] for r in afeids + ueids))
-
- def test_subquery1(self):
- rsetbase = self.sexecute('Any W,X WITH W,X BEING (Any W,X ORDERBY W,X WHERE X wikiid W)')
- self.assertEqual(len(rsetbase), 4)
- self.assertEqual(sorted(rsetbase.rows), rsetbase.rows)
- rset = self.sexecute('Any W,X LIMIT 2 OFFSET 2 WITH W,X BEING (Any W,X ORDERBY W,X WHERE X wikiid W)')
- self.assertEqual(rset.rows, rsetbase.rows[2:4])
- rset = self.sexecute('Any W,X ORDERBY W,X LIMIT 2 OFFSET 2 WITH W,X BEING (Any W,X WHERE X wikiid W)')
- self.assertEqual(rset.rows, rsetbase.rows[2:4])
- rset = self.sexecute('Any W,X WITH W,X BEING (Any W,X ORDERBY W,X LIMIT 2 OFFSET 2 WHERE X wikiid W)')
- self.assertEqual(rset.rows, rsetbase.rows[2:4])
-
- def test_subquery2(self):
- affeid = self.sexecute('Affaire X WHERE X ref "AFFREF"')[0][0]
- rset = self.sexecute('Any X,AA,AB WITH X,AA,AB BEING (Any X,AA,AB WHERE E eid %(x)s, E in_state X, X name AA, X modification_date AB)',
- {'x': affeid})
- self.assertEqual(len(rset), 1)
- self.assertEqual(rset[0][1], "pitetre")
-
- def test_not_relation(self):
- states = set(tuple(x) for x in self.sexecute('Any S,SN WHERE S is State, S name SN'))
- userstate = self.session.user.in_state[0]
- states.remove((userstate.eid, userstate.name))
- notstates = set(tuple(x) for x in self.sexecute('Any S,SN WHERE S is State, S name SN, NOT X in_state S, X eid %(x)s',
- {'x': self.session.user.eid}))
- self.assertSetEqual(notstates, states)
- aff1 = self.sexecute('Any X WHERE X is Affaire, X ref "AFFREF"')[0][0]
- aff1stateeid, aff1statename = self.sexecute('Any S,SN WHERE X eid %(x)s, X in_state S, S name SN', {'x': aff1})[0]
- self.assertEqual(aff1statename, 'pitetre')
- states.add((userstate.eid, userstate.name))
- states.remove((aff1stateeid, aff1statename))
- notstates = set(tuple(x) for x in self.sexecute('Any S,SN WHERE S is State, S name SN, NOT X in_state S, X eid %(x)s',
- {'x': aff1}))
- self.assertSetEqual(notstates, states)
-
- def test_absolute_url_base_url(self):
- cu = self.cnx2.cursor()
- ceid = cu.execute('INSERT Card X: X title "without wikiid to get eid based url"')[0][0]
- self.cnx2.commit()
- lc = self.sexecute('Card X WHERE X title "without wikiid to get eid based url"').get_entity(0, 0)
- self.assertEqual(lc.absolute_url(), 'http://extern.org/%s' % ceid)
- cu.execute('DELETE Card X WHERE X eid %(x)s', {'x':ceid})
- self.cnx2.commit()
-
- def test_absolute_url_no_base_url(self):
- cu = self.cnx3.cursor()
- ceid = cu.execute('INSERT Card X: X title "without wikiid to get eid based url"')[0][0]
- self.cnx3.commit()
- lc = self.sexecute('Card X WHERE X title "without wikiid to get eid based url"').get_entity(0, 0)
- self.assertEqual(lc.absolute_url(), 'http://testing.fr/cubicweb/%s' % lc.eid)
- cu.execute('DELETE Card X WHERE X eid %(x)s', {'x':ceid})
- self.cnx3.commit()
-
- def test_crossed_relation_noeid_needattr(self):
- """http://www.cubicweb.org/ticket/1382452"""
- aff1 = self.sexecute('INSERT Affaire X: X ref "AFFREF"')[0][0]
- # link within extern source
- ec1 = self.sexecute('Card X WHERE X wikiid "zzz"')[0][0]
- self.sexecute('SET A documented_by C WHERE E eid %(a)s, C eid %(c)s',
- {'a': aff1, 'c': ec1})
- # link from system to extern source
- self.sexecute('SET A documented_by C WHERE E eid %(a)s, C eid %(c)s',
- {'a': aff1, 'c': self.ic2})
- rset = self.sexecute('DISTINCT Any DEP WHERE P ref "AFFREF", P documented_by DEP, DEP wikiid LIKE "z%"')
- self.assertEqual(sorted(rset.rows), [[ec1], [self.ic2]])
-
- def test_nonregr1(self):
- ueid = self.session.user.eid
- affaire = self.sexecute('Affaire X WHERE X ref "AFFREF"').get_entity(0, 0)
- self.sexecute('Any U WHERE U in_group G, (G name IN ("managers", "logilab") OR (X require_permission P?, P name "bla", P require_group G)), X eid %(x)s, U eid %(u)s',
- {'x': affaire.eid, 'u': ueid})
-
- def test_nonregr2(self):
- iworkflowable = self.session.user.cw_adapt_to('IWorkflowable')
- iworkflowable.fire_transition('deactivate')
- treid = iworkflowable.latest_trinfo().eid
- rset = self.sexecute('Any X ORDERBY D DESC WHERE E eid %(x)s, E wf_info_for X, X modification_date D',
- {'x': treid})
- self.assertEqual(len(rset), 1)
- self.assertEqual(rset.rows[0], [self.session.user.eid])
-
- def test_nonregr3(self):
- self.sexecute('DELETE Card X WHERE X eid %(x)s, NOT X multisource_inlined_rel Y', {'x': self.ic1})
-
- def test_nonregr4(self):
- self.sexecute('Any X,S,U WHERE X in_state S, X todo_by U')
-
- def test_delete_source(self):
- req = self.request()
- req.execute('DELETE CWSource S WHERE S name "extern"')
- self.commit()
- cu = self.session.system_sql("SELECT * FROM entities WHERE source='extern'")
- self.assertFalse(cu.fetchall())
-
-if __name__ == '__main__':
- from logilab.common.testlib import unittest_main
- unittest_main()
--- a/server/test/unittest_postgres.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/test/unittest_postgres.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -16,27 +16,40 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-import socket
from datetime import datetime
+from threading import Thread
from logilab.common.testlib import SkipTest
-from cubicweb.devtools import ApptestConfiguration
+from cubicweb.devtools import PostgresApptestConfiguration
from cubicweb.devtools.testlib import CubicWebTC
from cubicweb.predicates import is_instance
from cubicweb.entities.adapters import IFTIndexableAdapter
-AT_LOGILAB = socket.gethostname().endswith('.logilab.fr') # XXX
-
from unittest_querier import FixedOffset
class PostgresFTITC(CubicWebTC):
- @classmethod
- def setUpClass(cls):
- if not AT_LOGILAB: # XXX here until we can raise SkipTest in setUp to detect we can't connect to the db
- raise SkipTest('XXX %s: require logilab configuration' % cls.__name__)
- cls.config = ApptestConfiguration('data', sourcefile='sources_postgres',
- apphome=cls.datadir)
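+ # run against the PostgreSQL test configuration instead of the default
+ # sqlite-based one (assumes a reachable test PostgreSQL instance)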
+ configcls = PostgresApptestConfiguration
+
+ def test_eid_range(self):
+ # concurrent allocation of eid ranges
+ source = self.session.repo.sources_by_uri['system']
+ range1 = []
+ range2 = []
+ def allocate_eid_ranges(session, target):
+ for x in xrange(1, 10):
+ eid = source.create_eid(session, count=x)
+ target.extend(range(eid-x, eid))
+
+ t1 = Thread(target=lambda: allocate_eid_ranges(self.session, range1))
+ t2 = Thread(target=lambda: allocate_eid_ranges(self.session, range2))
+ t1.start()
+ t2.start()
+ t1.join()
+ t2.join()
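+ # each thread must see monotonically increasing eids, and the two
+ # allocated ranges must be disjoint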
+ self.assertEqual(range1, sorted(range1))
+ self.assertEqual(range2, sorted(range2))
+ self.assertEqual(set(), set(range1) & set(range2))
def test_occurence_count(self):
req = self.request()
@@ -48,7 +61,7 @@
content=u'cubicweb cubicweb')
self.commit()
self.assertEqual(req.execute('Card X ORDERBY FTIRANK(X) DESC WHERE X has_text "cubicweb"').rows,
- [(c1.eid,), (c3.eid,), (c2.eid,)])
+ [[c1.eid], [c3.eid], [c2.eid]])
def test_attr_weight(self):
@@ -65,7 +78,7 @@
content=u'autre chose')
self.commit()
self.assertEqual(req.execute('Card X ORDERBY FTIRANK(X) DESC WHERE X has_text "cubicweb"').rows,
- [(c3.eid,), (c1.eid,), (c2.eid,)])
+ [[c3.eid], [c1.eid], [c2.eid]])
def test_entity_weight(self):
class PersonneIFTIndexableAdapter(IFTIndexableAdapter):
@@ -78,7 +91,7 @@
c3 = req.create_entity('Comment', content=u'cubicweb cubicweb cubicweb', comments=c1)
self.commit()
self.assertEqual(req.execute('Any X ORDERBY FTIRANK(X) DESC WHERE X has_text "cubicweb"').rows,
- [(c1.eid,), (c3.eid,), (c2.eid,)])
+ [[c1.eid], [c3.eid], [c2.eid]])
def test_tz_datetime(self):
--- a/server/test/unittest_querier.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/test/unittest_querier.py Tue Jun 10 09:49:45 2014 +0200
@@ -27,7 +27,6 @@
from cubicweb import QueryError, Unauthorized, Binary
from cubicweb.server.sqlutils import SQL_PREFIX
from cubicweb.server.utils import crypt_password
-from cubicweb.server.sources.native import make_schema
from cubicweb.server.querier import manual_build_descr, _make_description
from cubicweb.devtools import get_test_db_handler, TestServerConfiguration
from cubicweb.devtools.testlib import CubicWebTC
@@ -60,17 +59,6 @@
SQL_CONNECT_HOOKS['sqlite'].append(init_sqlite_connexion)
-from logilab.database import _GenericAdvFuncHelper
-TYPEMAP = _GenericAdvFuncHelper.TYPE_MAPPING
-
-class MakeSchemaTC(TestCase):
- def test_known_values(self):
- solution = {'A': 'String', 'B': 'CWUser'}
- self.assertEqual(make_schema((Variable('A'), Variable('B')), solution,
- 'table0', TYPEMAP),
- ('C0 text,C1 integer', {'A': 'table0.C0', 'B': 'table0.C1'}))
-
-
def setUpClass(cls, *args):
global repo, cnx
config = TestServerConfiguration(apphome=UtilsTC.datadir)
@@ -139,7 +127,7 @@
def test_preprocess_security(self):
plan = self._prepare_plan('Any ETN,COUNT(X) GROUPBY ETN '
'WHERE X is ET, ET name ETN')
- plan.session = self.user_groups_session('users')
+ plan.cnx = self.user_groups_session('users')
union = plan.rqlst
plan.preprocess(union)
self.assertEqual(len(union.children), 1)
@@ -222,7 +210,7 @@
def test_preprocess_security_aggregat(self):
plan = self._prepare_plan('Any MAX(X)')
- plan.session = self.user_groups_session('users')
+ plan.cnx = self.user_groups_session('users')
union = plan.rqlst
plan.preprocess(union)
self.assertEqual(len(union.children), 1)
@@ -1169,7 +1157,7 @@
#'INSERT Email X: X messageid "<1234>", X subject "test", X sender Y, X recipients Y'
eeid, = self.execute('INSERT Email X: X messageid "<1234>", X subject "test", X sender Y, X recipients Y WHERE Y is EmailAddress')[0]
self.execute("DELETE Email X")
- sqlc = self.session.cnxset['system']
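+ # cnxset now exposes a cursor directly through its 'cu' attribute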
+ sqlc = self.session.cnxset.cu
sqlc.execute('SELECT * FROM recipients_relation')
self.assertEqual(len(sqlc.fetchall()), 0)
sqlc.execute('SELECT * FROM owned_by_relation WHERE eid_from=%s'%eeid)
@@ -1310,7 +1298,7 @@
self.assertEqual(rset.description, [('CWUser',)])
self.assertRaises(Unauthorized,
self.execute, "Any P WHERE X is CWUser, X login 'bob', X upassword P")
- cursor = self.cnxset['system']
+ cursor = self.cnxset.cu
cursor.execute("SELECT %supassword from %sCWUser WHERE %slogin='bob'"
% (SQL_PREFIX, SQL_PREFIX, SQL_PREFIX))
passwd = str(cursor.fetchone()[0])
@@ -1325,7 +1313,7 @@
self.assertEqual(rset.description[0][0], 'CWUser')
rset = self.execute("SET X upassword %(pwd)s WHERE X is CWUser, X login 'bob'",
{'pwd': 'tutu'})
- cursor = self.cnxset['system']
+ cursor = self.cnxset.cu
cursor.execute("SELECT %supassword from %sCWUser WHERE %slogin='bob'"
% (SQL_PREFIX, SQL_PREFIX, SQL_PREFIX))
passwd = str(cursor.fetchone()[0])
--- a/server/test/unittest_repository.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/test/unittest_repository.py Tue Jun 10 09:49:45 2014 +0200
@@ -424,12 +424,8 @@
cnxid = repo.connect(self.admlogin, password=self.admpassword)
session = repo._get_session(cnxid, setcnxset=True)
self.assertEqual(repo.type_and_source_from_eid(2, session),
- ('CWGroup', 'system', None, 'system'))
+ ('CWGroup', None, 'system'))
self.assertEqual(repo.type_from_eid(2, session), 'CWGroup')
- self.assertEqual(repo.source_from_eid(2, session).uri, 'system')
- self.assertEqual(repo.eid2extid(repo.system_source, 2, session), None)
- class dummysource: uri = 'toto'
- self.assertRaises(UnknownEid, repo.eid2extid, dummysource, 2, session)
repo.close(cnxid)
def test_public_api(self):
@@ -445,7 +441,9 @@
repo = self.repo
cnxid = repo.connect(self.admlogin, password=self.admpassword)
self.assertEqual(repo.user_info(cnxid), (6, 'admin', set([u'managers']), {}))
- self.assertEqual(repo.describe(cnxid, 2), (u'CWGroup', u'system', None, 'system'))
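+ # entity_metas is the new dict-returning API; describe is kept for
+ # backward compatibility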
+ self.assertEqual({'type': u'CWGroup', 'extid': None, 'source': 'system'},
+ repo.entity_metas(cnxid, 2))
+ self.assertEqual(repo.describe(cnxid, 2), (u'CWGroup', 'system', None, 'system'))
repo.close(cnxid)
self.assertRaises(BadConnectionId, repo.user_info, cnxid)
self.assertRaises(BadConnectionId, repo.describe, cnxid, 1)
@@ -670,15 +668,6 @@
self.session.set_cnxset()
self.assert_(self.repo.system_source.create_eid(self.session))
- def test_source_from_eid(self):
- self.session.set_cnxset()
- self.assertEqual(self.repo.source_from_eid(1, self.session),
- self.repo.sources_by_uri['system'])
-
- def test_source_from_eid_raise(self):
- self.session.set_cnxset()
- self.assertRaises(UnknownEid, self.repo.source_from_eid, -2, self.session)
-
def test_type_from_eid(self):
self.session.set_cnxset()
self.assertEqual(self.repo.type_from_eid(2, self.session), 'CWGroup')
@@ -695,12 +684,8 @@
self.repo.add_info(self.session, entity, self.repo.system_source)
cu = self.session.system_sql('SELECT * FROM entities WHERE eid = -1')
data = cu.fetchall()
- self.assertIsInstance(data[0][4], datetime)
- data[0] = list(data[0])
- data[0][4] = None
- self.assertEqual(tuplify(data), [(-1, 'Personne', 'system', 'system',
- None, None)])
- self.repo.delete_info(self.session, entity, 'system', None)
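+ # the entities table no longer stores mtime nor the duplicated source
+ # column, so only eid, type, asource and extid remain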
+ self.assertEqual(tuplify(data), [(-1, 'Personne', 'system', None)])
+ self.repo.delete_info(self.session, entity, 'system')
#self.repo.commit()
cu = self.session.system_sql('SELECT * FROM entities WHERE eid = -1')
data = cu.fetchall()
@@ -709,38 +694,6 @@
class FTITC(CubicWebTC):
- def test_reindex_and_modified_since(self):
- self.repo.system_source.multisources_etypes.add('Personne')
- eidp = self.execute('INSERT Personne X: X nom "toto", X prenom "tutu"')[0][0]
- self.commit()
- ts = datetime.now()
- self.assertEqual(len(self.execute('Personne X WHERE X has_text "tutu"')), 1)
- self.session.set_cnxset()
- cu = self.session.system_sql('SELECT mtime, eid FROM entities WHERE eid = %s' % eidp)
- omtime = cu.fetchone()[0]
- # our sqlite datetime adapter ignores the seconds fraction, so we have to
- # ensure the update is done within the next second
- time.sleep(1 - (ts.second - int(ts.second)))
- self.execute('SET X nom "tata" WHERE X eid %(x)s', {'x': eidp})
- self.commit()
- self.assertEqual(len(self.execute('Personne X WHERE X has_text "tutu"')), 1)
- self.session.set_cnxset()
- cu = self.session.system_sql('SELECT mtime FROM entities WHERE eid = %s' % eidp)
- mtime = cu.fetchone()[0]
- self.assertTrue(omtime < mtime)
- self.commit()
- date, modified, deleted = self.repo.entities_modified_since(('Personne',), omtime)
- self.assertEqual(modified, [('Personne', eidp)])
- self.assertEqual(deleted, [])
- date, modified, deleted = self.repo.entities_modified_since(('Personne',), mtime)
- self.assertEqual(modified, [])
- self.assertEqual(deleted, [])
- self.execute('DELETE Personne X WHERE X eid %(x)s', {'x': eidp})
- self.commit()
- date, modified, deleted = self.repo.entities_modified_since(('Personne',), omtime)
- self.assertEqual(modified, [])
- self.assertEqual(deleted, [('Personne', eidp)])
-
def test_fulltext_container_entity(self):
assert self.schema.rschema('use_email').fulltext_container == 'subject'
req = self.request()
--- a/server/test/unittest_rqlannotation.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/test/unittest_rqlannotation.py Tue Jun 10 09:49:45 2014 +0200
@@ -342,7 +342,7 @@
def test_remove_from_deleted_source_1(self):
rqlst = self._prepare('Note X WHERE X eid 999998, NOT X cw_source Y')
- self.assertFalse('X' in rqlst.defined_vars) # simplified
+ self.assertNotIn('X', rqlst.defined_vars) # simplified
self.assertEqual(rqlst.defined_vars['Y']._q_invariant, True)
def test_remove_from_deleted_source_2(self):
--- a/server/test/unittest_schemaserial.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/test/unittest_schemaserial.py Tue Jun 10 09:49:45 2014 +0200
@@ -82,10 +82,6 @@
self.assertListEqual([('SET X specializes ET WHERE X eid %(x)s, ET eid %(et)s',
{'et': None, 'x': None}),
('SET X specializes ET WHERE X eid %(x)s, ET eid %(et)s',
- {'et': None, 'x': None}),
- ('SET X specializes ET WHERE X eid %(x)s, ET eid %(et)s',
- {'et': None, 'x': None}),
- ('SET X specializes ET WHERE X eid %(x)s, ET eid %(et)s',
{'et': None, 'x': None})],
sorted(specialize2rql(schema)))
@@ -184,7 +180,7 @@
'extra_props': '{"jungle_speed": 42}',
'indexed': False,
'oe': None,
- 'ordernum': 19,
+ 'ordernum': 4,
'rt': None,
'se': None})]
--- a/server/test/unittest_session.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/test/unittest_session.py Tue Jun 10 09:49:45 2014 +0200
@@ -17,7 +17,7 @@
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
from cubicweb.devtools.testlib import CubicWebTC
-from cubicweb.server.session import HOOKS_ALLOW_ALL, HOOKS_DENY_ALL
+from cubicweb.server.session import HOOKS_ALLOW_ALL, HOOKS_DENY_ALL, Connection
class InternalSessionTC(CubicWebTC):
def test_dbapi_query(self):
@@ -39,10 +39,16 @@
def test_hooks_control(self):
session = self.session
+ # this test checks the "old" behavior of a session with automatic
+ # connection management
+ # close the default cnx, we do not want it to interfere with the test
+ self.cnx.close()
+ # open a dedicated one
+ session.set_cnx('Some-random-cnx-unrelated-to-the-default-one')
+ # go test go
self.assertEqual(HOOKS_ALLOW_ALL, session.hooks_mode)
self.assertEqual(set(), session.disabled_hook_categories)
self.assertEqual(set(), session.enabled_hook_categories)
- self.assertEqual(1, len(session._txs))
+ self.assertEqual(1, len(session._cnxs))
with session.deny_all_hooks_but('metadata'):
self.assertEqual(HOOKS_DENY_ALL, session.hooks_mode)
self.assertEqual(set(), session.disabled_hook_categories)
@@ -64,12 +70,35 @@
self.assertEqual(set(('metadata',)), session.enabled_hook_categories)
# leaving context manager with no transaction running should reset the
# transaction local storage (and associated cnxset)
- self.assertEqual({}, session._txs)
+ self.assertEqual({}, session._cnxs)
self.assertEqual(None, session.cnxset)
self.assertEqual(HOOKS_ALLOW_ALL, session.hooks_mode, session.HOOKS_ALLOW_ALL)
self.assertEqual(set(), session.disabled_hook_categories)
self.assertEqual(set(), session.enabled_hook_categories)
+ def test_explicite_connection(self):
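+ # new_cnx() hands out an explicitly managed connection: rollback()
+ # cancels the deletion, and leaving the context manager closes the
+ # connection (hence the _open check below)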
+ with self.session.new_cnx() as cnx:
+ rset = cnx.execute('Any X LIMIT 1 WHERE X is CWUser')
+ self.assertEqual(1, len(rset))
+ user = rset.get_entity(0, 0)
+ user.cw_delete()
+ cnx.rollback()
+ new_user = cnx.entity_from_eid(user.eid)
+ self.assertIsNotNone(new_user.login)
+ self.assertFalse(cnx._open)
+
+ def test_internal_cnx(self):
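+ # same scenario, but through a repository-side internal connection
+ # rather than one obtained from a user session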
+ with self.repo.internal_cnx() as cnx:
+ rset = cnx.execute('Any X LIMIT 1 WHERE X is CWUser')
+ self.assertEqual(1, len(rset))
+ user = rset.get_entity(0, 0)
+ user.cw_delete()
+ cnx.rollback()
+ new_user = cnx.entity_from_eid(user.eid)
+ self.assertIsNotNone(new_user.login)
+ self.assertFalse(cnx._open)
+
+
if __name__ == '__main__':
from logilab.common.testlib import unittest_main
--- a/server/test/unittest_ssplanner.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/test/unittest_ssplanner.py Tue Jun 10 09:49:45 2014 +0200
@@ -51,8 +51,7 @@
[{'X': 'Basket', 'XN': 'String'},
{'X': 'State', 'XN': 'String'},
{'X': 'Folder', 'XN': 'String'}])],
- None, None,
- [self.system], None, [])])
+ None, [])])
def test_groupeded_ambigous_sol(self):
self._test('Any XN,COUNT(X) GROUPBY XN WHERE X name XN, X is IN (Basket, State, Folder)',
@@ -60,8 +59,7 @@
[{'X': 'Basket', 'XN': 'String'},
{'X': 'State', 'XN': 'String'},
{'X': 'Folder', 'XN': 'String'}])],
- None, None,
- [self.system], None, [])])
+ None, [])])
if __name__ == '__main__':
from logilab.common.testlib import unittest_main
--- a/server/test/unittest_undo.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/test/unittest_undo.py Tue Jun 10 09:49:45 2014 +0200
@@ -20,7 +20,7 @@
from cubicweb import ValidationError
from cubicweb.devtools.testlib import CubicWebTC
import cubicweb.server.session
-from cubicweb.server.session import Transaction as OldTransaction
+from cubicweb.server.session import Connection as OldConnection
from cubicweb.transaction import *
from cubicweb.server.sources.native import UndoTransactionException, _UndoException
@@ -35,14 +35,14 @@
self.txuuid = self.commit()
def setUp(self):
- class Transaction(OldTransaction):
+ class Connection(OldConnection):
"""Force undo feature to be turned on in all case"""
undo_actions = property(lambda tx: True, lambda x, y:None)
- cubicweb.server.session.Transaction = Transaction
+ cubicweb.server.session.Connection = Connection
super(UndoableTransactionTC, self).setUp()
def tearDown(self):
- cubicweb.server.session.Transaction = OldTransaction
+ cubicweb.server.session.Connection = OldConnection
self.restore_connection()
self.session.undo_support = set()
super(UndoableTransactionTC, self).tearDown()
--- a/server/utils.py Tue Jun 10 09:35:26 2014 +0200
+++ b/server/utils.py Tue Jun 10 09:49:45 2014 +0200
@@ -72,32 +72,14 @@
# wrong password
return ''
-def cartesian_product(seqin):
- """returns a generator which returns the cartesian product of `seqin`
- for more details, see :
- http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/302478
- """
- def rloop(seqin, comb):
- """recursive looping function"""
- if seqin: # any more sequences to process?
- for item in seqin[0]:
- newcomb = comb + [item] # add next item to current combination
- # call rloop w/ remaining seqs, newcomb
- for item in rloop(seqin[1:], newcomb):
- yield item # seqs and newcomb
- else: # processing last sequence
- yield comb # comb finished, add to list
- return rloop(seqin, [])
-
-
-def eschema_eid(session, eschema):
+def eschema_eid(cnx, eschema):
"""get eid of the CWEType entity for the given yams type. You should use
this because when schema has been loaded from the file-system, not from the
database, (e.g. during tests), eschema.eid is not set.
"""
if eschema.eid is None:
- eschema.eid = session.execute(
+ eschema.eid = cnx.execute(
'Any X WHERE X is CWEType, X name %(name)s',
{'name': str(eschema)})[0][0]
return eschema.eid
@@ -126,7 +108,7 @@
return user, passwd
-_MARKER=object()
+_MARKER = object()
def func_name(func):
name = getattr(func, '__name__', _MARKER)
if name is _MARKER:
--- a/sobjects/cwxmlparser.py Tue Jun 10 09:35:26 2014 +0200
+++ b/sobjects/cwxmlparser.py Tue Jun 10 09:49:45 2014 +0200
@@ -31,8 +31,7 @@
"""
-from datetime import datetime, timedelta, time
-from urllib import urlencode
+from datetime import datetime, time
from cgi import parse_qs # in urlparse with python >= 2.6
from logilab.common.date import todate, totime
@@ -57,7 +56,7 @@
DEFAULT_CONVERTERS['Date'] = convert_date
def convert_datetime(ustr):
if '.' in ustr: # assume %Y-%m-%d %H:%M:%S.mmmmmm
- ustr = ustr.split('.',1)[0]
+ ustr = ustr.split('.', 1)[0]
return datetime.strptime(ustr, '%Y-%m-%d %H:%M:%S')
DEFAULT_CONVERTERS['Datetime'] = convert_datetime
# XXX handle timezone, though this will be enough as TZDatetime are
@@ -169,7 +168,7 @@
ttype = schemacfg.schema.stype.name
etyperules = self.source.mapping.setdefault(etype, {})
etyperules.setdefault((rtype, role, action), []).append(
- (ttype, options) )
+ (ttype, options))
self.source.mapping_idx[schemacfg.eid] = (
etype, rtype, role, action, ttype)
@@ -204,7 +203,7 @@
* `rels` is for relations and structured as
{role: {relation: [(related item, related rels)...]}
"""
- entity = self.extid2entity(str(item['cwuri']), item['cwtype'],
+ entity = self.extid2entity(str(item['cwuri']), item['cwtype'],
cwsource=item['cwsource'], item=item)
if entity is None:
return None
@@ -432,7 +431,7 @@
self._related_link(ttype, others, searchattrs)
def _related_link(self, ttype, others, searchattrs):
- def issubset(x,y):
+ def issubset(x, y):
return all(z in y for z in x)
eids = [] # local eids
log = self.parser.import_log
@@ -468,7 +467,7 @@
self._clear_relation((ttype,))
def _find_entities(self, item, kwargs):
- return tuple(self._cw.find_entities(item['cwtype'], **kwargs))
+ return tuple(self._cw.find(item['cwtype'], **kwargs).entities())
class CWEntityXMLActionLinkInState(CWEntityXMLActionLink):
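
Note: the `_find_entities` hunk above follows the 3.19 move from
`find_entities()` to `find()`, which returns a result set. A sketch of the
equivalence, where `req` stands for any request or connection object:

    # deprecated: entities = req.find_entities('CWUser', login=u'admin')
    # 3.19: find() returns a result set; iterate it with .entities(),
    # or call .one() when exactly one match is expected
    entities = tuple(req.find('CWUser', login=u'admin').entities())
    admin = req.find('CWUser', login=u'admin').one()
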
--- a/sobjects/ldapparser.py Tue Jun 10 09:35:26 2014 +0200
+++ b/sobjects/ldapparser.py Tue Jun 10 09:49:45 2014 +0200
@@ -142,9 +142,11 @@
try:
tdict[tattr] = sdict[sattr]
except KeyError:
- raise ConfigurationError('source attribute %s is not present '
- 'in the source, please check the '
- '%s-attrs-map field' %
+ raise ConfigurationError('source attribute %s has not '
+ 'been found in the source, '
+ 'please check the %s-attrs-map '
+ 'field and the permissions of '
+ 'the LDAP binding user' %
(sattr, etype[2:].lower()))
return tdict
@@ -168,7 +170,7 @@
etype = entity.cw_etype
if etype == 'EmailAddress':
return
- # all CWUsers must be treated before CWGroups to have to in_group relation
+ # all CWUsers must be treated before CWGroups to have the in_group relation
# set correctly in _associate_ldapusers
elif etype == 'CWUser':
groups = filter(None, [self._get_group(name)
@@ -196,7 +198,7 @@
if not isinstance(emailaddrs, list):
emailaddrs = [emailaddrs]
for emailaddr in emailaddrs:
- # search for existant email first, may be coming from another source
+ # search for existing email first, may be coming from another source
rset = self._cw.execute('EmailAddress X WHERE X address %(addr)s',
{'addr': emailaddr})
if not rset:
--- a/sobjects/notification.py Tue Jun 10 09:35:26 2014 +0200
+++ b/sobjects/notification.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -30,7 +30,7 @@
from cubicweb.view import Component, EntityView
from cubicweb.server.hook import SendMailOp
from cubicweb.mail import construct_message_id, format_mail
-from cubicweb.server.session import Session
+from cubicweb.server.session import Session, InternalManager
class RecipientsFinder(Component):
@@ -115,56 +115,51 @@
msgid = None
req = self._cw
self.user_data = req.user_data()
- origlang = req.lang
for something in recipients:
- if isinstance(something, Entity):
- # hi-jack self._cw to get a session for the returned user
- self._cw = Session(something, self._cw.repo)
- self._cw.set_cnxset()
+ if isinstance(something, tuple):
+ emailaddr, lang = something
+ user = InternalManager(lang=lang)
+ else:
emailaddr = something.cw_adapt_to('IEmailable').get_email()
- else:
- emailaddr, lang = something
- self._cw.set_language(lang)
- # since the same view (eg self) may be called multiple time and we
- # need a fresh stream at each iteration, reset it explicitly
- self.w = None
- try:
+ user = something
+ # hi-jack self._cw to get a session for the returned user
+ session = Session(user, self._cw.repo)
+ with session.new_cnx() as cnx:
+ self._cw = cnx
try:
- # XXX forcing the row & col here may make the content and
- # subject inconsistent because subject will depend on
- # self.cw_row & self.cw_col if they are set.
- content = self.render(row=0, col=0, **kwargs)
- subject = self.subject()
- except SkipEmail:
- continue
- except Exception as ex:
- # shouldn't make the whole transaction fail because of rendering
- # error (unauthorized or such) XXX check it doesn't actually
- # occurs due to rollback on such error
- self.exception(str(ex))
- continue
- msg = format_mail(self.user_data, [emailaddr], content, subject,
- config=self._cw.vreg.config, msgid=msgid, references=refs)
- yield [emailaddr], msg
- except:
- if isinstance(something, Entity):
- self._cw.rollback()
- raise
- else:
- if isinstance(something, Entity):
- self._cw.commit()
- finally:
- if isinstance(something, Entity):
- self._cw.close()
+                # since the same view (eg self) may be called multiple times and we
+ # need a fresh stream at each iteration, reset it explicitly
+ self.w = None
+ try:
+ # XXX forcing the row & col here may make the content and
+ # subject inconsistent because subject will depend on
+ # self.cw_row & self.cw_col if they are set.
+ content = self.render(row=0, col=0, **kwargs)
+ subject = self.subject()
+ except SkipEmail:
+ continue
+ except Exception as ex:
+ # shouldn't make the whole transaction fail because of rendering
+                    # error (unauthorized or such) XXX check it doesn't actually
+                    # occur due to rollback on such error
+ self.exception(str(ex))
+ continue
+ msg = format_mail(self.user_data, [emailaddr], content, subject,
+ config=self._cw.vreg.config, msgid=msgid, references=refs)
+ yield [emailaddr], msg
+ finally:
+ # ensure we have a cnxset since commit will fail if there is
+                # some operation but no cnxset. This may occur in this very
+ # specific case (eg SendMailOp)
+ with cnx.ensure_cnx_set:
+ cnx.commit()
self._cw = req
- # restore language
- req.set_language(origlang)
# recipients / email sending ###############################################
def recipients(self):
"""return a list of either 2-uple (email, language) or user entity to
- who this email should be sent
+ whom this email should be sent
"""
finder = self._cw.vreg['components'].select(
'recipients_finder', self._cw, rset=self.cw_rset,
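
Note: stripped of the view machinery, the rewritten loop above opens one
short-lived connection per recipient. A condensed sketch, where `user` is
either a CWUser entity or an `InternalManager` carrying only a language,
and `render_msg` is a hypothetical stand-in for the rendering code:

    from cubicweb.server.session import Session

    def notify(user, repo, render_msg):
        session = Session(user, repo)   # session for this recipient
        with session.new_cnx() as cnx:
            try:
                return render_msg(cnx)  # render with the recipient's cnx
            finally:
                # commit needs a cnxset when operations (eg SendMailOp)
                # are pending, hence ensure_cnx_set
                with cnx.ensure_cnx_set:
                    cnx.commit()
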
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/sobjects/services.py Tue Jun 10 09:49:45 2014 +0200
@@ -0,0 +1,158 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""Define server side service provided by cubicweb"""
+
+import threading
+
+from yams.schema import role_name
+from cubicweb import ValidationError
+from cubicweb.server import Service
+from cubicweb.predicates import match_user_groups, match_kwargs
+
+class StatsService(Service):
+ """Return a dictionary containing some statistics about the repository
+    resource usage.
+ """
+
+ __regid__ = 'repo_stats'
+ __select__ = match_user_groups('managers')
+
+ def call(self):
+        repo = self._cw.repo  # Services are repo-side only.
+ results = {}
+ querier = repo.querier
+ source = repo.system_source
+ for size, maxsize, hits, misses, title in (
+ (len(querier._rql_cache), repo.config['rql-cache-size'],
+ querier.cache_hit, querier.cache_miss, 'rqlt_st'),
+ (len(source._cache), repo.config['rql-cache-size'],
+ source.cache_hit, source.cache_miss, 'sql'),
+ ):
+ results['%s_cache_size' % title] = '%s / %s' % (size, maxsize)
+ results['%s_cache_hit' % title] = hits
+ results['%s_cache_miss' % title] = misses
+            # avoid a ZeroDivisionError when the cache has not been used yet
+            results['%s_cache_hit_percent' % title] = (hits * 100) / ((hits + misses) or 1)
+ results['type_source_cache_size'] = len(repo._type_source_cache)
+ results['extid_cache_size'] = len(repo._extid_cache)
+ results['sql_no_cache'] = repo.system_source.no_cache
+ results['nb_open_sessions'] = len(repo._sessions)
+ results['nb_active_threads'] = threading.activeCount()
+ looping_tasks = repo._tasks_manager._looping_tasks
+ results['looping_tasks'] = ', '.join(str(t) for t in looping_tasks)
+ results['available_cnxsets'] = repo._cnxsets_pool.qsize()
+ results['threads'] = ', '.join(sorted(str(t) for t in threading.enumerate()))
+ return results
+
+class GcStatsService(Service):
+ """Return a dictionary containing some statistics about the repository
+ resources usage.
+ """
+
+ __regid__ = 'repo_gc_stats'
+ __select__ = match_user_groups('managers')
+
+ def call(self, nmax=20):
+ """Return a dictionary containing some statistics about the repository
+ memory usage.
+
+ This is a public method, not requiring a session id.
+
+        nmax is the max number of (most) referenced objects returned as
+ the 'referenced' result
+ """
+
+ from cubicweb._gcdebug import gc_info
+ from cubicweb.appobject import AppObject
+ from cubicweb.rset import ResultSet
+ from cubicweb.dbapi import Connection, Cursor
+ from cubicweb.web.request import CubicWebRequestBase
+ from rql.stmts import Union
+
+ lookupclasses = (AppObject,
+ Union, ResultSet,
+ Connection, Cursor,
+ CubicWebRequestBase)
+ try:
+ from cubicweb.server.session import Session, InternalSession
+ lookupclasses += (InternalSession, Session)
+ except ImportError:
+ pass # no server part installed
+
+ results = {}
+ counters, ocounters, garbage = gc_info(lookupclasses,
+ viewreferrersclasses=())
+ values = sorted(counters.iteritems(), key=lambda x: x[1], reverse=True)
+ results['lookupclasses'] = values
+ values = sorted(ocounters.iteritems(), key=lambda x: x[1], reverse=True)[:nmax]
+ results['referenced'] = values
+ results['unreachable'] = len(garbage)
+ return results
+
+
+class RegisterUserService(Service):
+ """check if a user with the given login exists, if not create it with the
+ given password. This service is designed to be used for anonymous
+ registration on public web sites.
+
+ To use it, do:
+ with self.appli.repo.internal_cnx() as cnx:
+ cnx.call_service('register_user',
+ login=login,
+ password=password,
+ **cwuserkwargs)
+ """
+ __regid__ = 'register_user'
+ __select__ = Service.__select__ & match_kwargs('login', 'password')
+ default_groups = ('users',)
+
+ def call(self, login, password, email=None, groups=None, **cwuserkwargs):
+ cnx = self._cw
+ errmsg = cnx._('the value "%s" is already used, use another one')
+
+ if (cnx.execute('CWUser X WHERE X login %(login)s', {'login': login},
+ build_descr=False)
+ or cnx.execute('CWUser X WHERE X use_email C, C address %(login)s',
+ {'login': login}, build_descr=False)):
+ qname = role_name('login', 'subject')
+ raise ValidationError(None, {qname: errmsg % login})
+
+ if isinstance(password, unicode):
+ # password should *always* be utf8 encoded
+ password = password.encode('UTF8')
+ cwuserkwargs['login'] = login
+ cwuserkwargs['upassword'] = password
+ # we have to create the user
+ user = cnx.create_entity('CWUser', **cwuserkwargs)
+ if groups is None:
+ groups = self.default_groups
+ assert groups, "CWUsers must belong to at least one CWGroup"
+ group_names = ', '.join('%r' % group for group in groups)
+ cnx.execute('SET X in_group G WHERE X eid %%(x)s, G name IN (%s)' % group_names,
+ {'x': user.eid})
+
+ if email or '@' in login:
+ d = {'login': login, 'email': email or login}
+ if cnx.execute('EmailAddress X WHERE X address %(email)s', d,
+ build_descr=False):
+ qname = role_name('address', 'subject')
+ raise ValidationError(None, {qname: errmsg % d['email']})
+ cnx.execute('INSERT EmailAddress X: X address %(email)s, '
+ 'U primary_email X, U use_email X '
+ 'WHERE U login %(login)s', d, build_descr=False)
+
+ return user
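
Note: like the `register_user` example in the docstring above, the two
monitoring services are reached through `call_service`; a sketch, assuming
a connection whose user matches the `managers` selector:

    with self.appli.repo.internal_cnx() as cnx:
        stats = cnx.call_service('repo_stats')
        gc_stats = cnx.call_service('repo_gc_stats', nmax=10)
        print stats['nb_open_sessions'], gc_stats['unreachable']
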
--- a/sobjects/supervising.py Tue Jun 10 09:35:26 2014 +0200
+++ b/sobjects/supervising.py Tue Jun 10 09:49:45 2014 +0200
@@ -142,16 +142,16 @@
self.w(u' %s' % entity.absolute_url())
def _relation_context(self, changedescr):
- session = self._cw
+ cnx = self._cw
def describe(eid):
try:
- return session._(session.describe(eid)[0]).lower()
+ return cnx._(cnx.entity_metas(eid)['type']).lower()
except UnknownEid:
# may occurs when an entity has been deleted from an external
# source and we're cleaning its relation
- return session._('unknown external entity')
+ return cnx._('unknown external entity')
eidfrom, rtype, eidto = changedescr.eidfrom, changedescr.rtype, changedescr.eidto
- return {'rtype': session._(rtype),
+ return {'rtype': cnx._(rtype),
'eidfrom': eidfrom,
'frometype': describe(eidfrom),
'eidto': eidto,
@@ -171,16 +171,15 @@
of changes
"""
def _get_view(self):
- return self.session.vreg['components'].select('supervision_notif',
- self.session)
+ return self.cnx.vreg['components'].select('supervision_notif', self.cnx)
def _prepare_email(self):
- session = self.session
- config = session.vreg.config
+ cnx = self.cnx
+ config = cnx.vreg.config
uinfo = {'email': config['sender-addr'],
'name': config['sender-name']}
view = self._get_view()
- content = view.render(changes=session.transaction_data.get('pendingchanges'))
+ content = view.render(changes=cnx.transaction_data.get('pendingchanges'))
recipients = view.recipients()
msg = format_mail(uinfo, recipients, content, view.subject(), config=config)
self.to_send = [(msg, recipients)]
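
Note: `describe(eid)` returned a `(type, source, extid)` tuple; the
`entity_metas(eid)` call used above returns the same information as a
dictionary (compare the assertions in unittest_dbapi.py further down):

    # 3.18:  etype, source, extid = cnx.describe(eid)
    # 3.19:
    metas = cnx.entity_metas(1)
    etype = metas['type']     # eg u'CWSource'
    source = metas['source']  # eg u'system'
    extid = metas['extid']    # None for system-source entities
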
--- a/sobjects/test/unittest_cwxmlparser.py Tue Jun 10 09:35:26 2014 +0200
+++ b/sobjects/test/unittest_cwxmlparser.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2011-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2011-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -157,17 +157,18 @@
def test_complete_url(self):
dfsource = self.repo.sources_by_uri['myfeed']
- parser = dfsource._get_parser(self.session)
- self.assertEqual(parser.complete_url('http://www.cubicweb.org/CWUser'),
- 'http://www.cubicweb.org/CWUser?relation=tags-object&relation=in_group-subject&relation=in_state-subject&relation=use_email-subject')
- self.assertEqual(parser.complete_url('http://www.cubicweb.org/cwuser'),
- 'http://www.cubicweb.org/cwuser?relation=tags-object&relation=in_group-subject&relation=in_state-subject&relation=use_email-subject')
- self.assertEqual(parser.complete_url('http://www.cubicweb.org/cwuser?vid=rdf&relation=hop'),
- 'http://www.cubicweb.org/cwuser?relation=hop&relation=tags-object&relation=in_group-subject&relation=in_state-subject&relation=use_email-subject&vid=rdf')
- self.assertEqual(parser.complete_url('http://www.cubicweb.org/?rql=cwuser&vid=rdf&relation=hop'),
- 'http://www.cubicweb.org/?rql=cwuser&relation=hop&vid=rdf')
- self.assertEqual(parser.complete_url('http://www.cubicweb.org/?rql=cwuser&relation=hop'),
- 'http://www.cubicweb.org/?rql=cwuser&relation=hop')
+ with self.admin_access.repo_cnx() as cnx:
+ parser = dfsource._get_parser(cnx)
+ self.assertEqual(parser.complete_url('http://www.cubicweb.org/CWUser'),
+ 'http://www.cubicweb.org/CWUser?relation=tags-object&relation=in_group-subject&relation=in_state-subject&relation=use_email-subject')
+ self.assertEqual(parser.complete_url('http://www.cubicweb.org/cwuser'),
+ 'http://www.cubicweb.org/cwuser?relation=tags-object&relation=in_group-subject&relation=in_state-subject&relation=use_email-subject')
+ self.assertEqual(parser.complete_url('http://www.cubicweb.org/cwuser?vid=rdf&relation=hop'),
+ 'http://www.cubicweb.org/cwuser?relation=hop&relation=tags-object&relation=in_group-subject&relation=in_state-subject&relation=use_email-subject&vid=rdf')
+ self.assertEqual(parser.complete_url('http://www.cubicweb.org/?rql=cwuser&vid=rdf&relation=hop'),
+ 'http://www.cubicweb.org/?rql=cwuser&relation=hop&vid=rdf')
+ self.assertEqual(parser.complete_url('http://www.cubicweb.org/?rql=cwuser&relation=hop'),
+ 'http://www.cubicweb.org/?rql=cwuser&relation=hop')
def test_actions(self):
@@ -192,113 +193,105 @@
(u'Tag', {u'linkattr': u'name'})],
},
})
- session = self.repo.internal_session(safe=True)
- stats = dfsource.pull_data(session, force=True, raise_on_error=True)
- self.assertEqual(sorted(stats), ['checked', 'created', 'updated'])
- self.assertEqual(len(stats['created']), 2)
- self.assertEqual(stats['updated'], set())
+ with self.repo.internal_cnx() as cnx:
+ stats = dfsource.pull_data(cnx, force=True, raise_on_error=True)
+ self.assertEqual(sorted(stats), ['checked', 'created', 'updated'])
+ self.assertEqual(len(stats['created']), 2)
+ self.assertEqual(stats['updated'], set())
- user = self.execute('CWUser X WHERE X login "sthenault"').get_entity(0, 0)
- self.assertEqual(user.creation_date, datetime(2010, 01, 22, 10, 27, 59))
- self.assertEqual(user.modification_date, datetime(2011, 01, 25, 14, 14, 06))
- self.assertEqual(user.cwuri, 'http://pouet.org/5')
- self.assertEqual(user.cw_source[0].name, 'myfeed')
- self.assertEqual(user.absolute_url(), 'http://pouet.org/5')
- self.assertEqual(len(user.use_email), 1)
- # copy action
- email = user.use_email[0]
- self.assertEqual(email.address, 'syt@logilab.fr')
- self.assertEqual(email.cwuri, 'http://pouet.org/6')
- self.assertEqual(email.absolute_url(), 'http://pouet.org/6')
- self.assertEqual(email.cw_source[0].name, 'myfeed')
- self.assertEqual(len(email.reverse_tags), 1)
- self.assertEqual(email.reverse_tags[0].name, 'hop')
- # link action
- self.assertFalse(self.execute('CWGroup X WHERE X name "unknown"'))
- groups = sorted([g.name for g in user.in_group])
- self.assertEqual(groups, ['users'])
- group = user.in_group[0]
- self.assertEqual(len(group.reverse_tags), 1)
- self.assertEqual(group.reverse_tags[0].name, 'hop')
- # link or create action
- tags = set([(t.name, t.cwuri.replace(str(t.eid), ''), t.cw_source[0].name)
- for t in user.reverse_tags])
- self.assertEqual(tags, set((('hop', 'http://testing.fr/cubicweb/', 'system'),
- ('unknown', 'http://testing.fr/cubicweb/', 'system')))
- )
- session.set_cnxset()
- with session.security_enabled(read=False): # avoid Unauthorized due to password selection
- stats = dfsource.pull_data(session, force=True, raise_on_error=True)
- self.assertEqual(stats['created'], set())
- self.assertEqual(len(stats['updated']), 0)
- self.assertEqual(len(stats['checked']), 2)
- self.repo._type_source_cache.clear()
- self.repo._extid_cache.clear()
- session.set_cnxset()
- with session.security_enabled(read=False): # avoid Unauthorized due to password selection
- stats = dfsource.pull_data(session, force=True, raise_on_error=True)
- self.assertEqual(stats['created'], set())
- self.assertEqual(len(stats['updated']), 0)
- self.assertEqual(len(stats['checked']), 2)
- session.commit()
+ with self.admin_access.web_request() as req:
+ user = req.execute('CWUser X WHERE X login "sthenault"').get_entity(0, 0)
+ self.assertEqual(user.creation_date, datetime(2010, 01, 22, 10, 27, 59))
+ self.assertEqual(user.modification_date, datetime(2011, 01, 25, 14, 14, 06))
+ self.assertEqual(user.cwuri, 'http://pouet.org/5')
+ self.assertEqual(user.cw_source[0].name, 'myfeed')
+ self.assertEqual(user.absolute_url(), 'http://pouet.org/5')
+ self.assertEqual(len(user.use_email), 1)
+ # copy action
+ email = user.use_email[0]
+ self.assertEqual(email.address, 'syt@logilab.fr')
+ self.assertEqual(email.cwuri, 'http://pouet.org/6')
+ self.assertEqual(email.absolute_url(), 'http://pouet.org/6')
+ self.assertEqual(email.cw_source[0].name, 'myfeed')
+ self.assertEqual(len(email.reverse_tags), 1)
+ self.assertEqual(email.reverse_tags[0].name, 'hop')
+ # link action
+ self.assertFalse(req.execute('CWGroup X WHERE X name "unknown"'))
+ groups = sorted([g.name for g in user.in_group])
+ self.assertEqual(groups, ['users'])
+ group = user.in_group[0]
+ self.assertEqual(len(group.reverse_tags), 1)
+ self.assertEqual(group.reverse_tags[0].name, 'hop')
+ # link or create action
+ tags = set([(t.name, t.cwuri.replace(str(t.eid), ''), t.cw_source[0].name)
+ for t in user.reverse_tags])
+ self.assertEqual(tags, set((('hop', 'http://testing.fr/cubicweb/', 'system'),
+ ('unknown', 'http://testing.fr/cubicweb/', 'system')))
+ )
+ with self.repo.internal_cnx() as cnx:
+ stats = dfsource.pull_data(cnx, force=True, raise_on_error=True)
+ self.assertEqual(stats['created'], set())
+ self.assertEqual(len(stats['updated']), 0)
+ self.assertEqual(len(stats['checked']), 2)
+ self.repo._type_source_cache.clear()
+ self.repo._extid_cache.clear()
+ stats = dfsource.pull_data(cnx, force=True, raise_on_error=True)
+ self.assertEqual(stats['created'], set())
+ self.assertEqual(len(stats['updated']), 0)
+ self.assertEqual(len(stats['checked']), 2)
- # test move to system source
- self.sexecute('SET X cw_source S WHERE X eid %(x)s, S name "system"', {'x': email.eid})
- self.commit()
- rset = self.sexecute('EmailAddress X WHERE X address "syt@logilab.fr"')
- self.assertEqual(len(rset), 1)
- e = rset.get_entity(0, 0)
- self.assertEqual(e.eid, email.eid)
- self.assertEqual(e.cw_metainformation(), {'source': {'type': u'native', 'uri': u'system',
- 'use-cwuri-as-url': False},
- 'type': 'EmailAddress',
- 'extid': None})
- self.assertEqual(e.cw_source[0].name, 'system')
- self.assertEqual(e.reverse_use_email[0].login, 'sthenault')
- self.commit()
- # test everything is still fine after source synchronization
- session.set_cnxset()
- with session.security_enabled(read=False): # avoid Unauthorized due to password selection
- stats = dfsource.pull_data(session, force=True, raise_on_error=True)
- rset = self.sexecute('EmailAddress X WHERE X address "syt@logilab.fr"')
- self.assertEqual(len(rset), 1)
- e = rset.get_entity(0, 0)
- self.assertEqual(e.eid, email.eid)
- self.assertEqual(e.cw_metainformation(), {'source': {'type': u'native', 'uri': u'system',
- 'use-cwuri-as-url': False},
- 'type': 'EmailAddress',
- 'extid': None})
- self.assertEqual(e.cw_source[0].name, 'system')
- self.assertEqual(e.reverse_use_email[0].login, 'sthenault')
- session.commit()
+ # test move to system source
+ cnx.execute('SET X cw_source S WHERE X eid %(x)s, S name "system"', {'x': email.eid})
+ cnx.commit()
+ rset = cnx.execute('EmailAddress X WHERE X address "syt@logilab.fr"')
+ self.assertEqual(len(rset), 1)
+ e = rset.get_entity(0, 0)
+ self.assertEqual(e.eid, email.eid)
+ self.assertEqual(e.cw_metainformation(), {'source': {'type': u'native', 'uri': u'system',
+ 'use-cwuri-as-url': False},
+ 'type': 'EmailAddress',
+ 'extid': None})
+ self.assertEqual(e.cw_source[0].name, 'system')
+ self.assertEqual(e.reverse_use_email[0].login, 'sthenault')
+ # test everything is still fine after source synchronization
+ stats = dfsource.pull_data(cnx, force=True, raise_on_error=True)
+ rset = cnx.execute('EmailAddress X WHERE X address "syt@logilab.fr"')
+ self.assertEqual(len(rset), 1)
+ e = rset.get_entity(0, 0)
+ self.assertEqual(e.eid, email.eid)
+ self.assertEqual(e.cw_metainformation(), {'source': {'type': u'native', 'uri': u'system',
+ 'use-cwuri-as-url': False},
+ 'type': 'EmailAddress',
+ 'extid': None})
+ self.assertEqual(e.cw_source[0].name, 'system')
+ self.assertEqual(e.reverse_use_email[0].login, 'sthenault')
+ cnx.commit()
- # test delete entity
- e.cw_delete()
- self.commit()
- # test everything is still fine after source synchronization
- session.set_cnxset()
- with session.security_enabled(read=False): # avoid Unauthorized due to password selection
- stats = dfsource.pull_data(session, force=True, raise_on_error=True)
- rset = self.sexecute('EmailAddress X WHERE X address "syt@logilab.fr"')
- self.assertEqual(len(rset), 0)
- rset = self.sexecute('Any X WHERE X use_email E, X login "sthenault"')
- self.assertEqual(len(rset), 0)
+ # test delete entity
+ e.cw_delete()
+ cnx.commit()
+ # test everything is still fine after source synchronization
+ stats = dfsource.pull_data(cnx, force=True, raise_on_error=True)
+ rset = cnx.execute('EmailAddress X WHERE X address "syt@logilab.fr"')
+ self.assertEqual(len(rset), 0)
+ rset = cnx.execute('Any X WHERE X use_email E, X login "sthenault"')
+ self.assertEqual(len(rset), 0)
def test_external_entity(self):
dfsource = self.repo.sources_by_uri['myotherfeed']
- session = self.repo.internal_session(safe=True)
- stats = dfsource.pull_data(session, force=True, raise_on_error=True)
- user = self.execute('CWUser X WHERE X login "sthenault"').get_entity(0, 0)
- self.assertEqual(user.creation_date, datetime(2010, 01, 22, 10, 27, 59))
- self.assertEqual(user.modification_date, datetime(2011, 01, 25, 14, 14, 06))
- self.assertEqual(user.cwuri, 'http://pouet.org/5')
- self.assertEqual(user.cw_source[0].name, 'myfeed')
+ with self.repo.internal_cnx() as cnx:
+ stats = dfsource.pull_data(cnx, force=True, raise_on_error=True)
+ user = cnx.execute('CWUser X WHERE X login "sthenault"').get_entity(0, 0)
+ self.assertEqual(user.creation_date, datetime(2010, 01, 22, 10, 27, 59))
+ self.assertEqual(user.modification_date, datetime(2011, 01, 25, 14, 14, 06))
+ self.assertEqual(user.cwuri, 'http://pouet.org/5')
+ self.assertEqual(user.cw_source[0].name, 'myfeed')
def test_noerror_missing_fti_attribute(self):
dfsource = self.repo.sources_by_uri['myfeed']
- session = self.repo.internal_session(safe=True)
- parser = dfsource._get_parser(session)
- dfsource.process_urls(parser, ['''
+ with self.repo.internal_cnx() as cnx:
+ parser = dfsource._get_parser(cnx)
+ dfsource.process_urls(parser, ['''
<rset size="1">
<Card eid="50" cwuri="http://pouet.org/50" cwsource="system">
<title>how-to</title>
@@ -308,9 +301,9 @@
def test_noerror_unspecified_date(self):
dfsource = self.repo.sources_by_uri['myfeed']
- session = self.repo.internal_session(safe=True)
- parser = dfsource._get_parser(session)
- dfsource.process_urls(parser, ['''
+ with self.repo.internal_cnx() as cnx:
+ parser = dfsource._get_parser(cnx)
+ dfsource.process_urls(parser, ['''
<rset size="1">
<Card eid="50" cwuri="http://pouet.org/50" cwsource="system">
<title>how-to</title>
--- a/sobjects/test/unittest_email.py Tue Jun 10 09:35:26 2014 +0200
+++ b/sobjects/test/unittest_email.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -15,9 +15,6 @@
#
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""
-
-"""
from cubicweb import Unauthorized
from cubicweb.devtools.testlib import CubicWebTC
@@ -25,45 +22,46 @@
class EmailAddressHooksTC(CubicWebTC):
def test_use_email_set_primary_email(self):
- self.execute('INSERT EmailAddress X: X address "admin@logilab.fr", U use_email X WHERE U login "admin"')
- self.assertEqual(self.execute('Any A WHERE U primary_email X, U login "admin", X address A').rows,
- [])
- self.commit()
- self.assertEqual(self.execute('Any A WHERE U primary_email X, U login "admin", X address A')[0][0],
- 'admin@logilab.fr')
- # having another email should'nt change anything
- self.execute('INSERT EmailAddress X: X address "a@logilab.fr", U use_email X WHERE U login "admin"')
- self.commit()
- self.assertEqual(self.execute('Any A WHERE U primary_email X, U login "admin", X address A')[0][0],
- 'admin@logilab.fr')
+ with self.admin_access.client_cnx() as cnx:
+ cnx.execute('INSERT EmailAddress X: X address "admin@logilab.fr", U use_email X WHERE U login "admin"')
+ self.assertEqual(cnx.execute('Any A WHERE U primary_email X, U login "admin", X address A').rows,
+ [])
+ cnx.commit()
+ self.assertEqual(cnx.execute('Any A WHERE U primary_email X, U login "admin", X address A')[0][0],
+ 'admin@logilab.fr')
+            # having another email shouldn't change anything
+ cnx.execute('INSERT EmailAddress X: X address "a@logilab.fr", U use_email X WHERE U login "admin"')
+ cnx.commit()
+ self.assertEqual(cnx.execute('Any A WHERE U primary_email X, U login "admin", X address A')[0][0],
+ 'admin@logilab.fr')
def test_primary_email_set_use_email(self):
- self.execute('INSERT EmailAddress X: X address "admin@logilab.fr", U primary_email X WHERE U login "admin"')
- self.assertEqual(self.execute('Any A WHERE U use_email X, U login "admin", X address A').rows,
- [])
- self.commit()
- self.assertEqual(self.execute('Any A WHERE U use_email X, U login "admin", X address A')[0][0],
- 'admin@logilab.fr')
+ with self.admin_access.client_cnx() as cnx:
+ cnx.execute('INSERT EmailAddress X: X address "admin@logilab.fr", U primary_email X WHERE U login "admin"')
+ self.assertEqual(cnx.execute('Any A WHERE U use_email X, U login "admin", X address A').rows,
+ [])
+ cnx.commit()
+ self.assertEqual(cnx.execute('Any A WHERE U use_email X, U login "admin", X address A')[0][0],
+ 'admin@logilab.fr')
def test_cardinality_check(self):
- email1 = self.execute('INSERT EmailAddress E: E address "client@client.com", U use_email E WHERE U login "admin"')[0][0]
- self.commit()
- self.execute('SET U primary_email E WHERE U login "anon", E address "client@client.com"')
- self.commit()
- rset = self.execute('Any X WHERE X use_email E, E eid %(e)s', {'e': email1})
- self.assertFalse(rset.rowcount != 1, rset)
+ with self.admin_access.client_cnx() as cnx:
+ email1 = cnx.execute('INSERT EmailAddress E: E address "client@client.com", U use_email E WHERE U login "admin"')[0][0]
+ cnx.commit()
+ cnx.execute('SET U primary_email E WHERE U login "anon", E address "client@client.com"')
+ cnx.commit()
+ rset = cnx.execute('Any X WHERE X use_email E, E eid %(e)s', {'e': email1})
+ self.assertFalse(rset.rowcount != 1, rset)
def test_security_check(self):
- req = self.request()
- self.create_user(req, 'toto')
- email1 = self.execute('INSERT EmailAddress E: E address "client@client.com", U use_email E WHERE U login "admin"')[0][0]
- self.commit()
- cnx = self.login('toto')
- cu = cnx.cursor()
- self.assertRaises(Unauthorized,
- cu.execute, 'SET U primary_email E WHERE E eid %(e)s, U login "toto"',
- {'e': email1})
- cnx.close()
+ with self.admin_access.client_cnx() as cnx:
+ self.create_user(cnx, 'toto')
+ email1 = cnx.execute('INSERT EmailAddress E: E address "client@client.com", U use_email E WHERE U login "admin"')[0][0]
+ cnx.commit()
+ with self.new_access('toto').client_cnx() as cnx:
+ self.assertRaises(Unauthorized,
+ cnx.execute, 'SET U primary_email E WHERE E eid %(e)s, U login "toto"',
+ {'e': email1})
if __name__ == '__main__':
from logilab.common.testlib import unittest_main
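
Note: the rewritten tests above illustrate the `admin_access` / `new_access`
helpers that replace `self.request()`, `self.login()` and
`self.restore_connection()` in CubicWebTC. A condensed sketch of the
pattern:

    from cubicweb.devtools.testlib import CubicWebTC

    class ExampleTC(CubicWebTC):
        def test_pattern(self):
            with self.admin_access.client_cnx() as cnx:  # admin connection
                self.create_user(cnx, 'bob')
                cnx.commit()
            with self.new_access('bob').client_cnx() as cnx:  # plain user
                cnx.execute('Any X WHERE X is CWUser')
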
--- a/sobjects/test/unittest_notification.py Tue Jun 10 09:35:26 2014 +0200
+++ b/sobjects/test/unittest_notification.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,5 +1,5 @@
# -*- coding: iso-8859-1 -*-
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -16,9 +16,7 @@
#
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""
-"""
from socket import gethostname
from logilab.common.testlib import unittest_main, TestCase
@@ -31,16 +29,16 @@
msgid1 = construct_message_id('testapp', 21)
msgid2 = construct_message_id('testapp', 21)
self.assertNotEqual(msgid1, msgid2)
- self.assertFalse('&' in msgid1)
- self.assertFalse('=' in msgid1)
- self.assertFalse('/' in msgid1)
- self.assertFalse('+' in msgid1)
+ self.assertNotIn('&', msgid1)
+ self.assertNotIn('=', msgid1)
+ self.assertNotIn('/', msgid1)
+ self.assertNotIn('+', msgid1)
values = parse_message_id(msgid1, 'testapp')
self.assertTrue(values)
# parse_message_id should work with or without surrounding <>
self.assertEqual(values, parse_message_id(msgid1[1:-1], 'testapp'))
self.assertEqual(values['eid'], '21')
- self.assertTrue('timestamp' in values)
+ self.assertIn('timestamp', values)
self.assertEqual(parse_message_id(msgid1[1:-1], 'anotherapp'), None)
def test_notimestamp(self):
@@ -63,33 +61,34 @@
class NotificationTC(CubicWebTC):
def test_recipients_finder(self):
- urset = self.execute('CWUser X WHERE X login "admin"')
- self.execute('INSERT EmailAddress X: X address "admin@logilab.fr", U primary_email X '
- 'WHERE U eid %(x)s', {'x': urset[0][0]})
- self.execute('INSERT CWProperty X: X pkey "ui.language", X value "fr", X for_user U '
- 'WHERE U eid %(x)s', {'x': urset[0][0]})
- self.commit() # commit so that admin get its properties updated
- finder = self.vreg['components'].select('recipients_finder',
- self.request(), rset=urset)
- self.set_option('default-recipients-mode', 'none')
- self.assertEqual(finder.recipients(), [])
- self.set_option('default-recipients-mode', 'users')
- self.assertEqual(finder.recipients(), [(u'admin@logilab.fr', 'fr')])
- self.set_option('default-recipients-mode', 'default-dest-addrs')
- self.set_option('default-dest-addrs', 'abcd@logilab.fr, efgh@logilab.fr')
- self.assertEqual(finder.recipients(), [('abcd@logilab.fr', 'en'), ('efgh@logilab.fr', 'en')])
+ with self.admin_access.web_request() as req:
+ urset = req.execute('CWUser X WHERE X login "admin"')
+ req.execute('INSERT EmailAddress X: X address "admin@logilab.fr", U primary_email X '
+ 'WHERE U eid %(x)s', {'x': urset[0][0]})
+ req.execute('INSERT CWProperty X: X pkey "ui.language", X value "fr", X for_user U '
+ 'WHERE U eid %(x)s', {'x': urset[0][0]})
+            req.cnx.commit() # commit so that admin gets its properties updated
+ finder = self.vreg['components'].select('recipients_finder',
+ req, rset=urset)
+ self.set_option('default-recipients-mode', 'none')
+ self.assertEqual(finder.recipients(), [])
+ self.set_option('default-recipients-mode', 'users')
+ self.assertEqual(finder.recipients(), [(u'admin@logilab.fr', 'fr')])
+ self.set_option('default-recipients-mode', 'default-dest-addrs')
+ self.set_option('default-dest-addrs', 'abcd@logilab.fr, efgh@logilab.fr')
+ self.assertEqual(finder.recipients(), [('abcd@logilab.fr', 'en'), ('efgh@logilab.fr', 'en')])
def test_status_change_view(self):
- req = self.request()
- u = self.create_user(req, 'toto')
- iwfable = u.cw_adapt_to('IWorkflowable')
- iwfable.fire_transition('deactivate', comment=u'yeah')
- self.assertFalse(MAILBOX)
- self.commit()
- self.assertEqual(len(MAILBOX), 1)
- email = MAILBOX[0]
- self.assertEqual(email.content,
- '''
+ with self.admin_access.web_request() as req:
+ u = self.create_user(req, 'toto')
+ iwfable = u.cw_adapt_to('IWorkflowable')
+ iwfable.fire_transition('deactivate', comment=u'yeah')
+ self.assertFalse(MAILBOX)
+ req.cnx.commit()
+ self.assertEqual(len(MAILBOX), 1)
+ email = MAILBOX[0]
+ self.assertEqual(email.content,
+ '''
admin changed status from <activated> to <deactivated> for entity
'toto'
@@ -97,8 +96,8 @@
url: http://testing.fr/cubicweb/cwuser/toto
''')
- self.assertEqual(email.subject,
- 'status changed CWUser #%s (admin)' % u.eid)
+ self.assertEqual(email.subject,
+ 'status changed CWUser #%s (admin)' % u.eid)
if __name__ == '__main__':
unittest_main()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/sobjects/test/unittest_register_user.py Tue Jun 10 09:49:45 2014 +0200
@@ -0,0 +1,95 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""unittest for cubicweb.dbapi"""
+
+from cubicweb import ValidationError
+from cubicweb.web import Unauthorized
+from cubicweb.devtools.testlib import CubicWebTC
+
+
+class RegisterUserTC(CubicWebTC):
+
+ def test_register_user_service(self):
+ acc = self.admin_access
+ with acc.client_cnx() as cnx:
+ cnx.call_service('register_user', login=u'foo1', password=u'bar1',
+ email=u'foo1@bar1.com', firstname=u'Foo1',
+ surname=u'Bar1')
+
+ acc = self.new_access('anon')
+ with acc.client_cnx() as cnx:
+ self.assertRaises(Unauthorized, cnx.call_service, 'register_user',
+ login=u'foo2', password=u'bar2',
+ email=u'foo2@bar2.com', firstname=u'Foo2', surname=u'Bar2')
+
+ with self.repo.internal_cnx() as cnx:
+ cnx.call_service('register_user', login=u'foo3',
+ password=u'bar3', email=u'foo3@bar3.com',
+ firstname=u'Foo3', surname=u'Bar3')
+ # same login
+ with self.assertRaises(ValidationError):
+ cnx.call_service('register_user', login=u'foo3',
+ password=u'bar3')
+
+ def test_register_user_attributes(self):
+ with self.repo.internal_cnx() as cnx:
+ cnx.call_service('register_user', login=u'foo3',
+ password=u'bar3', email=u'foo3@bar3.com',
+ firstname=u'Foo3', surname=u'Bar3')
+ cnx.commit()
+
+ with self.admin_access.client_cnx() as cnx:
+ user = cnx.find('CWUser', login=u'foo3').one()
+ self.assertEqual(user.firstname, u'Foo3')
+ self.assertEqual(user.use_email[0].address, u'foo3@bar3.com')
+
+ def test_register_user_groups(self):
+ with self.repo.internal_cnx() as cnx:
+ # default
+ cnx.call_service('register_user', login=u'foo_user',
+ password=u'bar_user', email=u'foo_user@bar_user.com',
+ firstname=u'Foo_user', surname=u'Bar_user')
+
+ # group kwarg
+ cnx.call_service('register_user', login=u'foo_admin',
+ password=u'bar_admin', email=u'foo_admin@bar_admin.com',
+ firstname=u'Foo_admin', surname=u'Bar_admin',
+ groups=('managers', 'users'))
+
+ # class attribute
+ from cubicweb.sobjects import services
+ services.RegisterUserService.default_groups = ('guests',)
+ cnx.call_service('register_user', login=u'foo_guest',
+ password=u'bar_guest', email=u'foo_guest@bar_guest.com',
+ firstname=u'Foo_guest', surname=u'Bar_guest')
+ cnx.commit()
+
+ with self.admin_access.client_cnx() as cnx:
+ user = cnx.find('CWUser', login=u'foo_user').one()
+ self.assertEqual([g.name for g in user.in_group], ['users'])
+
+ admin = cnx.find('CWUser', login=u'foo_admin').one()
+ self.assertEqual(sorted(g.name for g in admin.in_group), ['managers', 'users'])
+
+ guest = cnx.find('CWUser', login=u'foo_guest').one()
+ self.assertEqual([g.name for g in guest.in_group], ['guests'])
+
+
+if __name__ == '__main__':
+ from logilab.common.testlib import unittest_main
+ unittest_main()
--- a/sobjects/test/unittest_supervising.py Tue Jun 10 09:35:26 2014 +0200
+++ b/sobjects/test/unittest_supervising.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,5 +1,5 @@
# -*- coding: iso-8859-1 -*-
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -16,9 +16,6 @@
#
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""
-
-"""
import re
from logilab.common.testlib import unittest_main
@@ -30,38 +27,38 @@
class SupervisingTC(CubicWebTC):
def setup_database(self):
- req = self.request()
- req.create_entity('Card', title=u"une news !", content=u"cubicweb c'est beau")
- req.create_entity('Card', title=u"une autre news !", content=u"cubicweb c'est beau")
- req.create_entity('Bookmark', title=u"un signet !", path=u"view?vid=index")
- req.create_entity('Comment', content=u"Yo !")
- self.execute('SET C comments B WHERE B title "une autre news !", C content "Yo !"')
+ with self.admin_access.client_cnx() as cnx:
+ cnx.create_entity('Card', title=u"une news !", content=u"cubicweb c'est beau")
+ card = cnx.create_entity('Card', title=u"une autre news !", content=u"cubicweb c'est beau")
+ cnx.create_entity('Bookmark', title=u"un signet !", path=u"view?vid=index")
+ cnx.create_entity('Comment', content=u"Yo !", comments=card)
+ cnx.commit()
self.vreg.config.global_set_option('supervising-addrs', 'test@logilab.fr')
def test_supervision(self):
# do some modification
- user = self.execute('INSERT CWUser X: X login "toto", X upassword "sosafe", X in_group G '
- 'WHERE G name "users"').get_entity(0, 0)
- self.execute('SET X last_login_time NOW WHERE X eid %(x)s', {'x': user.eid})
- self.execute('DELETE Card B WHERE B title "une news !"')
- self.execute('SET X bookmarked_by U WHERE X is Bookmark, U eid %(x)s', {'x': user.eid})
- self.execute('SET X content "duh?" WHERE X is Comment')
- self.execute('DELETE X comments Y WHERE Y is Card, Y title "une autre news !"')
- # check only one supervision email operation
- session = self.session
- sentops = [op for op in session.pending_operations
- if isinstance(op, SupervisionMailOp)]
- self.assertEqual(len(sentops), 1)
- # check view content
- op = sentops[0]
- view = sentops[0]._get_view()
- self.assertEqual(view.recipients(), ['test@logilab.fr'])
- self.assertEqual(view.subject(), '[data supervision] changes summary')
- data = view.render(changes=session.transaction_data.get('pendingchanges')).strip()
- data = re.sub('#\d+', '#EID', data)
- data = re.sub('/\d+', '/EID', data)
- self.assertMultiLineEqual('''user admin has made the following change(s):
+ with self.admin_access.repo_cnx() as cnx:
+ user = cnx.execute('INSERT CWUser X: X login "toto", X upassword "sosafe", X in_group G '
+ 'WHERE G name "users"').get_entity(0, 0)
+ cnx.execute('SET X last_login_time NOW WHERE X eid %(x)s', {'x': user.eid})
+ cnx.execute('DELETE Card B WHERE B title "une news !"')
+ cnx.execute('SET X bookmarked_by U WHERE X is Bookmark, U eid %(x)s', {'x': user.eid})
+ cnx.execute('SET X content "duh?" WHERE X is Comment')
+ cnx.execute('DELETE Comment C WHERE C comments Y, Y is Card, Y title "une autre news !"')
+ # check only one supervision email operation
+ sentops = [op for op in cnx.pending_operations
+ if isinstance(op, SupervisionMailOp)]
+ self.assertEqual(len(sentops), 1)
+ # check view content
+ op = sentops[0]
+ view = sentops[0]._get_view()
+ self.assertEqual(view.recipients(), ['test@logilab.fr'])
+ self.assertEqual(view.subject(), '[data supervision] changes summary')
+ data = view.render(changes=cnx.transaction_data.get('pendingchanges')).strip()
+ data = re.sub('#\d+', '#EID', data)
+ data = re.sub('/\d+', '/EID', data)
+ self.assertMultiLineEqual('''user admin has made the following change(s):
* added cwuser #EID (toto)
http://testing.fr/cubicweb/cwuser/toto
@@ -75,26 +72,26 @@
* updated comment #EID (duh?)
http://testing.fr/cubicweb/comment/EID
-* deleted relation comments from comment #EID to card #EID''',
+* deleted comment #EID (duh?)''',
data)
- # check prepared email
- op._prepare_email()
- self.assertEqual(len(op.to_send), 1)
- self.assert_(op.to_send[0][0])
- self.assertEqual(op.to_send[0][1], ['test@logilab.fr'])
- self.commit()
- # some other changes #######
- user.cw_adapt_to('IWorkflowable').fire_transition('deactivate')
- sentops = [op for op in session.pending_operations
- if isinstance(op, SupervisionMailOp)]
- self.assertEqual(len(sentops), 1)
- # check view content
- op = sentops[0]
- view = sentops[0]._get_view()
- data = view.render(changes=session.transaction_data.get('pendingchanges')).strip()
- data = re.sub('#\d+', '#EID', data)
- data = re.sub('/\d+', '/EID', data)
- self.assertMultiLineEqual('''user admin has made the following change(s):
+ # check prepared email
+ op._prepare_email()
+ self.assertEqual(len(op.to_send), 1)
+            self.assertTrue(op.to_send[0][0])
+ self.assertEqual(op.to_send[0][1], ['test@logilab.fr'])
+ cnx.commit()
+ # some other changes #######
+ user.cw_adapt_to('IWorkflowable').fire_transition('deactivate')
+ sentops = [op for op in cnx.pending_operations
+ if isinstance(op, SupervisionMailOp)]
+ self.assertEqual(len(sentops), 1)
+ # check view content
+ op = sentops[0]
+ view = sentops[0]._get_view()
+ data = view.render(changes=cnx.transaction_data.get('pendingchanges')).strip()
+ data = re.sub('#\d+', '#EID', data)
+ data = re.sub('/\d+', '/EID', data)
+ self.assertMultiLineEqual('''user admin has made the following change(s):
* changed state of cwuser #EID (toto)
from state activated to state deactivated
@@ -102,10 +99,10 @@
data)
def test_nonregr1(self):
- session = self.session
- # do some unlogged modification
- self.execute('SET X last_login_time NOW WHERE X eid %(x)s', {'x': session.user.eid})
- self.commit() # no crash
+ with self.admin_access.repo_cnx() as cnx:
+ # do some unlogged modification
+ cnx.execute('SET X last_login_time NOW WHERE X eid %(x)s', {'x': cnx.user.eid})
+ cnx.commit() # no crash
if __name__ == '__main__':
--- a/test/unittest_cwconfig.py Tue Jun 10 09:35:26 2014 +0200
+++ b/test/unittest_cwconfig.py Tue Jun 10 09:49:45 2014 +0200
@@ -123,7 +123,7 @@
self.assertEqual(self.config.cubes_search_path(),
[CUSTOM_CUBES_DIR,
self.config.CUBES_DIR])
- self.assertTrue('mycube' in self.config.available_cubes())
+ self.assertIn('mycube', self.config.available_cubes())
# test cubes python path
self.config.adjust_sys_path()
import cubes
--- a/test/unittest_cwctl.py Tue Jun 10 09:35:26 2014 +0200
+++ b/test/unittest_cwctl.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -43,21 +43,22 @@
class CubicWebShellTC(CubicWebTC):
def test_process_script_args_context(self):
- repo = self.cnx._repo
- mih = ServerMigrationHelper(None, repo=repo, cnx=self.cnx,
- interactive=False,
- # hack so it don't try to load fs schema
- schema=1)
- scripts = {'script1.py': list(),
- 'script2.py': ['-v'],
- 'script3.py': ['-vd', '-f', 'FILE.TXT'],
- }
- mih.cmd_process_script(join(self.datadir, 'scripts', 'script1.py'),
- funcname=None)
- for script, args in scripts.items():
- scriptname = os.path.join(self.datadir, 'scripts', script)
- self.assert_(os.path.exists(scriptname))
- mih.cmd_process_script(scriptname, None, scriptargs=args)
+ repo = self.repo
+ with self.admin_access.client_cnx() as cnx:
+ mih = ServerMigrationHelper(None, repo=repo, cnx=cnx,
+ interactive=False,
+                                        # hack so it doesn't try to load fs schema
+ schema=1)
+ scripts = {'script1.py': list(),
+ 'script2.py': ['-v'],
+ 'script3.py': ['-vd', '-f', 'FILE.TXT'],
+ }
+ mih.cmd_process_script(join(self.datadir, 'scripts', 'script1.py'),
+ funcname=None)
+ for script, args in scripts.items():
+ scriptname = os.path.join(self.datadir, 'scripts', script)
+                self.assertTrue(os.path.exists(scriptname))
+ mih.cmd_process_script(scriptname, None, scriptargs=args)
if __name__ == '__main__':
--- a/test/unittest_dbapi.py Tue Jun 10 09:35:26 2014 +0200
+++ b/test/unittest_dbapi.py Tue Jun 10 09:49:45 2014 +0200
@@ -22,41 +22,43 @@
from logilab.common import tempattr
from cubicweb import ConnectionError, cwconfig, NoSelectableObject
-from cubicweb.dbapi import ProgrammingError
+from cubicweb.dbapi import ProgrammingError, _repo_connect
from cubicweb.devtools.testlib import CubicWebTC
class DBAPITC(CubicWebTC):
def test_public_repo_api(self):
- cnx = self.login('anon')
+ cnx = _repo_connect(self.repo, login='anon', password='anon')
self.assertEqual(cnx.get_schema(), self.repo.schema)
self.assertEqual(cnx.source_defs(), {'system': {'type': 'native', 'uri': 'system',
'use-cwuri-as-url': False}})
- self.restore_connection() # proper way to close cnx
+ cnx.close()
self.assertRaises(ProgrammingError, cnx.get_schema)
self.assertRaises(ProgrammingError, cnx.source_defs)
def test_db_api(self):
- cnx = self.login('anon')
+ cnx = _repo_connect(self.repo, login='anon', password='anon')
self.assertEqual(cnx.rollback(), None)
self.assertEqual(cnx.commit(), None)
- self.restore_connection() # proper way to close cnx
- #self.assertEqual(cnx.close(), None)
+ cnx.close()
self.assertRaises(ProgrammingError, cnx.rollback)
self.assertRaises(ProgrammingError, cnx.commit)
self.assertRaises(ProgrammingError, cnx.close)
def test_api(self):
- cnx = self.login('anon')
+ cnx = _repo_connect(self.repo, login='anon', password='anon')
self.assertEqual(cnx.user(None).login, 'anon')
+ self.assertEqual({'type': u'CWSource', 'source': u'system', 'extid': None},
+ cnx.entity_metas(1))
self.assertEqual(cnx.describe(1), (u'CWSource', u'system', None))
- self.restore_connection() # proper way to close cnx
+ cnx.close()
self.assertRaises(ProgrammingError, cnx.user, None)
+ self.assertRaises(ProgrammingError, cnx.entity_metas, 1)
self.assertRaises(ProgrammingError, cnx.describe, 1)
def test_shared_data_api(self):
- cnx = self.login('anon')
+ cnx = _repo_connect(self.repo, login='anon', password='anon')
self.assertEqual(cnx.get_shared_data('data'), None)
cnx.set_shared_data('data', 4)
self.assertEqual(cnx.get_shared_data('data'), 4)
@@ -65,16 +67,17 @@
self.assertEqual(cnx.get_shared_data('data'), None)
cnx.set_shared_data('data', 4)
self.assertEqual(cnx.get_shared_data('data'), 4)
- self.restore_connection() # proper way to close cnx
+ cnx.close()
self.assertRaises(ProgrammingError, cnx.check)
self.assertRaises(ProgrammingError, cnx.set_shared_data, 'data', 0)
self.assertRaises(ProgrammingError, cnx.get_shared_data, 'data')
def test_web_compatible_request(self):
config = cwconfig.CubicWebNoAppConfiguration()
- with tempattr(self.cnx.vreg, 'config', config):
- self.cnx.use_web_compatible_requests('http://perdu.com')
- req = self.cnx.request()
+ cnx = _repo_connect(self.repo, login='admin', password='gingkow')
+ with tempattr(cnx.vreg, 'config', config):
+ cnx.use_web_compatible_requests('http://perdu.com')
+ req = cnx.request()
self.assertEqual(req.base_url(), 'http://perdu.com')
self.assertEqual(req.from_controller(), 'view')
self.assertEqual(req.relative_path(), '')
--- a/test/unittest_entity.py Tue Jun 10 09:35:26 2014 +0200
+++ b/test/unittest_entity.py Tue Jun 10 09:49:45 2014 +0200
@@ -57,151 +57,154 @@
self.assertEqual(1, len(produit.fabrique_par))
def test_boolean_value(self):
- e = self.vreg['etypes'].etype_class('CWUser')(self.request())
- self.assertTrue(e)
+ with self.admin_access.web_request() as req:
+ e = self.vreg['etypes'].etype_class('CWUser')(req)
+ self.assertTrue(e)
def test_yams_inheritance(self):
from entities import Note
- e = self.vreg['etypes'].etype_class('SubNote')(self.request())
- self.assertIsInstance(e, Note)
- e2 = self.vreg['etypes'].etype_class('SubNote')(self.request())
- self.assertIs(e.__class__, e2.__class__)
+ with self.admin_access.web_request() as req:
+ e = self.vreg['etypes'].etype_class('SubNote')(req)
+ self.assertIsInstance(e, Note)
+ e2 = self.vreg['etypes'].etype_class('SubNote')(req)
+ self.assertIs(e.__class__, e2.__class__)
def test_has_eid(self):
- e = self.vreg['etypes'].etype_class('CWUser')(self.request())
- self.assertEqual(e.eid, None)
- self.assertEqual(e.has_eid(), False)
- e.eid = 'X'
- self.assertEqual(e.has_eid(), False)
- e.eid = 0
- self.assertEqual(e.has_eid(), True)
- e.eid = 2
- self.assertEqual(e.has_eid(), True)
+ with self.admin_access.web_request() as req:
+ e = self.vreg['etypes'].etype_class('CWUser')(req)
+ self.assertEqual(e.eid, None)
+ self.assertEqual(e.has_eid(), False)
+ e.eid = 'X'
+ self.assertEqual(e.has_eid(), False)
+ e.eid = 0
+ self.assertEqual(e.has_eid(), True)
+ e.eid = 2
+ self.assertEqual(e.has_eid(), True)
def test_copy(self):
- req = self.request()
- req.create_entity('Tag', name=u'x')
- p = req.create_entity('Personne', nom=u'toto')
- oe = req.create_entity('Note', type=u'x')
- self.execute('SET T ecrit_par U WHERE T eid %(t)s, U eid %(u)s',
- {'t': oe.eid, 'u': p.eid})
- self.execute('SET TAG tags X WHERE X eid %(x)s', {'x': oe.eid})
- e = req.create_entity('Note', type=u'z')
- e.copy_relations(oe.eid)
- self.assertEqual(len(e.ecrit_par), 1)
- self.assertEqual(e.ecrit_par[0].eid, p.eid)
- self.assertEqual(len(e.reverse_tags), 1)
- # check meta-relations are not copied, set on commit
- self.assertEqual(len(e.created_by), 0)
+ with self.admin_access.web_request() as req:
+ req.create_entity('Tag', name=u'x')
+ p = req.create_entity('Personne', nom=u'toto')
+ oe = req.create_entity('Note', type=u'x')
+ req.execute('SET T ecrit_par U WHERE T eid %(t)s, U eid %(u)s',
+ {'t': oe.eid, 'u': p.eid})
+ req.execute('SET TAG tags X WHERE X eid %(x)s', {'x': oe.eid})
+ e = req.create_entity('Note', type=u'z')
+ e.copy_relations(oe.eid)
+ self.assertEqual(len(e.ecrit_par), 1)
+ self.assertEqual(e.ecrit_par[0].eid, p.eid)
+ self.assertEqual(len(e.reverse_tags), 1)
+ # check meta-relations are not copied, set on commit
+ self.assertEqual(len(e.created_by), 0)
def test_copy_with_nonmeta_composite_inlined(self):
- req = self.request()
- p = req.create_entity('Personne', nom=u'toto')
- oe = req.create_entity('Note', type=u'x')
- self.schema['ecrit_par'].rdef('Note', 'Personne').composite = 'subject'
- self.execute('SET T ecrit_par U WHERE T eid %(t)s, U eid %(u)s',
- {'t': oe.eid, 'u': p.eid})
- e = req.create_entity('Note', type=u'z')
- e.copy_relations(oe.eid)
- self.assertFalse(e.ecrit_par)
- self.assertTrue(oe.ecrit_par)
+ with self.admin_access.web_request() as req:
+ p = req.create_entity('Personne', nom=u'toto')
+ oe = req.create_entity('Note', type=u'x')
+ self.schema['ecrit_par'].rdef('Note', 'Personne').composite = 'subject'
+ req.execute('SET T ecrit_par U WHERE T eid %(t)s, U eid %(u)s',
+ {'t': oe.eid, 'u': p.eid})
+ e = req.create_entity('Note', type=u'z')
+ e.copy_relations(oe.eid)
+ self.assertFalse(e.ecrit_par)
+ self.assertTrue(oe.ecrit_par)
def test_copy_with_composite(self):
- user = self.user()
- adeleid = self.execute('INSERT EmailAddress X: X address "toto@logilab.org", U use_email X WHERE U login "admin"')[0][0]
- e = self.execute('Any X WHERE X eid %(x)s', {'x': user.eid}).get_entity(0, 0)
- self.assertEqual(e.use_email[0].address, "toto@logilab.org")
- self.assertEqual(e.use_email[0].eid, adeleid)
- usereid = self.execute('INSERT CWUser X: X login "toto", X upassword "toto", X in_group G '
- 'WHERE G name "users"')[0][0]
- e = self.execute('Any X WHERE X eid %(x)s', {'x': usereid}).get_entity(0, 0)
- e.copy_relations(user.eid)
- self.assertFalse(e.use_email)
- self.assertFalse(e.primary_email)
+ with self.admin_access.web_request() as req:
+ adeleid = req.execute('INSERT EmailAddress X: X address "toto@logilab.org", U use_email X WHERE U login "admin"')[0][0]
+ e = req.execute('Any X WHERE X eid %(x)s', {'x': req.user.eid}).get_entity(0, 0)
+ self.assertEqual(e.use_email[0].address, "toto@logilab.org")
+ self.assertEqual(e.use_email[0].eid, adeleid)
+ usereid = req.execute('INSERT CWUser X: X login "toto", X upassword "toto", X in_group G '
+ 'WHERE G name "users"')[0][0]
+ e = req.execute('Any X WHERE X eid %(x)s', {'x': usereid}).get_entity(0, 0)
+ e.copy_relations(req.user.eid)
+ self.assertFalse(e.use_email)
+ self.assertFalse(e.primary_email)
def test_copy_with_non_initial_state(self):
- user = self.user()
- user = self.execute('INSERT CWUser X: X login "toto", X upassword %(pwd)s, X in_group G WHERE G name "users"',
- {'pwd': 'toto'}).get_entity(0, 0)
- self.commit()
- user.cw_adapt_to('IWorkflowable').fire_transition('deactivate')
- self.commit()
- eid2 = self.execute('INSERT CWUser X: X login "tutu", X upassword %(pwd)s', {'pwd': 'toto'})[0][0]
- e = self.execute('Any X WHERE X eid %(x)s', {'x': eid2}).get_entity(0, 0)
- e.copy_relations(user.eid)
- self.commit()
- e.cw_clear_relation_cache('in_state', 'subject')
- self.assertEqual(e.cw_adapt_to('IWorkflowable').state, 'activated')
+ with self.admin_access.web_request() as req:
+ user = req.execute('INSERT CWUser X: X login "toto", X upassword %(pwd)s, X in_group G WHERE G name "users"',
+ {'pwd': 'toto'}).get_entity(0, 0)
+ req.cnx.commit()
+ user.cw_adapt_to('IWorkflowable').fire_transition('deactivate')
+ req.cnx.commit()
+ eid2 = req.execute('INSERT CWUser X: X login "tutu", X upassword %(pwd)s', {'pwd': 'toto'})[0][0]
+ e = req.execute('Any X WHERE X eid %(x)s', {'x': eid2}).get_entity(0, 0)
+ e.copy_relations(user.eid)
+ req.cnx.commit()
+ e.cw_clear_relation_cache('in_state', 'subject')
+ self.assertEqual(e.cw_adapt_to('IWorkflowable').state, 'activated')
def test_related_cache_both(self):
- user = self.execute('Any X WHERE X eid %(x)s', {'x':self.user().eid}).get_entity(0, 0)
- adeleid = self.execute('INSERT EmailAddress X: X address "toto@logilab.org", U use_email X WHERE U login "admin"')[0][0]
- self.commit()
- self.assertEqual(user._cw_related_cache, {})
- email = user.primary_email[0]
- self.assertEqual(sorted(user._cw_related_cache), ['primary_email_subject'])
- self.assertEqual(list(email._cw_related_cache), ['primary_email_object'])
- groups = user.in_group
- self.assertEqual(sorted(user._cw_related_cache), ['in_group_subject', 'primary_email_subject'])
- for group in groups:
- self.assertFalse('in_group_subject' in group._cw_related_cache, list(group._cw_related_cache))
- user.cw_clear_all_caches()
- user.related('in_group', entities=True)
- self.assertIn('in_group_subject', user._cw_related_cache)
- user.cw_clear_all_caches()
- user.related('in_group', targettypes=('CWGroup',), entities=True)
- self.assertNotIn('in_group_subject', user._cw_related_cache)
+ with self.admin_access.web_request() as req:
+ user = req.execute('Any X WHERE X eid %(x)s', {'x':req.user.eid}).get_entity(0, 0)
+ adeleid = req.execute('INSERT EmailAddress X: X address "toto@logilab.org", U use_email X WHERE U login "admin"')[0][0]
+ req.cnx.commit()
+ self.assertEqual(user._cw_related_cache, {})
+ email = user.primary_email[0]
+ self.assertEqual(sorted(user._cw_related_cache), ['primary_email_subject'])
+ self.assertEqual(list(email._cw_related_cache), ['primary_email_object'])
+ groups = user.in_group
+ self.assertEqual(sorted(user._cw_related_cache), ['in_group_subject', 'primary_email_subject'])
+ for group in groups:
+ self.assertNotIn('in_group_subject', group._cw_related_cache)
+ user.cw_clear_all_caches()
+ user.related('in_group', entities=True)
+ self.assertIn('in_group_subject', user._cw_related_cache)
+ user.cw_clear_all_caches()
+ user.related('in_group', targettypes=('CWGroup',), entities=True)
+ self.assertNotIn('in_group_subject', user._cw_related_cache)
def test_related_limit(self):
- req = self.request()
- p = req.create_entity('Personne', nom=u'di mascio', prenom=u'adrien')
- for tag in u'abcd':
- req.create_entity('Tag', name=tag)
- self.execute('SET X tags Y WHERE X is Tag, Y is Personne')
- self.assertEqual(len(p.related('tags', 'object', limit=2)), 2)
- self.assertEqual(len(p.related('tags', 'object')), 4)
- p.cw_clear_all_caches()
- self.assertEqual(len(p.related('tags', 'object', entities=True, limit=2)), 2)
- self.assertEqual(len(p.related('tags', 'object', entities=True)), 4)
+ with self.admin_access.web_request() as req:
+ p = req.create_entity('Personne', nom=u'di mascio', prenom=u'adrien')
+ for tag in u'abcd':
+ req.create_entity('Tag', name=tag)
+ req.execute('SET X tags Y WHERE X is Tag, Y is Personne')
+ self.assertEqual(len(p.related('tags', 'object', limit=2)), 2)
+ self.assertEqual(len(p.related('tags', 'object')), 4)
+ p.cw_clear_all_caches()
+ self.assertEqual(len(p.related('tags', 'object', entities=True, limit=2)), 2)
+ self.assertEqual(len(p.related('tags', 'object', entities=True)), 4)
def test_related_targettypes(self):
- req = self.request()
- p = req.create_entity('Personne', nom=u'Loxodonta', prenom=u'Babar')
- n = req.create_entity('Note', type=u'scratch', ecrit_par=p)
- t = req.create_entity('Tag', name=u'a tag', tags=(p, n))
- self.commit()
- req = self.request()
- t = req.entity_from_eid(t.eid)
- self.assertEqual(2, t.related('tags').rowcount)
- self.assertEqual(1, t.related('tags', targettypes=('Personne',)).rowcount)
- self.assertEqual(1, t.related('tags', targettypes=('Note',)).rowcount)
+ with self.admin_access.web_request() as req:
+ p = req.create_entity('Personne', nom=u'Loxodonta', prenom=u'Babar')
+ n = req.create_entity('Note', type=u'scratch', ecrit_par=p)
+ t = req.create_entity('Tag', name=u'a tag', tags=(p, n))
+ req.cnx.commit()
+ with self.admin_access.web_request() as req:
+ t = req.entity_from_eid(t.eid)
+ self.assertEqual(2, t.related('tags').rowcount)
+ self.assertEqual(1, t.related('tags', targettypes=('Personne',)).rowcount)
+ self.assertEqual(1, t.related('tags', targettypes=('Note',)).rowcount)
def test_cw_instantiate_relation(self):
- req = self.request()
- p1 = req.create_entity('Personne', nom=u'di')
- p2 = req.create_entity('Personne', nom=u'mascio')
- t = req.create_entity('Tag', name=u't0', tags=[])
- self.assertCountEqual(t.tags, [])
- t = req.create_entity('Tag', name=u't1', tags=p1)
- self.assertCountEqual(t.tags, [p1])
- t = req.create_entity('Tag', name=u't2', tags=p1.eid)
- self.assertCountEqual(t.tags, [p1])
- t = req.create_entity('Tag', name=u't3', tags=[p1, p2.eid])
- self.assertCountEqual(t.tags, [p1, p2])
+ with self.admin_access.web_request() as req:
+ p1 = req.create_entity('Personne', nom=u'di')
+ p2 = req.create_entity('Personne', nom=u'mascio')
+ t = req.create_entity('Tag', name=u't0', tags=[])
+ self.assertCountEqual(t.tags, [])
+ t = req.create_entity('Tag', name=u't1', tags=p1)
+ self.assertCountEqual(t.tags, [p1])
+ t = req.create_entity('Tag', name=u't2', tags=p1.eid)
+ self.assertCountEqual(t.tags, [p1])
+ t = req.create_entity('Tag', name=u't3', tags=[p1, p2.eid])
+ self.assertCountEqual(t.tags, [p1, p2])
def test_cw_instantiate_reverse_relation(self):
- req = self.request()
- t1 = req.create_entity('Tag', name=u't1')
- t2 = req.create_entity('Tag', name=u't2')
- p = req.create_entity('Personne', nom=u'di mascio', reverse_tags=t1)
- self.assertCountEqual(p.reverse_tags, [t1])
- p = req.create_entity('Personne', nom=u'di mascio', reverse_tags=t1.eid)
- self.assertCountEqual(p.reverse_tags, [t1])
- p = req.create_entity('Personne', nom=u'di mascio', reverse_tags=[t1, t2.eid])
- self.assertCountEqual(p.reverse_tags, [t1, t2])
+ with self.admin_access.web_request() as req:
+ t1 = req.create_entity('Tag', name=u't1')
+ t2 = req.create_entity('Tag', name=u't2')
+ p = req.create_entity('Personne', nom=u'di mascio', reverse_tags=t1)
+ self.assertCountEqual(p.reverse_tags, [t1])
+ p = req.create_entity('Personne', nom=u'di mascio', reverse_tags=t1.eid)
+ self.assertCountEqual(p.reverse_tags, [t1])
+ p = req.create_entity('Personne', nom=u'di mascio', reverse_tags=[t1, t2.eid])
+ self.assertCountEqual(p.reverse_tags, [t1, t2])
def test_fetch_rql(self):
- user = self.user()
Personne = self.vreg['etypes'].etype_class('Personne')
Societe = self.vreg['etypes'].etype_class('Societe')
Note = self.vreg['etypes'].etype_class('Note')
@@ -216,47 +219,49 @@
cm.__enter__()
torestore.append(cm)
try:
- # testing basic fetch_attrs attribute
- self.assertEqual(Personne.fetch_rql(user),
- 'Any X,AA,AB,AC ORDERBY AA '
- 'WHERE X is_instance_of Personne, X nom AA, X prenom AB, X modification_date AC')
- # testing unknown attributes
- Personne.fetch_attrs = ('bloug', 'beep')
- self.assertEqual(Personne.fetch_rql(user), 'Any X WHERE X is_instance_of Personne')
- # testing one non final relation
- Personne.fetch_attrs = ('nom', 'prenom', 'travaille')
- self.assertEqual(Personne.fetch_rql(user),
- 'Any X,AA,AB,AC,AD ORDERBY AA '
- 'WHERE X is_instance_of Personne, X nom AA, X prenom AB, X travaille AC?, AC nom AD')
- # testing two non final relations
- Personne.fetch_attrs = ('nom', 'prenom', 'travaille', 'evaluee')
- self.assertEqual(Personne.fetch_rql(user),
- 'Any X,AA,AB,AC,AD,AE ORDERBY AA '
- 'WHERE X is_instance_of Personne, X nom AA, X prenom AB, X travaille AC?, AC nom AD, '
- 'X evaluee AE?')
- # testing one non final relation with recursion
- Personne.fetch_attrs = ('nom', 'prenom', 'travaille')
- Societe.fetch_attrs = ('nom', 'evaluee')
- self.assertEqual(Personne.fetch_rql(user),
- 'Any X,AA,AB,AC,AD,AE,AF ORDERBY AA,AF DESC '
- 'WHERE X is_instance_of Personne, X nom AA, X prenom AB, X travaille AC?, AC nom AD, '
- 'AC evaluee AE?, AE modification_date AF'
- )
- # testing symmetric relation
- Personne.fetch_attrs = ('nom', 'connait')
- self.assertEqual(Personne.fetch_rql(user), 'Any X,AA,AB ORDERBY AA '
- 'WHERE X is_instance_of Personne, X nom AA, X connait AB?')
- # testing optional relation
- peschema.subjrels['travaille'].rdef(peschema, seschema).cardinality = '?*'
- Personne.fetch_attrs = ('nom', 'prenom', 'travaille')
- Societe.fetch_attrs = ('nom',)
- self.assertEqual(Personne.fetch_rql(user),
- 'Any X,AA,AB,AC,AD ORDERBY AA WHERE X is_instance_of Personne, X nom AA, X prenom AB, X travaille AC?, AC nom AD')
- # testing relation with cardinality > 1
- peschema.subjrels['travaille'].rdef(peschema, seschema).cardinality = '**'
- self.assertEqual(Personne.fetch_rql(user),
- 'Any X,AA,AB ORDERBY AA WHERE X is_instance_of Personne, X nom AA, X prenom AB')
- # XXX test unauthorized attribute
+ with self.admin_access.web_request() as req:
+ user = req.user
+ # testing basic fetch_attrs attribute
+ self.assertEqual(Personne.fetch_rql(user),
+ 'Any X,AA,AB,AC ORDERBY AA '
+ 'WHERE X is_instance_of Personne, X nom AA, X prenom AB, X modification_date AC')
+ # testing unknown attributes
+ Personne.fetch_attrs = ('bloug', 'beep')
+ self.assertEqual(Personne.fetch_rql(user), 'Any X WHERE X is_instance_of Personne')
+ # testing one non final relation
+ Personne.fetch_attrs = ('nom', 'prenom', 'travaille')
+ self.assertEqual(Personne.fetch_rql(user),
+ 'Any X,AA,AB,AC,AD ORDERBY AA '
+ 'WHERE X is_instance_of Personne, X nom AA, X prenom AB, X travaille AC?, AC nom AD')
+ # testing two non final relations
+ Personne.fetch_attrs = ('nom', 'prenom', 'travaille', 'evaluee')
+ self.assertEqual(Personne.fetch_rql(user),
+ 'Any X,AA,AB,AC,AD,AE ORDERBY AA '
+ 'WHERE X is_instance_of Personne, X nom AA, X prenom AB, X travaille AC?, AC nom AD, '
+ 'X evaluee AE?')
+ # testing one non final relation with recursion
+ Personne.fetch_attrs = ('nom', 'prenom', 'travaille')
+ Societe.fetch_attrs = ('nom', 'evaluee')
+ self.assertEqual(Personne.fetch_rql(user),
+ 'Any X,AA,AB,AC,AD,AE,AF ORDERBY AA,AF DESC '
+ 'WHERE X is_instance_of Personne, X nom AA, X prenom AB, X travaille AC?, AC nom AD, '
+ 'AC evaluee AE?, AE modification_date AF'
+ )
+ # testing symmetric relation
+ Personne.fetch_attrs = ('nom', 'connait')
+ self.assertEqual(Personne.fetch_rql(user), 'Any X,AA,AB ORDERBY AA '
+ 'WHERE X is_instance_of Personne, X nom AA, X connait AB?')
+ # testing optional relation
+ peschema.subjrels['travaille'].rdef(peschema, seschema).cardinality = '?*'
+ Personne.fetch_attrs = ('nom', 'prenom', 'travaille')
+ Societe.fetch_attrs = ('nom',)
+ self.assertEqual(Personne.fetch_rql(user),
+ 'Any X,AA,AB,AC,AD ORDERBY AA WHERE X is_instance_of Personne, X nom AA, X prenom AB, X travaille AC?, AC nom AD')
+ # testing relation with cardinality > 1
+ peschema.subjrels['travaille'].rdef(peschema, seschema).cardinality = '**'
+ self.assertEqual(Personne.fetch_rql(user),
+ 'Any X,AA,AB ORDERBY AA WHERE X is_instance_of Personne, X nom AA, X prenom AB')
+ # XXX test unauthorized attribute
finally:
# fetch_attrs restored by generic tearDown
for cm in torestore:
@@ -270,97 +275,106 @@
Personne.fetch_attrs, Personne.cw_fetch_order = fetch_config(('nom', 'type'))
Note.fetch_attrs, Note.cw_fetch_order = fetch_config(('type',))
SubNote.fetch_attrs, SubNote.cw_fetch_order = fetch_config(('type',))
- p = self.request().create_entity('Personne', nom=u'pouet')
- self.assertEqual(p.cw_related_rql('evaluee'),
- 'Any X,AA,AB ORDERBY AA WHERE E eid %(x)s, E evaluee X, '
- 'X type AA, X modification_date AB')
- n = self.request().create_entity('Note')
- self.assertEqual(n.cw_related_rql('evaluee', role='object',
- targettypes=('Societe', 'Personne')),
- "Any X,AA ORDERBY AB DESC WHERE E eid %(x)s, X evaluee E, "
- "X is IN(Personne, Societe), X nom AA, "
- "X modification_date AB")
- Personne.fetch_attrs, Personne.cw_fetch_order = fetch_config(('nom', ))
- # XXX
- self.assertEqual(p.cw_related_rql('evaluee'),
- 'Any X,AA ORDERBY AA DESC '
- 'WHERE E eid %(x)s, E evaluee X, X modification_date AA')
+ with self.admin_access.web_request() as req:
+ p = req.create_entity('Personne', nom=u'pouet')
+ self.assertEqual(p.cw_related_rql('evaluee'),
+ 'Any X,AA,AB ORDERBY AA WHERE E eid %(x)s, E evaluee X, '
+ 'X type AA, X modification_date AB')
+ n = req.create_entity('Note')
+ self.assertEqual(n.cw_related_rql('evaluee', role='object',
+ targettypes=('Societe', 'Personne')),
+ "Any X,AA ORDERBY AB DESC WHERE E eid %(x)s, X evaluee E, "
+ "X is IN(Personne, Societe), X nom AA, "
+ "X modification_date AB")
+ Personne.fetch_attrs, Personne.cw_fetch_order = fetch_config(('nom', ))
+ # XXX
+ self.assertEqual(p.cw_related_rql('evaluee'),
+ 'Any X,AA ORDERBY AA DESC '
+ 'WHERE E eid %(x)s, E evaluee X, X modification_date AA')
- tag = self.vreg['etypes'].etype_class('Tag')(self.request())
- self.assertEqual(tag.cw_related_rql('tags', 'subject'),
- 'Any X,AA ORDERBY AA DESC '
- 'WHERE E eid %(x)s, E tags X, X modification_date AA')
- self.assertEqual(tag.cw_related_rql('tags', 'subject', ('Personne',)),
- 'Any X,AA,AB ORDERBY AA '
- 'WHERE E eid %(x)s, E tags X, X is Personne, X nom AA, '
- 'X modification_date AB')
+ tag = self.vreg['etypes'].etype_class('Tag')(req)
+ self.assertEqual(tag.cw_related_rql('tags', 'subject'),
+ 'Any X,AA ORDERBY AA DESC '
+ 'WHERE E eid %(x)s, E tags X, X modification_date AA')
+ self.assertEqual(tag.cw_related_rql('tags', 'subject', ('Personne',)),
+ 'Any X,AA,AB ORDERBY AA '
+ 'WHERE E eid %(x)s, E tags X, X is Personne, X nom AA, '
+ 'X modification_date AB')
def test_related_rql_ambiguous_cant_use_fetch_order(self):
- tag = self.vreg['etypes'].etype_class('Tag')(self.request())
- for ttype in self.schema['tags'].objects():
- self.vreg['etypes'].etype_class(ttype).fetch_attrs = ('modification_date',)
- self.assertEqual(tag.cw_related_rql('tags', 'subject'),
- 'Any X,AA ORDERBY AA DESC '
- 'WHERE E eid %(x)s, E tags X, X modification_date AA')
+ with self.admin_access.web_request() as req:
+ tag = self.vreg['etypes'].etype_class('Tag')(req)
+ for ttype in self.schema['tags'].objects():
+ self.vreg['etypes'].etype_class(ttype).fetch_attrs = ('modification_date',)
+ self.assertEqual(tag.cw_related_rql('tags', 'subject'),
+ 'Any X,AA ORDERBY AA DESC '
+ 'WHERE E eid %(x)s, E tags X, X modification_date AA')
def test_related_rql_fetch_ambiguous_rtype(self):
- soc_etype = self.vreg['etypes'].etype_class('Societe')
- soc = soc_etype(self.request())
- soc_etype.fetch_attrs = ('fournit',)
- self.vreg['etypes'].etype_class('Service').fetch_attrs = ('fabrique_par',)
- self.vreg['etypes'].etype_class('Produit').fetch_attrs = ('fabrique_par',)
- self.vreg['etypes'].etype_class('Usine').fetch_attrs = ('lieu',)
- self.vreg['etypes'].etype_class('Personne').fetch_attrs = ('nom',)
- self.assertEqual(soc.cw_related_rql('fournit', 'subject'),
- 'Any X,A WHERE E eid %(x)s, E fournit X, X fabrique_par A')
+ etvreg = self.vreg['etypes']
+ soc_etype = etvreg.etype_class('Societe')
+ with self.admin_access.web_request() as req:
+ soc = soc_etype(req)
+ soc_etype.fetch_attrs = ('fournit',)
+ etvreg.etype_class('Service').fetch_attrs = ('fabrique_par',)
+ etvreg.etype_class('Produit').fetch_attrs = ('fabrique_par',)
+ etvreg.etype_class('Usine').fetch_attrs = ('lieu',)
+ etvreg.etype_class('Personne').fetch_attrs = ('nom',)
+ self.assertEqual(soc.cw_related_rql('fournit', 'subject'),
+ 'Any X,A WHERE E eid %(x)s, E fournit X, X fabrique_par A')
def test_unrelated_rql_security_1_manager(self):
- user = self.request().user
- rql = user.cw_unrelated_rql('use_email', 'EmailAddress', 'subject')[0]
- self.assertEqual(rql, 'Any O,AA,AB,AC ORDERBY AC DESC '
- 'WHERE NOT A use_email O, S eid %(x)s, '
- 'O is_instance_of EmailAddress, O address AA, O alias AB, O modification_date AC')
+ with self.admin_access.web_request() as req:
+ user = req.user
+ rql = user.cw_unrelated_rql('use_email', 'EmailAddress', 'subject')[0]
+ self.assertEqual(rql,
+ 'Any O,AA,AB,AC ORDERBY AC DESC '
+ 'WHERE NOT A use_email O, S eid %(x)s, '
+ 'O is_instance_of EmailAddress, O address AA, O alias AB, '
+ 'O modification_date AC')
def test_unrelated_rql_security_1_user(self):
- req = self.request()
- self.create_user(req, 'toto')
- self.login('toto')
- user = req.user
- rql = user.cw_unrelated_rql('use_email', 'EmailAddress', 'subject')[0]
- self.assertEqual(rql, 'Any O,AA,AB,AC ORDERBY AC DESC '
- 'WHERE NOT A use_email O, S eid %(x)s, '
- 'O is_instance_of EmailAddress, O address AA, O alias AB, O modification_date AC')
- user = self.execute('Any X WHERE X login "admin"').get_entity(0, 0)
- rql = user.cw_unrelated_rql('use_email', 'EmailAddress', 'subject')[0]
- self.assertEqual(rql, 'Any O,AA,AB,AC ORDERBY AC DESC '
- 'WHERE NOT A use_email O, S eid %(x)s, '
- 'O is EmailAddress, O address AA, O alias AB, O modification_date AC, AD eid %(AE)s, '
- 'EXISTS(S identity AD, NOT AD in_group AF, AF name "guests", AF is CWGroup), A is CWUser')
+ with self.admin_access.web_request() as req:
+ self.create_user(req, 'toto')
+ with self.new_access('toto').web_request() as req:
+ user = req.user # XXX
+ rql = user.cw_unrelated_rql('use_email', 'EmailAddress', 'subject')[0]
+ self.assertEqual(rql,
+ 'Any O,AA,AB,AC ORDERBY AC DESC '
+ 'WHERE NOT A use_email O, S eid %(x)s, '
+ 'O is_instance_of EmailAddress, O address AA, O alias AB, O modification_date AC')
+ user = req.execute('Any X WHERE X login "admin"').get_entity(0, 0)
+ rql = user.cw_unrelated_rql('use_email', 'EmailAddress', 'subject')[0]
+ self.assertEqual(rql, 'Any O,AA,AB,AC ORDERBY AC DESC '
+ 'WHERE NOT A use_email O, S eid %(x)s, '
+ 'O is EmailAddress, O address AA, O alias AB, O modification_date AC, AD eid %(AE)s, '
+ 'EXISTS(S identity AD, NOT AD in_group AF, AF name "guests", AF is CWGroup), A is CWUser')
def test_unrelated_rql_security_1_anon(self):
- self.login('anon')
- user = self.request().user
- rql = user.cw_unrelated_rql('use_email', 'EmailAddress', 'subject')[0]
- self.assertEqual(rql, 'Any O,AA,AB,AC ORDERBY AC DESC '
- 'WHERE NOT A use_email O, S eid %(x)s, '
- 'O is EmailAddress, O address AA, O alias AB, O modification_date AC, AD eid %(AE)s, '
- 'EXISTS(S identity AD, NOT AD in_group AF, AF name "guests", AF is CWGroup), A is CWUser')
+ with self.new_access('anon').web_request() as req:
+ user = req.user
+ rql = user.cw_unrelated_rql('use_email', 'EmailAddress', 'subject')[0]
+ self.assertEqual(rql, 'Any O,AA,AB,AC ORDERBY AC DESC '
+ 'WHERE NOT A use_email O, S eid %(x)s, '
+ 'O is EmailAddress, O address AA, O alias AB, O modification_date AC, AD eid %(AE)s, '
+ 'EXISTS(S identity AD, NOT AD in_group AF, AF name "guests", AF is CWGroup), A is CWUser')
def test_unrelated_rql_security_2(self):
- email = self.execute('INSERT EmailAddress X: X address "hop"').get_entity(0, 0)
- rql = email.cw_unrelated_rql('use_email', 'CWUser', 'object')[0]
- self.assertEqual(rql, 'Any S,AA,AB,AC,AD ORDERBY AA '
- 'WHERE NOT S use_email O, O eid %(x)s, S is_instance_of CWUser, '
- 'S login AA, S firstname AB, S surname AC, S modification_date AD')
- self.login('anon')
+ with self.admin_access.web_request() as req:
+ email = req.execute('INSERT EmailAddress X: X address "hop"').get_entity(0, 0)
+ rql = email.cw_unrelated_rql('use_email', 'CWUser', 'object')[0]
+ self.assertEqual(rql, 'Any S,AA,AB,AC,AD ORDERBY AA '
+ 'WHERE NOT S use_email O, O eid %(x)s, S is_instance_of CWUser, '
+ 'S login AA, S firstname AB, S surname AC, S modification_date AD')
rperms = self.schema['EmailAddress'].permissions['read']
clear_cache(self.schema['EmailAddress'], 'get_groups')
clear_cache(self.schema['EmailAddress'], 'get_rqlexprs')
self.schema['EmailAddress'].permissions['read'] = ('managers', 'users', 'guests',)
try:
- email = self.execute('Any X WHERE X eid %(x)s', {'x': email.eid}).get_entity(0, 0)
- rql = email.cw_unrelated_rql('use_email', 'CWUser', 'object')[0]
- self.assertEqual(rql, 'Any S,AA,AB,AC,AD ORDERBY AA '
+ with self.new_access('anon').web_request() as req:
+ email = req.execute('Any X WHERE X eid %(x)s', {'x': email.eid}).get_entity(0, 0)
+ rql = email.cw_unrelated_rql('use_email', 'CWUser', 'object')[0]
+ self.assertEqual(rql, 'Any S,AA,AB,AC,AD ORDERBY AA '
'WHERE NOT S use_email O, O eid %(x)s, S is CWUser, '
'S login AA, S firstname AB, S surname AC, S modification_date AD, '
'AE eid %(AF)s, EXISTS(S identity AE, NOT AE in_group AG, AG name "guests", AG is CWGroup)')
@@ -369,27 +383,36 @@
clear_cache(self.schema['EmailAddress'], 'get_rqlexprs')
self.schema['EmailAddress'].permissions['read'] = rperms
+ def test_cw_linkable_rql(self):
+ with self.admin_access.web_request() as req:
+ email = req.execute('INSERT EmailAddress X: X address "hop"').get_entity(0, 0)
+ rql = email.cw_linkable_rql('use_email', 'CWUser', 'object')[0]
+ self.assertEqual(rql, 'Any S,AA,AB,AC,AD ORDERBY AA '
+ 'WHERE O eid %(x)s, S is_instance_of CWUser, '
+ 'S login AA, S firstname AB, S surname AC, S modification_date AD')
def test_unrelated_rql_security_nonexistant(self):
- self.login('anon')
- email = self.vreg['etypes'].etype_class('EmailAddress')(self.request())
- rql = email.cw_unrelated_rql('use_email', 'CWUser', 'object')[0]
- self.assertEqual(rql, 'Any S,AA,AB,AC,AD ORDERBY AA '
+ with self.new_access('anon').web_request() as req:
+ email = self.vreg['etypes'].etype_class('EmailAddress')(req)
+ rql = email.cw_unrelated_rql('use_email', 'CWUser', 'object')[0]
+ self.assertEqual(rql, 'Any S,AA,AB,AC,AD ORDERBY AA '
'WHERE S is CWUser, '
'S login AA, S firstname AB, S surname AC, S modification_date AD, '
'AE eid %(AF)s, EXISTS(S identity AE, NOT AE in_group AG, AG name "guests", AG is CWGroup)')
def test_unrelated_rql_constraints_creation_subject(self):
- person = self.vreg['etypes'].etype_class('Personne')(self.request())
- rql = person.cw_unrelated_rql('connait', 'Personne', 'subject')[0]
- self.assertEqual(
+ with self.admin_access.web_request() as req:
+ person = self.vreg['etypes'].etype_class('Personne')(req)
+ rql = person.cw_unrelated_rql('connait', 'Personne', 'subject')[0]
+ self.assertEqual(
rql, 'Any O,AA,AB,AC ORDERBY AC DESC WHERE '
'O is_instance_of Personne, O nom AA, O prenom AB, O modification_date AC')
def test_unrelated_rql_constraints_creation_object(self):
- person = self.vreg['etypes'].etype_class('Personne')(self.request())
- rql = person.cw_unrelated_rql('connait', 'Personne', 'object')[0]
- self.assertEqual(
+ with self.admin_access.web_request() as req:
+ person = self.vreg['etypes'].etype_class('Personne')(req)
+ rql = person.cw_unrelated_rql('connait', 'Personne', 'object')[0]
+ self.assertEqual(
rql, 'Any S,AA,AB,AC ORDERBY AC DESC WHERE '
'S is Personne, S nom AA, S prenom AB, S modification_date AC, '
'NOT (S connait AD, AD nom "toto"), AD is Personne, '
@@ -401,25 +424,28 @@
rdef = self.schema['Personne'].rdef('connait')
perm_rrqle = RRQLExpression('U has_update_permission S')
with self.temporary_permissions((rdef, {'add': (perm_rrqle,)})):
- person = self.vreg['etypes'].etype_class('Personne')(self.request())
- rql = person.cw_unrelated_rql('connait', 'Personne', 'subject')[0]
- self.assertEqual(rql, 'Any O,AA,AB,AC ORDERBY AC DESC WHERE '
+ with self.admin_access.web_request() as req:
+ person = self.vreg['etypes'].etype_class('Personne')(req)
+ rql = person.cw_unrelated_rql('connait', 'Personne', 'subject')[0]
+ self.assertEqual(rql, 'Any O,AA,AB,AC ORDERBY AC DESC WHERE '
'O is_instance_of Personne, O nom AA, O prenom AB, '
'O modification_date AC')
def test_unrelated_rql_constraints_edition_subject(self):
- person = self.request().create_entity('Personne', nom=u'sylvain')
- rql = person.cw_unrelated_rql('connait', 'Personne', 'subject')[0]
- self.assertEqual(
- rql, 'Any O,AA,AB,AC ORDERBY AC DESC WHERE '
+ with self.admin_access.web_request() as req:
+ person = req.create_entity('Personne', nom=u'sylvain')
+ rql = person.cw_unrelated_rql('connait', 'Personne', 'subject')[0]
+ self.assertEqual(
+ rql, 'Any O,AA,AB,AC ORDERBY AC DESC WHERE '
'NOT S connait O, S eid %(x)s, O is Personne, '
'O nom AA, O prenom AB, O modification_date AC, '
'NOT S identity O')
def test_unrelated_rql_constraints_edition_object(self):
- person = self.request().create_entity('Personne', nom=u'sylvain')
- rql = person.cw_unrelated_rql('connait', 'Personne', 'object')[0]
- self.assertEqual(
+ with self.admin_access.web_request() as req:
+ person = req.create_entity('Personne', nom=u'sylvain')
+ rql = person.cw_unrelated_rql('connait', 'Personne', 'object')[0]
+ self.assertEqual(
rql, 'Any S,AA,AB,AC ORDERBY AC DESC WHERE '
'NOT S connait O, O eid %(x)s, S is Personne, '
'S nom AA, S prenom AB, S modification_date AC, '
@@ -427,95 +453,95 @@
'EXISTS(S travaille AE, AE nom "tutu")')
def test_unrelated_rql_s_linkto_s(self):
- req = self.request()
- person = self.vreg['etypes'].etype_class('Personne')(req)
- self.vreg['etypes'].etype_class('Personne').fetch_attrs = ()
- soc = req.create_entity('Societe', nom=u'logilab')
- lt_infos = {('actionnaire', 'subject'): [soc.eid]}
- rql, args = person.cw_unrelated_rql('associe', 'Personne', 'subject',
+ with self.admin_access.web_request() as req:
+ person = self.vreg['etypes'].etype_class('Personne')(req)
+ self.vreg['etypes'].etype_class('Personne').fetch_attrs = ()
+ soc = req.create_entity('Societe', nom=u'logilab')
+ lt_infos = {('actionnaire', 'subject'): [soc.eid]}
+ rql, args = person.cw_unrelated_rql('associe', 'Personne', 'subject',
lt_infos=lt_infos)
- self.assertEqual(u'Any O ORDERBY O WHERE O is Personne, '
- u'EXISTS(AA eid %(SOC)s, O actionnaire AA)', rql)
- self.assertEqual({'SOC': soc.eid}, args)
+ self.assertEqual(u'Any O ORDERBY O WHERE O is Personne, '
+ u'EXISTS(AA eid %(SOC)s, O actionnaire AA)', rql)
+ self.assertEqual({'SOC': soc.eid}, args)
def test_unrelated_rql_s_linkto_o(self):
- req = self.request()
- person = self.vreg['etypes'].etype_class('Personne')(req)
- self.vreg['etypes'].etype_class('Societe').fetch_attrs = ()
- soc = req.create_entity('Societe', nom=u'logilab')
- lt_infos = {('contrat_exclusif', 'object'): [soc.eid]}
- rql, args = person.cw_unrelated_rql('actionnaire', 'Societe', 'subject',
- lt_infos=lt_infos)
- self.assertEqual(u'Any O ORDERBY O WHERE NOT A actionnaire O, '
- u'O is_instance_of Societe, NOT EXISTS(O eid %(O)s), '
- u'A is Personne', rql)
- self.assertEqual({'O': soc.eid}, args)
+ with self.admin_access.web_request() as req:
+ person = self.vreg['etypes'].etype_class('Personne')(req)
+ self.vreg['etypes'].etype_class('Societe').fetch_attrs = ()
+ soc = req.create_entity('Societe', nom=u'logilab')
+ lt_infos = {('contrat_exclusif', 'object'): [soc.eid]}
+ rql, args = person.cw_unrelated_rql('actionnaire', 'Societe', 'subject',
+ lt_infos=lt_infos)
+ self.assertEqual(u'Any O ORDERBY O WHERE NOT A actionnaire O, '
+ u'O is_instance_of Societe, NOT EXISTS(O eid %(O)s), '
+ u'A is Personne', rql)
+ self.assertEqual({'O': soc.eid}, args)
def test_unrelated_rql_o_linkto_s(self):
- req = self.request()
- soc = self.vreg['etypes'].etype_class('Societe')(req)
- self.vreg['etypes'].etype_class('Personne').fetch_attrs = ()
- person = req.create_entity('Personne', nom=u'florent')
- lt_infos = {('contrat_exclusif', 'subject'): [person.eid]}
- rql, args = soc.cw_unrelated_rql('actionnaire', 'Personne', 'object',
- lt_infos=lt_infos)
- self.assertEqual(u'Any S ORDERBY S WHERE NOT S actionnaire A, '
- u'S is_instance_of Personne, NOT EXISTS(S eid %(S)s), '
- u'A is Societe', rql)
- self.assertEqual({'S': person.eid}, args)
+ with self.admin_access.web_request() as req:
+ soc = self.vreg['etypes'].etype_class('Societe')(req)
+ self.vreg['etypes'].etype_class('Personne').fetch_attrs = ()
+ person = req.create_entity('Personne', nom=u'florent')
+ lt_infos = {('contrat_exclusif', 'subject'): [person.eid]}
+ rql, args = soc.cw_unrelated_rql('actionnaire', 'Personne', 'object',
+ lt_infos=lt_infos)
+ self.assertEqual(u'Any S ORDERBY S WHERE NOT S actionnaire A, '
+ u'S is_instance_of Personne, NOT EXISTS(S eid %(S)s), '
+ u'A is Societe', rql)
+ self.assertEqual({'S': person.eid}, args)
def test_unrelated_rql_o_linkto_o(self):
- req = self.request()
- soc = self.vreg['etypes'].etype_class('Societe')(req)
- self.vreg['etypes'].etype_class('Personne').fetch_attrs = ()
- person = req.create_entity('Personne', nom=u'florent')
- lt_infos = {('actionnaire', 'object'): [person.eid]}
- rql, args = soc.cw_unrelated_rql('dirige', 'Personne', 'object',
- lt_infos=lt_infos)
- self.assertEqual(u'Any S ORDERBY S WHERE NOT S dirige A, '
- u'S is_instance_of Personne, EXISTS(S eid %(S)s), '
- u'A is Societe', rql)
- self.assertEqual({'S': person.eid}, args)
+ with self.admin_access.web_request() as req:
+ soc = self.vreg['etypes'].etype_class('Societe')(req)
+ self.vreg['etypes'].etype_class('Personne').fetch_attrs = ()
+ person = req.create_entity('Personne', nom=u'florent')
+ lt_infos = {('actionnaire', 'object'): [person.eid]}
+ rql, args = soc.cw_unrelated_rql('dirige', 'Personne', 'object',
+ lt_infos=lt_infos)
+ self.assertEqual(u'Any S ORDERBY S WHERE NOT S dirige A, '
+ u'S is_instance_of Personne, EXISTS(S eid %(S)s), '
+ u'A is Societe', rql)
+ self.assertEqual({'S': person.eid}, args)
def test_unrelated_rql_s_linkto_s_no_info(self):
- req = self.request()
- person = self.vreg['etypes'].etype_class('Personne')(req)
- self.vreg['etypes'].etype_class('Personne').fetch_attrs = ()
- soc = req.create_entity('Societe', nom=u'logilab')
- rql, args = person.cw_unrelated_rql('associe', 'Personne', 'subject')
- self.assertEqual(u'Any O ORDERBY O WHERE O is_instance_of Personne', rql)
- self.assertEqual({}, args)
+ with self.admin_access.web_request() as req:
+ person = self.vreg['etypes'].etype_class('Personne')(req)
+ self.vreg['etypes'].etype_class('Personne').fetch_attrs = ()
+ soc = req.create_entity('Societe', nom=u'logilab')
+ rql, args = person.cw_unrelated_rql('associe', 'Personne', 'subject')
+ self.assertEqual(u'Any O ORDERBY O WHERE O is_instance_of Personne', rql)
+ self.assertEqual({}, args)
def test_unrelated_rql_s_linkto_s_unused_info(self):
- req = self.request()
- person = self.vreg['etypes'].etype_class('Personne')(req)
- self.vreg['etypes'].etype_class('Personne').fetch_attrs = ()
- other_p = req.create_entity('Personne', nom=u'titi')
- lt_infos = {('dirige', 'subject'): [other_p.eid]}
- rql, args = person.cw_unrelated_rql('associe', 'Personne', 'subject',
- lt_infos=lt_infos)
- self.assertEqual(u'Any O ORDERBY O WHERE O is_instance_of Personne', rql)
+ with self.admin_access.web_request() as req:
+ person = self.vreg['etypes'].etype_class('Personne')(req)
+ self.vreg['etypes'].etype_class('Personne').fetch_attrs = ()
+ other_p = req.create_entity('Personne', nom=u'titi')
+ lt_infos = {('dirige', 'subject'): [other_p.eid]}
+ rql, args = person.cw_unrelated_rql('associe', 'Personne', 'subject',
+ lt_infos=lt_infos)
+ self.assertEqual(u'Any O ORDERBY O WHERE O is_instance_of Personne', rql)
def test_unrelated_base(self):
- req = self.request()
- p = req.create_entity('Personne', nom=u'di mascio', prenom=u'adrien')
- e = req.create_entity('Tag', name=u'x')
- related = [r.eid for r in e.tags]
- self.assertEqual(related, [])
- unrelated = [r[0] for r in e.unrelated('tags', 'Personne', 'subject')]
- self.assertTrue(p.eid in unrelated)
- self.execute('SET X tags Y WHERE X is Tag, Y is Personne')
- e = self.execute('Any X WHERE X is Tag').get_entity(0, 0)
- unrelated = [r[0] for r in e.unrelated('tags', 'Personne', 'subject')]
- self.assertFalse(p.eid in unrelated)
+ with self.admin_access.web_request() as req:
+ p = req.create_entity('Personne', nom=u'di mascio', prenom=u'adrien')
+ e = req.create_entity('Tag', name=u'x')
+ related = [r.eid for r in e.tags]
+ self.assertEqual(related, [])
+ unrelated = [r[0] for r in e.unrelated('tags', 'Personne', 'subject')]
+ self.assertIn(p.eid, unrelated)
+ req.execute('SET X tags Y WHERE X is Tag, Y is Personne')
+ e = req.execute('Any X WHERE X is Tag').get_entity(0, 0)
+ unrelated = [r[0] for r in e.unrelated('tags', 'Personne', 'subject')]
+ self.assertNotIn(p.eid, unrelated)
def test_unrelated_limit(self):
- req = self.request()
- e = req.create_entity('Tag', name=u'x')
- req.create_entity('Personne', nom=u'di mascio', prenom=u'adrien')
- req.create_entity('Personne', nom=u'thenault', prenom=u'sylvain')
- self.assertEqual(len(e.unrelated('tags', 'Personne', 'subject', limit=1)),
- 1)
+ with self.admin_access.web_request() as req:
+ e = req.create_entity('Tag', name=u'x')
+ req.create_entity('Personne', nom=u'di mascio', prenom=u'adrien')
+ req.create_entity('Personne', nom=u'thenault', prenom=u'sylvain')
+ self.assertEqual(len(e.unrelated('tags', 'Personne', 'subject', limit=1)),
+ 1)
def test_unrelated_security(self):
rperms = self.schema['EmailAddress'].permissions['read']
@@ -523,206 +549,210 @@
clear_cache(self.schema['EmailAddress'], 'get_rqlexprs')
self.schema['EmailAddress'].permissions['read'] = ('managers', 'users', 'guests',)
try:
- email = self.execute('INSERT EmailAddress X: X address "hop"').get_entity(0, 0)
- rset = email.unrelated('use_email', 'CWUser', 'object')
- self.assertEqual([x.login for x in rset.entities()], [u'admin', u'anon'])
- user = self.request().user
- rset = user.unrelated('use_email', 'EmailAddress', 'subject')
- self.assertEqual([x.address for x in rset.entities()], [u'hop'])
- req = self.request()
- self.create_user(req, 'toto')
- self.login('toto')
- email = self.execute('Any X WHERE X eid %(x)s', {'x': email.eid}).get_entity(0, 0)
- rset = email.unrelated('use_email', 'CWUser', 'object')
- self.assertEqual([x.login for x in rset.entities()], ['toto'])
- user = self.request().user
- rset = user.unrelated('use_email', 'EmailAddress', 'subject')
- self.assertEqual([x.address for x in rset.entities()], ['hop'])
- user = self.execute('Any X WHERE X login "admin"').get_entity(0, 0)
- rset = user.unrelated('use_email', 'EmailAddress', 'subject')
- self.assertEqual([x.address for x in rset.entities()], [])
- self.login('anon')
- email = self.execute('Any X WHERE X eid %(x)s', {'x': email.eid}).get_entity(0, 0)
- rset = email.unrelated('use_email', 'CWUser', 'object')
- self.assertEqual([x.login for x in rset.entities()], [])
- user = self.request().user
- rset = user.unrelated('use_email', 'EmailAddress', 'subject')
- self.assertEqual([x.address for x in rset.entities()], [])
+ with self.admin_access.web_request() as req:
+ email = req.execute('INSERT EmailAddress X: X address "hop"').get_entity(0, 0)
+ rset = email.unrelated('use_email', 'CWUser', 'object')
+ self.assertEqual([x.login for x in rset.entities()], [u'admin', u'anon'])
+ user = req.user
+ rset = user.unrelated('use_email', 'EmailAddress', 'subject')
+ self.assertEqual([x.address for x in rset.entities()], [u'hop'])
+ self.create_user(req, 'toto')
+ with self.new_access('toto').web_request() as req:
+ email = req.execute('Any X WHERE X eid %(x)s', {'x': email.eid}).get_entity(0, 0)
+ rset = email.unrelated('use_email', 'CWUser', 'object')
+ self.assertEqual([x.login for x in rset.entities()], ['toto'])
+ user = req.user
+ rset = user.unrelated('use_email', 'EmailAddress', 'subject')
+ self.assertEqual([x.address for x in rset.entities()], ['hop'])
+ user = req.execute('Any X WHERE X login "admin"').get_entity(0, 0)
+ rset = user.unrelated('use_email', 'EmailAddress', 'subject')
+ self.assertEqual([x.address for x in rset.entities()], [])
+ with self.new_access('anon').web_request() as req:
+ email = req.execute('Any X WHERE X eid %(x)s', {'x': email.eid}).get_entity(0, 0)
+ rset = email.unrelated('use_email', 'CWUser', 'object')
+ self.assertEqual([x.login for x in rset.entities()], [])
+ user = req.user
+ rset = user.unrelated('use_email', 'EmailAddress', 'subject')
+ self.assertEqual([x.address for x in rset.entities()], [])
finally:
clear_cache(self.schema['EmailAddress'], 'get_groups')
clear_cache(self.schema['EmailAddress'], 'get_rqlexprs')
self.schema['EmailAddress'].permissions['read'] = rperms
def test_unrelated_new_entity(self):
- e = self.vreg['etypes'].etype_class('CWUser')(self.request())
- unrelated = [r[0] for r in e.unrelated('in_group', 'CWGroup', 'subject')]
- # should be default groups but owners, i.e. managers, users, guests
- self.assertEqual(len(unrelated), 3)
+ with self.admin_access.web_request() as req:
+ e = self.vreg['etypes'].etype_class('CWUser')(req)
+ unrelated = [r[0] for r in e.unrelated('in_group', 'CWGroup', 'subject')]
+ # should be all default groups except owners, i.e. managers, users, guests
+ self.assertEqual(len(unrelated), 3)
def test_printable_value_string(self):
- e = self.request().create_entity('Card', title=u'rest test', content=u'du :eid:`1:*ReST*`',
- content_format=u'text/rest')
- self.assertEqual(e.printable_value('content'),
- '<p>du <a class="reference" href="http://testing.fr/cubicweb/cwsource/system">*ReST*</a></p>')
- e.cw_attr_cache['content'] = 'du <em>html</em> <ref rql="CWUser X">users</ref>'
- e.cw_attr_cache['content_format'] = 'text/html'
- self.assertEqual(e.printable_value('content'),
- 'du <em>html</em> <a href="http://testing.fr/cubicweb/view?rql=CWUser%20X">users</a>')
- e.cw_attr_cache['content'] = 'du *texte*'
- e.cw_attr_cache['content_format'] = 'text/plain'
- self.assertEqual(e.printable_value('content'),
- '<p>\ndu *texte*<br/></p>')
- e.cw_attr_cache['title'] = 'zou'
- e.cw_attr_cache['content'] = '''\
+ with self.admin_access.web_request() as req:
+ e = req.create_entity('Card', title=u'rest test',
+ content=u'du :eid:`1:*ReST*`',
+ content_format=u'text/rest')
+ self.assertEqual(e.printable_value('content'),
+ '<p>du <a class="reference" href="http://testing.fr/cubicweb/cwsource/system">*ReST*</a></p>')
+ e.cw_attr_cache['content'] = 'du <em>html</em> <ref rql="CWUser X">users</ref>'
+ e.cw_attr_cache['content_format'] = 'text/html'
+ self.assertEqual(e.printable_value('content'),
+ 'du <em>html</em> <a href="http://testing.fr/cubicweb/view?rql=CWUser%20X">users</a>')
+ e.cw_attr_cache['content'] = 'du *texte*'
+ e.cw_attr_cache['content_format'] = 'text/plain'
+ self.assertEqual(e.printable_value('content'),
+ '<p>\ndu *texte*<br/></p>')
+ e.cw_attr_cache['title'] = 'zou'
+ e.cw_attr_cache['content'] = '''\
a title
=======
du :eid:`1:*ReST*`'''
- e.cw_attr_cache['content_format'] = 'text/rest'
- self.assertEqual(e.printable_value('content', format='text/plain'),
- e.cw_attr_cache['content'])
+ e.cw_attr_cache['content_format'] = 'text/rest'
+ self.assertEqual(e.printable_value('content', format='text/plain'),
+ e.cw_attr_cache['content'])
- e.cw_attr_cache['content'] = u'<b>yo (zou éà ;)</b>'
- e.cw_attr_cache['content_format'] = 'text/html'
- self.assertEqual(e.printable_value('content', format='text/plain').strip(),
- u'**yo (zou éà ;)**')
- if HAS_TAL:
- e.cw_attr_cache['content'] = '<h1 tal:content="self/title">titre</h1>'
- e.cw_attr_cache['content_format'] = 'text/cubicweb-page-template'
- self.assertEqual(e.printable_value('content'),
- '<h1>zou</h1>')
+ e.cw_attr_cache['content'] = u'<b>yo (zou éà ;)</b>'
+ e.cw_attr_cache['content_format'] = 'text/html'
+ self.assertEqual(e.printable_value('content', format='text/plain').strip(),
+ u'**yo (zou éà ;)**')
+ if HAS_TAL:
+ e.cw_attr_cache['content'] = '<h1 tal:content="self/title">titre</h1>'
+ e.cw_attr_cache['content_format'] = 'text/cubicweb-page-template'
+ self.assertEqual(e.printable_value('content'),
+ '<h1>zou</h1>')
def test_printable_value_bytes(self):
- req = self.request()
- e = req.create_entity('File', data=Binary('lambda x: 1'), data_format=u'text/x-python',
- data_encoding=u'ascii', data_name=u'toto.py')
- from cubicweb import mttransforms
- if mttransforms.HAS_PYGMENTS_TRANSFORMS:
- import pygments
- if tuple(int(i) for i in pygments.__version__.split('.')[:2]) >= (1, 3):
- self.assertEqual(e.printable_value('data'),
- '''<div class="highlight"><pre><span class="k">lambda</span> <span class="n">x</span><span class="p">:</span> <span class="mi">1</span>
+ with self.admin_access.web_request() as req:
+ e = req.create_entity('File', data=Binary('lambda x: 1'), data_format=u'text/x-python',
+ data_encoding=u'ascii', data_name=u'toto.py')
+ from cubicweb import mttransforms
+ if mttransforms.HAS_PYGMENTS_TRANSFORMS:
+ import pygments
+ if tuple(int(i) for i in pygments.__version__.split('.')[:2]) >= (1, 3):
+ self.assertEqual(e.printable_value('data'),
+ '''<div class="highlight"><pre><span class="k">lambda</span> <span class="n">x</span><span class="p">:</span> <span class="mi">1</span>
+</pre></div>''')
+ else:
+ self.assertEqual(e.printable_value('data'),
+ '''<div class="highlight"><pre><span class="k">lambda</span> <span class="n">x</span><span class="p">:</span> <span class="mf">1</span>
</pre></div>''')
else:
self.assertEqual(e.printable_value('data'),
- '''<div class="highlight"><pre><span class="k">lambda</span> <span class="n">x</span><span class="p">:</span> <span class="mf">1</span>
-</pre></div>''')
- else:
- self.assertEqual(e.printable_value('data'),
- '''<pre class="python">
-<span style="color: #C00000;">lambda</span> <span style="color: #000000;">x</span><span style="color: #0000C0;">:</span> <span style="color: #0080C0;">1</span>
+ '''<pre class="python">
+ <span style="color: #C00000;">lambda</span> <span style="color: #000000;">x</span><span style="color: #0000C0;">:</span> <span style="color: #0080C0;">1</span>
</pre>''')
- e = req.create_entity('File', data=Binary('*héhéhé*'), data_format=u'text/rest',
- data_encoding=u'utf-8', data_name=u'toto.txt')
- self.assertEqual(e.printable_value('data'),
- u'<p><em>héhéhé</em></p>')
+ e = req.create_entity('File', data=Binary('*héhéhé*'), data_format=u'text/rest',
+ data_encoding=u'utf-8', data_name=u'toto.txt')
+ self.assertEqual(e.printable_value('data'),
+ u'<p><em>héhéhé</em></p>')
def test_printable_value_bad_html(self):
"""make sure we don't crash if we try to render invalid XHTML strings"""
- req = self.request()
- e = req.create_entity('Card', title=u'bad html', content=u'<div>R&D<br>',
- content_format=u'text/html')
- tidy = lambda x: x.replace('\n', '')
- self.assertEqual(tidy(e.printable_value('content')),
- '<div>R&amp;D<br/></div>')
- e.cw_attr_cache['content'] = u'yo !! R&D <div> pas fermé'
- self.assertEqual(tidy(e.printable_value('content')),
- u'yo !! R&amp;D <div> pas fermé</div>')
- e.cw_attr_cache['content'] = u'R&D'
- self.assertEqual(tidy(e.printable_value('content')), u'R&amp;D')
- e.cw_attr_cache['content'] = u'R&D;'
- self.assertEqual(tidy(e.printable_value('content')), u'R&amp;D;')
- e.cw_attr_cache['content'] = u'yo !! R&D <div> pas fermé'
- self.assertEqual(tidy(e.printable_value('content')),
- u'yo !! R&amp;D <div> pas fermé</div>')
- e.cw_attr_cache['content'] = u'été <div> été'
- self.assertEqual(tidy(e.printable_value('content')),
- u'été <div> été</div>')
- e.cw_attr_cache['content'] = u'C&#39;est un exemple sérieux'
- self.assertEqual(tidy(e.printable_value('content')),
- u"C'est un exemple sérieux")
- # make sure valid xhtml is left untouched
- e.cw_attr_cache['content'] = u'<div>R&amp;D<br/></div>'
- self.assertEqual(e.printable_value('content'), e.cw_attr_cache['content'])
- e.cw_attr_cache['content'] = u'<div>été</div>'
- self.assertEqual(e.printable_value('content'), e.cw_attr_cache['content'])
- e.cw_attr_cache['content'] = u'été'
- self.assertEqual(e.printable_value('content'), e.cw_attr_cache['content'])
- e.cw_attr_cache['content'] = u'hop\r\nhop\nhip\rmomo'
- self.assertEqual(e.printable_value('content'), u'hop\nhop\nhip\nmomo')
+ with self.admin_access.web_request() as req:
+ e = req.create_entity('Card', title=u'bad html', content=u'<div>R&D<br>',
+ content_format=u'text/html')
+ tidy = lambda x: x.replace('\n', '')
+ self.assertEqual(tidy(e.printable_value('content')),
+ '<div>R&amp;D<br/></div>')
+ e.cw_attr_cache['content'] = u'yo !! R&D <div> pas fermé'
+ self.assertEqual(tidy(e.printable_value('content')),
+ u'yo !! R&amp;D <div> pas fermé</div>')
+ e.cw_attr_cache['content'] = u'R&D'
+ self.assertEqual(tidy(e.printable_value('content')), u'R&amp;D')
+ e.cw_attr_cache['content'] = u'R&D;'
+ self.assertEqual(tidy(e.printable_value('content')), u'R&amp;D;')
+ e.cw_attr_cache['content'] = u'yo !! R&D <div> pas fermé'
+ self.assertEqual(tidy(e.printable_value('content')),
+ u'yo !! R&amp;D <div> pas fermé</div>')
+ e.cw_attr_cache['content'] = u'été <div> été'
+ self.assertEqual(tidy(e.printable_value('content')),
+ u'été <div> été</div>')
+ e.cw_attr_cache['content'] = u'C&#39;est un exemple sérieux'
+ self.assertEqual(tidy(e.printable_value('content')),
+ u"C'est un exemple sérieux")
+ # make sure valid xhtml is left untouched
+ e.cw_attr_cache['content'] = u'<div>R&amp;D<br/></div>'
+ self.assertEqual(e.printable_value('content'), e.cw_attr_cache['content'])
+ e.cw_attr_cache['content'] = u'<div>été</div>'
+ self.assertEqual(e.printable_value('content'), e.cw_attr_cache['content'])
+ e.cw_attr_cache['content'] = u'été'
+ self.assertEqual(e.printable_value('content'), e.cw_attr_cache['content'])
+ e.cw_attr_cache['content'] = u'hop\r\nhop\nhip\rmomo'
+ self.assertEqual(e.printable_value('content'), u'hop\nhop\nhip\nmomo')
def test_printable_value_bad_html_ms(self):
- req = self.request()
- e = req.create_entity('Card', title=u'bad html', content=u'<div>R&D<br>',
- content_format=u'text/html')
- tidy = lambda x: x.replace('\n', '')
- e.cw_attr_cache['content'] = u'<div x:foo="bar">ms orifice produces weird html</div>'
- # Caution! current implementation of soup2xhtml strips first div element
- content = soup2xhtml(e.printable_value('content'), 'utf-8')
- self.assertMultiLineEqual(content, u'<div>ms orifice produces weird html</div>')
+ with self.admin_access.web_request() as req:
+ e = req.create_entity('Card', title=u'bad html', content=u'<div>R&D<br>',
+ content_format=u'text/html')
+ tidy = lambda x: x.replace('\n', '')
+ e.cw_attr_cache['content'] = u'<div x:foo="bar">ms orifice produces weird html</div>'
+ # Caution! current implementation of soup2xhtml strips first div element
+ content = soup2xhtml(e.printable_value('content'), 'utf-8')
+ self.assertMultiLineEqual(content, u'<div>ms orifice produces weird html</div>')
def test_fulltextindex(self):
- e = self.vreg['etypes'].etype_class('File')(self.request())
- e.cw_attr_cache['description'] = 'du <em>html</em>'
- e.cw_attr_cache['description_format'] = 'text/html'
- e.cw_attr_cache['data'] = Binary('some <em>data</em>')
- e.cw_attr_cache['data_name'] = 'an html file'
- e.cw_attr_cache['data_format'] = 'text/html'
- e.cw_attr_cache['data_encoding'] = 'ascii'
- e._cw.transaction_data = {} # XXX req should be a session
- words = e.cw_adapt_to('IFTIndexable').get_words()
- words['C'].sort()
- self.assertEqual({'C': sorted(['an', 'html', 'file', 'du', 'html', 'some', 'data'])},
- words)
+ with self.admin_access.web_request() as req:
+ e = self.vreg['etypes'].etype_class('File')(req)
+ e.cw_attr_cache['description'] = 'du <em>html</em>'
+ e.cw_attr_cache['description_format'] = 'text/html'
+ e.cw_attr_cache['data'] = Binary('some <em>data</em>')
+ e.cw_attr_cache['data_name'] = 'an html file'
+ e.cw_attr_cache['data_format'] = 'text/html'
+ e.cw_attr_cache['data_encoding'] = 'ascii'
+ e._cw.transaction_data = {} # XXX req should be a session
+ words = e.cw_adapt_to('IFTIndexable').get_words()
+ words['C'].sort()
+ self.assertEqual({'C': sorted(['an', 'html', 'file', 'du', 'html', 'some', 'data'])},
+ words)
def test_nonregr_relation_cache(self):
- req = self.request()
- p1 = req.create_entity('Personne', nom=u'di mascio', prenom=u'adrien')
- p2 = req.create_entity('Personne', nom=u'toto')
- self.execute('SET X evaluee Y WHERE X nom "di mascio", Y nom "toto"')
- self.assertEqual(p1.evaluee[0].nom, "toto")
- self.assertTrue(not p1.reverse_evaluee)
+ with self.admin_access.web_request() as req:
+ p1 = req.create_entity('Personne', nom=u'di mascio', prenom=u'adrien')
+ p2 = req.create_entity('Personne', nom=u'toto')
+ req.execute('SET X evaluee Y WHERE X nom "di mascio", Y nom "toto"')
+ self.assertEqual(p1.evaluee[0].nom, "toto")
+ self.assertFalse(p1.reverse_evaluee)
def test_complete_relation(self):
- session = self.session
- eid = session.execute(
- 'INSERT TrInfo X: X comment "zou", X wf_info_for U, X from_state S1, X to_state S2 '
- 'WHERE U login "admin", S1 name "activated", S2 name "deactivated"')[0][0]
- trinfo = self.execute('Any X WHERE X eid %(x)s', {'x': eid}).get_entity(0, 0)
- trinfo.complete()
- self.assertTrue(isinstance(trinfo.cw_attr_cache['creation_date'], datetime))
- self.assertTrue(trinfo.cw_relation_cached('from_state', 'subject'))
- self.assertTrue(trinfo.cw_relation_cached('to_state', 'subject'))
- self.assertTrue(trinfo.cw_relation_cached('wf_info_for', 'subject'))
- self.assertEqual(trinfo.by_transition, ())
+ with self.admin_access.repo_cnx() as session:
+ eid = session.execute(
+ 'INSERT TrInfo X: X comment "zou", X wf_info_for U, X from_state S1, X to_state S2 '
+ 'WHERE U login "admin", S1 name "activated", S2 name "deactivated"')[0][0]
+ trinfo = session.execute('Any X WHERE X eid %(x)s', {'x': eid}).get_entity(0, 0)
+ trinfo.complete()
+ self.assertIsInstance(trinfo.cw_attr_cache['creation_date'], datetime)
+ self.assertTrue(trinfo.cw_relation_cached('from_state', 'subject'))
+ self.assertTrue(trinfo.cw_relation_cached('to_state', 'subject'))
+ self.assertTrue(trinfo.cw_relation_cached('wf_info_for', 'subject'))
+ self.assertEqual(trinfo.by_transition, ())
def test_request_cache(self):
- req = self.request()
- user = self.execute('CWUser X WHERE X login "admin"', req=req).get_entity(0, 0)
- state = user.in_state[0]
- samestate = self.execute('State X WHERE X name "activated"', req=req).get_entity(0, 0)
- self.assertTrue(state is samestate)
+ with self.admin_access.web_request() as req:
+ user = req.execute('CWUser X WHERE X login "admin"').get_entity(0, 0)
+ state = user.in_state[0]
+ samestate = req.execute('State X WHERE X name "activated"').get_entity(0, 0)
+ self.assertIs(state, samestate)
def test_rest_path(self):
- req = self.request()
- note = req.create_entity('Note', type=u'z')
- self.assertEqual(note.rest_path(), 'note/%s' % note.eid)
- # unique attr
- tag = req.create_entity('Tag', name=u'x')
- self.assertEqual(tag.rest_path(), 'tag/x')
- # test explicit rest_attr
- person = req.create_entity('Personne', prenom=u'john', nom=u'doe')
- self.assertEqual(person.rest_path(), 'personne/doe')
- # ambiguity test
- person2 = req.create_entity('Personne', prenom=u'remi', nom=u'doe')
- person.cw_clear_all_caches()
- self.assertEqual(person.rest_path(), unicode(person.eid))
- self.assertEqual(person2.rest_path(), unicode(person2.eid))
- # unique attr with None value (nom in this case)
- friend = req.create_entity('Ami', prenom=u'bob')
- self.assertEqual(friend.rest_path(), unicode(friend.eid))
+ with self.admin_access.web_request() as req:
+ note = req.create_entity('Note', type=u'z')
+ self.assertEqual(note.rest_path(), 'note/%s' % note.eid)
+ # unique attr
+ tag = req.create_entity('Tag', name=u'x')
+ self.assertEqual(tag.rest_path(), 'tag/x')
+ # test explicit rest_attr
+ person = req.create_entity('Personne', prenom=u'john', nom=u'doe')
+ self.assertEqual(person.rest_path(), 'personne/doe')
+ # ambiguity test
+ person2 = req.create_entity('Personne', prenom=u'remi', nom=u'doe')
+ person.cw_clear_all_caches()
+ self.assertEqual(person.rest_path(), unicode(person.eid))
+ self.assertEqual(person2.rest_path(), unicode(person2.eid))
+ # unique attr with None value (nom in this case)
+ friend = req.create_entity('Ami', prenom=u'bob')
+ self.assertEqual(friend.rest_path(), unicode(friend.eid))
def test_can_use_rest_path(self):
self.assertTrue(can_use_rest_path(u'zobi'))
@@ -732,66 +762,66 @@
self.assertFalse(can_use_rest_path(u'zo?bi'))
def test_cw_set_attributes(self):
- req = self.request()
- person = req.create_entity('Personne', nom=u'di mascio', prenom=u'adrien')
- self.assertEqual(person.prenom, u'adrien')
- self.assertEqual(person.nom, u'di mascio')
- person.cw_set(prenom=u'sylvain', nom=u'thénault')
- person = self.execute('Personne P').get_entity(0, 0) # XXX retreival needed ?
- self.assertEqual(person.prenom, u'sylvain')
- self.assertEqual(person.nom, u'thénault')
+ with self.admin_access.web_request() as req:
+ person = req.create_entity('Personne', nom=u'di mascio', prenom=u'adrien')
+ self.assertEqual(person.prenom, u'adrien')
+ self.assertEqual(person.nom, u'di mascio')
+ person.cw_set(prenom=u'sylvain', nom=u'thénault')
+ person = req.execute('Personne P').get_entity(0, 0) # XXX retrieval needed?
+ self.assertEqual(person.prenom, u'sylvain')
+ self.assertEqual(person.nom, u'thénault')
def test_cw_set_relations(self):
- req = self.request()
- person = req.create_entity('Personne', nom=u'chauvat', prenom=u'nicolas')
- note = req.create_entity('Note', type=u'x')
- note.cw_set(ecrit_par=person)
- note = req.create_entity('Note', type=u'y')
- note.cw_set(ecrit_par=person.eid)
- self.assertEqual(len(person.reverse_ecrit_par), 2)
+ with self.admin_access.web_request() as req:
+ person = req.create_entity('Personne', nom=u'chauvat', prenom=u'nicolas')
+ note = req.create_entity('Note', type=u'x')
+ note.cw_set(ecrit_par=person)
+ note = req.create_entity('Note', type=u'y')
+ note.cw_set(ecrit_par=person.eid)
+ self.assertEqual(len(person.reverse_ecrit_par), 2)
def test_metainformation_and_external_absolute_url(self):
- req = self.request()
- note = req.create_entity('Note', type=u'z')
- metainf = note.cw_metainformation()
- self.assertEqual(metainf, {'source': {'type': 'native', 'uri': 'system',
- 'use-cwuri-as-url': False},
- 'type': u'Note', 'extid': None})
- self.assertEqual(note.absolute_url(), 'http://testing.fr/cubicweb/note/%s' % note.eid)
- metainf['source'] = metainf['source'].copy()
- metainf['source']['base-url'] = 'http://cubicweb2.com/'
- metainf['extid'] = 1234
- self.assertEqual(note.absolute_url(), 'http://cubicweb2.com/note/1234')
+ with self.admin_access.web_request() as req:
+ note = req.create_entity('Note', type=u'z')
+ metainf = note.cw_metainformation()
+ self.assertEqual(metainf, {'source': {'type': 'native', 'uri': 'system',
+ 'use-cwuri-as-url': False},
+ 'type': u'Note', 'extid': None})
+ self.assertEqual(note.absolute_url(), 'http://testing.fr/cubicweb/note/%s' % note.eid)
+ metainf['source'] = metainf['source'].copy()
+ metainf['source']['base-url'] = 'http://cubicweb2.com/'
+ metainf['extid'] = 1234
+ self.assertEqual(note.absolute_url(), 'http://cubicweb2.com/note/1234')
def test_absolute_url_empty_field(self):
- req = self.request()
- card = req.create_entity('Card', wikiid=u'', title=u'test')
- self.assertEqual(card.absolute_url(),
- 'http://testing.fr/cubicweb/%s' % card.eid)
+ with self.admin_access.web_request() as req:
+ card = req.create_entity('Card', wikiid=u'', title=u'test')
+ self.assertEqual(card.absolute_url(),
+ 'http://testing.fr/cubicweb/%s' % card.eid)
def test_create_and_compare_entity(self):
- req = self.request()
- p1 = req.create_entity('Personne', nom=u'fayolle', prenom=u'alexandre')
- p2 = req.create_entity('Personne', nom=u'campeas', prenom=u'aurelien')
- note = req.create_entity('Note', type=u'z')
- req = self.request()
- p = req.create_entity('Personne', nom=u'di mascio', prenom=u'adrien',
- connait=p1, evaluee=[p1, p2],
- reverse_ecrit_par=note)
- self.assertEqual(p.nom, 'di mascio')
- self.assertEqual([c.nom for c in p.connait], ['fayolle'])
- self.assertEqual(sorted([c.nom for c in p.evaluee]), ['campeas', 'fayolle'])
- self.assertEqual([c.type for c in p.reverse_ecrit_par], ['z'])
-
- req = self.request()
- auc = req.execute('Personne P WHERE P prenom "aurelien"').get_entity(0,0)
- persons = set()
- persons.add(p1)
- persons.add(p2)
- persons.add(auc)
- self.assertEqual(2, len(persons))
- self.assertNotEqual(p1, p2)
- self.assertEqual(p2, auc)
+ access = self.admin_access
+ with access.web_request() as req:
+ p1 = req.create_entity('Personne', nom=u'fayolle', prenom=u'alexandre')
+ p2 = req.create_entity('Personne', nom=u'campeas', prenom=u'aurelien')
+ note = req.create_entity('Note', type=u'z')
+ p = req.create_entity('Personne', nom=u'di mascio', prenom=u'adrien',
+ connait=p1, evaluee=[p1, p2],
+ reverse_ecrit_par=note)
+ self.assertEqual(p.nom, 'di mascio')
+ self.assertEqual([c.nom for c in p.connait], ['fayolle'])
+ self.assertEqual(sorted([c.nom for c in p.evaluee]), ['campeas', 'fayolle'])
+ self.assertEqual([c.type for c in p.reverse_ecrit_par], ['z'])
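+            # commit so the entities are visible to the second request below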
+ req.cnx.commit()
+ with access.web_request() as req:
+ auc = req.execute('Personne P WHERE P prenom "aurelien"').get_entity(0,0)
+ persons = set()
+ persons.add(p1)
+ persons.add(p2)
+ persons.add(auc)
+ self.assertEqual(2, len(persons))
+ self.assertNotEqual(p1, p2)
+ self.assertEqual(p2, auc)
if __name__ == '__main__':
--- a/test/unittest_migration.py Tue Jun 10 09:35:26 2014 +0200
+++ b/test/unittest_migration.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -76,7 +76,7 @@
def test_filter_scripts_for_mode(self):
config = CubicWebConfiguration('data')
config.verbosity = 0
- self.assert_(not isinstance(config.migration_handler(), ServerMigrationHelper))
+ self.assertNotIsInstance(config.migration_handler(), ServerMigrationHelper)
self.assertIsInstance(config.migration_handler(), MigrationHelper)
config = self.config
config.__class__.name = 'repository'
@@ -99,16 +99,15 @@
def test_db_creation(self):
"""make sure database can be created"""
config = ApptestConfiguration('data', apphome=self.datadir)
- source = config.sources()['system']
+ source = config.system_source_config
self.assertEqual(source['db-driver'], 'sqlite')
handler = get_test_db_handler(config)
handler.init_test_database()
handler.build_db_cache()
repo, cnx = handler.get_repo_and_cnx()
- cu = cnx.cursor()
- self.assertEqual(cu.execute('Any SN WHERE X is CWUser, X login "admin", X in_state S, S name SN').rows,
- [['activated']])
- cnx.close()
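+        # entering the block opens the connection; it is closed again on exit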
+ with cnx:
+ self.assertEqual(cnx.execute('Any SN WHERE X is CWUser, X login "admin", X in_state S, S name SN').rows,
+ [['activated']])
repo.shutdown()
if __name__ == '__main__':
--- a/test/unittest_predicates.py Tue Jun 10 09:35:26 2014 +0200
+++ b/test/unittest_predicates.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -18,7 +18,10 @@
"""unit tests for selectors mechanism"""
from operator import eq, lt, le, gt
+from contextlib import contextmanager
+
from logilab.common.testlib import TestCase, unittest_main
+from logilab.common.decorators import clear_cache
from cubicweb import Binary
from cubicweb.devtools.testlib import CubicWebTC
@@ -31,54 +34,39 @@
-class ImplementsSelectorTC(CubicWebTC):
+class ImplementsTC(CubicWebTC):
def test_etype_priority(self):
- req = self.request()
- f = req.create_entity('File', data_name=u'hop.txt', data=Binary('hop'))
- rset = f.as_rset()
- anyscore = is_instance('Any')(f.__class__, req, rset=rset)
- idownscore = adaptable('IDownloadable')(f.__class__, req, rset=rset)
- self.assertTrue(idownscore > anyscore, (idownscore, anyscore))
- filescore = is_instance('File')(f.__class__, req, rset=rset)
- self.assertTrue(filescore > idownscore, (filescore, idownscore))
+ with self.admin_access.web_request() as req:
+ f = req.create_entity('File', data_name=u'hop.txt', data=Binary('hop'))
+ rset = f.as_rset()
+ anyscore = is_instance('Any')(f.__class__, req, rset=rset)
+ idownscore = adaptable('IDownloadable')(f.__class__, req, rset=rset)
+ self.assertTrue(idownscore > anyscore, (idownscore, anyscore))
+ filescore = is_instance('File')(f.__class__, req, rset=rset)
+ self.assertTrue(filescore > idownscore, (filescore, idownscore))
def test_etype_inheritance_no_yams_inheritance(self):
cls = self.vreg['etypes'].etype_class('Personne')
- self.assertFalse(is_instance('Societe').score_class(cls, self.request()))
+ with self.admin_access.web_request() as req:
+ self.assertFalse(is_instance('Societe').score_class(cls, req))
def test_yams_inheritance(self):
cls = self.vreg['etypes'].etype_class('Transition')
- self.assertEqual(is_instance('BaseTransition').score_class(cls, self.request()),
- 3)
+ with self.admin_access.web_request() as req:
+ self.assertEqual(is_instance('BaseTransition').score_class(cls, req),
+ 3)
def test_outer_join(self):
- req = self.request()
- rset = req.execute('Any U,B WHERE B? bookmarked_by U, U login "anon"')
- self.assertEqual(is_instance('Bookmark')(None, req, rset=rset, row=0, col=1),
- 0)
+ with self.admin_access.web_request() as req:
+ rset = req.execute('Any U,B WHERE B? bookmarked_by U, U login "anon"')
+ self.assertEqual(is_instance('Bookmark')(None, req, rset=rset, row=0, col=1),
+ 0)
class WorkflowSelectorTC(CubicWebTC):
- def _commit(self):
- self.commit()
- self.wf_entity.cw_clear_all_caches()
-
- def setup_database(self):
- wf = self.shell().add_workflow("wf_test", 'StateFull', default=True)
- created = wf.add_state('created', initial=True)
- validated = wf.add_state('validated')
- abandoned = wf.add_state('abandoned')
- wf.add_transition('validate', created, validated, ('managers',))
- wf.add_transition('forsake', (created, validated,), abandoned, ('managers',))
def setUp(self):
super(WorkflowSelectorTC, self).setUp()
- self.req = self.request()
- self.wf_entity = self.req.create_entity('StateFull', name=u'')
- self.rset = self.wf_entity.as_rset()
- self.adapter = self.wf_entity.cw_adapt_to('IWorkflowable')
- self._commit()
- self.assertEqual(self.adapter.state, 'created')
        # enable debug mode for on-the-fly state/transition validation
self.vreg.config.debugmode = True
@@ -86,122 +74,154 @@
self.vreg.config.debugmode = False
super(WorkflowSelectorTC, self).tearDown()
+ def setup_database(self):
+ with self.admin_access.shell() as shell:
+ wf = shell.add_workflow("wf_test", 'StateFull', default=True)
+ created = wf.add_state('created', initial=True)
+ validated = wf.add_state('validated')
+ abandoned = wf.add_state('abandoned')
+ wf.add_transition('validate', created, validated, ('managers',))
+ wf.add_transition('forsake', (created, validated,), abandoned, ('managers',))
+
+ @contextmanager
+ def statefull_stuff(self):
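+        """Yield a fresh StateFull entity in its initial 'created' state,
+        along with its rset and IWorkflowable adapter."""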
+ with self.admin_access.web_request() as req:
+ wf_entity = req.create_entity('StateFull', name=u'')
+ rset = wf_entity.as_rset()
+ adapter = wf_entity.cw_adapt_to('IWorkflowable')
+ req.cnx.commit()
+ self.assertEqual(adapter.state, 'created')
+ yield req, wf_entity, rset, adapter
+
def test_is_in_state(self):
- for state in ('created', 'validated', 'abandoned'):
- selector = is_in_state(state)
- self.assertEqual(selector(None, self.req, rset=self.rset),
- state=="created")
-
- self.adapter.fire_transition('validate')
- self._commit()
- self.assertEqual(self.adapter.state, 'validated')
+ with self.statefull_stuff() as (req, wf_entity, rset, adapter):
+ for state in ('created', 'validated', 'abandoned'):
+ selector = is_in_state(state)
+ self.assertEqual(selector(None, req, rset=rset),
+ state=="created")
- selector = is_in_state('created')
- self.assertEqual(selector(None, self.req, rset=self.rset), 0)
- selector = is_in_state('validated')
- self.assertEqual(selector(None, self.req, rset=self.rset), 1)
- selector = is_in_state('validated', 'abandoned')
- self.assertEqual(selector(None, self.req, rset=self.rset), 1)
- selector = is_in_state('abandoned')
- self.assertEqual(selector(None, self.req, rset=self.rset), 0)
+ adapter.fire_transition('validate')
+            req.cnx.commit()
+            wf_entity.cw_clear_all_caches()
+ self.assertEqual(adapter.state, 'validated')
+
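+            # drop the rset's cached entity so the selector re-reads its state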
+ clear_cache(rset, 'get_entity')
+
+ selector = is_in_state('created')
+ self.assertEqual(selector(None, req, rset=rset), 0)
+ selector = is_in_state('validated')
+ self.assertEqual(selector(None, req, rset=rset), 1)
+ selector = is_in_state('validated', 'abandoned')
+ self.assertEqual(selector(None, req, rset=rset), 1)
+ selector = is_in_state('abandoned')
+ self.assertEqual(selector(None, req, rset=rset), 0)
- self.adapter.fire_transition('forsake')
- self._commit()
- self.assertEqual(self.adapter.state, 'abandoned')
+ adapter.fire_transition('forsake')
+            req.cnx.commit()
+            wf_entity.cw_clear_all_caches()
+ self.assertEqual(adapter.state, 'abandoned')
+
+ clear_cache(rset, 'get_entity')
- selector = is_in_state('created')
- self.assertEqual(selector(None, self.req, rset=self.rset), 0)
- selector = is_in_state('validated')
- self.assertEqual(selector(None, self.req, rset=self.rset), 0)
- selector = is_in_state('validated', 'abandoned')
- self.assertEqual(selector(None, self.req, rset=self.rset), 1)
- self.assertEqual(self.adapter.state, 'abandoned')
- self.assertEqual(selector(None, self.req, rset=self.rset), 1)
+ selector = is_in_state('created')
+ self.assertEqual(selector(None, req, rset=rset), 0)
+ selector = is_in_state('validated')
+ self.assertEqual(selector(None, req, rset=rset), 0)
+ selector = is_in_state('validated', 'abandoned')
+ self.assertEqual(selector(None, req, rset=rset), 1)
+ self.assertEqual(adapter.state, 'abandoned')
+ self.assertEqual(selector(None, req, rset=rset), 1)
def test_is_in_state_unvalid_names(self):
- selector = is_in_state("unknown")
- with self.assertRaises(ValueError) as cm:
- selector(None, self.req, rset=self.rset)
- self.assertEqual(str(cm.exception),
- "wf_test: unknown state(s): unknown")
- selector = is_in_state("weird", "unknown", "created", "weird")
- with self.assertRaises(ValueError) as cm:
- selector(None, self.req, rset=self.rset)
- self.assertEqual(str(cm.exception),
- "wf_test: unknown state(s): unknown,weird")
+ with self.statefull_stuff() as (req, wf_entity, rset, adapter):
+ selector = is_in_state("unknown")
+ with self.assertRaises(ValueError) as cm:
+ selector(None, req, rset=rset)
+ self.assertEqual(str(cm.exception),
+ "wf_test: unknown state(s): unknown")
+ selector = is_in_state("weird", "unknown", "created", "weird")
+ with self.assertRaises(ValueError) as cm:
+ selector(None, req, rset=rset)
+ self.assertEqual(str(cm.exception),
+ "wf_test: unknown state(s): unknown,weird")
def test_on_transition(self):
- for transition in ('validate', 'forsake'):
- selector = on_transition(transition)
- self.assertEqual(selector(None, self.req, rset=self.rset), 0)
+ with self.statefull_stuff() as (req, wf_entity, rset, adapter):
+ for transition in ('validate', 'forsake'):
+ selector = on_transition(transition)
+ self.assertEqual(selector(None, req, rset=rset), 0)
- self.adapter.fire_transition('validate')
- self._commit()
- self.assertEqual(self.adapter.state, 'validated')
+ adapter.fire_transition('validate')
+            req.cnx.commit()
+            wf_entity.cw_clear_all_caches()
+ self.assertEqual(adapter.state, 'validated')
+
+ clear_cache(rset, 'get_entity')
- selector = on_transition("validate")
- self.assertEqual(selector(None, self.req, rset=self.rset), 1)
- selector = on_transition("validate", "forsake")
- self.assertEqual(selector(None, self.req, rset=self.rset), 1)
- selector = on_transition("forsake")
- self.assertEqual(selector(None, self.req, rset=self.rset), 0)
+ selector = on_transition("validate")
+ self.assertEqual(selector(None, req, rset=rset), 1)
+ selector = on_transition("validate", "forsake")
+ self.assertEqual(selector(None, req, rset=rset), 1)
+ selector = on_transition("forsake")
+ self.assertEqual(selector(None, req, rset=rset), 0)
- self.adapter.fire_transition('forsake')
- self._commit()
- self.assertEqual(self.adapter.state, 'abandoned')
+ adapter.fire_transition('forsake')
+            req.cnx.commit()
+            wf_entity.cw_clear_all_caches()
+ self.assertEqual(adapter.state, 'abandoned')
+
+ clear_cache(rset, 'get_entity')
- selector = on_transition("validate")
- self.assertEqual(selector(None, self.req, rset=self.rset), 0)
- selector = on_transition("validate", "forsake")
- self.assertEqual(selector(None, self.req, rset=self.rset), 1)
- selector = on_transition("forsake")
- self.assertEqual(selector(None, self.req, rset=self.rset), 1)
+ selector = on_transition("validate")
+ self.assertEqual(selector(None, req, rset=rset), 0)
+ selector = on_transition("validate", "forsake")
+ self.assertEqual(selector(None, req, rset=rset), 1)
+ selector = on_transition("forsake")
+ self.assertEqual(selector(None, req, rset=rset), 1)
def test_on_transition_unvalid_names(self):
- selector = on_transition("unknown")
- with self.assertRaises(ValueError) as cm:
- selector(None, self.req, rset=self.rset)
- self.assertEqual(str(cm.exception),
- "wf_test: unknown transition(s): unknown")
- selector = on_transition("weird", "unknown", "validate", "weird")
- with self.assertRaises(ValueError) as cm:
- selector(None, self.req, rset=self.rset)
- self.assertEqual(str(cm.exception),
- "wf_test: unknown transition(s): unknown,weird")
+ with self.statefull_stuff() as (req, wf_entity, rset, adapter):
+ selector = on_transition("unknown")
+ with self.assertRaises(ValueError) as cm:
+ selector(None, req, rset=rset)
+ self.assertEqual(str(cm.exception),
+ "wf_test: unknown transition(s): unknown")
+ selector = on_transition("weird", "unknown", "validate", "weird")
+ with self.assertRaises(ValueError) as cm:
+ selector(None, req, rset=rset)
+ self.assertEqual(str(cm.exception),
+ "wf_test: unknown transition(s): unknown,weird")
def test_on_transition_with_no_effect(self):
"""selector will not be triggered with `change_state()`"""
- self.adapter.change_state('validated')
- self._commit()
- self.assertEqual(self.adapter.state, 'validated')
+ with self.statefull_stuff() as (req, wf_entity, rset, adapter):
+ adapter.change_state('validated')
+            req.cnx.commit()
+            wf_entity.cw_clear_all_caches()
+ self.assertEqual(adapter.state, 'validated')
- selector = on_transition("validate")
- self.assertEqual(selector(None, self.req, rset=self.rset), 0)
- selector = on_transition("validate", "forsake")
- self.assertEqual(selector(None, self.req, rset=self.rset), 0)
- selector = on_transition("forsake")
- self.assertEqual(selector(None, self.req, rset=self.rset), 0)
+ selector = on_transition("validate")
+ self.assertEqual(selector(None, req, rset=rset), 0)
+ selector = on_transition("validate", "forsake")
+ self.assertEqual(selector(None, req, rset=rset), 0)
+ selector = on_transition("forsake")
+ self.assertEqual(selector(None, req, rset=rset), 0)
class RelationPossibleTC(CubicWebTC):
def test_rqlst_1(self):
- req = self.request()
- selector = relation_possible('in_group')
- select = self.vreg.parse(req, 'Any X WHERE X is CWUser').children[0]
- score = selector(None, req, rset=1,
- select=select, filtered_variable=select.defined_vars['X'])
- self.assertEqual(score, 1)
+ with self.admin_access.web_request() as req:
+ selector = relation_possible('in_group')
+ select = self.vreg.parse(req, 'Any X WHERE X is CWUser').children[0]
+ score = selector(None, req, rset=1,
+ select=select, filtered_variable=select.defined_vars['X'])
+ self.assertEqual(score, 1)
def test_rqlst_2(self):
- req = self.request()
- selector = relation_possible('in_group')
- select = self.vreg.parse(req, 'Any 1, COUNT(X) WHERE X is CWUser, X creation_date XD, '
- 'Y creation_date YD, Y is CWGroup '
- 'HAVING DAY(XD)=DAY(YD)').children[0]
- score = selector(None, req, rset=1,
- select=select, filtered_variable=select.defined_vars['X'])
- self.assertEqual(score, 1)
+ with self.admin_access.web_request() as req:
+ selector = relation_possible('in_group')
+ select = self.vreg.parse(req, 'Any 1, COUNT(X) WHERE X is CWUser, X creation_date XD, '
+ 'Y creation_date YD, Y is CWGroup '
+ 'HAVING DAY(XD)=DAY(YD)').children[0]
+ score = selector(None, req, rset=1,
+ select=select, filtered_variable=select.defined_vars['X'])
+ self.assertEqual(score, 1)
def test_ambiguous(self):
# Ambiguous relations are :
@@ -210,10 +230,11 @@
# checking case.
selector = relation_possible('fabrique_par', role='object',
target_etype='Personne', strict=True)
- req = self.request()
- usine = req.create_entity('Usine', lieu=u'here')
- score = selector(None, req, rset=usine.as_rset())
- self.assertEqual(0, score)
+ with self.admin_access.web_request() as req:
+ usine = req.create_entity('Usine', lieu=u'here')
+ score = selector(None, req, rset=usine.as_rset())
+ self.assertEqual(0, score)
+
class MatchUserGroupsTC(CubicWebTC):
def test_owners_group(self):
@@ -227,79 +248,85 @@
SomeAction.__registered__(self.vreg['actions'])
self.assertTrue(SomeAction in self.vreg['actions']['yo'], self.vreg['actions'])
try:
+ with self.admin_access.web_request() as req:
+ self.create_user(req, 'john')
# login as a simple user
- req = self.request()
- self.create_user(req, 'john')
- self.login('john')
- # it should not be possible to use SomeAction not owned objects
- req = self.request()
- rset = req.execute('Any G WHERE G is CWGroup, G name "managers"')
- self.assertFalse('yo' in dict(self.pactions(req, rset)))
- # insert a new card, and check that we can use SomeAction on our object
- self.execute('INSERT Card C: C title "zoubidou"')
- self.commit()
- req = self.request()
- rset = req.execute('Card C WHERE C title "zoubidou"')
- self.assertTrue('yo' in dict(self.pactions(req, rset)), self.pactions(req, rset))
+ john_access = self.new_access('john')
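+            # requests obtained from john_access run as the plain user 'john'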
+ with john_access.web_request() as req:
+                # it should not be possible to use SomeAction on objects we do not own
+ rset = req.execute('Any G WHERE G is CWGroup, G name "managers"')
+ self.assertFalse('yo' in dict(self.pactions(req, rset)))
+ # insert a new card, and check that we can use SomeAction on our object
+ req.execute('INSERT Card C: C title "zoubidou"')
+ req.cnx.commit()
+ with john_access.web_request() as req:
+ rset = req.execute('Card C WHERE C title "zoubidou"')
+ self.assertTrue('yo' in dict(self.pactions(req, rset)), self.pactions(req, rset))
# make sure even managers can't use the action
- self.restore_connection()
- req = self.request()
- rset = req.execute('Card C WHERE C title "zoubidou"')
- self.assertFalse('yo' in dict(self.pactions(req, rset)))
+ with self.admin_access.web_request() as req:
+ rset = req.execute('Card C WHERE C title "zoubidou"')
+ self.assertFalse('yo' in dict(self.pactions(req, rset)))
finally:
del self.vreg[SomeAction.__registry__][SomeAction.__regid__]
-class MultiLinesRsetSelectorTC(CubicWebTC):
- def setUp(self):
- super(MultiLinesRsetSelectorTC, self).setUp()
- self.req = self.request()
- self.req.execute('INSERT CWGroup G: G name "group1"')
- self.req.execute('INSERT CWGroup G: G name "group2"')
- self.commit()
- self.rset = self.req.execute('Any G WHERE G is CWGroup')
+class MultiLinesRsetTC(CubicWebTC):
+ def setup_database(self):
+ with self.admin_access.web_request() as req:
+ req.execute('INSERT CWGroup G: G name "group1"')
+ req.execute('INSERT CWGroup G: G name "group2"')
+ req.cnx.commit()
def test_default_op_in_selector(self):
- expected = len(self.rset)
- selector = multi_lines_rset(expected)
- self.assertEqual(selector(None, self.req, rset=self.rset), 1)
- self.assertEqual(selector(None, self.req, None), 0)
- selector = multi_lines_rset(expected + 1)
- self.assertEqual(selector(None, self.req, rset=self.rset), 0)
- self.assertEqual(selector(None, self.req, None), 0)
- selector = multi_lines_rset(expected - 1)
- self.assertEqual(selector(None, self.req, rset=self.rset), 0)
- self.assertEqual(selector(None, self.req, None), 0)
+ with self.admin_access.web_request() as req:
+ rset = req.execute('Any G WHERE G is CWGroup')
+ expected = len(rset)
+ selector = multi_lines_rset(expected)
+ self.assertEqual(selector(None, req, rset=rset), 1)
+ self.assertEqual(selector(None, req, None), 0)
+ selector = multi_lines_rset(expected + 1)
+ self.assertEqual(selector(None, req, rset=rset), 0)
+ self.assertEqual(selector(None, req, None), 0)
+ selector = multi_lines_rset(expected - 1)
+ self.assertEqual(selector(None, req, rset=rset), 0)
+ self.assertEqual(selector(None, req, None), 0)
def test_without_rset(self):
- expected = len(self.rset)
- selector = multi_lines_rset(expected)
- self.assertEqual(selector(None, self.req, None), 0)
- selector = multi_lines_rset(expected + 1)
- self.assertEqual(selector(None, self.req, None), 0)
- selector = multi_lines_rset(expected - 1)
- self.assertEqual(selector(None, self.req, None), 0)
+ with self.admin_access.web_request() as req:
+ rset = req.execute('Any G WHERE G is CWGroup')
+ expected = len(rset)
+ selector = multi_lines_rset(expected)
+ self.assertEqual(selector(None, req, None), 0)
+ selector = multi_lines_rset(expected + 1)
+ self.assertEqual(selector(None, req, None), 0)
+ selector = multi_lines_rset(expected - 1)
+ self.assertEqual(selector(None, req, None), 0)
def test_with_operators(self):
- expected = len(self.rset)
+ with self.admin_access.web_request() as req:
+ rset = req.execute('Any G WHERE G is CWGroup')
+ expected = len(rset)
- # Format 'expected', 'operator', 'assert'
- testdata = (( expected, eq, 1),
- ( expected+1, eq, 0),
- ( expected-1, eq, 0),
- ( expected, le, 1),
- ( expected+1, le, 1),
- ( expected-1, le, 0),
- ( expected-1, gt, 1),
- ( expected, gt, 0),
- ( expected+1, gt, 0),
- ( expected+1, lt, 1),
- ( expected, lt, 0),
- ( expected-1, lt, 0))
+ # Format 'expected', 'operator', 'assert'
+ testdata = (( expected, eq, 1),
+ ( expected+1, eq, 0),
+ ( expected-1, eq, 0),
+ ( expected, le, 1),
+ ( expected+1, le, 1),
+ ( expected-1, le, 0),
+ ( expected-1, gt, 1),
+ ( expected, gt, 0),
+ ( expected+1, gt, 0),
+ ( expected+1, lt, 1),
+ ( expected, lt, 0),
+ ( expected-1, lt, 0))
- for (expected, operator, assertion) in testdata:
- selector = multi_lines_rset(expected, operator)
- yield self.assertEqual, selector(None, self.req, rset=self.rset), assertion
+ for (expected, operator, assertion) in testdata:
+ selector = multi_lines_rset(expected, operator)
+ yield self.assertEqual, selector(None, req, rset=rset), assertion
+
+
+class MatchKwargsTC(TestCase):
def test_match_kwargs_default(self):
selector = match_kwargs( set( ('a', 'b') ) )
@@ -316,37 +343,37 @@
self.assertEqual(selector(None, None, a=1, c=1), 1)
-class ScoreEntitySelectorTC(CubicWebTC):
+class ScoreEntityTC(CubicWebTC):
def test_intscore_entity_selector(self):
- req = self.request()
- rset = req.execute('Any E WHERE E eid 1')
- selector = score_entity(lambda x: None)
- self.assertEqual(selector(None, req, rset=rset), 0)
- selector = score_entity(lambda x: "something")
- self.assertEqual(selector(None, req, rset=rset), 1)
- selector = score_entity(lambda x: object)
- self.assertEqual(selector(None, req, rset=rset), 1)
- rset = req.execute('Any G LIMIT 2 WHERE G is CWGroup')
- selector = score_entity(lambda x: 10)
- self.assertEqual(selector(None, req, rset=rset), 20)
- selector = score_entity(lambda x: 10, mode='any')
- self.assertEqual(selector(None, req, rset=rset), 10)
+ with self.admin_access.web_request() as req:
+ rset = req.execute('Any E WHERE E eid 1')
+ selector = score_entity(lambda x: None)
+ self.assertEqual(selector(None, req, rset=rset), 0)
+ selector = score_entity(lambda x: "something")
+ self.assertEqual(selector(None, req, rset=rset), 1)
+ selector = score_entity(lambda x: object)
+ self.assertEqual(selector(None, req, rset=rset), 1)
+ rset = req.execute('Any G LIMIT 2 WHERE G is CWGroup')
+ selector = score_entity(lambda x: 10)
+ self.assertEqual(selector(None, req, rset=rset), 20)
+ selector = score_entity(lambda x: 10, mode='any')
+ self.assertEqual(selector(None, req, rset=rset), 10)
def test_rql_condition_entity(self):
- req = self.request()
- selector = rql_condition('X identity U')
- rset = req.user.as_rset()
- self.assertEqual(selector(None, req, rset=rset), 1)
- self.assertEqual(selector(None, req, entity=req.user), 1)
- self.assertEqual(selector(None, req), 0)
+ with self.admin_access.web_request() as req:
+ selector = rql_condition('X identity U')
+ rset = req.user.as_rset()
+ self.assertEqual(selector(None, req, rset=rset), 1)
+ self.assertEqual(selector(None, req, entity=req.user), 1)
+ self.assertEqual(selector(None, req), 0)
def test_rql_condition_user(self):
- req = self.request()
- selector = rql_condition('U login "admin"', user_condition=True)
- self.assertEqual(selector(None, req), 1)
- selector = rql_condition('U login "toto"', user_condition=True)
- self.assertEqual(selector(None, req), 0)
+ with self.admin_access.web_request() as req:
+ selector = rql_condition('U login "admin"', user_condition=True)
+ self.assertEqual(selector(None, req), 1)
+ selector = rql_condition('U login "toto"', user_condition=True)
+ self.assertEqual(selector(None, req), 0)
class AdaptablePredicateTC(CubicWebTC):
@@ -359,10 +386,10 @@
__regid__ = 'IWhatever'
__select__ = is_instance('CWGroup')
with self.temporary_appobjects(CWUserIWhatever, CWGroupIWhatever):
- req = self.request()
- selector = adaptable('IWhatever')
- rset = req.execute('Any X WHERE X is IN(CWGroup, CWUser)')
- self.assertTrue(selector(None, req, rset=rset))
+ with self.admin_access.web_request() as req:
+ selector = adaptable('IWhatever')
+ rset = req.execute('Any X WHERE X is IN(CWGroup, CWUser)')
+ self.assertTrue(selector(None, req, rset=rset))
if __name__ == '__main__':
unittest_main()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/unittest_repoapi.py Tue Jun 10 09:49:45 2014 +0200
@@ -0,0 +1,89 @@
+# copyright 2013-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""unittest for cubicweb.dbapi"""
+
+
+from cubicweb.devtools.testlib import CubicWebTC
+
+from cubicweb import ProgrammingError
+from cubicweb.repoapi import ClientConnection, connect, anonymous_cnx
+
+
+class REPOAPITC(CubicWebTC):
+
+ def test_clt_cnx_basic_usage(self):
+ """Test that a client connection can be used to access the database"""
+ with self.admin_access.client_cnx() as cltcnx:
+ # (1) some RQL request
+ rset = cltcnx.execute('Any X WHERE X is CWUser')
+ self.assertTrue(rset)
+ # (2) ORM usage
+ random_user = rset.get_entity(0, 0)
+ # (3) Write operation
+ random_user.cw_set(surname=u'babar')
+ # (4) commit
+ cltcnx.commit()
+ rset = cltcnx.execute('''Any X WHERE X is CWUser,
+ X surname "babar"
+ ''')
+ self.assertTrue(rset)
+ # prepare test for implicit rollback
+ random_user = rset.get_entity(0, 0)
+ random_user.cw_set(surname=u'celestine')
+ # implicit rollback on exit
+ with self.admin_access.client_cnx() as cltcnx:
+ rset = cltcnx.execute('''Any X WHERE X is CWUser,
+ X surname "babar"
+ ''')
+ self.assertTrue(rset)
+
+ def test_clt_cnx_life_cycle(self):
+ """Check that ClientConnection requires explicit open and close
+ """
+ access = self.admin_access
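+        # build the connection by hand (rather than via access.client_cnx())
+        # so that opening and closing can be exercised explicitly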
+ cltcnx = ClientConnection(access._session)
+ # connection not open yet
+ with self.assertRaises(ProgrammingError):
+ cltcnx.execute('Any X WHERE X is CWUser')
+ # connection open and working
+ with cltcnx:
+ cltcnx.execute('Any X WHERE X is CWUser')
+ # connection closed
+ with self.assertRaises(ProgrammingError):
+ cltcnx.execute('Any X WHERE X is CWUser')
+
+ def test_connect(self):
+ """check that repoapi.connect works and returns a usable connection"""
+ clt_cnx = connect(self.repo, login='admin', password='gingkow')
+ self.assertEqual('admin', clt_cnx.user.login)
+ with clt_cnx:
+ rset = clt_cnx.execute('Any X WHERE X is CWUser')
+ self.assertTrue(rset)
+
+ def test_anonymous_connect(self):
+ """check that you can get anonymous connection when the data exist"""
+ clt_cnx = anonymous_cnx(self.repo)
+ self.assertEqual('anon', clt_cnx.user.login)
+ with clt_cnx:
+ rset = clt_cnx.execute('Any X WHERE X is CWUser')
+ self.assertTrue(rset)
+
+
+if __name__ == '__main__':
+ from logilab.common.testlib import unittest_main
+ unittest_main()
--- a/test/unittest_req.py Tue Jun 10 09:35:26 2014 +0200
+++ b/test/unittest_req.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -56,94 +56,98 @@
def test_base_url(self):
base_url = self.config['base-url']
- self.assertEqual(self.session.base_url(), base_url)
- assert 'https-url' not in self.config
- self.assertEqual(self.session.base_url(secure=True), base_url)
- secure_base_url = base_url.replace('http', 'https')
- self.config.global_set_option('https-url', secure_base_url)
- self.assertEqual(self.session.base_url(secure=True), secure_base_url)
+ with self.admin_access.repo_cnx() as session:
+ self.assertEqual(session.base_url(), base_url)
+ assert 'https-url' not in self.config
+ self.assertEqual(session.base_url(secure=True), base_url)
+ secure_base_url = base_url.replace('http', 'https')
+ self.config.global_set_option('https-url', secure_base_url)
+ self.assertEqual(session.base_url(secure=True), secure_base_url)
def test_view_catch_ex(self):
- req = self.request()
- rset = self.execute('CWUser X WHERE X login "hop"')
- self.assertEqual(req.view('oneline', rset, 'null'), '')
- self.assertRaises(ObjectNotFound, req.view, 'onelinee', rset, 'null')
+ with self.admin_access.web_request() as req:
+ rset = req.execute('CWUser X WHERE X login "hop"')
+ self.assertEqual(req.view('oneline', rset, 'null'), '')
+ self.assertRaises(ObjectNotFound, req.view, 'onelinee', rset, 'null')
def test_find_one_entity(self):
- self.request().create_entity(
- 'CWUser', login=u'cdevienne', upassword=u'cdevienne',
- surname=u'de Vienne', firstname=u'Christophe',
- in_group=self.request().find('CWGroup', name=u'users').one())
+ with self.admin_access.web_request() as req:
+ req.create_entity(
+ 'CWUser', login=u'cdevienne', upassword=u'cdevienne',
+ surname=u'de Vienne', firstname=u'Christophe',
+ in_group=req.find('CWGroup', name=u'users').one())
- self.request().create_entity(
- 'CWUser', login=u'adim', upassword='adim', surname=u'di mascio',
- firstname=u'adrien',
- in_group=self.request().find('CWGroup', name=u'users').one())
+ req.create_entity(
+ 'CWUser', login=u'adim', upassword='adim', surname=u'di mascio',
+ firstname=u'adrien',
+ in_group=req.find('CWGroup', name=u'users').one())
- u = self.request().find_one_entity('CWUser', login=u'cdevienne')
- self.assertEqual(u.firstname, u"Christophe")
+ u = req.find_one_entity('CWUser', login=u'cdevienne')
+ self.assertEqual(u.firstname, u"Christophe")
- with self.assertRaises(FindEntityError):
- self.request().find_one_entity('CWUser', login=u'patanok')
+ with self.assertRaises(FindEntityError):
+ req.find_one_entity('CWUser', login=u'patanok')
- with self.assertRaises(FindEntityError):
- self.request().find_one_entity('CWUser')
+ with self.assertRaises(FindEntityError):
+ req.find_one_entity('CWUser')
def test_find_entities(self):
- self.request().create_entity(
- 'CWUser', login=u'cdevienne', upassword=u'cdevienne',
- surname=u'de Vienne', firstname=u'Christophe',
- in_group=self.request().find('CWGroup', name=u'users').one())
-
- self.request().create_entity(
- 'CWUser', login=u'adim', upassword='adim', surname=u'di mascio',
- firstname=u'adrien',
- in_group=self.request().find('CWGroup', name=u'users').one())
+ with self.admin_access.web_request() as req:
+ req.create_entity(
+ 'CWUser', login=u'cdevienne', upassword=u'cdevienne',
+ surname=u'de Vienne', firstname=u'Christophe',
+ in_group=req.find('CWGroup', name=u'users').one())
- l = list(self.request().find_entities('CWUser', login=u'cdevienne'))
- self.assertEqual(1, len(l))
- self.assertEqual(l[0].firstname, u"Christophe")
+ req.create_entity(
+ 'CWUser', login=u'adim', upassword='adim', surname=u'di mascio',
+ firstname=u'adrien',
+ in_group=req.find('CWGroup', name=u'users').one())
- l = list(self.request().find_entities('CWUser', login=u'patanok'))
- self.assertEqual(0, len(l))
+ l = list(req.find_entities('CWUser', login=u'cdevienne'))
+ self.assertEqual(1, len(l))
+ self.assertEqual(l[0].firstname, u"Christophe")
- l = list(self.request().find_entities('CWUser'))
- self.assertEqual(4, len(l))
+ l = list(req.find_entities('CWUser', login=u'patanok'))
+ self.assertEqual(0, len(l))
+
+ l = list(req.find_entities('CWUser'))
+ self.assertEqual(4, len(l))
def test_find(self):
- self.request().create_entity(
- 'CWUser', login=u'cdevienne', upassword=u'cdevienne',
- surname=u'de Vienne', firstname=u'Christophe',
- in_group=self.request().find('CWGroup', name=u'users').one())
+ with self.admin_access.web_request() as req:
+ req.create_entity(
+ 'CWUser', login=u'cdevienne', upassword=u'cdevienne',
+ surname=u'de Vienne', firstname=u'Christophe',
+ in_group=req.find('CWGroup', name=u'users').one())
- self.request().create_entity(
- 'CWUser', login=u'adim', upassword='adim', surname=u'di mascio',
- firstname=u'adrien',
- in_group=self.request().find('CWGroup', name=u'users').one())
+ req.create_entity(
+ 'CWUser', login=u'adim', upassword='adim', surname=u'di mascio',
+ firstname=u'adrien',
+ in_group=req.find('CWGroup', name=u'users').one())
- u = self.request().find('CWUser', login=u'cdevienne').one()
- self.assertEqual(u.firstname, u"Christophe")
+ u = req.find('CWUser', login=u'cdevienne').one()
+ self.assertEqual(u.firstname, u"Christophe")
- users = list(self.request().find('CWUser').entities())
- self.assertEqual(len(users), 4)
+ users = list(req.find('CWUser').entities())
+ self.assertEqual(len(users), 4)
- groups = list(
- self.request().find('CWGroup', reverse_in_group=u).entities())
- self.assertEqual(len(groups), 1)
- self.assertEqual(groups[0].name, u'users')
+ groups = list(
+ req.find('CWGroup', reverse_in_group=u).entities())
+ self.assertEqual(len(groups), 1)
+ self.assertEqual(groups[0].name, u'users')
- users = self.request().find('CWUser', in_group=groups[0]).entities()
- users = list(users)
- self.assertEqual(len(users), 2)
+ users = req.find('CWUser', in_group=groups[0]).entities()
+ users = list(users)
+ self.assertEqual(len(users), 2)
- with self.assertRaises(AssertionError):
- self.request().find('CWUser', chapeau=u"melon")
+ with self.assertRaises(AssertionError):
+ req.find('CWUser', chapeau=u"melon")
- with self.assertRaises(AssertionError):
- self.request().find('CWUser', reverse_buddy=users[0])
+ with self.assertRaises(AssertionError):
+ req.find('CWUser', reverse_buddy=users[0])
- with self.assertRaises(NotImplementedError):
- self.request().find('CWUser', in_group=[1, 2])
+ with self.assertRaises(NotImplementedError):
+ req.find('CWUser', in_group=[1, 2])
if __name__ == '__main__':
unittest_main()
--- a/test/unittest_rqlrewrite.py Tue Jun 10 09:35:26 2014 +0200
+++ b/test/unittest_rqlrewrite.py Tue Jun 10 09:49:45 2014 +0200
@@ -129,7 +129,7 @@
"F name 'read', F require_group E, A is State, E is CWGroup, F is CWPermission), "
"(EXISTS(S ref LIKE 'PUBLIC%')) OR (EXISTS(B in_group G, G name 'public', G is CWGroup)), "
"S is Affaire")
- self.assertTrue('D' in kwargs)
+ self.assertIn('D', kwargs)
def test_or(self):
constraint = '(X identity U) OR (X in_state ST, CL identity U, CL in_state ST, ST name "subscribed")'
@@ -507,11 +507,12 @@
args = {}
querier = self.repo.querier
union = querier.parse(rql)
- querier.solutions(self.session, union, args)
- querier._annotate(union)
- plan = querier.plan_factory(union, args, self.session)
- plan.preprocess(union)
- return union
+ with self.admin_access.repo_cnx() as cnx:
+ querier.solutions(cnx, union, args)
+ querier._annotate(union)
+ plan = querier.plan_factory(union, args, cnx)
+ plan.preprocess(union)
+ return union
def test_ambiguous_optional_same_exprs(self):
"""See #3013535"""
--- a/test/unittest_rset.py Tue Jun 10 09:35:26 2014 +0200
+++ b/test/unittest_rset.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,5 +1,5 @@
# coding: utf-8
-# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -27,7 +27,6 @@
from cubicweb.devtools.testlib import CubicWebTC
from cubicweb.rset import NotAnEntity, ResultSet, attr_desc_iterator
-
from cubicweb import NoResultError, MultipleResultsError
@@ -104,27 +103,27 @@
self.assertEqual(len(pickle.dumps(self.rset)), 392)
def test_build_url(self):
- req = self.request()
- baseurl = req.base_url()
- self.compare_urls(req.build_url('view', vid='foo', rql='yo'),
- '%sview?vid=foo&rql=yo' % baseurl)
- self.compare_urls(req.build_url('view', _restpath='task/title/go'),
- '%stask/title/go' % baseurl)
- #self.compare_urls(req.build_url('view', _restpath='/task/title/go'),
- # '%stask/title/go' % baseurl)
- # empty _restpath should not crash
- self.compare_urls(req.build_url('view', _restpath=''), baseurl)
- self.assertNotIn('https', req.build_url('view', vid='foo', rql='yo',
- __secure__=True))
- try:
- self.config.global_set_option('https-url', 'https://testing.fr/')
- self.assertTrue('https', req.build_url('view', vid='foo', rql='yo',
- __secure__=True))
- self.compare_urls(req.build_url('view', vid='foo', rql='yo',
- __secure__=True),
- '%sview?vid=foo&rql=yo' % req.base_url(secure=True))
- finally:
- self.config.global_set_option('https-url', None)
+ with self.admin_access.web_request() as req:
+ baseurl = req.base_url()
+ self.compare_urls(req.build_url('view', vid='foo', rql='yo'),
+ '%sview?vid=foo&rql=yo' % baseurl)
+ self.compare_urls(req.build_url('view', _restpath='task/title/go'),
+ '%stask/title/go' % baseurl)
+ #self.compare_urls(req.build_url('view', _restpath='/task/title/go'),
+ # '%stask/title/go' % baseurl)
+ # empty _restpath should not crash
+ self.compare_urls(req.build_url('view', _restpath=''), baseurl)
+ self.assertNotIn('https', req.build_url('view', vid='foo', rql='yo',
+ __secure__=True))
+ try:
+ self.config.global_set_option('https-url', 'https://testing.fr/')
+ self.assertTrue('https', req.build_url('view', vid='foo', rql='yo',
+ __secure__=True))
+ self.compare_urls(req.build_url('view', vid='foo', rql='yo',
+ __secure__=True),
+ '%sview?vid=foo&rql=yo' % req.base_url(secure=True))
+ finally:
+ self.config.global_set_option('https-url', None)
def test_build(self):
@@ -139,88 +138,92 @@
rs = ResultSet([[12000, 'adim'], [13000, 'syt'], [14000, 'nico']],
'Any U,L where U is CWUser, U login L',
description=[['CWUser', 'String']] * 3)
- rs.req = self.request()
- rs.vreg = self.vreg
- self.assertEqual(rs.limit(2).rows, [[12000, 'adim'], [13000, 'syt']])
- rs2 = rs.limit(2, offset=1)
- self.assertEqual(rs2.rows, [[13000, 'syt'], [14000, 'nico']])
- self.assertEqual(rs2.get_entity(0, 0).cw_row, 0)
- self.assertEqual(rs.limit(2, offset=2).rows, [[14000, 'nico']])
- self.assertEqual(rs.limit(2, offset=3).rows, [])
+ with self.admin_access.web_request() as req:
+ rs.req = req
+ rs.vreg = self.vreg
+ self.assertEqual(rs.limit(2).rows, [[12000, 'adim'], [13000, 'syt']])
+ rs2 = rs.limit(2, offset=1)
+ self.assertEqual(rs2.rows, [[13000, 'syt'], [14000, 'nico']])
+ self.assertEqual(rs2.get_entity(0, 0).cw_row, 0)
+ self.assertEqual(rs.limit(2, offset=2).rows, [[14000, 'nico']])
+ self.assertEqual(rs.limit(2, offset=3).rows, [])
def test_limit_2(self):
- req = self.request()
- # drop user from cache for the sake of this test
- req.drop_entity_cache(req.user.eid)
- rs = req.execute('Any E,U WHERE E is CWEType, E created_by U')
- # get entity on row 9. This will fill its created_by relation cache,
- # with cwuser on row 9 as well
- e1 = rs.get_entity(9, 0)
- # get entity on row 10. This will fill its created_by relation cache,
- # with cwuser built on row 9
- e2 = rs.get_entity(10, 0)
- # limit result set from row 10
- rs.limit(1, 10, inplace=True)
- # get back eid
- e = rs.get_entity(0, 0)
- self.assertTrue(e2 is e)
- # rs.limit has properly removed cwuser for request cache, but it's
- # still referenced by e/e2 relation cache
- u = e.created_by[0]
- # now ensure this doesn't trigger IndexError because cwuser.cw_row is 9
- # while now rset has only one row
- u.cw_rset[u.cw_row]
+ with self.admin_access.web_request() as req:
+ # drop user from cache for the sake of this test
+ req.drop_entity_cache(req.user.eid)
+ rs = req.execute('Any E,U WHERE E is CWEType, E created_by U')
+ # get entity on row 9. This will fill its created_by relation cache,
+ # with cwuser on row 9 as well
+ e1 = rs.get_entity(9, 0)
+ # get entity on row 10. This will fill its created_by relation cache,
+ # with cwuser built on row 9
+ e2 = rs.get_entity(10, 0)
+ # limit result set from row 10
+ rs.limit(1, 10, inplace=True)
+ # get back eid
+ e = rs.get_entity(0, 0)
+ self.assertTrue(e2 is e)
+            # rs.limit has properly removed cwuser from the request cache, but it's
+ # still referenced by e/e2 relation cache
+ u = e.created_by[0]
+ # now ensure this doesn't trigger IndexError because cwuser.cw_row is 9
+            # while the rset now has only one row
+ u.cw_rset[u.cw_row]
def test_filter(self):
rs = ResultSet([[12000, 'adim'], [13000, 'syt'], [14000, 'nico']],
'Any U,L where U is CWUser, U login L',
description=[['CWUser', 'String']] * 3)
- rs.req = self.request()
- rs.vreg = self.vreg
- def test_filter(entity):
- return entity.login != 'nico'
+ with self.admin_access.web_request() as req:
+ rs.req = req
+ rs.vreg = self.vreg
+ def test_filter(entity):
+ return entity.login != 'nico'
- rs2 = rs.filtered_rset(test_filter)
- self.assertEqual(len(rs2), 2)
- self.assertEqual([login for _, login in rs2], ['adim', 'syt'])
- self.assertEqual(rs2.description, rs.description[1:])
+ rs2 = rs.filtered_rset(test_filter)
+ self.assertEqual(len(rs2), 2)
+ self.assertEqual([login for _, login in rs2], ['adim', 'syt'])
+ self.assertEqual(rs2.description, rs.description[1:])
def test_transform(self):
rs = ResultSet([[12, 'adim'], [13, 'syt'], [14, 'nico']],
'Any U,L where U is CWUser, U login L',
description=[['CWUser', 'String']] * 3)
- rs.req = self.request()
- def test_transform(row, desc):
- return row[1:], desc[1:]
- rs2 = rs.transformed_rset(test_transform)
+ with self.admin_access.web_request() as req:
+ rs.req = req
+ def test_transform(row, desc):
+ return row[1:], desc[1:]
+ rs2 = rs.transformed_rset(test_transform)
- self.assertEqual(len(rs2), 3)
- self.assertEqual(list(rs2), [['adim'],['syt'],['nico']])
+ self.assertEqual(len(rs2), 3)
+ self.assertEqual(list(rs2), [['adim'],['syt'],['nico']])
def test_sort(self):
rs = ResultSet([[12000, 'adim'], [13000, 'syt'], [14000, 'nico']],
'Any U,L where U is CWUser, U login L',
description=[['CWUser', 'String']] * 3)
- rs.req = self.request()
- rs.vreg = self.vreg
+ with self.admin_access.web_request() as req:
+ rs.req = req
+ rs.vreg = self.vreg
- rs2 = rs.sorted_rset(lambda e:e.cw_attr_cache['login'])
- self.assertEqual(len(rs2), 3)
- self.assertEqual([login for _, login in rs2], ['adim', 'nico', 'syt'])
- # make sure rs is unchanged
- self.assertEqual([login for _, login in rs], ['adim', 'syt', 'nico'])
+ rs2 = rs.sorted_rset(lambda e:e.cw_attr_cache['login'])
+ self.assertEqual(len(rs2), 3)
+ self.assertEqual([login for _, login in rs2], ['adim', 'nico', 'syt'])
+ # make sure rs is unchanged
+ self.assertEqual([login for _, login in rs], ['adim', 'syt', 'nico'])
- rs2 = rs.sorted_rset(lambda e:e.cw_attr_cache['login'], reverse=True)
- self.assertEqual(len(rs2), 3)
- self.assertEqual([login for _, login in rs2], ['syt', 'nico', 'adim'])
- # make sure rs is unchanged
- self.assertEqual([login for _, login in rs], ['adim', 'syt', 'nico'])
+ rs2 = rs.sorted_rset(lambda e:e.cw_attr_cache['login'], reverse=True)
+ self.assertEqual(len(rs2), 3)
+ self.assertEqual([login for _, login in rs2], ['syt', 'nico', 'adim'])
+ # make sure rs is unchanged
+ self.assertEqual([login for _, login in rs], ['adim', 'syt', 'nico'])
- rs3 = rs.sorted_rset(lambda row: row[1], col=-1)
- self.assertEqual(len(rs3), 3)
- self.assertEqual([login for _, login in rs3], ['adim', 'nico', 'syt'])
- # make sure rs is unchanged
- self.assertEqual([login for _, login in rs], ['adim', 'syt', 'nico'])
+ rs3 = rs.sorted_rset(lambda row: row[1], col=-1)
+ self.assertEqual(len(rs3), 3)
+ self.assertEqual([login for _, login in rs3], ['adim', 'nico', 'syt'])
+ # make sure rs is unchanged
+ self.assertEqual([login for _, login in rs], ['adim', 'syt', 'nico'])
def test_split(self):
rs = ResultSet([[12000, 'adim', u'Adim chez les pinguins'],
@@ -231,40 +234,41 @@
'Any U, L, T WHERE U is CWUser, U login L,'\
'D created_by U, D title T',
description=[['CWUser', 'String', 'String']] * 5)
- rs.req = self.request()
- rs.vreg = self.vreg
- rsets = rs.split_rset(lambda e:e.cw_attr_cache['login'])
- self.assertEqual(len(rsets), 3)
- self.assertEqual([login for _, login,_ in rsets[0]], ['adim', 'adim'])
- self.assertEqual([login for _, login,_ in rsets[1]], ['syt'])
- self.assertEqual([login for _, login,_ in rsets[2]], ['nico', 'nico'])
- # make sure rs is unchanged
- self.assertEqual([login for _, login,_ in rs], ['adim', 'adim', 'syt', 'nico', 'nico'])
+ with self.admin_access.web_request() as req:
+ rs.req = req
+ rs.vreg = self.vreg
+ rsets = rs.split_rset(lambda e:e.cw_attr_cache['login'])
+ self.assertEqual(len(rsets), 3)
+ self.assertEqual([login for _, login,_ in rsets[0]], ['adim', 'adim'])
+ self.assertEqual([login for _, login,_ in rsets[1]], ['syt'])
+ self.assertEqual([login for _, login,_ in rsets[2]], ['nico', 'nico'])
+ # make sure rs is unchanged
+ self.assertEqual([login for _, login,_ in rs], ['adim', 'adim', 'syt', 'nico', 'nico'])
- rsets = rs.split_rset(lambda e:e.cw_attr_cache['login'], return_dict=True)
- self.assertEqual(len(rsets), 3)
- self.assertEqual([login for _, login,_ in rsets['nico']], ['nico', 'nico'])
- self.assertEqual([login for _, login,_ in rsets['adim']], ['adim', 'adim'])
- self.assertEqual([login for _, login,_ in rsets['syt']], ['syt'])
- # make sure rs is unchanged
- self.assertEqual([login for _, login,_ in rs], ['adim', 'adim', 'syt', 'nico', 'nico'])
+ rsets = rs.split_rset(lambda e:e.cw_attr_cache['login'], return_dict=True)
+ self.assertEqual(len(rsets), 3)
+ self.assertEqual([login for _, login,_ in rsets['nico']], ['nico', 'nico'])
+ self.assertEqual([login for _, login,_ in rsets['adim']], ['adim', 'adim'])
+ self.assertEqual([login for _, login,_ in rsets['syt']], ['syt'])
+ # make sure rs is unchanged
+ self.assertEqual([login for _, login,_ in rs], ['adim', 'adim', 'syt', 'nico', 'nico'])
- rsets = rs.split_rset(lambda s: s.count('d'), col=2)
- self.assertEqual(len(rsets), 2)
- self.assertEqual([title for _, _, title in rsets[0]],
- [u"Adim chez les pinguins",
- u"Jardiner facile",
- u"L'épluchage du castor commun",])
- self.assertEqual([title for _, _, title in rsets[1]],
- [u"Le carrelage en 42 leçons",
- u"La tarte tatin en 15 minutes",])
- # make sure rs is unchanged
- self.assertEqual([title for _, _, title in rs],
- [u'Adim chez les pinguins',
- u'Jardiner facile',
- u'Le carrelage en 42 leçons',
- u'La tarte tatin en 15 minutes',
- u"L'épluchage du castor commun"])
+ rsets = rs.split_rset(lambda s: s.count('d'), col=2)
+ self.assertEqual(len(rsets), 2)
+ self.assertEqual([title for _, _, title in rsets[0]],
+ [u"Adim chez les pinguins",
+ u"Jardiner facile",
+ u"L'épluchage du castor commun",])
+ self.assertEqual([title for _, _, title in rsets[1]],
+ [u"Le carrelage en 42 leçons",
+ u"La tarte tatin en 15 minutes",])
+ # make sure rs is unchanged
+ self.assertEqual([title for _, _, title in rs],
+ [u'Adim chez les pinguins',
+ u'Jardiner facile',
+ u'Le carrelage en 42 leçons',
+ u'La tarte tatin en 15 minutes',
+ u"L'épluchage du castor commun"])
def test_cached_syntax_tree(self):
"""make sure syntax tree is cached"""
@@ -273,265 +277,291 @@
self.assert_(rqlst1 is rqlst2)
def test_get_entity_simple(self):
- self.request().create_entity('CWUser', login=u'adim', upassword='adim',
- surname=u'di mascio', firstname=u'adrien')
- e = self.execute('Any X,T WHERE X login "adim", X surname T').get_entity(0, 0)
- self.assertEqual(e.cw_attr_cache['surname'], 'di mascio')
- self.assertRaises(KeyError, e.cw_attr_cache.__getitem__, 'firstname')
- self.assertRaises(KeyError, e.cw_attr_cache.__getitem__, 'creation_date')
- self.assertEqual(pprelcachedict(e._cw_related_cache), [])
- e.complete()
- self.assertEqual(e.cw_attr_cache['firstname'], 'adrien')
- self.assertEqual(pprelcachedict(e._cw_related_cache), [])
+ with self.admin_access.web_request() as req:
+ req.create_entity('CWUser', login=u'adim', upassword='adim',
+ surname=u'di mascio', firstname=u'adrien')
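+            # purge the connection's entity cache so the query below rebuilds the entity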
+ req.cnx.drop_entity_cache()
+ e = req.execute('Any X,T WHERE X login "adim", X surname T').get_entity(0, 0)
+ self.assertEqual(e.cw_attr_cache['surname'], 'di mascio')
+ self.assertRaises(KeyError, e.cw_attr_cache.__getitem__, 'firstname')
+ self.assertRaises(KeyError, e.cw_attr_cache.__getitem__, 'creation_date')
+ self.assertEqual(pprelcachedict(e._cw_related_cache), [])
+ e.complete()
+ self.assertEqual(e.cw_attr_cache['firstname'], 'adrien')
+ self.assertEqual(pprelcachedict(e._cw_related_cache), [])
def test_get_entity_advanced(self):
- self.request().create_entity('Bookmark', title=u'zou', path=u'/view')
- self.execute('SET X bookmarked_by Y WHERE X is Bookmark, Y login "anon"')
- rset = self.execute('Any X,Y,XT,YN WHERE X bookmarked_by Y, X title XT, Y login YN')
+ with self.admin_access.web_request() as req:
+ req.create_entity('Bookmark', title=u'zou', path=u'/view')
+ req.cnx.drop_entity_cache()
+ req.execute('SET X bookmarked_by Y WHERE X is Bookmark, Y login "anon"')
+ rset = req.execute('Any X,Y,XT,YN WHERE X bookmarked_by Y, X title XT, Y login YN')
- e = rset.get_entity(0, 0)
- self.assertEqual(e.cw_row, 0)
- self.assertEqual(e.cw_col, 0)
- self.assertEqual(e.cw_attr_cache['title'], 'zou')
- self.assertRaises(KeyError, e.cw_attr_cache.__getitem__, 'path')
- self.assertEqual(e.view('text'), 'zou')
- self.assertEqual(pprelcachedict(e._cw_related_cache), [])
+ e = rset.get_entity(0, 0)
+ self.assertEqual(e.cw_row, 0)
+ self.assertEqual(e.cw_col, 0)
+ self.assertEqual(e.cw_attr_cache['title'], 'zou')
+ self.assertRaises(KeyError, e.cw_attr_cache.__getitem__, 'path')
+ self.assertEqual(e.view('text'), 'zou')
+ self.assertEqual(pprelcachedict(e._cw_related_cache), [])
- e = rset.get_entity(0, 1)
- self.assertEqual(e.cw_row, 0)
- self.assertEqual(e.cw_col, 1)
- self.assertEqual(e.cw_attr_cache['login'], 'anon')
- self.assertRaises(KeyError, e.cw_attr_cache.__getitem__, 'firstname')
- self.assertEqual(pprelcachedict(e._cw_related_cache),
- [])
- e.complete()
- self.assertEqual(e.cw_attr_cache['firstname'], None)
- self.assertEqual(e.view('text'), 'anon')
- self.assertEqual(pprelcachedict(e._cw_related_cache),
- [])
+ e = rset.get_entity(0, 1)
+ self.assertEqual(e.cw_row, 0)
+ self.assertEqual(e.cw_col, 1)
+ self.assertEqual(e.cw_attr_cache['login'], 'anon')
+ self.assertRaises(KeyError, e.cw_attr_cache.__getitem__, 'firstname')
+ self.assertEqual(pprelcachedict(e._cw_related_cache),
+ [])
+ e.complete()
+ self.assertEqual(e.cw_attr_cache['firstname'], None)
+ self.assertEqual(e.view('text'), 'anon')
+ self.assertEqual(pprelcachedict(e._cw_related_cache),
+ [])
- self.assertRaises(NotAnEntity, rset.get_entity, 0, 2)
- self.assertRaises(NotAnEntity, rset.get_entity, 0, 3)
+ self.assertRaises(NotAnEntity, rset.get_entity, 0, 2)
+ self.assertRaises(NotAnEntity, rset.get_entity, 0, 3)
def test_get_entity_relation_cache_compt(self):
- rset = self.execute('Any X,S WHERE X in_state S, X login "anon"')
- e = rset.get_entity(0, 0)
- seid = self.execute('State X WHERE X name "activated"')[0][0]
- # for_user / in_group are prefetched in CWUser __init__, in_state should
- # be filed from our query rset
- self.assertEqual(pprelcachedict(e._cw_related_cache),
- [('in_state_subject', [seid])])
+ with self.admin_access.web_request() as req:
+ rset = req.execute('Any X,S WHERE X in_state S, X login "anon"')
+ e = rset.get_entity(0, 0)
+ seid = req.execute('State X WHERE X name "activated"')[0][0]
+ # for_user / in_group are prefetched in CWUser __init__, in_state should
+                # be filled from our query rset
+ self.assertEqual(pprelcachedict(e._cw_related_cache),
+ [('in_state_subject', [seid])])
def test_get_entity_advanced_prefilled_cache(self):
- e = self.request().create_entity('Bookmark', title=u'zou', path=u'path')
- self.commit()
- rset = self.execute('Any X,U,S,XT,UL,SN WHERE X created_by U, U in_state S, '
- 'X title XT, S name SN, U login UL, X eid %s' % e.eid)
- e = rset.get_entity(0, 0)
- self.assertEqual(e.cw_attr_cache['title'], 'zou')
- self.assertEqual(pprelcachedict(e._cw_related_cache),
- [('created_by_subject', [self.user().eid])])
- # first level of recursion
- u = e.created_by[0]
- self.assertEqual(u.cw_attr_cache['login'], 'admin')
- self.assertRaises(KeyError, u.cw_attr_cache.__getitem__, 'firstname')
- # second level of recursion
- s = u.in_state[0]
- self.assertEqual(s.cw_attr_cache['name'], 'activated')
- self.assertRaises(KeyError, s.cw_attr_cache.__getitem__, 'description')
+ with self.admin_access.web_request() as req:
+ e = req.create_entity('Bookmark', title=u'zou', path=u'path')
+ req.cnx.commit()
+ rset = req.execute('Any X,U,S,XT,UL,SN WHERE X created_by U, U in_state S, '
+ 'X title XT, S name SN, U login UL, X eid %s' % e.eid)
+ e = rset.get_entity(0, 0)
+ self.assertEqual(e.cw_attr_cache['title'], 'zou')
+ self.assertEqual(pprelcachedict(e._cw_related_cache),
+ [('created_by_subject', [req.user.eid])])
+ # first level of recursion
+ u = e.created_by[0]
+ self.assertEqual(u.cw_attr_cache['login'], 'admin')
+ self.assertRaises(KeyError, u.cw_attr_cache.__getitem__, 'firstname')
+ # second level of recursion
+ s = u.in_state[0]
+ self.assertEqual(s.cw_attr_cache['name'], 'activated')
+ self.assertRaises(KeyError, s.cw_attr_cache.__getitem__, 'description')
def test_get_entity_cache_with_left_outer_join(self):
- eid = self.execute('INSERT CWUser E: E login "joe", E upassword "joe", E in_group G '
- 'WHERE G name "users"')[0][0]
- rset = self.execute('Any X,E WHERE X eid %(x)s, X primary_email E?', {'x': eid})
- e = rset.get_entity(0, 0)
- # if any of the assertion below fails with a KeyError, the relation is not cached
- # related entities should be an empty list
- self.assertEqual(e._cw_related_cache['primary_email_subject'][True], ())
- # related rset should be an empty rset
- cached = e._cw_related_cache['primary_email_subject'][False]
- self.assertIsInstance(cached, ResultSet)
- self.assertEqual(cached.rowcount, 0)
+ with self.admin_access.web_request() as req:
+ eid = req.execute('INSERT CWUser E: E login "joe", E upassword "joe", E in_group G '
+ 'WHERE G name "users"')[0][0]
+ rset = req.execute('Any X,E WHERE X eid %(x)s, X primary_email E?', {'x': eid})
+ e = rset.get_entity(0, 0)
+                # if any of the assertions below fails with a KeyError, the relation is not cached
+ # related entities should be an empty list
+ self.assertEqual(e._cw_related_cache['primary_email_subject'][True], ())
+ # related rset should be an empty rset
+ cached = e._cw_related_cache['primary_email_subject'][False]
+ self.assertIsInstance(cached, ResultSet)
+ self.assertEqual(cached.rowcount, 0)
def test_get_entity_union(self):
- e = self.request().create_entity('Bookmark', title=u'manger', path=u'path')
- rset = self.execute('Any X,N ORDERBY N WITH X,N BEING '
- '((Any X,N WHERE X is Bookmark, X title N)'
- ' UNION '
- ' (Any X,N WHERE X is CWGroup, X name N))')
- expected = (('CWGroup', 'guests'), ('CWGroup', 'managers'),
- ('Bookmark', 'manger'), ('CWGroup', 'owners'),
- ('CWGroup', 'users'))
- for entity in rset.entities(): # test get_entity for each row actually
- etype, n = expected[entity.cw_row]
- self.assertEqual(entity.cw_etype, etype)
- attr = etype == 'Bookmark' and 'title' or 'name'
- self.assertEqual(entity.cw_attr_cache[attr], n)
+ with self.admin_access.web_request() as req:
+ e = req.create_entity('Bookmark', title=u'manger', path=u'path')
+ req.cnx.drop_entity_cache()
+ rset = req.execute('Any X,N ORDERBY N WITH X,N BEING '
+ '((Any X,N WHERE X is Bookmark, X title N)'
+ ' UNION '
+ ' (Any X,N WHERE X is CWGroup, X name N))')
+ expected = (('CWGroup', 'guests'), ('CWGroup', 'managers'),
+ ('Bookmark', 'manger'), ('CWGroup', 'owners'),
+ ('CWGroup', 'users'))
+ for entity in rset.entities(): # test get_entity for each row actually
+ etype, n = expected[entity.cw_row]
+ self.assertEqual(entity.cw_etype, etype)
+ attr = etype == 'Bookmark' and 'title' or 'name'
+ self.assertEqual(entity.cw_attr_cache[attr], n)
def test_one(self):
- self.request().create_entity('CWUser', login=u'cdevienne',
- upassword=u'cdevienne',
- surname=u'de Vienne',
- firstname=u'Christophe')
- e = self.execute('Any X WHERE X login "cdevienne"').one()
-
- self.assertEqual(e.surname, u'de Vienne')
+ with self.admin_access.web_request() as req:
+ req.create_entity('CWUser', login=u'cdevienne',
+ upassword=u'cdevienne',
+ surname=u'de Vienne',
+ firstname=u'Christophe')
+ e = req.execute('Any X WHERE X login "cdevienne"').one()
- e = self.execute(
- 'Any X, N WHERE X login "cdevienne", X surname N').one()
- self.assertEqual(e.surname, u'de Vienne')
+ self.assertEqual(e.surname, u'de Vienne')
- e = self.execute(
- 'Any N, X WHERE X login "cdevienne", X surname N').one(col=1)
- self.assertEqual(e.surname, u'de Vienne')
+ e = req.execute(
+ 'Any X, N WHERE X login "cdevienne", X surname N').one()
+ self.assertEqual(e.surname, u'de Vienne')
+
+ e = req.execute(
+ 'Any N, X WHERE X login "cdevienne", X surname N').one(col=1)
+ self.assertEqual(e.surname, u'de Vienne')
def test_one_no_rows(self):
- with self.assertRaises(NoResultError):
- self.execute('Any X WHERE X login "patanok"').one()
+ with self.admin_access.web_request() as req:
+ with self.assertRaises(NoResultError):
+ req.execute('Any X WHERE X login "patanok"').one()
def test_one_multiple_rows(self):
- self.request().create_entity(
- 'CWUser', login=u'cdevienne', upassword=u'cdevienne',
- surname=u'de Vienne', firstname=u'Christophe')
+ with self.admin_access.web_request() as req:
+ req.create_entity(
+ 'CWUser', login=u'cdevienne', upassword=u'cdevienne',
+ surname=u'de Vienne', firstname=u'Christophe')
- self.request().create_entity(
- 'CWUser', login=u'adim', upassword='adim', surname=u'di mascio',
- firstname=u'adrien')
+ req.create_entity(
+ 'CWUser', login=u'adim', upassword='adim', surname=u'di mascio',
+ firstname=u'adrien')
- with self.assertRaises(MultipleResultsError):
- self.execute('Any X WHERE X is CWUser').one()
+ with self.assertRaises(MultipleResultsError):
+ req.execute('Any X WHERE X is CWUser').one()
def test_related_entity_optional(self):
- e = self.request().create_entity('Bookmark', title=u'aaaa', path=u'path')
- rset = self.execute('Any B,U,L WHERE B bookmarked_by U?, U login L')
- entity, rtype = rset.related_entity(0, 2)
- self.assertEqual(entity, None)
- self.assertEqual(rtype, None)
+ with self.admin_access.web_request() as req:
+ e = req.create_entity('Bookmark', title=u'aaaa', path=u'path')
+ rset = req.execute('Any B,U,L WHERE B bookmarked_by U?, U login L')
+ entity, rtype = rset.related_entity(0, 2)
+ self.assertEqual(entity, None)
+ self.assertEqual(rtype, None)
def test_related_entity_union_subquery_1(self):
- e = self.request().create_entity('Bookmark', title=u'aaaa', path=u'path')
- rset = self.execute('Any X,N ORDERBY N WITH X,N BEING '
- '((Any X,N WHERE X is CWGroup, X name N)'
- ' UNION '
- ' (Any X,N WHERE X is Bookmark, X title N))')
- entity, rtype = rset.related_entity(0, 1)
- self.assertEqual(entity.eid, e.eid)
- self.assertEqual(rtype, 'title')
- self.assertEqual(entity.title, 'aaaa')
- entity, rtype = rset.related_entity(1, 1)
- self.assertEqual(entity.cw_etype, 'CWGroup')
- self.assertEqual(rtype, 'name')
- self.assertEqual(entity.name, 'guests')
+ with self.admin_access.web_request() as req:
+ e = req.create_entity('Bookmark', title=u'aaaa', path=u'path')
+ rset = req.execute('Any X,N ORDERBY N WITH X,N BEING '
+ '((Any X,N WHERE X is CWGroup, X name N)'
+ ' UNION '
+ ' (Any X,N WHERE X is Bookmark, X title N))')
+ entity, rtype = rset.related_entity(0, 1)
+ self.assertEqual(entity.eid, e.eid)
+ self.assertEqual(rtype, 'title')
+ self.assertEqual(entity.title, 'aaaa')
+ entity, rtype = rset.related_entity(1, 1)
+ self.assertEqual(entity.cw_etype, 'CWGroup')
+ self.assertEqual(rtype, 'name')
+ self.assertEqual(entity.name, 'guests')
def test_related_entity_union_subquery_2(self):
- e = self.request().create_entity('Bookmark', title=u'aaaa', path=u'path')
- rset = self.execute('Any X,N ORDERBY N WHERE X is Bookmark WITH X,N BEING '
- '((Any X,N WHERE X is CWGroup, X name N)'
- ' UNION '
- ' (Any X,N WHERE X is Bookmark, X title N))')
- entity, rtype = rset.related_entity(0, 1)
- self.assertEqual(entity.eid, e.eid)
- self.assertEqual(rtype, 'title')
- self.assertEqual(entity.title, 'aaaa')
+ with self.admin_access.web_request() as req:
+ e = req.create_entity('Bookmark', title=u'aaaa', path=u'path')
+ rset = req.execute('Any X,N ORDERBY N WHERE X is Bookmark WITH X,N BEING '
+ '((Any X,N WHERE X is CWGroup, X name N)'
+ ' UNION '
+ ' (Any X,N WHERE X is Bookmark, X title N))')
+ entity, rtype = rset.related_entity(0, 1)
+ self.assertEqual(entity.eid, e.eid)
+ self.assertEqual(rtype, 'title')
+ self.assertEqual(entity.title, 'aaaa')
def test_related_entity_union_subquery_3(self):
- e = self.request().create_entity('Bookmark', title=u'aaaa', path=u'path')
- rset = self.execute('Any X,N ORDERBY N WITH N,X BEING '
- '((Any N,X WHERE X is CWGroup, X name N)'
- ' UNION '
- ' (Any N,X WHERE X is Bookmark, X title N))')
- entity, rtype = rset.related_entity(0, 1)
- self.assertEqual(entity.eid, e.eid)
- self.assertEqual(rtype, 'title')
- self.assertEqual(entity.title, 'aaaa')
+ with self.admin_access.web_request() as req:
+ e = req.create_entity('Bookmark', title=u'aaaa', path=u'path')
+ rset = req.execute('Any X,N ORDERBY N WITH N,X BEING '
+ '((Any N,X WHERE X is CWGroup, X name N)'
+ ' UNION '
+ ' (Any N,X WHERE X is Bookmark, X title N))')
+ entity, rtype = rset.related_entity(0, 1)
+ self.assertEqual(entity.eid, e.eid)
+ self.assertEqual(rtype, 'title')
+ self.assertEqual(entity.title, 'aaaa')
def test_related_entity_union_subquery_4(self):
- e = self.request().create_entity('Bookmark', title=u'aaaa', path=u'path')
- rset = self.execute('Any X,X, N ORDERBY N WITH X,N BEING '
- '((Any X,N WHERE X is CWGroup, X name N)'
- ' UNION '
- ' (Any X,N WHERE X is Bookmark, X title N))')
- entity, rtype = rset.related_entity(0, 2)
- self.assertEqual(entity.eid, e.eid)
- self.assertEqual(rtype, 'title')
- self.assertEqual(entity.title, 'aaaa')
+ with self.admin_access.web_request() as req:
+ e = req.create_entity('Bookmark', title=u'aaaa', path=u'path')
+ rset = req.execute('Any X,X, N ORDERBY N WITH X,N BEING '
+ '((Any X,N WHERE X is CWGroup, X name N)'
+ ' UNION '
+ ' (Any X,N WHERE X is Bookmark, X title N))')
+ entity, rtype = rset.related_entity(0, 2)
+ self.assertEqual(entity.eid, e.eid)
+ self.assertEqual(rtype, 'title')
+ self.assertEqual(entity.title, 'aaaa')
def test_related_entity_trap_subquery(self):
- req = self.request()
- req.create_entity('Bookmark', title=u'test bookmark', path=u'')
- self.execute('SET B bookmarked_by U WHERE U login "admin"')
- rset = self.execute('Any B,T,L WHERE B bookmarked_by U, U login L '
- 'WITH B,T BEING (Any B,T WHERE B is Bookmark, B title T)')
- rset.related_entity(0, 2)
+ with self.admin_access.web_request() as req:
+ req.create_entity('Bookmark', title=u'test bookmark', path=u'')
+ req.execute('SET B bookmarked_by U WHERE U login "admin"')
+ rset = req.execute('Any B,T,L WHERE B bookmarked_by U, U login L '
+ 'WITH B,T BEING (Any B,T WHERE B is Bookmark, B title T)')
+ rset.related_entity(0, 2)
def test_related_entity_subquery_outerjoin(self):
- rset = self.execute('Any X,S,L WHERE X in_state S '
- 'WITH X, L BEING (Any X,MAX(L) GROUPBY X '
- 'WHERE X is CWUser, T? wf_info_for X, T creation_date L)')
- self.assertEqual(len(rset), 2)
- rset.related_entity(0, 1)
- rset.related_entity(0, 2)
+ with self.admin_access.web_request() as req:
+ rset = req.execute('Any X,S,L WHERE X in_state S '
+ 'WITH X, L BEING (Any X,MAX(L) GROUPBY X '
+ 'WHERE X is CWUser, T? wf_info_for X, T creation_date L)')
+ self.assertEqual(len(rset), 2)
+ rset.related_entity(0, 1)
+ rset.related_entity(0, 2)
def test_entities(self):
- rset = self.execute('Any U,G WHERE U in_group G')
- # make sure we have at least one element
- self.assertTrue(rset)
- self.assertEqual(set(e.e_schema.type for e in rset.entities(0)),
- set(['CWUser',]))
- self.assertEqual(set(e.e_schema.type for e in rset.entities(1)),
- set(['CWGroup',]))
+ with self.admin_access.web_request() as req:
+ rset = req.execute('Any U,G WHERE U in_group G')
+ # make sure we have at least one element
+ self.assertTrue(rset)
+ self.assertEqual(set(e.e_schema.type for e in rset.entities(0)),
+ set(['CWUser',]))
+ self.assertEqual(set(e.e_schema.type for e in rset.entities(1)),
+ set(['CWGroup',]))
def test_iter_rows_with_entities(self):
- rset = self.execute('Any U,UN,G,GN WHERE U in_group G, U login UN, G name GN')
- # make sure we have at least one element
- self.assertTrue(rset)
- out = list(rset.iter_rows_with_entities())[0]
- self.assertEqual( out[0].login, out[1] )
- self.assertEqual( out[2].name, out[3] )
+ with self.admin_access.web_request() as req:
+ rset = req.execute('Any U,UN,G,GN WHERE U in_group G, U login UN, G name GN')
+ # make sure we have at least one element
+ self.assertTrue(rset)
+ out = list(rset.iter_rows_with_entities())[0]
+ self.assertEqual( out[0].login, out[1] )
+ self.assertEqual( out[2].name, out[3] )
def test_printable_rql(self):
- rset = self.execute(u'CWEType X WHERE X final FALSE')
- self.assertEqual(rset.printable_rql(),
- 'Any X WHERE X final FALSE, X is CWEType')
+ with self.admin_access.web_request() as req:
+ rset = req.execute(u'CWEType X WHERE X final FALSE')
+ self.assertEqual(rset.printable_rql(),
+ 'Any X WHERE X final FALSE, X is CWEType')
def test_searched_text(self):
- rset = self.execute(u'Any X WHERE X has_text "foobar"')
- self.assertEqual(rset.searched_text(), 'foobar')
- rset = self.execute(u'Any X WHERE X has_text %(text)s', {'text' : 'foo'})
- self.assertEqual(rset.searched_text(), 'foo')
+ with self.admin_access.web_request() as req:
+ rset = req.execute(u'Any X WHERE X has_text "foobar"')
+ self.assertEqual(rset.searched_text(), 'foobar')
+ rset = req.execute(u'Any X WHERE X has_text %(text)s', {'text' : 'foo'})
+ self.assertEqual(rset.searched_text(), 'foo')
def test_union_limited_rql(self):
- rset = self.execute('(Any X,N WHERE X is Bookmark, X title N)'
- ' UNION '
- '(Any X,N WHERE X is CWGroup, X name N)')
- rset.limit(2, 10, inplace=True)
- self.assertEqual(rset.limited_rql(),
- 'Any A,B LIMIT 2 OFFSET 10 '
- 'WITH A,B BEING ('
- '(Any X,N WHERE X is Bookmark, X title N) '
- 'UNION '
- '(Any X,N WHERE X is CWGroup, X name N)'
- ')')
+ with self.admin_access.web_request() as req:
+ rset = req.execute('(Any X,N WHERE X is Bookmark, X title N)'
+ ' UNION '
+ '(Any X,N WHERE X is CWGroup, X name N)')
+ rset.limit(2, 10, inplace=True)
+ self.assertEqual(rset.limited_rql(),
+ 'Any A,B LIMIT 2 OFFSET 10 '
+ 'WITH A,B BEING ('
+ '(Any X,N WHERE X is Bookmark, X title N) '
+ 'UNION '
+ '(Any X,N WHERE X is CWGroup, X name N)'
+ ')')
def test_count_users_by_date(self):
- rset = self.execute('Any D, COUNT(U) GROUPBY D WHERE U is CWUser, U creation_date D')
- self.assertEqual(rset.related_entity(0,0), (None, None))
+ with self.admin_access.web_request() as req:
+ rset = req.execute('Any D, COUNT(U) GROUPBY D WHERE U is CWUser, U creation_date D')
+ self.assertEqual(rset.related_entity(0,0), (None, None))
def test_str(self):
- rset = self.execute('(Any X,N WHERE X is CWGroup, X name N)')
- self.assertIsInstance(str(rset), basestring)
- self.assertEqual(len(str(rset).splitlines()), 1)
+ with self.admin_access.web_request() as req:
+ rset = req.execute('(Any X,N WHERE X is CWGroup, X name N)')
+ self.assertIsInstance(str(rset), basestring)
+ self.assertEqual(len(str(rset).splitlines()), 1)
def test_repr(self):
- rset = self.execute('(Any X,N WHERE X is CWGroup, X name N)')
- self.assertIsInstance(repr(rset), basestring)
- self.assertTrue(len(repr(rset).splitlines()) > 1)
+ with self.admin_access.web_request() as req:
+ rset = req.execute('(Any X,N WHERE X is CWGroup, X name N)')
+ self.assertIsInstance(repr(rset), basestring)
+ self.assertTrue(len(repr(rset).splitlines()) > 1)
- rset = self.execute('(Any X WHERE X is CWGroup, X name "managers")')
- self.assertIsInstance(str(rset), basestring)
- self.assertEqual(len(str(rset).splitlines()), 1)
+ rset = req.execute('(Any X WHERE X is CWGroup, X name "managers")')
+ self.assertIsInstance(str(rset), basestring)
+ self.assertEqual(len(str(rset).splitlines()), 1)
if __name__ == '__main__':
--- a/test/unittest_schema.py Tue Jun 10 09:35:26 2014 +0200
+++ b/test/unittest_schema.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -248,8 +248,8 @@
def test_fulltext_container(self):
schema = loader.load(config)
- self.assertTrue('has_text' in schema['CWUser'].subject_relations())
- self.assertFalse('has_text' in schema['EmailAddress'].subject_relations())
+ self.assertIn('has_text', schema['CWUser'].subject_relations())
+ self.assertNotIn('has_text', schema['EmailAddress'].subject_relations())
def test_permission_settings(self):
schema = loader.load(config)
@@ -329,6 +329,7 @@
self.assertEqual(normalize_expression('X bla Y,Y blur Z , Z zigoulou X '),
'X bla Y, Y blur Z, Z zigoulou X')
+
class RQLExpressionTC(TestCase):
def test_comparison(self):
self.assertEqual(ERQLExpression('X is CWUser', 'X', 0),
@@ -336,6 +337,7 @@
self.assertNotEqual(ERQLExpression('X is CWUser', 'X', 0),
ERQLExpression('X is CWGroup', 'X', 0))
+
class GuessRrqlExprMainVarsTC(TestCase):
def test_exists(self):
mainvars = guess_rrqlexpr_mainvars(normalize_expression('NOT EXISTS(O team_competition C, C level < 3, C concerns S)'))
@@ -345,15 +347,111 @@
class RQLConstraintTC(CubicWebTC):
def test_user_constraint(self):
cstr = RQLConstraint('U identity O')
- anoneid = self.execute('Any X WHERE X login "anon"')[0][0]
- self.assertRaises(ValidationError, cstr.repo_check, self.session, 1, 'rel', anoneid)
- self.assertEqual(cstr.repo_check(self.session, 1, self.session.user.eid),
- None) # no validation error, constraint checked
+ with self.admin_access.repo_cnx() as cnx:
+ anoneid = cnx.execute('Any X WHERE X login "anon"')[0][0]
+ self.assertRaises(ValidationError,
+ cstr.repo_check, cnx, 1, 'rel', anoneid)
+ self.assertEqual(cstr.repo_check(cnx, 1, cnx.user.eid),
+ None) # no validation error, constraint checked
+
class WorkflowShemaTC(CubicWebTC):
def test_trinfo_default_format(self):
- tr = self.request().user.cw_adapt_to('IWorkflowable').fire_transition('deactivate')
- self.assertEqual(tr.comment_format, 'text/plain')
+ with self.admin_access.web_request() as req:
+ tr = req.user.cw_adapt_to('IWorkflowable').fire_transition('deactivate')
+ self.assertEqual(tr.comment_format, 'text/plain')
+
+
+class CompositeSchemaTC(CubicWebTC):
+ composites = {
+ 'BaseTransition': [('condition', 'BaseTransition', 'RQLExpression', 'subject')],
+ 'CWAttribute': [('add_permission', 'CWAttribute', 'RQLExpression', 'subject'),
+ ('constrained_by', 'CWAttribute', 'CWConstraint', 'subject'),
+ ('read_permission', 'CWAttribute', 'RQLExpression', 'subject'),
+ ('update_permission', 'CWAttribute', 'RQLExpression', 'subject')],
+ 'CWEType': [('add_permission', 'CWEType', 'RQLExpression', 'subject'),
+ ('constraint_of', 'CWUniqueTogetherConstraint', 'CWEType', 'object'),
+ ('cw_schema', 'CWSourceSchemaConfig', 'CWEType', 'object'),
+ ('delete_permission', 'CWEType', 'RQLExpression', 'subject'),
+ ('from_entity', 'CWAttribute', 'CWEType', 'object'),
+ ('from_entity', 'CWRelation', 'CWEType', 'object'),
+ ('read_permission', 'CWEType', 'RQLExpression', 'subject'),
+ ('to_entity', 'CWAttribute', 'CWEType', 'object'),
+ ('to_entity', 'CWRelation', 'CWEType', 'object'),
+ ('update_permission', 'CWEType', 'RQLExpression', 'subject')],
+ 'CWRType': [('cw_schema', 'CWSourceSchemaConfig', 'CWRType', 'object'),
+ ('relation_type', 'CWAttribute', 'CWRType', 'object'),
+ ('relation_type', 'CWRelation', 'CWRType', 'object')],
+ 'CWRelation': [('add_permission', 'CWRelation', 'RQLExpression', 'subject'),
+ ('constrained_by', 'CWRelation', 'CWConstraint', 'subject'),
+ ('cw_schema', 'CWSourceSchemaConfig', 'CWRelation', 'object'),
+ ('delete_permission', 'CWRelation', 'RQLExpression', 'subject'),
+ ('read_permission', 'CWRelation', 'RQLExpression', 'subject')],
+ 'CWSource': [('cw_for_source', 'CWSourceSchemaConfig', 'CWSource', 'object'),
+ ('cw_host_config_of', 'CWSourceHostConfig', 'CWSource', 'object'),
+ ('cw_import_of', 'CWDataImport', 'CWSource', 'object'),
+ ('cw_source', 'Ami', 'CWSource', 'object'),
+ ('cw_source', 'BaseTransition', 'CWSource', 'object'),
+ ('cw_source', 'Bookmark', 'CWSource', 'object'),
+ ('cw_source', 'CWAttribute', 'CWSource', 'object'),
+ ('cw_source', 'CWCache', 'CWSource', 'object'),
+ ('cw_source', 'CWConstraint', 'CWSource', 'object'),
+ ('cw_source', 'CWConstraintType', 'CWSource', 'object'),
+ ('cw_source', 'CWDataImport', 'CWSource', 'object'),
+ ('cw_source', 'CWEType', 'CWSource', 'object'),
+ ('cw_source', 'CWGroup', 'CWSource', 'object'),
+ ('cw_source', 'CWPermission', 'CWSource', 'object'),
+ ('cw_source', 'CWProperty', 'CWSource', 'object'),
+ ('cw_source', 'CWRType', 'CWSource', 'object'),
+ ('cw_source', 'CWRelation', 'CWSource', 'object'),
+ ('cw_source', 'CWSource', 'CWSource', 'object'),
+ ('cw_source', 'CWSourceHostConfig', 'CWSource', 'object'),
+ ('cw_source', 'CWSourceSchemaConfig', 'CWSource', 'object'),
+ ('cw_source', 'CWUniqueTogetherConstraint', 'CWSource', 'object'),
+ ('cw_source', 'CWUser', 'CWSource', 'object'),
+ ('cw_source', 'Card', 'CWSource', 'object'),
+ ('cw_source', 'EmailAddress', 'CWSource', 'object'),
+ ('cw_source', 'ExternalUri', 'CWSource', 'object'),
+ ('cw_source', 'File', 'CWSource', 'object'),
+ ('cw_source', 'Note', 'CWSource', 'object'),
+ ('cw_source', 'Personne', 'CWSource', 'object'),
+ ('cw_source', 'Produit', 'CWSource', 'object'),
+ ('cw_source', 'RQLExpression', 'CWSource', 'object'),
+ ('cw_source', 'Service', 'CWSource', 'object'),
+ ('cw_source', 'Societe', 'CWSource', 'object'),
+ ('cw_source', 'State', 'CWSource', 'object'),
+ ('cw_source', 'StateFull', 'CWSource', 'object'),
+ ('cw_source', 'SubNote', 'CWSource', 'object'),
+ ('cw_source', 'SubWorkflowExitPoint', 'CWSource', 'object'),
+ ('cw_source', 'Tag', 'CWSource', 'object'),
+ ('cw_source', 'TrInfo', 'CWSource', 'object'),
+ ('cw_source', 'Transition', 'CWSource', 'object'),
+ ('cw_source', 'Usine', 'CWSource', 'object'),
+ ('cw_source', 'Workflow', 'CWSource', 'object'),
+ ('cw_source', 'WorkflowTransition', 'CWSource', 'object')],
+ 'CWUser': [('for_user', 'CWProperty', 'CWUser', 'object'),
+ ('use_email', 'CWUser', 'EmailAddress', 'subject'),
+ ('wf_info_for', 'TrInfo', 'CWUser', 'object')],
+ 'StateFull': [('wf_info_for', 'TrInfo', 'StateFull', 'object')],
+ 'Transition': [('condition', 'Transition', 'RQLExpression', 'subject')],
+ 'Workflow': [('state_of', 'State', 'Workflow', 'object'),
+ ('transition_of', 'BaseTransition', 'Workflow', 'object'),
+ ('transition_of', 'Transition', 'Workflow', 'object'),
+ ('transition_of', 'WorkflowTransition', 'Workflow', 'object')],
+ 'WorkflowTransition': [('condition', 'WorkflowTransition', 'RQLExpression', 'subject'),
+ ('subworkflow_exit', 'WorkflowTransition', 'SubWorkflowExitPoint', 'subject')]
+ }
+
+ def test_composite_entities(self):
+ schema = self.vreg.schema
+ self.assertEqual(sorted(self.composites),
+ [eschema.type for eschema in sorted(schema.entities())
+ if eschema.is_composite])
+ for etype in self.composites:
+ self.set_description('composite rdefs for %s' % etype)
+ yield self.assertEqual, self.composites[etype], \
+ sorted([(r.rtype.type, r.subject.type, r.object.type, role)
+ for r, role in sorted(schema[etype].composite_rdef_roles)])
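
For reference, the `composite_rdef_roles` attribute exercised here can be
inspected directly on any entity schema; a short sketch, assuming a loaded
`schema` as in the tests above:

.. sourcecode:: python

    eschema = schema['CWUser']
    if eschema.is_composite:
        # each item is a (relation definition, role) pair, where the rdef
        # carries its relation type, subject type and object type
        for rdef, role in eschema.composite_rdef_roles:
            print rdef.rtype.type, rdef.subject.type, rdef.object.type, role
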
if __name__ == '__main__':
unittest_main()
--- a/test/unittest_utils.py Tue Jun 10 09:35:26 2014 +0200
+++ b/test/unittest_utils.py Tue Jun 10 09:49:45 2014 +0200
@@ -229,11 +229,11 @@
class HTMLHeadTC(CubicWebTC):
def htmlhead(self, datadir_url):
- req = self.request()
- base_url = u'http://test.fr/data/'
- req.datadir_url = base_url
- head = HTMLHead(req)
- return head
+ with self.admin_access.web_request() as req:
+ base_url = u'http://test.fr/data/'
+ req.datadir_url = base_url
+ head = HTMLHead(req)
+ return head
def test_concat_urls(self):
base_url = u'http://test.fr/data/'
--- a/test/unittest_vregistry.py Tue Jun 10 09:35:26 2014 +0200
+++ b/test/unittest_vregistry.py Tue Jun 10 09:49:45 2014 +0200
@@ -74,7 +74,7 @@
def test_properties(self):
self.vreg.reset()
- self.assertFalse('system.version.cubicweb' in self.vreg['propertydefs'])
+ self.assertNotIn('system.version.cubicweb', self.vreg['propertydefs'])
self.assertTrue(self.vreg.property_info('system.version.cubicweb'))
self.assertRaises(UnknownProperty, self.vreg.property_info, 'a.non.existent.key')
--- a/transaction.py Tue Jun 10 09:35:26 2014 +0200
+++ b/transaction.py Tue Jun 10 09:49:45 2014 +0200
@@ -53,7 +53,17 @@
self.datetime = time
self.user_eid = ueid
# should be set by the dbapi connection
- self.req = None
+ self.req = None # old style
+ self.cnx = None # new style
+
+ def _execute(self, *args, **kwargs):
+ """execute a query using either the req or the cnx"""
+ if self.req is None:
+ execute = self.cnx.execute
+ else:
+            execute = self.req.execute
+ return execute(*args, **kwargs)
+
def __repr__(self):
return '<Transaction %s by %s on %s>' % (
@@ -63,8 +73,8 @@
"""return the user entity which has done the transaction,
none if not found.
"""
- return self.req.execute('Any X WHERE X eid %(x)s',
- {'x': self.user_eid}).get_entity(0, 0)
+ return self._execute('Any X WHERE X eid %(x)s',
+ {'x': self.user_eid}).get_entity(0, 0)
def actions_list(self, public=True):
"""return an ordered list of action effectued during that transaction
@@ -72,7 +82,11 @@
if public is true, return only 'public' action, eg not ones triggered
under the cover by hooks.
"""
- return self.req.cnx.transaction_actions(self.uuid, public)
+ if self.req is not None:
+ cnx = self.req.cnx
+ else:
+ cnx = self.cnx
+ return cnx.transaction_actions(self.uuid, public)
class AbstractAction(object):
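
With both attributes in place, the same `Transaction` object can be driven
by either API; a hypothetical sketch, where `tx` and `cnx` stand for a
transaction and a new-style connection obtained elsewhere:

.. sourcecode:: python

    tx.cnx = cnx                 # new style: no dbapi req attached
    user = tx.user()             # dispatched to cnx.execute() by _execute()
    actions = tx.actions_list()  # dispatched to cnx.transaction_actions()
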
--- a/utils.py Tue Jun 10 09:35:26 2014 +0200
+++ b/utils.py Tue Jun 10 09:49:45 2014 +0200
@@ -26,6 +26,7 @@
import datetime
import random
import re
+import json
from operator import itemgetter
from inspect import getargspec
@@ -39,6 +40,7 @@
from logilab.mtconverter import xml_escape
from logilab.common.deprecation import deprecated
+from logilab.common.date import ustrftime
_MARKER = object()
@@ -465,77 +467,66 @@
self.head.getvalue(),
self.body.getvalue())
-try:
- # may not be there if cubicweb-web not installed
- if sys.version_info < (2, 6):
- import simplejson as json
- else:
- import json
-except ImportError:
- json_dumps = JSString = None
-else:
- from logilab.common.date import ustrftime
-
- class CubicWebJsonEncoder(json.JSONEncoder):
- """define a json encoder to be able to encode yams std types"""
+class CubicWebJsonEncoder(json.JSONEncoder):
+ """define a json encoder to be able to encode yams std types"""
- def default(self, obj):
- if hasattr(obj, '__json_encode__'):
- return obj.__json_encode__()
- if isinstance(obj, datetime.datetime):
- return ustrftime(obj, '%Y/%m/%d %H:%M:%S')
- elif isinstance(obj, datetime.date):
- return ustrftime(obj, '%Y/%m/%d')
- elif isinstance(obj, datetime.time):
- return obj.strftime('%H:%M:%S')
- elif isinstance(obj, datetime.timedelta):
- return (obj.days * 24 * 60 * 60) + obj.seconds
- elif isinstance(obj, decimal.Decimal):
- return float(obj)
- try:
- return json.JSONEncoder.default(self, obj)
- except TypeError:
- # we never ever want to fail because of an unknown type,
- # just return None in those cases.
- return None
+ def default(self, obj):
+ if hasattr(obj, '__json_encode__'):
+ return obj.__json_encode__()
+ if isinstance(obj, datetime.datetime):
+ return ustrftime(obj, '%Y/%m/%d %H:%M:%S')
+ elif isinstance(obj, datetime.date):
+ return ustrftime(obj, '%Y/%m/%d')
+ elif isinstance(obj, datetime.time):
+ return obj.strftime('%H:%M:%S')
+ elif isinstance(obj, datetime.timedelta):
+ return (obj.days * 24 * 60 * 60) + obj.seconds
+ elif isinstance(obj, decimal.Decimal):
+ return float(obj)
+ try:
+ return json.JSONEncoder.default(self, obj)
+ except TypeError:
+ # we never ever want to fail because of an unknown type,
+ # just return None in those cases.
+ return None
- def json_dumps(value, **kwargs):
- return json.dumps(value, cls=CubicWebJsonEncoder, **kwargs)
+def json_dumps(value, **kwargs):
+ return json.dumps(value, cls=CubicWebJsonEncoder, **kwargs)
- class JSString(str):
- """use this string sub class in values given to :func:`js_dumps` to
- insert raw javascript chain in some JSON string
- """
+class JSString(str):
+ """use this string sub class in values given to :func:`js_dumps` to
+ insert raw javascript chain in some JSON string
+ """
- def _dict2js(d, predictable=False):
- res = [key + ': ' + js_dumps(val, predictable)
- for key, val in d.iteritems()]
- return '{%s}' % ', '.join(res)
+def _dict2js(d, predictable=False):
+ res = [key + ': ' + js_dumps(val, predictable)
+ for key, val in d.iteritems()]
+ return '{%s}' % ', '.join(res)
- def _list2js(l, predictable=False):
- return '[%s]' % ', '.join([js_dumps(val, predictable) for val in l])
+def _list2js(l, predictable=False):
+ return '[%s]' % ', '.join([js_dumps(val, predictable) for val in l])
- def js_dumps(something, predictable=False):
- """similar as :func:`json_dumps`, except values which are instances of
- :class:`JSString` are expected to be valid javascript and will be output
- as is
+def js_dumps(something, predictable=False):
+ """similar as :func:`json_dumps`, except values which are instances of
+ :class:`JSString` are expected to be valid javascript and will be output
+ as is
- >>> js_dumps({'hop': JSString('$.hop'), 'bar': None}, predictable=True)
- '{bar: null, hop: $.hop}'
- >>> js_dumps({'hop': '$.hop'})
- '{hop: "$.hop"}'
- >>> js_dumps({'hip': {'hop': JSString('momo')}})
- '{hip: {hop: momo}}'
- """
- if isinstance(something, dict):
- return _dict2js(something, predictable)
- if isinstance(something, list):
- return _list2js(something, predictable)
- if isinstance(something, JSString):
- return something
- return json_dumps(something)
+ >>> js_dumps({'hop': JSString('$.hop'), 'bar': None}, predictable=True)
+ '{bar: null, hop: $.hop}'
+ >>> js_dumps({'hop': '$.hop'})
+ '{hop: "$.hop"}'
+ >>> js_dumps({'hip': {'hop': JSString('momo')}})
+ '{hip: {hop: momo}}'
+ """
+ if isinstance(something, dict):
+ return _dict2js(something, predictable)
+ if isinstance(something, list):
+ return _list2js(something, predictable)
+ if isinstance(something, JSString):
+ return something
+ return json_dumps(something)
PERCENT_IN_URLQUOTE_RE = re.compile(r'%(?=[0-9a-fA-F]{2})')
def js_href(javascript_code):
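
Since the `json` import is now unconditional, the helpers above are always
available; a minimal sketch of their behaviour, derived from the encoder
code in this hunk:

.. sourcecode:: python

    from datetime import date, timedelta
    from cubicweb.utils import json_dumps, js_dumps, JSString

    json_dumps({'day': date(2014, 6, 10)})     # '{"day": "2014/06/10"}'
    json_dumps({'delta': timedelta(hours=1)})  # '{"delta": 3600}'
    js_dumps({'onclick': JSString('$.noop')})  # '{onclick: $.noop}'
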
--- a/web/action.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/action.py Tue Jun 10 09:49:45 2014 +0200
@@ -25,43 +25,11 @@
The most important method from a developper point of view in the
:meth:'Action.url' method, which returns a URL on which the navigation
-should directed to perform the action. There are two common ways of
-writing that method:
-
-* do nothing special and simply return a URL to the current rset with
- a special view (with `self._cw.build_url(...)` for instance)
-
-* define an inner function `callback_func(req, *args)` which will do
- the work and call it through `self._cw.user_callback(callback_func,
- args, msg)`: this method will return a URL which calls the inner
- function, and displays the message in the web interface when the
- callback has completed (and report any exception occuring in the
- callback too)
-
-Many examples of the first approach are available in :mod:`cubicweb.web.views.actions`.
-
-Here is an example of the second approach:
-
-.. sourcecode:: python
+should be directed to perform the action. The common way of
+writing that method is to simply return a URL to the current rset with a
+special view (with `self._cw.build_url(...)` for instance).
- from cubicweb.web import action
- class SomeAction(action.Action):
- __regid__ = 'mycube_some_action'
- title = _(some action)
- __select__ = action.Action.__select__ & is_instance('TargetEntity')
-
- def url(self):
- if self.cw_row is None:
- eids = [row[0] for row in self.cw_rset]
- else:
- eids = (self.cw_rset[self.cw_row][self.cw_col or 0],)
- def do_action(req, eids):
- for eid in eids:
- entity = req.entity_from_eid(eid, 'TargetEntity')
- entity.perform_action()
- msg = self._cw._('some_action performed')
- return self._cw.user_callback(do_action, (eids,), msg)
-
+Many examples are available in :mod:`cubicweb.web.views.actions`.
"""
__docformat__ = "restructuredtext en"
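
A minimal sketch of the remaining recommended pattern (cube, entity and
view identifiers are made up for illustration):

.. sourcecode:: python

    from cubicweb.predicates import is_instance
    from cubicweb.web import action

    _ = unicode  # i18n marker convention of the era

    class SomeAction(action.Action):
        __regid__ = 'mycube_some_action'
        __select__ = action.Action.__select__ & is_instance('TargetEntity')
        title = _('some action')

        def url(self):
            # simply point the browser at the current rset rendered
            # with a specific view
            return self._cw.build_url(rql=self.cw_rset.printable_rql(),
                                      vid='mycube.someview')
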
--- a/web/application.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/application.py Tue Jun 10 09:49:45 2014 +0200
@@ -35,8 +35,8 @@
ValidationError, Unauthorized, Forbidden,
AuthenticationError, NoSelectableObject,
BadConnectionId, CW_EVENT_MANAGER)
-from cubicweb.dbapi import DBAPISession, anonymous_session
-from cubicweb.web import LOGGER, component
+from cubicweb.repoapi import anonymous_cnx
+from cubicweb.web import LOGGER, component, cors
from cubicweb.web import (
StatusResponse, DirectResponse, Redirect, NotFound, LogOut,
RemoteCallFailed, InvalidSession, RequestError, PublishException)
@@ -50,20 +50,23 @@
@contextmanager
def anonymized_request(req):
- orig_session = req.session
- req.set_session(anonymous_session(req.vreg))
+ orig_cnx = req.cnx
+ anon_clt_cnx = anonymous_cnx(orig_cnx._session.repo)
+ req.set_cnx(anon_clt_cnx)
try:
- yield req
+ with anon_clt_cnx:
+ yield req
finally:
- req.set_session(orig_session)
+ req.set_cnx(orig_cnx)
class AbstractSessionManager(component.Component):
"""manage session data associated to a session identifier"""
__regid__ = 'sessionmanager'
- def __init__(self, vreg):
+ def __init__(self, repo):
+ vreg = repo.vreg
self.session_time = vreg.config['http-session-time'] or None
- self.authmanager = vreg['components'].select('authmanager', vreg=vreg)
+ self.authmanager = vreg['components'].select('authmanager', repo=repo)
interval = (self.session_time or 0) / 2.
if vreg.config.anonymous_user()[0] is not None:
self.cleanup_anon_session_time = vreg.config['cleanup-anonymous-session-time'] or 5 * 60
@@ -111,8 +114,7 @@
raise NotImplementedError()
def open_session(self, req):
- """open and return a new session for the given request. The session is
- also bound to the request.
+ """open and return a new session for the given request.
raise :exc:`cubicweb.AuthenticationError` if authentication failed
(no authentication info found or wrong user/password)
@@ -130,8 +132,8 @@
"""authenticate user associated to a request and check session validity"""
__regid__ = 'authmanager'
- def __init__(self, vreg):
- self.vreg = vreg
+ def __init__(self, repo):
+ self.vreg = repo.vreg
def validate_session(self, req, session):
"""check session validity, reconnecting it to the repository if the
@@ -159,9 +161,10 @@
"""a session handler using a cookie to store the session identifier"""
def __init__(self, appli):
+ self.repo = appli.repo
self.vreg = appli.vreg
self.session_manager = self.vreg['components'].select('sessionmanager',
- vreg=self.vreg)
+ repo=self.repo)
global SESSION_MANAGER
SESSION_MANAGER = self.session_manager
if self.vreg.config.mode != 'test':
@@ -173,7 +176,7 @@
def reset_session_manager(self):
data = self.session_manager.dump_data()
self.session_manager = self.vreg['components'].select('sessionmanager',
- vreg=self.vreg)
+ repo=self.repo)
self.session_manager.restore_data(data)
global SESSION_MANAGER
SESSION_MANAGER = self.session_manager
@@ -196,66 +199,40 @@
return '__%s_https_session' % self.vreg.config.appid
return '__%s_session' % self.vreg.config.appid
- def set_session(self, req):
- """associate a session to the request
+ def get_session(self, req):
+ """Return a session object corresponding to credentials held by the req
Session id is searched from :
- # form variable
- cookie
- if no session id is found, open a new session for the connected user
- or request authentification as needed
+ If no session id is found, try opening a new session with credentials
+ found in the request.
- :raise Redirect: if authentication has occurred and succeed
+ Raises AuthenticationError if no session can be found or created.
"""
cookie = req.get_cookie()
sessioncookie = self.session_cookie(req)
try:
sessionid = str(cookie[sessioncookie].value)
- except KeyError: # no session cookie
+ session = self.get_session_by_id(req, sessionid)
+ except (KeyError, InvalidSession): # no valid session cookie
session = self.open_session(req)
- else:
- try:
- session = self.get_session(req, sessionid)
- except InvalidSession:
- # try to open a new session, so we get an anonymous session if
- # allowed
- session = self.open_session(req)
- else:
- if not session.cnx:
- # session exists but is not bound to a connection. We should
- # try to authenticate
- loginsucceed = False
- try:
- if self.open_session(req, allow_no_cnx=False):
- loginsucceed = True
- except Redirect:
- # may be raised in open_session (by postlogin mechanism)
- # on successful connection
- loginsucceed = True
- raise
- except AuthenticationError:
- # authentication failed, continue to use this session
- req.set_session(session)
- finally:
- if loginsucceed:
- # session should be replaced by new session created
- # in open_session
- self.session_manager.close_session(session)
+ return session
- def get_session(self, req, sessionid):
+ def get_session_by_id(self, req, sessionid):
session = self.session_manager.get_session(req, sessionid)
session.mtime = time()
return session
- def open_session(self, req, allow_no_cnx=True):
- session = self.session_manager.open_session(req, allow_no_cnx=allow_no_cnx)
+ def open_session(self, req):
+ session = self.session_manager.open_session(req)
sessioncookie = self.session_cookie(req)
secure = req.https and req.base_url().startswith('https://')
req.set_cookie(sessioncookie, session.sessionid,
maxage=None, secure=secure)
if not session.anonymous_session:
- self.session_manager.postlogin(req)
+ self.session_manager.postlogin(req, session)
return session
def logout(self, req, goto_url):
@@ -277,21 +254,20 @@
The http server will call its main entry point ``application.handle_request``.
.. automethod:: cubicweb.web.application.CubicWebPublisher.main_handle_request
+
+    You have to provide both a repository and a web-server config at
+    initialization. In an all-in-one instance both configs are the same.
"""
- def __init__(self, config,
- session_handler_fact=CookieSessionHandler,
- vreg=None):
+ def __init__(self, repo, config, session_handler_fact=CookieSessionHandler):
self.info('starting web instance from %s', config.apphome)
- if vreg is None:
- vreg = cwvreg.CWRegistryStore(config)
- self.vreg = vreg
- # connect to the repository and get instance's schema
- self.repo = config.repository(vreg)
- if not vreg.initialized:
+ self.repo = repo
+ self.vreg = repo.vreg
+ # get instance's schema
+ if not self.vreg.initialized:
config.init_cubes(self.repo.get_cubes())
- vreg.init_properties(self.repo.properties())
- vreg.set_schema(self.repo.get_schema())
+ self.vreg.init_properties(self.repo.properties())
+ self.vreg.set_schema(self.repo.get_schema())
# set the correct publish method
if config['query-log-file']:
from threading import Lock
@@ -310,12 +286,12 @@
self.url_resolver = self.vreg['components'].select('urlpublisher',
vreg=self.vreg)
- def connect(self, req):
- """return a connection for a logged user object according to existing
- sessions (i.e. a new connection may be created or an already existing
- one may be reused
+ def get_session(self, req):
+ """Return a session object corresponding to credentials held by the req
+
+ May raise AuthenticationError.
"""
- self.session_handler.set_session(req)
+ return self.session_handler.get_session(req)
# publish methods #########################################################
@@ -362,7 +338,24 @@
req.set_header('WWW-Authenticate', [('Basic', {'realm' : realm })], raw=False)
content = ''
try:
- self.connect(req)
+ try:
+ session = self.get_session(req)
+ from cubicweb import repoapi
+ cnx = repoapi.ClientConnection(session)
+ req.set_cnx(cnx)
+ except AuthenticationError:
+                # Keep the dummy session set at initialisation. Such a
+                # session will work to some extent but raises an
+                # AuthenticationError on any database access.
+ import contextlib
+ @contextlib.contextmanager
+ def dummy():
+ yield
+ cnx = dummy()
+ # XXX We want to clean up this approach in the future. But
+ # several cubes like registration or forgotten password rely on
+ # this principle.
+
# DENY https acces for anonymous_user
if (req.https
and req.session.anonymous_session
@@ -373,7 +366,8 @@
# handler
try:
### Try to generate the actual request content
- content = self.core_handle(req, path)
+ with cnx:
+ content = self.core_handle(req, path)
# Handle user log-out
except LogOut as ex:
# When authentification is handled by cookie the code that
@@ -421,6 +415,7 @@
content = self.need_login_content(req)
return content
+
def core_handle(self, req, path):
"""method called by the main publisher to process <path>
@@ -446,6 +441,8 @@
try:
### standard processing of the request
try:
+ # apply CORS sanity checks
+ cors.process_request(req, self.vreg.config)
ctrlid, rset = self.url_resolver.process(req, path)
try:
controller = self.vreg['controllers'].select(ctrlid, req,
@@ -454,6 +451,10 @@
raise Unauthorized(req._('not authorized'))
req.update_search_state()
result = controller.publish(rset=rset)
+ except cors.CORSPreflight:
+ # Return directly an empty 200
+ req.status_out = 200
+ result = ''
except StatusResponse as ex:
warn('[3.16] StatusResponse is deprecated use req.status_out',
DeprecationWarning, stacklevel=2)
@@ -479,7 +480,7 @@
except Unauthorized as ex:
req.data['errmsg'] = req._('You\'re not authorized to access this page. '
'If you think you should, please contact the site administrator.')
- req.status_out = httplib.UNAUTHORIZED
+ req.status_out = httplib.FORBIDDEN
result = self.error_handler(req, ex, tb=False)
except Forbidden as ex:
req.data['errmsg'] = req._('This action is forbidden. '
@@ -506,9 +507,6 @@
req.cnx.rollback()
except Exception:
pass # ignore rollback error at this point
- # request may be referenced by "onetime callback", so clear its entity
- # cache to avoid memory usage
- req.drop_entity_cache()
self.add_undo_link_to_msg(req)
self.debug('query %s executed in %s sec', req.relative_path(), clock() - tstart)
return result
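
Summing up the new per-request flow in `main_handle_request` (names taken
from the hunks above):

.. sourcecode:: python

    session = self.get_session(req)        # may raise AuthenticationError
    cnx = repoapi.ClientConnection(session)
    req.set_cnx(cnx)
    with cnx:                              # connection released afterwards
        content = self.core_handle(req, path)
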
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/web/cors.py Tue Jun 10 09:49:45 2014 +0200
@@ -0,0 +1,114 @@
+# -*- coding: utf-8 -*-
+# copyright 2014 Logilab, PARIS
+
+"""A set of utility functions to handle CORS requests
+
+Unless specified, all references in this file are related to:
+ http://www.w3.org/TR/cors
+
+The provided implementation roughly follows:
+ http://www.html5rocks.com/static/images/cors_server_flowchart.png
+
+See also:
+ https://developer.mozilla.org/en-US/docs/HTTP/Access_control_CORS
+
+"""
+
+import urlparse
+
+from cubicweb.web import LOGGER
+info = LOGGER.info
+
+class CORSFailed(Exception):
+ """Raised when cross origin resource sharing checks failed"""
+
+
+class CORSPreflight(Exception):
+ """Raised when cross origin resource sharing checks detects the
+ request as a valid preflight request"""
+
+
+def process_request(req, config):
+ """
+ Process a request to apply CORS specification algorithms
+
+ Check whether the CORS specification is respected and set corresponding
+ headers to ensure response complies with the specification.
+
+ In case of non-compliance, no CORS-related header is set.
+ """
+ base_url = urlparse.urlsplit(req.base_url())
+ expected_host = '://'.join((base_url.scheme, base_url.netloc))
+ if not req.get_header('Origin') or req.get_header('Origin') == expected_host:
+ # not a CORS request, nothing to do
+ return
+ try:
+ # handle cross origin resource sharing (CORS)
+ if req.http_method() == 'OPTIONS':
+ if req.get_header('Access-Control-Request-Method'):
+ # preflight CORS request
+ process_preflight(req, config)
+ else: # Simple CORS or actual request
+ process_simple(req, config)
+    except CORSFailed as exc:
+ info('Cross origin resource sharing failed: %s' % exc)
+ except CORSPreflight:
+        info('Cross origin resource sharing: valid preflight request')
+ raise
+
+def process_preflight(req, config):
+ """cross origin resource sharing (preflight)
+ Cf http://www.w3.org/TR/cors/#resource-preflight-requests
+ """
+ origin = check_origin(req, config)
+ allowed_methods = set(config['access-control-allow-methods'])
+ allowed_headers = set(config['access-control-allow-headers'])
+ try:
+ method = req.get_header('Access-Control-Request-Method')
+ except ValueError:
+ raise CORSFailed('Access-Control-Request-Method is incorrect')
+ if method not in allowed_methods:
+ raise CORSFailed('Method is not allowed')
+ try:
+ req.get_header('Access-Control-Request-Headers', ())
+ except ValueError:
+ raise CORSFailed('Access-Control-Request-Headers is incorrect')
+ req.set_header('Access-Control-Allow-Methods', allowed_methods, raw=False)
+ req.set_header('Access-Control-Allow-Headers', allowed_headers, raw=False)
+
+ process_common(req, config, origin)
+ raise CORSPreflight()
+
+def process_simple(req, config):
+ """Handle the Simple Cross-Origin Request case
+ """
+ origin = check_origin(req, config)
+ exposed_headers = config['access-control-expose-headers']
+ if exposed_headers:
+ req.set_header('Access-Control-Expose-Headers', exposed_headers, raw=False)
+ process_common(req, config, origin)
+
+def process_common(req, config, origin):
+ req.set_header('Access-Control-Allow-Origin', origin)
+    # in CW, we always support credentials/authentication
+ req.set_header('Access-Control-Allow-Credentials', 'true')
+
+def check_origin(req, config):
+ origin = req.get_header('Origin').lower()
+ allowed_origins = config.get('access-control-allow-origin')
+ if not allowed_origins:
+ raise CORSFailed('access-control-allow-origin is not configured')
+ if '*' not in allowed_origins and origin not in allowed_origins:
+ raise CORSFailed('Origin is not allowed')
+ # bit of sanity check; see "6.3 Security"
+ myhost = urlparse.urlsplit(req.base_url()).netloc
+ host = req.get_header('Host')
+ if host != myhost:
+        info('cross origin resource sharing detected a possible '
+             'DNS rebinding attack (Host header != host of base_url): '
+             '%s != %s' % (host, myhost))
+ raise CORSFailed('Host header and hostname do not match')
+ # include "Vary: Origin" header (see 6.4)
+ req.set_header('Vary', 'Origin')
+ return origin
+
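
The publisher wires these helpers into `core_handle` (see the
web/application.py hunk above); a condensed sketch of the calling
convention:

.. sourcecode:: python

    from cubicweb.web import cors

    try:
        cors.process_request(req, config)  # sets CORS headers or does nothing
    except cors.CORSPreflight:
        req.status_out = 200               # valid preflight: empty 200 answer
        result = ''
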
--- a/web/data/cubicweb.ajax.js Tue Jun 10 09:35:26 2014 +0200
+++ b/web/data/cubicweb.ajax.js Tue Jun 10 09:49:45 2014 +0200
@@ -312,7 +312,7 @@
$.extend(form, {
'fname': fname,
'pageid': pageid,
- 'arg': $.map(cw.utils.sliceList(arguments, 2), jQuery.toJSON)
+ 'arg': $.map(cw.utils.sliceList(arguments, 2), JSON.stringify)
});
return form;
}
@@ -338,7 +338,6 @@
} else if (this.size() < 1) {
cw.log('loadxhtml called without an element');
}
- var callback = null;
var node = this.get(0); // only consider the first element
if (cursor) {
setProgressCursor();
@@ -362,9 +361,6 @@
jQuery(node).append(domnode);
}
_postAjaxLoad(node);
- while (jQuery.isFunction(callback)) {
- callback = callback.apply(this, [domnode]);
- }
});
d.addErrback(remoteCallFailed);
if (cursor) {
@@ -749,7 +745,7 @@
var props = {
fname: fname,
pageid: pageid,
- arg: $.map(cw.utils.sliceList(arguments, 1), jQuery.toJSON)
+ arg: $.map(cw.utils.sliceList(arguments, 1), JSON.stringify)
};
var result = jQuery.ajax({
url: AJAX_BASE_URL,
@@ -769,7 +765,7 @@
var props = {
fname: fname,
pageid: pageid,
- arg: $.map(cw.utils.sliceList(arguments, 1), jQuery.toJSON)
+ arg: $.map(cw.utils.sliceList(arguments, 1), JSON.stringify)
};
// XXX we should inline the content of loadRemote here
var deferred = loadRemote(AJAX_BASE_URL, props, 'POST');
--- a/web/data/cubicweb.edition.js Tue Jun 10 09:35:26 2014 +0200
+++ b/web/data/cubicweb.edition.js Tue Jun 10 09:49:45 2014 +0200
@@ -26,7 +26,7 @@
var args = {
fname: 'prop_widget',
pageid: pageid,
- arg: $.map([key, varname, tabindex], jQuery.toJSON)
+ arg: $.map([key, varname, tabindex], JSON.stringify)
};
cw.jqNode('div:value:' + varname).loadxhtml(AJAX_BASE_URL, args, 'post');
}
--- a/web/data/cubicweb.js Tue Jun 10 09:35:26 2014 +0200
+++ b/web/data/cubicweb.js Tue Jun 10 09:49:45 2014 +0200
@@ -384,7 +384,7 @@
*/
strFuncCall: function(fname /* ...*/) {
return (fname + '(' +
- $.map(cw.utils.sliceList(arguments, 1), jQuery.toJSON).join(',')
+ $.map(cw.utils.sliceList(arguments, 1), JSON.stringify).join(',')
+ ')'
);
}
--- a/web/data/cubicweb.old.css Tue Jun 10 09:35:26 2014 +0200
+++ b/web/data/cubicweb.old.css Tue Jun 10 09:49:45 2014 +0200
@@ -264,32 +264,48 @@
/* header */
table#header {
- background: %(headerBg)s;
+ background-image: linear-gradient(white, #e2e2e2);
width: 100%;
+ border-bottom: 1px solid #bbb;
+ text-shadow: 1px 1px 0 #f5f5f5;
}
table#header td {
vertical-align: middle;
}
-table#header a {
- color: #000;
+table#header, table#header a {
+ color: #444;
}
+
table#header td#headtext {
white-space: nowrap;
+ padding: 0 10px;
+ width: 10%;
+}
+
+#logo{
+ width: 150px;
+ height: 42px;
+ background-image: url(logo-cubicweb.svg);
+ background-repeat: no-repeat;
+ background-position: center center;
+ background-size: contain;
+ float: left;
}
table#header td#header-right {
- padding-top: 1em;
white-space: nowrap;
+ width: 10%;
}
table#header td#header-center{
- width: 100%;
+ border-bottom-left-radius: 10px;
+ border-top-left-radius: 10px;
+ padding-left: 1em;
}
span#appliName {
font-weight: bold;
- color: #000;
white-space: nowrap;
}
@@ -641,6 +657,8 @@
div#userActionsBox {
width: 14em;
text-align: right;
+ display: inline-block;
+ padding-right: 10px;
}
div#userActionsBox a.popupMenu {
Binary file web/data/favicon.ico has changed
--- a/web/data/jquery.json.js Tue Jun 10 09:35:26 2014 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-(function($){function toIntegersAtLease(n)
-{return n<10?'0'+n:n;}
-Date.prototype.toJSON=function(date)
-{return date.getUTCFullYear()+'-'+
-toIntegersAtLease(date.getUTCMonth()+1)+'-'+
-toIntegersAtLease(date.getUTCDate());};var escapeable=/["\\\x00-\x1f\x7f-\x9f]/g;var meta={'\b':'\\b','\t':'\\t','\n':'\\n','\f':'\\f','\r':'\\r','"':'\\"','\\':'\\\\'}
-$.quoteString=function(string)
-{if(escapeable.test(string))
-{return'"'+string.replace(escapeable,function(a)
-{var c=meta[a];if(typeof c==='string'){return c;}
-c=a.charCodeAt();return'\\u00'+Math.floor(c/16).toString(16)+(c%16).toString(16);})+'"'}
-return'"'+string+'"';}
-$.toJSON=function(o)
-{var type=typeof(o);if(type=="undefined")
-return"undefined";else if(type=="number"||type=="boolean")
-return o+"";else if(o===null)
-return"null";if(type=="string")
-{return $.quoteString(o);}
-if(type=="object"&&typeof o.toJSON=="function")
-return o.toJSON();if(type!="function"&&typeof(o.length)=="number")
-{var ret=[];for(var i=0;i<o.length;i++){ret.push($.toJSON(o[i]));}
-return"["+ret.join(", ")+"]";}
-if(type=="function"){throw new TypeError("Unable to convert object of type 'function' to json.");}
-ret=[];for(var k in o){var name;var type=typeof(k);if(type=="number")
-name='"'+k+'"';else if(type=="string")
-name=$.quoteString(k);else
-continue;val=$.toJSON(o[k]);if(typeof(val)!="string"){continue;}
-ret.push(name+": "+val);}
-return"{"+ret.join(", ")+"}";}
-$.evalJSON=function(src)
-{return eval("("+src+")");}
-$.secureEvalJSON=function(src)
-{var filtered=src;filtered=filtered.replace(/\\["\\\/bfnrtu]/g,'@');filtered=filtered.replace(/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g,']');filtered=filtered.replace(/(?:^|:|,)(?:\s*\[)+/g,'');if(/^[\],:{}\s]*$/.test(filtered))
-return eval("("+src+")");else
-throw new SyntaxError("Error parsing JSON, source is not valid.");}})(jQuery);
\ No newline at end of file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/web/data/logo-cubicweb-gray.svg Tue Jun 10 09:49:45 2014 +0200
@@ -0,0 +1,151 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="260.68375"
+ height="40.011749"
+ id="svg4127"
+ version="1.1"
+ inkscape:version="0.48.3.1 r9886"
+ sodipodi:docname="logo-cubicweb-gray.svg">
+ <defs
+ id="defs4129" />
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="1.979899"
+ inkscape:cx="129.03681"
+ inkscape:cy="-31.754963"
+ inkscape:document-units="px"
+ inkscape:current-layer="layer1"
+ showgrid="false"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ inkscape:window-width="1362"
+ inkscape:window-height="729"
+ inkscape:window-x="0"
+ inkscape:window-y="18"
+ inkscape:window-maximized="0" />
+ <metadata
+ id="metadata4132">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="Calque 1"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(-327.77712,-550.4231)">
+ <g
+ transform="matrix(1.0580599,0,0,1.0580599,1312.264,209.71605)"
+ id="g3151-5"
+ style="font-size:32.60407639px;font-style:normal;font-weight:bold;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#404042;fill-opacity:1;stroke:none;font-family:orbitron;-inkscape-font-specification:orbitron">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3153-0"
+ style="fill:#404042;fill-opacity:1"
+ d="m -792.00021,355.53459 14.70444,0 0,4.27114 -14.70444,0 c -1.23896,0 -2.30402,-0.43473 -3.1952,-1.30417 -0.86944,-0.89117 -1.30416,-1.95624 -1.30416,-3.1952 l 0,-9.91164 c 0,-1.23894 0.43472,-2.29313 1.30416,-3.16259 0.89118,-0.89116 1.95624,-1.33675 3.1952,-1.33677 l 14.63923,0 0,4.27113 -14.63923,0 c -0.15215,2e-5 -0.22823,0.0761 -0.22822,0.22823 l 0,9.91164 c -10e-6,0.15216 0.0761,0.22824 0.22822,0.22823" />
+ <path
+ inkscape:connector-curvature="0"
+ style="fill:#404042;fill-opacity:1"
+ id="path3155-7"
+ d="m -833.03908,340.89536 4.27114,0 0,14.411 c -2e-5,1.23896 -0.44561,2.30403 -1.33677,3.1952 -0.86946,0.86944 -1.92366,1.30417 -3.1626,1.30417 l -10.20507,0 c -1.2607,0 -2.32576,-0.43473 -3.1952,-1.30417 -0.86945,-0.89117 -1.30417,-1.95624 -1.30416,-3.1952 l 0,-14.411 4.27113,0 0,14.411 c -10e-6,0.15216 0.0761,0.22824 0.22823,0.22823 l 10.20507,0 c 0.15214,1e-5 0.22822,-0.0761 0.22823,-0.22823 l 0,-14.411" />
+ <path
+ inkscape:connector-curvature="0"
+ style="fill:#404042;fill-opacity:1"
+ id="path3157-7"
+ d="m -811.05372,340.89536 c 1.23894,2e-5 2.29313,0.44561 3.16259,1.33677 0.89116,0.86946 1.33675,1.92365 1.33677,3.16259 l 0,9.91164 c -2e-5,1.23896 -0.44561,2.30403 -1.33677,3.1952 -0.86946,0.86944 -1.92365,1.30417 -3.16259,1.30417 l -14.70444,0 0,-25.10514 4.27113,0 0,6.19477 10.43331,0 m 0.22823,14.411 0,-9.91164 c -2e-5,-0.15213 -0.0761,-0.22821 -0.22823,-0.22823 l -10.20508,0 c -0.15216,2e-5 -0.22823,0.0761 -0.22823,0.22823 l 0,9.91164 c 0,0.15216 0.0761,0.22824 0.22823,0.22823 l 10.20508,0 c 0.15213,1e-5 0.22821,-0.0761 0.22823,-0.22823" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path3159-3"
+ style="fill:#404042;fill-opacity:1"
+ d="m -804.04487,359.80573 0,-18.91037 4.27114,0 0,18.91037 -4.27114,0 m 0,-25.10514 4.27114,0 0,4.30373 -4.27114,0 0,-4.30373" />
+ </g>
+ <path
+ style="font-size:51.94805145px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#404042;fill-opacity:1;stroke:none;font-family:Orbitron;-inkscape-font-specification:Orbitron Bold"
+ inkscape:connector-curvature="0"
+ d="m 411.51608,557.76053 -32.16667,0 c -0.25926,10e-5 -0.38889,0.12972 -0.38888,0.38895 l 0,24.55552 c -1e-5,0.25923 0.12962,0.38885 0.38888,0.38885 l 32.16667,0 0,7.33339 -32.16667,0 c -2.14815,0 -3.98148,-0.7408 -5.49999,-2.2222 -1.48149,-1.51861 -2.22223,-3.35185 -2.22223,-5.50004 l 0,-24.55552 c 0,-2.14819 0.74074,-3.96293 2.22223,-5.44443 1.51851,-1.51851 3.35184,-2.27781 5.49999,-2.27781 l 32.16667,0 0,7.33329"
+ id="path3161-2" />
+ <path
+ style="font-size:51.94805145px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#404042;fill-opacity:1;stroke:none;font-family:Orbitron;-inkscape-font-specification:Orbitron Bold"
+ d="m 541.44864,550.4231 7.77778,0 -14.55555,40 -5.77778,0 -10.3889,-28.38894 -10.38888,28.38894 -5.72222,0 -14.55556,-40 7.77778,0 9.66667,26.38886 9.66666,-26.38886 7.16667,0 9.66667,26.38886 9.66666,-26.38886"
+ id="path3163-5"
+ inkscape:connector-curvature="0" />
+ <g
+ id="g3165-0"
+ style="font-size:32.25769424px;font-style:normal;font-weight:bold;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#404042;fill-opacity:1;stroke:none;font-family:orbitron;-inkscape-font-specification:orbitron"
+ transform="matrix(1.0694444,0,0,1.0694444,1321.9048,205.52534)">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3167-8"
+ style="fill:#404042;fill-opacity:1"
+ d="m -712.66923,341.16755 c 1.22577,2e-5 2.26877,0.44087 3.12899,1.32257 0.88169,0.86022 1.32255,1.90322 1.32257,3.12899 l 0,7.03218 -14.77402,0 0,2.77416 c -1e-5,0.15054 0.0753,0.22581 0.2258,0.22581 l 14.54822,0 0,4.22575 -14.54822,0 c -1.2258,0 -2.27955,-0.4301 -3.16125,-1.2903 -0.86021,-0.88171 -1.29031,-1.93546 -1.29031,-3.16126 l 0,-9.80634 c 0,-1.22577 0.4301,-2.26877 1.29031,-3.12899 0.8817,-0.8817 1.93545,-1.32255 3.16125,-1.32257 l 10.09666,0 m -10.32246,7.22573 10.54826,0 0,-2.77417 c -10e-6,-0.15052 -0.0753,-0.22579 -0.2258,-0.2258 l -10.09666,0 c -0.15054,1e-5 -0.22581,0.0753 -0.2258,0.2258 l 0,2.77417" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path3169-7"
+ style="fill:#404042;fill-opacity:1"
+ d="m -690.26928,341.16755 c 1.22577,2e-5 2.26877,0.44087 3.12899,1.32257 0.88169,0.86022 1.32255,1.90322 1.32257,3.12899 l 0,9.80634 c -2e-5,1.2258 -0.44088,2.27955 -1.32257,3.16126 -0.86022,0.8602 -1.90322,1.2903 -3.12899,1.2903 l -14.54822,0 0,-24.83842 4.22576,0 0,6.12896 10.32246,0 m 0.2258,14.2579 0,-9.80634 c -10e-6,-0.15052 -0.0753,-0.22579 -0.2258,-0.2258 l -10.09666,0 c -0.15054,1e-5 -0.22581,0.0753 -0.2258,0.2258 l 0,9.80634 c -1e-5,0.15054 0.0753,0.22581 0.2258,0.22581 l 10.09666,0 c 0.15052,0 0.22579,-0.0753 0.2258,-0.22581" />
+ </g>
+ <g
+ transform="translate(156.68927,208.07625)"
+ id="g3408"
+ style="fill:#404042;fill-opacity:1">
+ <path
+ sodipodi:nodetypes="ccccc"
+ inkscape:connector-curvature="0"
+ id="path3410"
+ d="m 196.89624,349.49384 -8.06851,4.8684 0,10.62195 8.06851,-4.8684 z"
+ style="fill:#404042;fill-opacity:1;stroke:none" />
+ <path
+ sodipodi:nodetypes="ccccc"
+ inkscape:connector-curvature="0"
+ style="fill:#404042;fill-opacity:1;stroke:none"
+ d="m 188.28608,343.70506 8.00049,4.84516 -8.00049,4.78822 -8.00051,-4.78822 z"
+ id="path3412" />
+ <path
+ style="fill:#404042;fill-opacity:1;stroke:none"
+ d="m 187.69852,366.86825 -8.06851,4.8684 0,10.62195 8.06851,-4.8684 z"
+ id="path3414"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="ccccc" />
+ <path
+ id="path3416"
+ d="m 179.08836,361.07947 8.00049,4.84516 -8.00049,4.78822 -8.00051,-4.78822 z"
+ style="fill:#404042;fill-opacity:1;stroke:none"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="ccccc" />
+ <path
+ sodipodi:nodetypes="ccccc"
+ inkscape:connector-curvature="0"
+ id="path3418"
+ d="m 206.05702,366.86825 -8.06851,4.8684 0,10.62195 8.06851,-4.8684 z"
+ style="fill:#404042;fill-opacity:1;stroke:none" />
+ <path
+ sodipodi:nodetypes="cccccc"
+ inkscape:connector-curvature="0"
+ id="path3420"
+ d="m 197.41294,361.10753 -8.00027,4.84524 8.00686,4.78602 c 0.0406,-0.0455 6.64235,-2.44836 7.44329,-10.21298 -2.81627,1.46 -3.71023,2.32033 -7.44988,0.58175 z"
+ style="fill:#404042;fill-opacity:1;stroke:none" />
+ </g>
+ </g>
+</svg>
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/web/data/logo-cubicweb-icon.svg Tue Jun 10 09:49:45 2014 +0200
@@ -0,0 +1,100 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="34.96917"
+ height="38.653542"
+ id="svg4127"
+ version="1.1"
+ inkscape:version="0.48.3.1 r9886"
+ sodipodi:docname="logo-cubicweb-icon.svg">
+ <defs
+ id="defs4129" />
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="13.633938"
+ inkscape:cx="17.899925"
+ inkscape:cy="19.290099"
+ inkscape:document-units="px"
+ inkscape:current-layer="layer1"
+ showgrid="false"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ inkscape:window-width="1362"
+ inkscape:window-height="729"
+ inkscape:window-x="0"
+ inkscape:window-y="18"
+ inkscape:window-maximized="0" />
+ <metadata
+ id="metadata4132">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="Calque 1"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(-327.28442,-551.94182)">
+ <g
+ transform="translate(156.19657,208.23676)"
+ id="g3408">
+ <path
+ sodipodi:nodetypes="ccccc"
+ inkscape:connector-curvature="0"
+ id="path3410"
+ d="m 196.89624,349.49384 -8.06851,4.8684 0,10.62195 8.06851,-4.8684 z"
+ style="fill:#ff8800;fill-opacity:1;stroke:none" />
+ <path
+ sodipodi:nodetypes="ccccc"
+ inkscape:connector-curvature="0"
+ style="fill:#ff8800;fill-opacity:1;stroke:none"
+ d="m 188.28608,343.70506 8.00049,4.84516 -8.00049,4.78822 -8.00051,-4.78822 z"
+ id="path3412" />
+ <path
+ style="fill:#ff8800;fill-opacity:1;stroke:none"
+ d="m 187.69852,366.86825 -8.06851,4.8684 0,10.62195 8.06851,-4.8684 z"
+ id="path3414"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="ccccc" />
+ <path
+ id="path3416"
+ d="m 179.08836,361.07947 8.00049,4.84516 -8.00049,4.78822 -8.00051,-4.78822 z"
+ style="fill:#ff8800;fill-opacity:1;stroke:none"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="ccccc" />
+ <path
+ sodipodi:nodetypes="ccccc"
+ inkscape:connector-curvature="0"
+ id="path3418"
+ d="m 206.05702,366.86825 -8.06851,4.8684 0,10.62195 8.06851,-4.8684 z"
+ style="fill:#ff8800;fill-opacity:1;stroke:none" />
+ <path
+ sodipodi:nodetypes="cccccc"
+ inkscape:connector-curvature="0"
+ id="path3420"
+ d="m 197.41294,361.10753 -8.00027,4.84524 8.00686,4.78602 c 0.0406,-0.0455 6.64235,-2.44836 7.44329,-10.21298 -2.81627,1.46 -3.71023,2.32033 -7.44988,0.58175 z"
+ style="fill:#404042;fill-opacity:1;stroke:none" />
+ </g>
+ </g>
+</svg>
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/web/data/logo-cubicweb-text.svg Tue Jun 10 09:49:45 2014 +0200
@@ -0,0 +1,110 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="216.83368"
+ height="40.004139"
+ id="svg4127"
+ version="1.1"
+ inkscape:version="0.48.3.1 r9886"
+ sodipodi:docname="logo-cubicweb-text.svg">
+ <defs
+ id="defs4129" />
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="1.979899"
+ inkscape:cx="184.14583"
+ inkscape:cy="-31.557783"
+ inkscape:document-units="px"
+ inkscape:current-layer="layer1"
+ showgrid="false"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ inkscape:window-width="1362"
+ inkscape:window-height="1161"
+ inkscape:window-x="1920"
+ inkscape:window-y="18"
+ inkscape:window-maximized="0" />
+ <metadata
+ id="metadata4132">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title></dc:title>
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="Calque 1"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(-370.6529,-550.62789)">
+ <g
+ transform="matrix(1.0580599,0,0,1.0580599,1311.2897,209.92084)"
+ id="g3151"
+ style="font-size:32.60407639px;font-style:normal;font-weight:bold;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#2b0000;fill-opacity:1;stroke:none;font-family:orbitron;-inkscape-font-specification:orbitron">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3153"
+ style="fill:#404042;fill-opacity:1"
+ d="m -792.00021,355.53459 14.70444,0 0,4.27114 -14.70444,0 c -1.23896,0 -2.30402,-0.43473 -3.1952,-1.30417 -0.86944,-0.89117 -1.30416,-1.95624 -1.30416,-3.1952 l 0,-9.91164 c 0,-1.23894 0.43472,-2.29313 1.30416,-3.16259 0.89118,-0.89116 1.95624,-1.33675 3.1952,-1.33677 l 14.63923,0 0,4.27113 -14.63923,0 c -0.15215,2e-5 -0.22823,0.0761 -0.22822,0.22823 l 0,9.91164 c -10e-6,0.15216 0.0761,0.22824 0.22822,0.22823" />
+ <path
+ inkscape:connector-curvature="0"
+ style="fill:#404042;fill-opacity:1"
+ id="path3155"
+ d="m -833.03908,340.89536 4.27114,0 0,14.411 c -2e-5,1.23896 -0.44561,2.30403 -1.33677,3.1952 -0.86946,0.86944 -1.92366,1.30417 -3.1626,1.30417 l -10.20507,0 c -1.2607,0 -2.32576,-0.43473 -3.1952,-1.30417 -0.86945,-0.89117 -1.30417,-1.95624 -1.30416,-3.1952 l 0,-14.411 4.27113,0 0,14.411 c -10e-6,0.15216 0.0761,0.22824 0.22823,0.22823 l 10.20507,0 c 0.15214,1e-5 0.22822,-0.0761 0.22823,-0.22823 l 0,-14.411" />
+ <path
+ inkscape:connector-curvature="0"
+ style="fill:#404042;fill-opacity:1"
+ id="path3157"
+ d="m -811.05372,340.89536 c 1.23894,2e-5 2.29313,0.44561 3.16259,1.33677 0.89116,0.86946 1.33675,1.92365 1.33677,3.16259 l 0,9.91164 c -2e-5,1.23896 -0.44561,2.30403 -1.33677,3.1952 -0.86946,0.86944 -1.92365,1.30417 -3.16259,1.30417 l -14.70444,0 0,-25.10514 4.27113,0 0,6.19477 10.43331,0 m 0.22823,14.411 0,-9.91164 c -2e-5,-0.15213 -0.0761,-0.22821 -0.22823,-0.22823 l -10.20508,0 c -0.15216,2e-5 -0.22823,0.0761 -0.22823,0.22823 l 0,9.91164 c 0,0.15216 0.0761,0.22824 0.22823,0.22823 l 10.20508,0 c 0.15213,1e-5 0.22821,-0.0761 0.22823,-0.22823" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path3159"
+ style="fill:#404042;fill-opacity:1"
+ d="m -804.04487,359.80573 0,-18.91037 4.27114,0 0,18.91037 -4.27114,0 m 0,-25.10514 4.27114,0 0,4.30373 -4.27114,0 0,-4.30373" />
+ </g>
+ <path
+ style="font-size:51.94805145px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#404042;fill-opacity:1;stroke:none;font-family:Orbitron;-inkscape-font-specification:Orbitron Bold"
+ inkscape:connector-curvature="0"
+ d="m 410.54179,557.96532 -32.16667,0 c -0.25926,10e-5 -0.38889,0.12972 -0.38888,0.38895 l 0,24.55552 c -1e-5,0.25923 0.12962,0.38885 0.38888,0.38885 l 32.16667,0 0,7.33339 -32.16667,0 c -2.14815,0 -3.98148,-0.7408 -5.49999,-2.2222 -1.48149,-1.51861 -2.22223,-3.35185 -2.22223,-5.50004 l 0,-24.55552 c 0,-2.14819 0.74074,-3.96293 2.22223,-5.44443 1.51851,-1.51851 3.35184,-2.27781 5.49999,-2.27781 l 32.16667,0 0,7.33329"
+ id="path3161" />
+ <path
+ style="font-size:51.94805145px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#ff8800;fill-opacity:1;stroke:none;font-family:Orbitron;-inkscape-font-specification:Orbitron Bold"
+ d="m 540.47435,550.62789 7.77778,0 -14.55555,40 -5.77778,0 -10.3889,-28.38894 -10.38888,28.38894 -5.72222,0 -14.55556,-40 7.77778,0 9.66667,26.38886 9.66666,-26.38886 7.16667,0 9.66667,26.38886 9.66666,-26.38886"
+ id="path3163"
+ inkscape:connector-curvature="0" />
+ <g
+ id="g3165"
+ style="font-size:32.25769424px;font-style:normal;font-weight:bold;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#ff8800;fill-opacity:1;stroke:none;font-family:orbitron;-inkscape-font-specification:orbitron"
+ transform="matrix(1.0694444,0,0,1.0694444,1320.9305,205.73013)">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3167"
+ style="fill:#ff8800;fill-opacity:1"
+ d="m -712.66923,341.16755 c 1.22577,2e-5 2.26877,0.44087 3.12899,1.32257 0.88169,0.86022 1.32255,1.90322 1.32257,3.12899 l 0,7.03218 -14.77402,0 0,2.77416 c -1e-5,0.15054 0.0753,0.22581 0.2258,0.22581 l 14.54822,0 0,4.22575 -14.54822,0 c -1.2258,0 -2.27955,-0.4301 -3.16125,-1.2903 -0.86021,-0.88171 -1.29031,-1.93546 -1.29031,-3.16126 l 0,-9.80634 c 0,-1.22577 0.4301,-2.26877 1.29031,-3.12899 0.8817,-0.8817 1.93545,-1.32255 3.16125,-1.32257 l 10.09666,0 m -10.32246,7.22573 10.54826,0 0,-2.77417 c -10e-6,-0.15052 -0.0753,-0.22579 -0.2258,-0.2258 l -10.09666,0 c -0.15054,1e-5 -0.22581,0.0753 -0.2258,0.2258 l 0,2.77417" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path3169"
+ style="fill:#ff8800;fill-opacity:1"
+ d="m -690.26928,341.16755 c 1.22577,2e-5 2.26877,0.44087 3.12899,1.32257 0.88169,0.86022 1.32255,1.90322 1.32257,3.12899 l 0,9.80634 c -2e-5,1.2258 -0.44088,2.27955 -1.32257,3.16126 -0.86022,0.8602 -1.90322,1.2903 -3.12899,1.2903 l -14.54822,0 0,-24.83842 4.22576,0 0,6.12896 10.32246,0 m 0.2258,14.2579 0,-9.80634 c -10e-6,-0.15052 -0.0753,-0.22579 -0.2258,-0.2258 l -10.09666,0 c -0.15054,1e-5 -0.22581,0.0753 -0.2258,0.2258 l 0,9.80634 c -1e-5,0.15054 0.0753,0.22581 0.2258,0.22581 l 10.09666,0 c 0.15052,0 0.22579,-0.0753 0.2258,-0.22581" />
+ </g>
+ </g>
+</svg>
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/web/data/logo-cubicweb.svg Tue Jun 10 09:49:45 2014 +0200
@@ -0,0 +1,157 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="260.68744"
+ height="40.004143"
+ id="svg4127"
+ version="1.1"
+ inkscape:version="0.48.3.1 r9886"
+ sodipodi:docname="logo-cubicweb.svg">
+ <defs
+ id="defs4129" />
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="5.6"
+ inkscape:cx="65.025864"
+ inkscape:cy="3.1272067"
+ inkscape:document-units="px"
+ inkscape:current-layer="layer1"
+ showgrid="false"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ inkscape:window-width="1916"
+ inkscape:window-height="1161"
+ inkscape:window-x="1366"
+ inkscape:window-y="18"
+ inkscape:window-maximized="0"
+ showguides="true"
+ inkscape:guide-bbox="true">
+ <sodipodi:guide
+ orientation="1,0"
+ position="-144.19927,66.164991"
+ id="guide3458" />
+ </sodipodi:namedview>
+ <metadata
+ id="metadata4132">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="Calque 1"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(-326.79915,-550.62789)">
+ <g
+ transform="matrix(1.0580599,0,0,1.0580599,1311.2897,209.92084)"
+ id="g3151"
+ style="font-size:32.60407639px;font-style:normal;font-weight:bold;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#2b0000;fill-opacity:1;stroke:none;font-family:orbitron;-inkscape-font-specification:orbitron">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3153"
+ style="fill:#404042;fill-opacity:1"
+ d="m -792.00021,355.53459 14.70444,0 0,4.27114 -14.70444,0 c -1.23896,0 -2.30402,-0.43473 -3.1952,-1.30417 -0.86944,-0.89117 -1.30416,-1.95624 -1.30416,-3.1952 l 0,-9.91164 c 0,-1.23894 0.43472,-2.29313 1.30416,-3.16259 0.89118,-0.89116 1.95624,-1.33675 3.1952,-1.33677 l 14.63923,0 0,4.27113 -14.63923,0 c -0.15215,2e-5 -0.22823,0.0761 -0.22822,0.22823 l 0,9.91164 c -10e-6,0.15216 0.0761,0.22824 0.22822,0.22823" />
+ <path
+ inkscape:connector-curvature="0"
+ style="fill:#404042;fill-opacity:1"
+ id="path3155"
+ d="m -833.03908,340.89536 4.27114,0 0,14.411 c -2e-5,1.23896 -0.44561,2.30403 -1.33677,3.1952 -0.86946,0.86944 -1.92366,1.30417 -3.1626,1.30417 l -10.20507,0 c -1.2607,0 -2.32576,-0.43473 -3.1952,-1.30417 -0.86945,-0.89117 -1.30417,-1.95624 -1.30416,-3.1952 l 0,-14.411 4.27113,0 0,14.411 c -10e-6,0.15216 0.0761,0.22824 0.22823,0.22823 l 10.20507,0 c 0.15214,1e-5 0.22822,-0.0761 0.22823,-0.22823 l 0,-14.411" />
+ <path
+ inkscape:connector-curvature="0"
+ style="fill:#404042;fill-opacity:1"
+ id="path3157"
+ d="m -811.05372,340.89536 c 1.23894,2e-5 2.29313,0.44561 3.16259,1.33677 0.89116,0.86946 1.33675,1.92365 1.33677,3.16259 l 0,9.91164 c -2e-5,1.23896 -0.44561,2.30403 -1.33677,3.1952 -0.86946,0.86944 -1.92365,1.30417 -3.16259,1.30417 l -14.70444,0 0,-25.10514 4.27113,0 0,6.19477 10.43331,0 m 0.22823,14.411 0,-9.91164 c -2e-5,-0.15213 -0.0761,-0.22821 -0.22823,-0.22823 l -10.20508,0 c -0.15216,2e-5 -0.22823,0.0761 -0.22823,0.22823 l 0,9.91164 c 0,0.15216 0.0761,0.22824 0.22823,0.22823 l 10.20508,0 c 0.15213,1e-5 0.22821,-0.0761 0.22823,-0.22823" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path3159"
+ style="fill:#404042;fill-opacity:1"
+ d="m -804.04487,359.80573 0,-18.91037 4.27114,0 0,18.91037 -4.27114,0 m 0,-25.10514 4.27114,0 0,4.30373 -4.27114,0 0,-4.30373" />
+ </g>
+ <path
+ style="font-size:51.94805145px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#404042;fill-opacity:1;stroke:none;font-family:Orbitron;-inkscape-font-specification:Orbitron Bold"
+ inkscape:connector-curvature="0"
+ d="m 410.54179,557.96532 -32.16667,0 c -0.25926,10e-5 -0.38889,0.12972 -0.38888,0.38895 l 0,24.55552 c -1e-5,0.25923 0.12962,0.38885 0.38888,0.38885 l 32.16667,0 0,7.33339 -32.16667,0 c -2.14815,0 -3.98148,-0.7408 -5.49999,-2.2222 -1.48149,-1.51861 -2.22223,-3.35185 -2.22223,-5.50004 l 0,-24.55552 c 0,-2.14819 0.74074,-3.96293 2.22223,-5.44443 1.51851,-1.51851 3.35184,-2.27781 5.49999,-2.27781 l 32.16667,0 0,7.33329"
+ id="path3161" />
+ <path
+ style="font-size:51.94805145px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#ff8800;fill-opacity:1;stroke:none;font-family:Orbitron;-inkscape-font-specification:Orbitron Bold"
+ d="m 540.47435,550.62789 7.77778,0 -14.55555,40 -5.77778,0 -10.3889,-28.38894 -10.38888,28.38894 -5.72222,0 -14.55556,-40 7.77778,0 9.66667,26.38886 9.66666,-26.38886 7.16667,0 9.66667,26.38886 9.66666,-26.38886"
+ id="path3163"
+ inkscape:connector-curvature="0" />
+ <g
+ id="g3165"
+ style="font-size:32.25769424px;font-style:normal;font-weight:bold;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#ff8800;fill-opacity:1;stroke:none;font-family:orbitron;-inkscape-font-specification:orbitron"
+ transform="matrix(1.0694444,0,0,1.0694444,1320.9305,205.73013)">
+ <path
+ inkscape:connector-curvature="0"
+ id="path3167"
+ style="fill:#ff8800;fill-opacity:1"
+ d="m -712.66923,341.16755 c 1.22577,2e-5 2.26877,0.44087 3.12899,1.32257 0.88169,0.86022 1.32255,1.90322 1.32257,3.12899 l 0,7.03218 -14.77402,0 0,2.77416 c -1e-5,0.15054 0.0753,0.22581 0.2258,0.22581 l 14.54822,0 0,4.22575 -14.54822,0 c -1.2258,0 -2.27955,-0.4301 -3.16125,-1.2903 -0.86021,-0.88171 -1.29031,-1.93546 -1.29031,-3.16126 l 0,-9.80634 c 0,-1.22577 0.4301,-2.26877 1.29031,-3.12899 0.8817,-0.8817 1.93545,-1.32255 3.16125,-1.32257 l 10.09666,0 m -10.32246,7.22573 10.54826,0 0,-2.77417 c -10e-6,-0.15052 -0.0753,-0.22579 -0.2258,-0.2258 l -10.09666,0 c -0.15054,1e-5 -0.22581,0.0753 -0.2258,0.2258 l 0,2.77417" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path3169"
+ style="fill:#ff8800;fill-opacity:1"
+ d="m -690.26928,341.16755 c 1.22577,2e-5 2.26877,0.44087 3.12899,1.32257 0.88169,0.86022 1.32255,1.90322 1.32257,3.12899 l 0,9.80634 c -2e-5,1.2258 -0.44088,2.27955 -1.32257,3.16126 -0.86022,0.8602 -1.90322,1.2903 -3.12899,1.2903 l -14.54822,0 0,-24.83842 4.22576,0 0,6.12896 10.32246,0 m 0.2258,14.2579 0,-9.80634 c -10e-6,-0.15052 -0.0753,-0.22579 -0.2258,-0.2258 l -10.09666,0 c -0.15054,1e-5 -0.22581,0.0753 -0.2258,0.2258 l 0,9.80634 c -1e-5,0.15054 0.0753,0.22581 0.2258,0.22581 l 10.09666,0 c 0.15052,0 0.22579,-0.0753 0.2258,-0.22581" />
+ </g>
+ <g
+ transform="translate(155.71498,208.28104)"
+ id="g3408">
+ <path
+ sodipodi:nodetypes="ccccc"
+ inkscape:connector-curvature="0"
+ id="path3410"
+ d="m 196.89624,349.49384 -8.06851,4.8684 0,10.62195 8.06851,-4.8684 z"
+ style="fill:#ff8800;fill-opacity:1;stroke:none" />
+ <path
+ sodipodi:nodetypes="ccccc"
+ inkscape:connector-curvature="0"
+ style="fill:#ff8800;fill-opacity:1;stroke:none"
+ d="m 188.28608,343.70506 8.00049,4.84516 -8.00049,4.78822 -8.00051,-4.78822 z"
+ id="path3412" />
+ <path
+ style="fill:#ff8800;fill-opacity:1;stroke:none"
+ d="m 187.69852,366.86825 -8.06851,4.8684 0,10.62195 8.06851,-4.8684 z"
+ id="path3414"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="ccccc" />
+ <path
+ id="path3416"
+ d="m 179.08836,361.07947 8.00049,4.84516 -8.00049,4.78822 -8.00051,-4.78822 z"
+ style="fill:#ff8800;fill-opacity:1;stroke:none"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="ccccc" />
+ <path
+ sodipodi:nodetypes="ccccc"
+ inkscape:connector-curvature="0"
+ id="path3418"
+ d="m 206.05702,366.86825 -8.06851,4.8684 0,10.62195 8.06851,-4.8684 z"
+ style="fill:#ff8800;fill-opacity:1;stroke:none" />
+ <path
+ sodipodi:nodetypes="cccccc"
+ inkscape:connector-curvature="0"
+ id="path3420"
+ d="m 197.41294,361.10753 -8.00027,4.84524 8.00686,4.78602 c 0.0406,-0.0455 6.64235,-2.44836 7.44329,-10.21298 -2.81627,1.46 -3.71023,2.32033 -7.44988,0.58175 z"
+ style="fill:#404042;fill-opacity:1;stroke:none" />
+ </g>
+ </g>
+</svg>
--- a/web/data/uiprops.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/data/uiprops.py Tue Jun 10 09:49:45 2014 +0200
@@ -10,7 +10,6 @@
# Javascripts files to include systematically in HTML headers
JAVASCRIPTS = [data('jquery.js'),
data('jquery-migrate.js'),
- data('jquery.json.js'),
data('cubicweb.js'),
data('cubicweb.compat.js'),
data('cubicweb.python.js'),
--- a/web/facet.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/facet.py Tue Jun 10 09:49:45 2014 +0200
@@ -34,6 +34,9 @@
.. autoclass:: cubicweb.web.facet.RangeFacet
.. autoclass:: cubicweb.web.facet.DateRangeFacet
.. autoclass:: cubicweb.web.facet.BitFieldFacet
+.. autoclass:: cubicweb.web.facet.AbstractRangeRQLPathFacet
+.. autoclass:: cubicweb.web.facet.RangeRQLPathFacet
+.. autoclass:: cubicweb.web.facet.DateRangeRQLPathFacet
Classes for facets implementor
------------------------------
@@ -1301,7 +1304,6 @@
self.target_attr_type, operator)
-
class DateRangeFacet(RangeFacet):
"""This class works similarly as the :class:`RangeFacet` but for attribute
of date type.
@@ -1325,6 +1327,110 @@
return '"%s"' % ustrftime(date_value, '%Y/%m/%d')
+class AbstractRangeRQLPathFacet(RQLPathFacet):
+ """
+ The :class:`AbstractRangeRQLPathFacet` is the base class for
+ RQLPathFacet-type facets allowing the use of RangeWidgets-like
+    widgets (such as :class:`FacetRangeWidget` or
+    :class:`DateFacetRangeWidget`) on the parent :class:`RQLPathFacet`
+ target attribute.
+ """
+ __abstract__ = True
+
+ def vocabulary(self):
+ """return vocabulary for this facet, eg a list of (label,
+ value)"""
+ select = self.select
+ select.save_state()
+ try:
+ filtered_variable = self.filtered_variable
+ cleanup_select(select, filtered_variable)
+ varmap, restrvar = self.add_path_to_select()
+ if self.label_variable:
+ attrvar = varmap[self.label_variable]
+ else:
+ attrvar = restrvar
+ # start RangeRQLPathFacet
+ minf = nodes.Function('MIN')
+ minf.append(nodes.VariableRef(restrvar))
+ select.add_selected(minf)
+ maxf = nodes.Function('MAX')
+ maxf.append(nodes.VariableRef(restrvar))
+ select.add_selected(maxf)
+            # add an 'is' type restriction if necessary
+ if filtered_variable.stinfo['typerel'] is None:
+ etypes = frozenset(sol[filtered_variable.name] for sol in select.solutions)
+ select.add_type_restriction(filtered_variable, etypes)
+ # end RangeRQLPathFacet
+ try:
+ rset = self.rqlexec(select.as_string(), self.cw_rset.args)
+ except Exception:
+ self.exception('error while getting vocabulary for %s, rql: %s',
+ self, select.as_string())
+ return ()
+ finally:
+ select.recover()
+ # don't call rset_vocabulary on empty result set, it may be an empty
+ # *list* (see rqlexec implementation)
+ if rset:
+ minv, maxv = rset[0]
+ return [(unicode(minv), minv), (unicode(maxv), maxv)]
+ return []
+
+
+ def possible_values(self):
+ """return a list of possible values (as string since it's used to
+ compare to a form value in javascript) for this facet
+ """
+ return [strval for strval, val in self.vocabulary()]
+
+ def add_rql_restrictions(self):
+ infvalue = self.infvalue()
+ supvalue = self.supvalue()
+ if infvalue is None or supvalue is None: # nothing sent
+ return
+ varmap, restrvar = self.add_path_to_select(
+ skiplabel=True, skipattrfilter=True)
+ restrel = None
+ for part in self.path:
+ if isinstance(part, basestring):
+ part = part.split()
+ subject, rtype, object = part
+ if object == self.filter_variable:
+ restrel = rtype
+ assert restrel
+ # when a value is equal to one of the limit, don't add the restriction,
+ # else we filter out NULL values implicitly
+ if infvalue != self.infvalue(min=True):
+            self._add_restriction(infvalue, '>=', restrvar, restrel)
+ if supvalue != self.supvalue(max=True):
+ self._add_restriction(supvalue, '<=', restrvar, restrel)
+
+ def _add_restriction(self, value, operator, restrvar, restrel):
+ self.select.add_constant_restriction(restrvar,
+ restrel,
+ self.formatvalue(value),
+ self.target_attr_type, operator)
+
+
+class RangeRQLPathFacet(AbstractRangeRQLPathFacet, RQLPathFacet):
+ """
+ The :class:`RangeRQLPathFacet` uses the :class:`FacetRangeWidget`
+ on the :class:`AbstractRangeRQLPathFacet` target attribute
+ """
+ pass
+
+
+class DateRangeRQLPathFacet(AbstractRangeRQLPathFacet, DateRangeFacet):
+ """
+ The :class:`DateRangeRQLPathFacet` uses the
+ :class:`DateFacetRangeWidget` on the
+ :class:`AbstractRangeRQLPathFacet` target attribute
+ """
+ pass
+
+
class HasRelationFacet(AbstractFacet):
"""This class simply filter according to the presence of a relation
(whatever the entity at the other end). It display a simple checkbox that
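For application authors, a minimal sketch of how the new range path facets could be declared — the `Book` schema, relation names and regid below are hypothetical, not part of the patch:

    # Hypothetical declaration sketch: filter Book entities on a date
    # reached through an intermediate entity, using the range widget
    # machinery introduced above.
    from cubicweb.predicates import is_instance
    from cubicweb.web import facet

    class PublisherFoundationFacet(facet.DateRangeRQLPathFacet):
        __regid__ = 'book-publisher-foundation-facet'
        __select__ = (facet.DateRangeRQLPathFacet.__select__
                      & is_instance('Book'))
        # each path element is a 'subject relation object' triple; the
        # filter_variable names the variable the range restriction applies to
        path = ['X published_by P', 'P founded_on D']
        filter_variable = 'D'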
--- a/web/formfields.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/formfields.py Tue Jun 10 09:49:45 2014 +0200
@@ -1033,6 +1033,10 @@
# while it has no value, hence generating a false error.
return list(self.fields)
+ @property
+ def needs_multipart(self):
+ return any(f.needs_multipart for f in self.fields)
+
class RelationField(Field):
"""Use this field to edit a relation of an entity.
--- a/web/formwidgets.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/formwidgets.py Tue Jun 10 09:49:45 2014 +0200
@@ -34,6 +34,7 @@
.. autoclass:: cubicweb.web.formwidgets.HiddenInput
.. autoclass:: cubicweb.web.formwidgets.TextInput
+.. autoclass:: cubicweb.web.formwidgets.EmailInput
.. autoclass:: cubicweb.web.formwidgets.PasswordSingleInput
.. autoclass:: cubicweb.web.formwidgets.FileInput
.. autoclass:: cubicweb.web.formwidgets.ButtonInput
@@ -314,6 +315,11 @@
type = 'text'
+class EmailInput(Input):
+ """Simple <input type='email'>, will return a unicode string."""
+ type = 'email'
+
+
class PasswordSingleInput(Input):
"""Simple <input type='password'>, will return a utf-8 encoded string.
--- a/web/http_headers.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/http_headers.py Tue Jun 10 09:49:45 2014 +0200
@@ -8,6 +8,7 @@
from calendar import timegm
import base64
import re
+import urlparse
def dashCapitalize(s):
''' Capitalize a string, making sure to treat - as a word seperator '''
@@ -27,11 +28,11 @@
def casemappingify(d):
global header_case_mapping
- newd = dict([(key.lower(),key) for key in d])
+ newd = dict([(key.lower(), key) for key in d])
header_case_mapping.update(newd)
def lowerify(d):
- return dict([(key.lower(),value) for key,value in d.items()])
+ return dict([(key.lower(), value) for key, value in d.items()])
class HeaderHandler(object):
@@ -73,13 +74,13 @@
try:
for p in parser:
- # print "Parsing %s: %s(%s)" % (name, repr(p), repr(h))
+ #print "==> Parsing %s: %s(%s)" % (name, repr(p), repr(header))
header = p(header)
# if isinstance(h, types.GeneratorType):
- # h=list(h)
+ # h = list(h)
except ValueError as v:
# print v
- header=None
+ header = None
return header
@@ -187,7 +188,7 @@
# Two digit year, yucko.
day, month, year = parts[1].split('-')
time = parts[2]
- year=int(year)
+ year = int(year)
if year < 69:
year = year + 2000
elif year < 100:
@@ -242,8 +243,8 @@
Takes a raw header value (list of strings), and
Returns a generator of strings and Token class instances.
"""
- tokens=http_tokens
- ctls=http_ctls
+ tokens = http_tokens
+ ctls = http_ctls
string = ",".join(header)
list = []
@@ -265,7 +266,7 @@
elif x == '"':
quoted = False
yield qstring+string[start:cur]
- qstring=None
+ qstring = None
start = cur+1
elif x in tokens:
if start != cur:
@@ -339,7 +340,7 @@
hurt anything, in any case.
"""
- l=[]
+ l = []
for x in seq:
if not isinstance(x, Token):
l.append(x)
@@ -353,16 +354,16 @@
def parseKeyValue(val):
if len(val) == 1:
- return val[0],None
+ return val[0], None
elif len(val) == 3 and val[1] == Token('='):
- return val[0],val[2]
+ return val[0], val[2]
raise ValueError, "Expected key or key=value, but got %s." % (val,)
def parseArgs(field):
- args=split(field, Token(';'))
+ args = split(field, Token(';'))
val = args.next()
args = [parseKeyValue(arg) for arg in args]
- return val,args
+ return val, args
def listParser(fun):
"""Return a function which applies 'fun' to every element in the
@@ -377,8 +378,44 @@
def last(seq):
"""Return seq[-1]"""
+ return seq[-1]
- return seq[-1]
+def unique(seq):
+    '''if seq is not a string, check that it is a one-element sequence and return that element'''
+ if isinstance(seq, basestring):
+ return seq
+ if len(seq) != 1:
+ raise ValueError('single value required, not %s' % seq)
+ return seq[0]
+
+def parseHTTPMethod(method):
+ """Ensure a HTTP method is valid according the rfc2616, but extension-method ones"""
+ method = method.strip()
+ if method not in ("OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE",
+ "TRACE", "CONNECT"):
+ raise ValueError('Unsupported HTTP method %s' % method)
+ return method
+
+def parseAllowOrigin(origin):
+ """Ensure origin is a valid URL-base stuff, or null"""
+ if origin == 'null':
+ return origin
+ p = urlparse.urlparse(origin)
+ if p.params or p.query or p.username or p.path not in ('', '/'):
+        raise ValueError('Incorrect Access-Control-Allow-Origin value %s' % origin)
+    if p.scheme not in ('http', 'https'):
+        raise ValueError('Unsupported Access-Control-Allow-Origin URL scheme %s' % origin)
+    if not p.netloc:
+        raise ValueError('Access-Control-Allow-Origin: host name cannot be unset (%s)' % origin)
+ return origin
+
+def parseAllowCreds(cred):
+ """Can be "true" """
+ if cred:
+ cred = cred.lower()
+ if cred and cred != 'true':
+ raise ValueError('Accept-Control-Allow-Credentials can only be "true" (%s)' % cred)
+ return cred
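Expected behaviour of the new CORS helpers, as a sketch (values chosen for illustration):

    parseHTTPMethod(' GET ')                 # -> 'GET'
    parseAllowOrigin('https://example.org')  # -> 'https://example.org'
    parseAllowOrigin('null')                 # -> 'null' (allowed by the spec)
    parseAllowCreds('TRUE')                  # -> 'true' (lower-cased)
    # anything else raises ValueError, e.g. an unsupported scheme:
    # parseAllowOrigin('ftp://example.org')  -> ValueError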
##### Generation utilities
def quoteString(s):
@@ -401,11 +438,11 @@
def generateKeyValues(kvs):
l = []
# print kvs
- for k,v in kvs:
+ for k, v in kvs:
if v is None:
l.append('%s' % k)
else:
- l.append('%s=%s' % (k,v))
+ l.append('%s=%s' % (k, v))
return ";".join(l)
@@ -453,7 +490,7 @@
##### Specific header parsers.
def parseAccept(field):
- type,args = parseArgs(field)
+ type, args = parseArgs(field)
if len(type) != 3 or type[1] != Token('/'):
raise ValueError, "MIME Type "+str(type)+" invalid."
@@ -465,30 +502,30 @@
num = 0
for arg in args:
if arg[0] == 'q':
- mimeparams=tuple(args[0:num])
- params=args[num:]
+ mimeparams = tuple(args[0:num])
+ params = args[num:]
break
num = num + 1
else:
- mimeparams=tuple(args)
- params=[]
+ mimeparams = tuple(args)
+ params = []
# Default values for parameters:
qval = 1.0
# Parse accept parameters:
for param in params:
- if param[0] =='q':
+ if param[0] == 'q':
qval = float(param[1])
else:
# Warn? ignored parameter.
pass
- ret = MimeType(type[0],type[2],mimeparams),qval
+ ret = MimeType(type[0], type[2], mimeparams), qval
return ret
def parseAcceptQvalue(field):
- type,args=parseArgs(field)
+ type, args = parseArgs(field)
type = checkSingleToken(type)
@@ -496,7 +533,7 @@
for arg in args:
if arg[0] == 'q':
qvalue = float(arg[1])
- return type,qvalue
+ return type, qvalue
def addDefaultCharset(charsets):
if charsets.get('*') is None and charsets.get('iso-8859-1') is None:
@@ -516,7 +553,7 @@
# Content-Type: multipart/form-data; boundary=CaSeFuLsTuFf
# So, we need to explicitly .lower() the type/subtype and arg keys.
- type,args = parseArgs(header)
+ type, args = parseArgs(header)
if len(type) != 3 or type[1] != Token('/'):
raise ValueError, "MIME Type "+str(type)+" invalid."
@@ -535,14 +572,14 @@
"""Parse a content-range header into (kind, start, end, realLength).
realLength might be None if real length is not known ('*').
- start and end might be None if start,end unspecified (for response code 416)
+ start and end might be None if start, end unspecified (for response code 416)
"""
kind, other = header.strip().split()
if kind.lower() != "bytes":
raise ValueError("a range of type %r is not supported")
startend, realLength = other.split("/")
if startend.strip() == '*':
- start,end=None,None
+ start, end = None, None
else:
start, end = map(int, startend.split("-"))
if realLength == "*":
@@ -552,9 +589,9 @@
return (kind, start, end, realLength)
def parseExpect(field):
- type,args=parseArgs(field)
+ type, args = parseArgs(field)
- type=parseKeyValue(type)
+ type = parseKeyValue(type)
return (type[0], (lambda *args:args)(type[1], *args))
def parseExpires(header):
@@ -586,16 +623,16 @@
if len(range) < 3 or range[1] != Token('='):
raise ValueError("Invalid range header format: %s" %(range,))
- type=range[0]
+ type = range[0]
if type != 'bytes':
raise ValueError("Unknown range unit: %s." % (type,))
- rangeset=split(range[2:], Token(','))
+ rangeset = split(range[2:], Token(','))
ranges = []
for byterangespec in rangeset:
if len(byterangespec) != 1:
raise ValueError("Invalid range header format: %s" % (range,))
- start,end=byterangespec[0].split('-')
+ start, end = byterangespec[0].split('-')
if not start and not end:
raise ValueError("Invalid range header format: %s" % (range,))
@@ -612,8 +649,8 @@
if start and end and start > end:
raise ValueError("Invalid range header, start > end: %s" % (range,))
- ranges.append((start,end))
- return type,ranges
+ ranges.append((start, end))
+ return type, ranges
def parseRetryAfter(header):
try:
@@ -676,9 +713,9 @@
#### Header generators
def generateAccept(accept):
- mimeType,q = accept
+ mimeType, q = accept
- out="%s/%s"%(mimeType.mediaType, mimeType.mediaSubtype)
+ out ="%s/%s"%(mimeType.mediaType, mimeType.mediaSubtype)
if mimeType.params:
out+=';'+generateKeyValues(mimeType.params.iteritems())
@@ -724,7 +761,7 @@
# quoted list of values
v = quoteString(generateList(
[header_case_mapping.get(name) or dashCapitalize(name) for name in v]))
- return '%s=%s' % (k,v)
+ return '%s=%s' % (k, v)
def generateContentRange(tup):
"""tup is (type, start, end, len)
@@ -767,7 +804,7 @@
return ''
return s
- type,ranges=range
+ type, ranges = range
if type != 'bytes':
raise ValueError("Unknown range unit: "+type+".")
@@ -781,9 +818,9 @@
return str(int(when - time.time()))
def generateContentType(mimeType):
- out="%s/%s"%(mimeType.mediaType, mimeType.mediaSubtype)
+ out = "%s/%s" % (mimeType.mediaType, mimeType.mediaSubtype)
if mimeType.params:
- out+=';'+generateKeyValues(mimeType.params.iteritems())
+ out += ';' + generateKeyValues(mimeType.params.iteritems())
return out
def generateIfRange(dateOrETag):
@@ -804,7 +841,7 @@
try:
l = []
- for k,v in dict(challenge).iteritems():
+ for k, v in dict(challenge).iteritems():
l.append("%s=%s" % (k, quoteString(v)))
_generated.append("%s %s" % (scheme, ", ".join(l)))
@@ -849,7 +886,7 @@
return "Etag(%r, weak=%r)" % (self.tag, self.weak)
def parse(tokens):
- tokens=tuple(tokens)
+ tokens = tuple(tokens)
if len(tokens) == 1 and not isinstance(tokens[0], Token):
return ETag(tokens[0])
@@ -859,7 +896,7 @@
raise ValueError("Invalid ETag.")
- parse=staticmethod(parse)
+ parse = staticmethod(parse)
def generate(self):
if self.weak:
@@ -868,14 +905,14 @@
return quoteString(self.tag)
def parseStarOrETag(tokens):
- tokens=tuple(tokens)
+ tokens = tuple(tokens)
if tokens == ('*',):
return '*'
else:
return ETag.parse(tokens)
def generateStarOrETag(etag):
- if etag=='*':
+ if etag == '*':
return etag
else:
return etag.generate()
@@ -885,20 +922,20 @@
# __slots__ = ['name', 'value', 'path', 'domain', 'ports', 'expires', 'discard', 'secure', 'comment', 'commenturl', 'version']
def __init__(self, name, value, path=None, domain=None, ports=None, expires=None, discard=False, secure=False, comment=None, commenturl=None, version=0):
- self.name=name
- self.value=value
- self.path=path
- self.domain=domain
- self.ports=ports
- self.expires=expires
- self.discard=discard
- self.secure=secure
- self.comment=comment
- self.commenturl=commenturl
- self.version=version
+ self.name = name
+ self.value = value
+ self.path = path
+ self.domain = domain
+ self.ports = ports
+ self.expires = expires
+ self.discard = discard
+ self.secure = secure
+ self.comment = comment
+ self.commenturl = commenturl
+ self.version = version
def __repr__(self):
- s="Cookie(%r=%r" % (self.name, self.value)
+ s = "Cookie(%r=%r" % (self.name, self.value)
if self.path is not None: s+=", path=%r" % (self.path,)
if self.domain is not None: s+=", domain=%r" % (self.domain,)
if self.ports is not None: s+=", ports=%r" % (self.ports,)
@@ -941,7 +978,7 @@
header = ';'.join(headers)
if header[0:8].lower() == "$version":
# RFC2965 cookie
- h=tokenize([header], foldCase=False)
+ h = tokenize([header], foldCase=False)
r_cookies = split(h, Token(','))
for r_cookie in r_cookies:
last_cookie = None
@@ -954,20 +991,20 @@
(name,), = nameval
value = None
- name=name.lower()
+ name = name.lower()
if name == '$version':
continue
if name[0] == '$':
if last_cookie is not None:
if name == '$path':
- last_cookie.path=value
+ last_cookie.path = value
elif name == '$domain':
- last_cookie.domain=value
+ last_cookie.domain = value
elif name == '$port':
if value is None:
last_cookie.ports = ()
else:
- last_cookie.ports=tuple([int(s) for s in value.split(',')])
+ last_cookie.ports = tuple([int(s) for s in value.split(',')])
else:
last_cookie = Cookie(name, value, version=1)
cookies.append(last_cookie)
@@ -978,9 +1015,9 @@
# however.
r_cookies = header.split(';')
for r_cookie in r_cookies:
- name,value = r_cookie.split('=', 1)
- name=name.strip(' \t')
- value=value.strip(' \t')
+ name, value = r_cookie.split('=', 1)
+ name = name.strip(' \t')
+ value = value.strip(' \t')
cookies.append(Cookie(name, value))
@@ -1048,7 +1085,7 @@
if cookie_validname_re.match(cookie.name) is None:
continue
- value=cookie.value
+ value = cookie.value
if cookie_validvalue_re.match(cookie.value) is None:
value = quoteString(value)
@@ -1078,13 +1115,13 @@
for part in parts:
namevalue = part.split('=',1)
if len(namevalue) == 1:
- name=namevalue[0]
- value=None
+ name = namevalue[0]
+ value = None
else:
- name,value=namevalue
- value=value.strip(' \t')
+ name, value = namevalue
+ value = value.strip(' \t')
- name=name.strip(' \t')
+ name = name.strip(' \t')
l.append((name, value))
@@ -1115,7 +1152,7 @@
cookie = Cookie(name, value)
hadMaxAge = False
- for name,value in tup[1:]:
+ for name, value in tup[1:]:
name = name.lower()
if value is None:
@@ -1229,15 +1266,15 @@
# def getMimeQuality(mimeType, accepts):
-# type,args = parseArgs(mimeType)
-# type=type.split(Token('/'))
+# type, args = parseArgs(mimeType)
+# type = type.split(Token('/'))
# if len(type) != 2:
# raise ValueError, "MIME Type "+s+" invalid."
# for accept in accepts:
-# accept,acceptQual=accept
-# acceptType=accept[0:1]
-# acceptArgs=accept[2]
+# accept, acceptQual = accept
+# acceptType = accept[0:1]
+# acceptArgs = accept[2]
# if ((acceptType == type or acceptType == (type[0],'*') or acceptType==('*','*')) and
# (args == acceptArgs or len(acceptArgs) == 0)):
@@ -1299,7 +1336,7 @@
def getRawHeaders(self, name, default=None):
"""Returns a list of headers matching the given name as the raw string given."""
- name=name.lower()
+ name = name.lower()
raw_header = self._raw_headers.get(name, default)
if raw_header is not _RecalcNeeded:
return raw_header
@@ -1314,7 +1351,7 @@
If the header doesn't exist, return default (or None if not specified)
"""
- name=name.lower()
+ name = name.lower()
parsed = self._headers.get(name, default)
if parsed is not _RecalcNeeded:
return parsed
@@ -1325,7 +1362,7 @@
Value should be a list of strings, each being one header of the
given name.
"""
- name=name.lower()
+ name = name.lower()
self._raw_headers[name] = value
self._headers[name] = _RecalcNeeded
@@ -1334,7 +1371,7 @@
Value should be a list of objects whose exact form depends
on the header in question.
"""
- name=name.lower()
+ name = name.lower()
self._raw_headers[name] = _RecalcNeeded
self._headers[name] = value
@@ -1344,7 +1381,7 @@
If it exists, add it as a separate header to output; do not
replace anything.
"""
- name=name.lower()
+ name = name.lower()
raw_header = self._raw_headers.get(name)
if raw_header is None:
# No header yet
@@ -1362,7 +1399,7 @@
If it exists, add it as a separate header to output; do not
replace anything.
"""
- name=name.lower()
+ name = name.lower()
header = self._headers.get(name)
if header is None:
# No header yet
@@ -1375,7 +1412,7 @@
def removeHeader(self, name):
"""Removes the header named."""
- name=name.lower()
+ name = name.lower()
if name in self._raw_headers:
del self._raw_headers[name]
del self._headers[name]
@@ -1389,10 +1426,10 @@
return header_case_mapping.get(name) or dashCapitalize(name)
def getAllRawHeaders(self):
- """Return an iterator of key,value pairs of all headers
+ """Return an iterator of key, value pairs of all headers
contained in this object, as strings. The keys are capitalized
in canonical capitalization."""
- for k,v in self._raw_headers.iteritems():
+ for k, v in self._raw_headers.iteritems():
if v is _RecalcNeeded:
v = self._toRaw(k)
yield self.canonicalNameCaps(k), v
@@ -1418,24 +1455,24 @@
parser_general_headers = {
- 'Cache-Control':(tokenize, listParser(parseCacheControl), dict),
- 'Connection':(tokenize,filterTokens),
- 'Date':(last,parseDateTime),
+ 'Cache-Control': (tokenize, listParser(parseCacheControl), dict),
+ 'Connection': (tokenize, filterTokens),
+ 'Date': (last, parseDateTime),
# 'Pragma':tokenize
# 'Trailer':tokenize
- 'Transfer-Encoding':(tokenize,filterTokens),
+ 'Transfer-Encoding': (tokenize, filterTokens),
# 'Upgrade':tokenize
-# 'Via':tokenize,stripComment
+# 'Via':tokenize, stripComment
# 'Warning':tokenize
}
generator_general_headers = {
- 'Cache-Control':(iteritems, listGenerator(generateCacheControl), singleHeader),
- 'Connection':(generateList,singleHeader),
- 'Date':(generateDateTime,singleHeader),
+ 'Cache-Control': (iteritems, listGenerator(generateCacheControl), singleHeader),
+ 'Connection': (generateList, singleHeader),
+ 'Date': (generateDateTime, singleHeader),
# 'Pragma':
# 'Trailer':
- 'Transfer-Encoding':(generateList,singleHeader),
+ 'Transfer-Encoding': (generateList, singleHeader),
# 'Upgrade':
# 'Via':
# 'Warning':
@@ -1444,102 +1481,114 @@
parser_request_headers = {
'Accept': (tokenize, listParser(parseAccept), dict),
'Accept-Charset': (tokenize, listParser(parseAcceptQvalue), dict, addDefaultCharset),
- 'Accept-Encoding':(tokenize, listParser(parseAcceptQvalue), dict, addDefaultEncoding),
- 'Accept-Language':(tokenize, listParser(parseAcceptQvalue), dict),
+ 'Accept-Encoding': (tokenize, listParser(parseAcceptQvalue), dict, addDefaultEncoding),
+ 'Accept-Language': (tokenize, listParser(parseAcceptQvalue), dict),
+ 'Access-Control-Allow-Origin': (last, parseAllowOrigin,),
+ 'Access-Control-Allow-Credentials': (last, parseAllowCreds,),
+ 'Access-Control-Allow-Methods': (tokenize, listParser(parseHTTPMethod), list),
+ 'Access-Control-Request-Method': (parseHTTPMethod, ),
+ 'Access-Control-Request-Headers': (filterTokens, ),
+ 'Access-Control-Expose-Headers': (filterTokens, ),
'Authorization': (last, parseAuthorization),
- 'Cookie':(parseCookie,),
- 'Expect':(tokenize, listParser(parseExpect), dict),
- 'From':(last,),
- 'Host':(last,),
- 'If-Match':(tokenize, listParser(parseStarOrETag), list),
- 'If-Modified-Since':(last, parseIfModifiedSince),
- 'If-None-Match':(tokenize, listParser(parseStarOrETag), list),
- 'If-Range':(parseIfRange,),
- 'If-Unmodified-Since':(last,parseDateTime),
- 'Max-Forwards':(last,int),
+ 'Cookie': (parseCookie,),
+ 'Expect': (tokenize, listParser(parseExpect), dict),
+ 'Origin': (last,),
+ 'From': (last,),
+ 'Host': (last,),
+ 'If-Match': (tokenize, listParser(parseStarOrETag), list),
+ 'If-Modified-Since': (last, parseIfModifiedSince),
+ 'If-None-Match': (tokenize, listParser(parseStarOrETag), list),
+ 'If-Range': (parseIfRange,),
+ 'If-Unmodified-Since': (last, parseDateTime),
+ 'Max-Forwards': (last, int),
# 'Proxy-Authorization':str, # what is "credentials"
- 'Range':(tokenize, parseRange),
- 'Referer':(last,str), # TODO: URI object?
- 'TE':(tokenize, listParser(parseAcceptQvalue), dict),
- 'User-Agent':(last,str),
+ 'Range': (tokenize, parseRange),
+ 'Referer': (last, str), # TODO: URI object?
+ 'TE': (tokenize, listParser(parseAcceptQvalue), dict),
+ 'User-Agent': (last, str),
}
generator_request_headers = {
- 'Accept': (iteritems,listGenerator(generateAccept),singleHeader),
- 'Accept-Charset': (iteritems, listGenerator(generateAcceptQvalue),singleHeader),
- 'Accept-Encoding': (iteritems, removeDefaultEncoding, listGenerator(generateAcceptQvalue),singleHeader),
- 'Accept-Language': (iteritems, listGenerator(generateAcceptQvalue),singleHeader),
+ 'Accept': (iteritems, listGenerator(generateAccept), singleHeader),
+ 'Accept-Charset': (iteritems, listGenerator(generateAcceptQvalue), singleHeader),
+ 'Accept-Encoding': (iteritems, removeDefaultEncoding,
+ listGenerator(generateAcceptQvalue), singleHeader),
+ 'Accept-Language': (iteritems, listGenerator(generateAcceptQvalue), singleHeader),
+ 'Access-Control-Request-Method': (unique, str, singleHeader, ),
+ 'Access-Control-Expose-Headers': (listGenerator(str), ),
+ 'Access-Control-Allow-Headers': (listGenerator(str), ),
'Authorization': (generateAuthorization,), # what is "credentials"
- 'Cookie':(generateCookie,singleHeader),
- 'Expect':(iteritems, listGenerator(generateExpect), singleHeader),
- 'From':(str,singleHeader),
- 'Host':(str,singleHeader),
- 'If-Match':(listGenerator(generateStarOrETag), singleHeader),
- 'If-Modified-Since':(generateDateTime,singleHeader),
- 'If-None-Match':(listGenerator(generateStarOrETag), singleHeader),
- 'If-Range':(generateIfRange, singleHeader),
- 'If-Unmodified-Since':(generateDateTime,singleHeader),
- 'Max-Forwards':(str, singleHeader),
+ 'Cookie': (generateCookie, singleHeader),
+ 'Expect': (iteritems, listGenerator(generateExpect), singleHeader),
+ 'From': (unique, str, singleHeader),
+ 'Host': (unique, str, singleHeader),
+ 'If-Match': (listGenerator(generateStarOrETag), singleHeader),
+ 'If-Modified-Since': (generateDateTime, singleHeader),
+ 'If-None-Match': (listGenerator(generateStarOrETag), singleHeader),
+ 'If-Range': (generateIfRange, singleHeader),
+ 'If-Unmodified-Since': (generateDateTime, singleHeader),
+ 'Max-Forwards': (unique, str, singleHeader),
+ 'Origin': (unique, str, singleHeader),
# 'Proxy-Authorization':str, # what is "credentials"
- 'Range':(generateRange,singleHeader),
- 'Referer':(str,singleHeader),
- 'TE': (iteritems, listGenerator(generateAcceptQvalue),singleHeader),
- 'User-Agent':(str,singleHeader),
+ 'Range': (generateRange, singleHeader),
+ 'Referer': (unique, str, singleHeader),
+ 'TE': (iteritems, listGenerator(generateAcceptQvalue), singleHeader),
+ 'User-Agent': (unique, str, singleHeader),
}
parser_response_headers = {
- 'Accept-Ranges':(tokenize, filterTokens),
- 'Age':(last,int),
- 'ETag':(tokenize, ETag.parse),
- 'Location':(last,), # TODO: URI object?
+ 'Accept-Ranges': (tokenize, filterTokens),
+ 'Age': (last, int),
+ 'ETag': (tokenize, ETag.parse),
+ 'Location': (last,), # TODO: URI object?
# 'Proxy-Authenticate'
- 'Retry-After':(last, parseRetryAfter),
- 'Server':(last,),
- 'Set-Cookie':(parseSetCookie,),
- 'Set-Cookie2':(tokenize, parseSetCookie2),
- 'Vary':(tokenize, filterTokens),
+ 'Retry-After': (last, parseRetryAfter),
+ 'Server': (last,),
+ 'Set-Cookie': (parseSetCookie,),
+ 'Set-Cookie2': (tokenize, parseSetCookie2),
+ 'Vary': (tokenize, filterTokens),
'WWW-Authenticate': (lambda h: tokenize(h, foldCase=False),
parseWWWAuthenticate,)
}
generator_response_headers = {
- 'Accept-Ranges':(generateList, singleHeader),
- 'Age':(str, singleHeader),
- 'ETag':(ETag.generate, singleHeader),
- 'Location':(str, singleHeader),
+ 'Accept-Ranges': (generateList, singleHeader),
+ 'Age': (unique, str, singleHeader),
+ 'ETag': (ETag.generate, singleHeader),
+ 'Location': (unique, str, singleHeader),
# 'Proxy-Authenticate'
- 'Retry-After':(generateRetryAfter, singleHeader),
- 'Server':(str, singleHeader),
- 'Set-Cookie':(generateSetCookie,),
- 'Set-Cookie2':(generateSetCookie2,),
- 'Vary':(generateList, singleHeader),
- 'WWW-Authenticate':(generateWWWAuthenticate,)
+ 'Retry-After': (generateRetryAfter, singleHeader),
+ 'Server': (unique, str, singleHeader),
+ 'Set-Cookie': (generateSetCookie,),
+ 'Set-Cookie2': (generateSetCookie2,),
+ 'Vary': (generateList, singleHeader),
+ 'WWW-Authenticate': (generateWWWAuthenticate,)
}
parser_entity_headers = {
- 'Allow':(lambda str:tokenize(str, foldCase=False), filterTokens),
- 'Content-Encoding':(tokenize, filterTokens),
- 'Content-Language':(tokenize, filterTokens),
- 'Content-Length':(last, int),
- 'Content-Location':(last,), # TODO: URI object?
- 'Content-MD5':(last, parseContentMD5),
- 'Content-Range':(last, parseContentRange),
- 'Content-Type':(lambda str:tokenize(str, foldCase=False), parseContentType),
- 'Expires':(last, parseExpires),
- 'Last-Modified':(last, parseDateTime),
+ 'Allow': (lambda str:tokenize(str, foldCase=False), filterTokens),
+ 'Content-Encoding': (tokenize, filterTokens),
+ 'Content-Language': (tokenize, filterTokens),
+ 'Content-Length': (last, int),
+ 'Content-Location': (last,), # TODO: URI object?
+ 'Content-MD5': (last, parseContentMD5),
+ 'Content-Range': (last, parseContentRange),
+ 'Content-Type': (lambda str:tokenize(str, foldCase=False), parseContentType),
+ 'Expires': (last, parseExpires),
+ 'Last-Modified': (last, parseDateTime),
}
generator_entity_headers = {
- 'Allow':(generateList, singleHeader),
- 'Content-Encoding':(generateList, singleHeader),
- 'Content-Language':(generateList, singleHeader),
- 'Content-Length':(str, singleHeader),
- 'Content-Location':(str, singleHeader),
- 'Content-MD5':(base64.encodestring, lambda x: x.strip("\n"), singleHeader),
- 'Content-Range':(generateContentRange, singleHeader),
- 'Content-Type':(generateContentType, singleHeader),
- 'Expires':(generateDateTime, singleHeader),
- 'Last-Modified':(generateDateTime, singleHeader),
+ 'Allow': (generateList, singleHeader),
+ 'Content-Encoding': (generateList, singleHeader),
+ 'Content-Language': (generateList, singleHeader),
+ 'Content-Length': (unique, str, singleHeader),
+ 'Content-Location': (unique, str, singleHeader),
+ 'Content-MD5': (base64.encodestring, lambda x: x.strip("\n"), singleHeader),
+ 'Content-Range': (generateContentRange, singleHeader),
+ 'Content-Type': (generateContentType, singleHeader),
+ 'Expires': (generateDateTime, singleHeader),
+ 'Last-Modified': (generateDateTime, singleHeader),
}
DefaultHTTPHandler.updateParsers(parser_general_headers)
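The parser/generator tables above map a header name to a pipeline of callables applied left to right; the new `unique` stage guards single-valued headers. An illustrative trace:

    # generating e.g. the Host header now goes through
    # unique -> str -> singleHeader:
    value = 'www.example.org'
    for stage in (unique, str, singleHeader):
        value = stage(value)
    # value == ['www.example.org']; a multi-element list would have made
    # unique() raise ValueError instead of silently joining the values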
--- a/web/request.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/request.py Tue Jun 10 09:49:45 2014 +0200
@@ -39,6 +39,7 @@
from logilab.common.deprecation import deprecated
from logilab.mtconverter import xml_escape
+from cubicweb.req import RequestSessionBase
from cubicweb.dbapi import DBAPIRequest
from cubicweb.uilib import remove_html_tags, js
from cubicweb.utils import SizeConstrainedList, HTMLHead, make_uid
@@ -82,19 +83,20 @@
-class CubicWebRequestBase(DBAPIRequest):
+class _CubicWebRequestBase(RequestSessionBase):
"""abstract HTTP request, should be extended according to the HTTP backend
Immutable attributes that describe the received query and generic configuration
"""
ajax_request = False # to be set to True by ajax controllers
- def __init__(self, vreg, https=False, form=None, headers={}):
+ def __init__(self, vreg, https=False, form=None, headers=None):
"""
:vreg: Vregistry,
        :https: boolean, is this an https request
:form: Forms value
+        :headers: dict, request headers
"""
- super(CubicWebRequestBase, self).__init__(vreg)
+ super(_CubicWebRequestBase, self).__init__(vreg)
#: (Boolean) Is this an https request.
self.https = https
#: User interface property (vary with https) (see :ref:`uiprops`)
@@ -113,12 +115,16 @@
self.html_headers = HTMLHead(self)
#: received headers
self._headers_in = Headers()
- for k, v in headers.iteritems():
- self._headers_in.addRawHeader(k, v)
+ if headers is not None:
+ for k, v in headers.iteritems():
+ self._headers_in.addRawHeader(k, v)
#: form parameters
self.setup_params(form)
#: received body
self.content = StringIO()
+ # set up language based on request headers or site default (we don't
+ # have a user yet, and might not get one)
+ self.set_user_language(None)
#: dictionary that may be used to store request data that has to be
#: shared among various components used to publish the request (views,
#: controller, application...)
@@ -169,7 +175,7 @@
if secure:
base_url = self.vreg.config.get('https-url')
if base_url is None:
- base_url = super(CubicWebRequestBase, self).base_url()
+ base_url = super(_CubicWebRequestBase, self).base_url()
return base_url
@property
@@ -206,31 +212,6 @@
self.set_page_data('rql_varmaker', varmaker)
return varmaker
- def set_session(self, session, user=None):
- """method called by the session handler when the user is authenticated
- or an anonymous connection is open
- """
- super(CubicWebRequestBase, self).set_session(session, user)
- # set request language
- vreg = self.vreg
- if self.user:
- try:
- # 1. user specified language
- lang = vreg.typed_value('ui.language',
- self.user.properties['ui.language'])
- self.set_language(lang)
- return
- except KeyError:
- pass
- if vreg.config['language-negociation']:
- # 2. http negociated language
- for lang in self.header_accept_language():
- if lang in self.translations:
- self.set_language(lang)
- return
- # 3. default language
- self.set_default_language(vreg)
-
# input form parameters management ########################################
# common form parameters which should be protected against html values
@@ -327,6 +308,7 @@
def set_message(self, msg):
assert isinstance(msg, unicode)
+ self.reset_message()
self._msg = msg
def set_message_id(self, msgid):
@@ -357,6 +339,7 @@
if hasattr(self, '_msg'):
del self._msg
if hasattr(self, '_msgid'):
+ self.session.data.pop(self._msgid, u'')
del self._msgid
def update_search_state(self):
@@ -423,6 +406,7 @@
req.execute(rql, args, key)
return self.user_callback(rqlexec, rqlargs, *args, **kwargs)
+ @deprecated('[3.19] use a traditional ajaxfunc / controller')
def user_callback(self, cb, cbargs, *args, **kwargs):
"""register the given user callback and return a URL which can
be inserted in an HTML view. When the URL is accessed, the
@@ -725,7 +709,13 @@
if '__message' in kwargs:
msg = kwargs.pop('__message')
kwargs['_cwmsgid'] = self.set_redirect_message(msg)
- return super(CubicWebRequestBase, self).build_url(*args, **kwargs)
+ if not args:
+ method = 'view'
+ if (self.from_controller() == 'view'
+ and not '_restpath' in kwargs):
+ method = self.relative_path(includeparams=False) or 'view'
+ args = (method,)
+ return super(_CubicWebRequestBase, self).build_url(*args, **kwargs)
def url(self, includeparams=True):
"""return currently accessed url"""
@@ -986,6 +976,108 @@
def html_content_type(self):
return 'text/html'
+ def set_user_language(self, user):
+ vreg = self.vreg
+ if user is not None:
+ try:
+ # 1. user-specified language
+ lang = vreg.typed_value('ui.language', user.properties['ui.language'])
+ self.set_language(lang)
+ return
+ except KeyError:
+ pass
+ if vreg.config.get('language-negociation', False):
+ # 2. http accept-language
+ for lang in self.header_accept_language():
+ if lang in self.translations:
+ self.set_language(lang)
+ return
+ # 3. site's default language
+ self.set_default_language(vreg)
+
+
+class DBAPICubicWebRequestBase(_CubicWebRequestBase, DBAPIRequest):
+
+ def set_session(self, session):
+ """method called by the session handler when the user is authenticated
+ or an anonymous connection is open
+ """
+        super(DBAPICubicWebRequestBase, self).set_session(session)
+ # set request language
+ self.set_user_language(session.user)
+
+
+def _cnx_func(name):
+ def proxy(req, *args, **kwargs):
+ return getattr(req.cnx, name)(*args, **kwargs)
+ return proxy
+
+
+class ConnectionCubicWebRequestBase(_CubicWebRequestBase):
+
+    def __init__(self, vreg, https=False, form=None, headers=None):
+ self.cnx = None
+ self.session = None
+ self.vreg = vreg
+ try:
+ # no vreg or config which doesn't handle translations
+ self.translations = vreg.config.translations
+ except AttributeError:
+ self.translations = {}
+ super(ConnectionCubicWebRequestBase, self).__init__(vreg, https=https,
+ form=form, headers=headers)
+ from cubicweb.dbapi import DBAPISession, _NeedAuthAccessMock
+ self.session = DBAPISession(None)
+ self.cnx = self.user = _NeedAuthAccessMock()
+
+ def set_cnx(self, cnx):
+ self.cnx = cnx
+ self.session = cnx._session
+ self._set_user(cnx.user)
+ self.set_user_language(cnx.user)
+
+ def execute(self, *args, **kwargs):
+ rset = self.cnx.execute(*args, **kwargs)
+ rset.req = self
+ return rset
+
+ def set_default_language(self, vreg):
+ # XXX copy from dbapi
+ try:
+ lang = vreg.property_value('ui.language')
+ except Exception: # property may not be registered
+ lang = 'en'
+ try:
+ self.set_language(lang)
+ except KeyError:
+ # this occurs usually during test execution
+ self._ = self.__ = unicode
+ self.pgettext = lambda x, y: unicode(y)
+
+ entity_metas = _cnx_func('entity_metas')
+ source_defs = _cnx_func('source_defs')
+ get_shared_data = _cnx_func('get_shared_data')
+ set_shared_data = _cnx_func('set_shared_data')
+ describe = _cnx_func('describe') # deprecated XXX
+
+ # server-side service call #################################################
+
+ def call_service(self, regid, **kwargs):
+ return self.cnx.call_service(regid, **kwargs)
+
+ # entities cache management ###############################################
+
+ entity_cache = _cnx_func('entity_cache')
+ set_entity_cache = _cnx_func('set_entity_cache')
+ cached_entities = _cnx_func('cached_entities')
+ drop_entity_cache = _cnx_func('drop_entity_cache')
+
+
+CubicWebRequestBase = ConnectionCubicWebRequestBase
+
## HTTP-accept parsers / utilies ##############################################
def _mimetype_sort_key(accept_info):
@@ -1083,4 +1175,4 @@
}
from cubicweb import set_log_methods
-set_log_methods(CubicWebRequestBase, LOGGER)
+set_log_methods(_CubicWebRequestBase, LOGGER)
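The refactoring above splits the old request class in two and wires the connection-backed variant up through runtime delegation: _cnx_func builds proxy methods that look the target up on req.cnx at call time. A minimal, self-contained sketch of that pattern follows; FakeConnection and FakeRequest are purely illustrative stand-ins, not cubicweb classes:

    # Sketch of the _cnx_func delegation pattern used by
    # ConnectionCubicWebRequestBase; FakeConnection/FakeRequest are
    # hypothetical stand-ins for the real cubicweb objects.
    def _cnx_func(name):
        def proxy(req, *args, **kwargs):
            # resolve the method on the *current* connection at call time,
            # so rebinding req.cnx (as set_cnx does) is transparent
            return getattr(req.cnx, name)(*args, **kwargs)
        return proxy

    class FakeConnection(object):
        def __init__(self):
            self._shared = {}
        def get_shared_data(self, key, default=None):
            return self._shared.get(key, default)
        def set_shared_data(self, key, value):
            self._shared[key] = value

    class FakeRequest(object):
        get_shared_data = _cnx_func('get_shared_data')
        set_shared_data = _cnx_func('set_shared_data')
        def __init__(self, cnx):
            self.cnx = cnx

    req = FakeRequest(FakeConnection())
    req.set_shared_data('title', u'hello')
    assert req.get_shared_data('title') == u'hello'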
--- a/web/test/data/views.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/test/data/views.py Tue Jun 10 09:49:45 2014 +0200
@@ -16,32 +16,8 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-from cubicweb.web import Redirect
-from cubicweb.web.application import CubicWebPublisher
from cubicweb.web.views.ajaxcontroller import ajaxfunc
-# proof of concept : monkey patch handle method so that if we are in an
-# anonymous session and __fblogin is found is req.form, the user with the
-# given login is created if necessary and then a session is opened for that
-# user
-# NOTE: this require "cookie" authentication mode
-def auto_login_handle_request(self, req, path):
- if (not req.cnx or req.cnx.anonymous_connection) and req.form.get('__fblogin'):
- login = password = req.form.pop('__fblogin')
- self.repo.register_user(login, password)
- req.form['__login'] = login
- req.form['__password'] = password
- if req.cnx:
- req.cnx.close()
- req.cnx = None
- try:
- self.session_handler.set_session(req)
- except Redirect:
- pass
- assert req.user.login == login
- return orig_handle(self, req, path)
-
-
def _recursive_replace_stream_by_content(tree):
""" Search for streams (i.e. object that have a 'read' method) in a tree
(which branches are lists or tuples), and substitute them by their content,
@@ -70,6 +46,3 @@
except Exception, ex:
import traceback as tb
tb.print_exc(ex)
-
-orig_handle = CubicWebPublisher.main_handle_request
-CubicWebPublisher.main_handle_request = auto_login_handle_request
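Only the docstring of _recursive_replace_stream_by_content appears in this hunk; for context, here is a minimal sketch consistent with that docstring (one plausible implementation, not necessarily the one kept in the file):

    # Replace any object carrying a read() method by the result of read(),
    # recursing through list/tuple branches, per the docstring above.
    def replace_streams(tree):
        if hasattr(tree, 'read'):
            return tree.read()
        if isinstance(tree, (list, tuple)):
            return type(tree)(replace_streams(child) for child in tree)
        return tree

    from io import BytesIO
    assert replace_streams([1, (BytesIO(b'x'), 2)]) == [1, (b'x', 2)]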
--- a/web/test/unittest_application.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/test/unittest_application.py Tue Jun 10 09:49:45 2014 +0200
@@ -32,6 +32,8 @@
from cubicweb.web import LogOut, Redirect, INTERNAL_FIELD_VALUE
from cubicweb.web.views.basecontrollers import ViewController
from cubicweb.web.application import anonymized_request
+from cubicweb.dbapi import DBAPISession, _NeedAuthAccessMock
+from cubicweb import repoapi
class FakeMapping:
"""emulates a mapping module"""
@@ -165,48 +167,40 @@
return config
def test_cnx_user_groups_sync(self):
- user = self.user()
- self.assertEqual(user.groups, set(('managers',)))
- self.execute('SET X in_group G WHERE X eid %s, G name "guests"' % user.eid)
- user = self.user()
- self.assertEqual(user.groups, set(('managers',)))
- self.commit()
- user = self.user()
- self.assertEqual(user.groups, set(('managers', 'guests')))
- # cleanup
- self.execute('DELETE X in_group G WHERE X eid %s, G name "guests"' % user.eid)
- self.commit()
-
- def test_nonregr_publish1(self):
- req = self.request(u'CWEType X WHERE X final FALSE, X meta FALSE')
- self.app.handle_request(req, 'view')
-
- def test_nonregr_publish2(self):
- req = self.request(u'Any count(N) WHERE N todo_by U, N is Note, U eid %s'
- % self.user().eid)
- self.app.handle_request(req, 'view')
+ with self.admin_access.client_cnx() as cnx:
+ user = cnx.user
+ self.assertEqual(user.groups, set(('managers',)))
+ cnx.execute('SET X in_group G WHERE X eid %s, G name "guests"' % user.eid)
+ user = cnx.user
+ self.assertEqual(user.groups, set(('managers',)))
+ cnx.commit()
+ user = cnx.user
+ self.assertEqual(user.groups, set(('managers', 'guests')))
+ # cleanup
+ cnx.execute('DELETE X in_group G WHERE X eid %s, G name "guests"' % user.eid)
+ cnx.commit()
def test_publish_validation_error(self):
- req = self.request()
- user = self.user()
- eid = unicode(user.eid)
- req.form = {
- 'eid': eid,
- '__type:'+eid: 'CWUser', '_cw_entity_fields:'+eid: 'login-subject',
- 'login-subject:'+eid: '', # ERROR: no login specified
- # just a sample, missing some necessary information for real life
- '__errorurl': 'view?vid=edition...'
- }
- path, params = self.expect_redirect_handle_request(req, 'edit')
- forminfo = req.session.data['view?vid=edition...']
- eidmap = forminfo['eidmap']
- self.assertEqual(eidmap, {})
- values = forminfo['values']
- self.assertEqual(values['login-subject:'+eid], '')
- self.assertEqual(values['eid'], eid)
- error = forminfo['error']
- self.assertEqual(error.entity, user.eid)
- self.assertEqual(error.errors['login-subject'], 'required field')
+ with self.admin_access.web_request() as req:
+ user = self.user()
+ eid = unicode(user.eid)
+ req.form = {
+ 'eid': eid,
+ '__type:'+eid: 'CWUser', '_cw_entity_fields:'+eid: 'login-subject',
+ 'login-subject:'+eid: '', # ERROR: no login specified
+ # just a sample, missing some necessary information for real life
+ '__errorurl': 'view?vid=edition...'
+ }
+ path, params = self.expect_redirect_handle_request(req, 'edit')
+ forminfo = req.session.data['view?vid=edition...']
+ eidmap = forminfo['eidmap']
+ self.assertEqual(eidmap, {})
+ values = forminfo['values']
+ self.assertEqual(values['login-subject:'+eid], '')
+ self.assertEqual(values['eid'], eid)
+ error = forminfo['error']
+ self.assertEqual(error.entity, user.eid)
+ self.assertEqual(error.errors['login-subject'], 'required field')
def test_validation_error_dont_loose_subentity_data_ctrl(self):
@@ -214,28 +208,28 @@
error occurs on the web controller
"""
- req = self.request()
- # set Y before X to ensure both entities are edited, not only X
- req.form = {'eid': ['Y', 'X'], '__maineid': 'X',
- '__type:X': 'CWUser', '_cw_entity_fields:X': 'login-subject',
- # missing required field
- 'login-subject:X': u'',
- # but email address is set
- '__type:Y': 'EmailAddress', '_cw_entity_fields:Y': 'address-subject',
- 'address-subject:Y': u'bougloup@logilab.fr',
- 'use_email-object:Y': 'X',
- # necessary to get validation error handling
- '__errorurl': 'view?vid=edition...',
- }
- path, params = self.expect_redirect_handle_request(req, 'edit')
- forminfo = req.session.data['view?vid=edition...']
- self.assertEqual(set(forminfo['eidmap']), set('XY'))
- self.assertEqual(forminfo['eidmap']['X'], None)
- self.assertIsInstance(forminfo['eidmap']['Y'], int)
- self.assertEqual(forminfo['error'].entity, 'X')
- self.assertEqual(forminfo['error'].errors,
- {'login-subject': 'required field'})
- self.assertEqual(forminfo['values'], req.form)
+ with self.admin_access.web_request() as req:
+ # set Y before X to ensure both entities are edited, not only X
+ req.form = {'eid': ['Y', 'X'], '__maineid': 'X',
+ '__type:X': 'CWUser', '_cw_entity_fields:X': 'login-subject',
+ # missing required field
+ 'login-subject:X': u'',
+ # but email address is set
+ '__type:Y': 'EmailAddress', '_cw_entity_fields:Y': 'address-subject',
+ 'address-subject:Y': u'bougloup@logilab.fr',
+ 'use_email-object:Y': 'X',
+ # necessary to get validation error handling
+ '__errorurl': 'view?vid=edition...',
+ }
+ path, params = self.expect_redirect_handle_request(req, 'edit')
+ forminfo = req.session.data['view?vid=edition...']
+ self.assertEqual(set(forminfo['eidmap']), set('XY'))
+ self.assertEqual(forminfo['eidmap']['X'], None)
+ self.assertIsInstance(forminfo['eidmap']['Y'], int)
+ self.assertEqual(forminfo['error'].entity, 'X')
+ self.assertEqual(forminfo['error'].errors,
+ {'login-subject': 'required field'})
+ self.assertEqual(forminfo['values'], req.form)
def test_validation_error_dont_loose_subentity_data_repo(self):
@@ -243,28 +237,28 @@
error occurs on the repository
"""
- req = self.request()
- # set Y before X to ensure both entities are edited, not only X
- req.form = {'eid': ['Y', 'X'], '__maineid': 'X',
- '__type:X': 'CWUser', '_cw_entity_fields:X': 'login-subject,upassword-subject',
- # already existent user
- 'login-subject:X': u'admin',
- 'upassword-subject:X': u'admin', 'upassword-subject-confirm:X': u'admin',
- '__type:Y': 'EmailAddress', '_cw_entity_fields:Y': 'address-subject',
- 'address-subject:Y': u'bougloup@logilab.fr',
- 'use_email-object:Y': 'X',
- # necessary to get validation error handling
- '__errorurl': 'view?vid=edition...',
- }
- path, params = self.expect_redirect_handle_request(req, 'edit')
- forminfo = req.session.data['view?vid=edition...']
- self.assertEqual(set(forminfo['eidmap']), set('XY'))
- self.assertIsInstance(forminfo['eidmap']['X'], int)
- self.assertIsInstance(forminfo['eidmap']['Y'], int)
- self.assertEqual(forminfo['error'].entity, forminfo['eidmap']['X'])
- self.assertEqual(forminfo['error'].errors,
- {'login-subject': u'the value "admin" is already used, use another one'})
- self.assertEqual(forminfo['values'], req.form)
+ with self.admin_access.web_request() as req:
+ # set Y before X to ensure both entities are edited, not only X
+ req.form = {'eid': ['Y', 'X'], '__maineid': 'X',
+ '__type:X': 'CWUser', '_cw_entity_fields:X': 'login-subject,upassword-subject',
+ # already existent user
+ 'login-subject:X': u'admin',
+ 'upassword-subject:X': u'admin', 'upassword-subject-confirm:X': u'admin',
+ '__type:Y': 'EmailAddress', '_cw_entity_fields:Y': 'address-subject',
+ 'address-subject:Y': u'bougloup@logilab.fr',
+ 'use_email-object:Y': 'X',
+ # necessary to get validation error handling
+ '__errorurl': 'view?vid=edition...',
+ }
+ path, params = self.expect_redirect_handle_request(req, 'edit')
+ forminfo = req.session.data['view?vid=edition...']
+ self.assertEqual(set(forminfo['eidmap']), set('XY'))
+ self.assertIsInstance(forminfo['eidmap']['X'], int)
+ self.assertIsInstance(forminfo['eidmap']['Y'], int)
+ self.assertEqual(forminfo['error'].entity, forminfo['eidmap']['X'])
+ self.assertEqual(forminfo['error'].errors,
+ {'login-subject': u'the value "admin" is already used, use another one'})
+ self.assertEqual(forminfo['values'], req.form)
def test_ajax_view_raise_arbitrary_error(self):
class ErrorAjaxView(view.View):
@@ -273,17 +267,17 @@
raise Exception('whatever')
with self.temporary_appobjects(ErrorAjaxView):
with real_error_handling(self.app) as app:
- req = self.request(vid='test.ajax.error')
- req.ajax_request = True
- page = app.handle_request(req, '')
+ with self.admin_access.web_request(vid='test.ajax.error') as req:
+ req.ajax_request = True
+ page = app.handle_request(req, '')
self.assertEqual(httplib.INTERNAL_SERVER_ERROR,
req.status_out)
def _test_cleaned(self, kwargs, injected, cleaned):
- req = self.request(**kwargs)
- page = self.app.handle_request(req, 'view')
- self.assertFalse(injected in page, (kwargs, injected))
- self.assertTrue(cleaned in page, (kwargs, cleaned))
+ with self.admin_access.web_request(**kwargs) as req:
+ page = self.app_handle_request(req, 'view')
+ self.assertNotIn(injected, page)
+ self.assertIn(cleaned, page)
def test_nonregr_script_kiddies(self):
"""test against current script injection"""
@@ -302,39 +296,28 @@
vreg = self.app.vreg
# default value
self.assertEqual(vreg.property_value('ui.language'), 'en')
- self.execute('INSERT CWProperty X: X value "fr", X pkey "ui.language"')
- self.assertEqual(vreg.property_value('ui.language'), 'en')
- self.commit()
- self.assertEqual(vreg.property_value('ui.language'), 'fr')
- self.execute('SET X value "de" WHERE X pkey "ui.language"')
- self.assertEqual(vreg.property_value('ui.language'), 'fr')
- self.commit()
- self.assertEqual(vreg.property_value('ui.language'), 'de')
- self.execute('DELETE CWProperty X WHERE X pkey "ui.language"')
- self.assertEqual(vreg.property_value('ui.language'), 'de')
- self.commit()
- self.assertEqual(vreg.property_value('ui.language'), 'en')
-
- def test_fb_login_concept(self):
- """see data/views.py"""
- self.set_auth_mode('cookie', 'anon')
- self.login('anon')
- req = self.request()
- origcnx = req.cnx
- req.form['__fblogin'] = u'turlututu'
- page = self.app.handle_request(req, '')
- self.assertFalse(req.cnx is origcnx)
- self.assertEqual(req.user.login, 'turlututu')
- self.assertTrue('turlututu' in page, page)
- req.cnx.close() # avoid warning
+ with self.admin_access.client_cnx() as cnx:
+ cnx.execute('INSERT CWProperty X: X value "fr", X pkey "ui.language"')
+ self.assertEqual(vreg.property_value('ui.language'), 'en')
+ cnx.commit()
+ self.assertEqual(vreg.property_value('ui.language'), 'fr')
+ cnx.execute('SET X value "de" WHERE X pkey "ui.language"')
+ self.assertEqual(vreg.property_value('ui.language'), 'fr')
+ cnx.commit()
+ self.assertEqual(vreg.property_value('ui.language'), 'de')
+ cnx.execute('DELETE CWProperty X WHERE X pkey "ui.language"')
+ self.assertEqual(vreg.property_value('ui.language'), 'de')
+ cnx.commit()
+ self.assertEqual(vreg.property_value('ui.language'), 'en')
# authentication tests ####################################################
def test_http_auth_no_anon(self):
req, origsession = self.init_authentication('http')
self.assertAuthFailure(req)
- self.assertRaises(AuthenticationError, self.app_handle_request, req, 'login')
- self.assertEqual(req.cnx, None)
+ self.app.handle_request(req, 'login')
+ self.assertEqual(401, req.status_out)
+ clear_cache(req, 'get_authorization')
authstr = base64.encodestring('%s:%s' % (self.admlogin, self.admpassword))
req.set_request_header('Authorization', 'basic %s' % authstr)
self.assertAuthSuccess(req, origsession)
@@ -345,12 +328,13 @@
req, origsession = self.init_authentication('cookie')
self.assertAuthFailure(req)
try:
- form = self.app_handle_request(req, 'login')
+ form = self.app.handle_request(req, 'login')
except Redirect as redir:
self.fail('anonymous user should get login form')
- self.assertTrue('__login' in form)
- self.assertTrue('__password' in form)
- self.assertEqual(req.cnx, None)
+ clear_cache(req, 'get_authorization')
+ self.assertIn('__login', form)
+ self.assertIn('__password', form)
+ self.assertFalse(req.cnx) # mock cnx is falsy
req.form['__login'] = self.admlogin
req.form['__password'] = self.admpassword
self.assertAuthSuccess(req, origsession)
@@ -358,18 +342,19 @@
self.assertEqual(len(self.open_sessions), 0)
def test_login_by_email(self):
- login = self.request().user.login
- address = login + u'@localhost'
- self.execute('INSERT EmailAddress X: X address %(address)s, U primary_email X '
- 'WHERE U login %(login)s', {'address': address, 'login': login})
- self.commit()
+ with self.admin_access.client_cnx() as cnx:
+ login = cnx.user.login
+ address = login + u'@localhost'
+ cnx.execute('INSERT EmailAddress X: X address %(address)s, U primary_email X '
+ 'WHERE U login %(login)s', {'address': address, 'login': login})
+ cnx.commit()
# # option allow-email-login not set
req, origsession = self.init_authentication('cookie')
# req.form['__login'] = address
# req.form['__password'] = self.admpassword
# self.assertAuthFailure(req)
# option allow-email-login set
- origsession.login = address
+ # origsession.login = address
self.set_option('allow-email-login', True)
req.form['__login'] = address
req.form['__password'] = self.admpassword
@@ -387,22 +372,27 @@
raw=True)
clear_cache(req, 'get_authorization')
# reset session as if it was a new incoming request
- req.session = req.cnx = None
+ req.session = DBAPISession(None)
+ req.user = req.cnx = _NeedAuthAccessMock()
+
def _test_auth_anon(self, req):
- self.app.connect(req)
- asession = req.session
+ asession = self.app.get_session(req)
+ # important, otherwise _reset_cookie will not use the right session
+ req.set_cnx(repoapi.ClientConnection(asession))
self.assertEqual(len(self.open_sessions), 1)
self.assertEqual(asession.login, 'anon')
self.assertTrue(asession.anonymous_session)
self._reset_cookie(req)
def _test_anon_auth_fail(self, req):
- self.assertEqual(len(self.open_sessions), 1)
- self.app.connect(req)
+ self.assertEqual(1, len(self.open_sessions))
+ session = self.app.get_session(req)
+ # important, otherwise _reset_cookie will not use the right session
+ req.set_cnx(repoapi.ClientConnection(session))
self.assertEqual(req.message, 'authentication failure')
self.assertEqual(req.session.anonymous_session, True)
- self.assertEqual(len(self.open_sessions), 1)
+ self.assertEqual(1, len(self.open_sessions))
self._reset_cookie(req)
def test_http_auth_anon_allowed(self):
@@ -427,25 +417,25 @@
req.form['__password'] = self.admpassword
self.assertAuthSuccess(req, origsession)
self.assertRaises(LogOut, self.app_handle_request, req, 'logout')
- self.assertEqual(len(self.open_sessions), 0)
+ self.assertEqual(0, len(self.open_sessions))
def test_anonymized_request(self):
- req = self.request()
- self.assertEqual(req.session.login, self.admlogin)
- # admin should see anon + admin
- self.assertEqual(len(list(req.find_entities('CWUser'))), 2)
- with anonymized_request(req):
- self.assertEqual(req.session.login, 'anon')
- # anon should only see anon user
- self.assertEqual(len(list(req.find_entities('CWUser'))), 1)
- self.assertEqual(req.session.login, self.admlogin)
- self.assertEqual(len(list(req.find_entities('CWUser'))), 2)
+ with self.admin_access.web_request() as req:
+ self.assertEqual(self.admlogin, req.session.user.login)
+ # admin should see anon + admin
+ self.assertEqual(2, len(list(req.find('CWUser'))))
+ with anonymized_request(req):
+ self.assertEqual('anon', req.session.login)
+ # anon should only see anon user
+ self.assertEqual(1, len(list(req.find('CWUser'))))
+ self.assertEqual(self.admlogin, req.session.login)
+ self.assertEqual(2, len(list(req.find('CWUser'))))
def test_non_regr_optional_first_var(self):
- req = self.request()
- # expect a rset with None in [0][0]
- req.form['rql'] = 'rql:Any OV1, X WHERE X custom_workflow OV1?'
- self.app_handle_request(req)
+ with self.admin_access.web_request() as req:
+ # expect a rset with None in [0][0]
+ req.form['rql'] = 'rql:Any OV1, X WHERE X custom_workflow OV1?'
+ self.app_handle_request(req)
if __name__ == '__main__':
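The bulk of this module's diff applies one mechanical migration: the implicit self.request() / self.execute() / self.commit() helpers give way to explicitly scoped context managers on self.admin_access. The before/after shape, extracted from the edits above:

    # before: implicit shared request and session
    #     req = self.request()
    #     self.execute('SET X in_group G WHERE ...')
    #     self.commit()
    #
    # after: explicitly scoped access through admin_access
    #     with self.admin_access.client_cnx() as cnx:   # repository side
    #         cnx.execute('SET X in_group G WHERE ...')
    #         cnx.commit()
    #     with self.admin_access.web_request() as req:  # web side
    #         path, params = self.expect_redirect_handle_request(req, 'edit')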
--- a/web/test/unittest_breadcrumbs.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/test/unittest_breadcrumbs.py Tue Jun 10 09:49:45 2014 +0200
@@ -22,20 +22,20 @@
class BreadCrumbsTC(CubicWebTC):
def test_base(self):
- req = self.request()
- f1 = req.create_entity('Folder', name=u'par&ent')
- f2 = req.create_entity('Folder', name=u'chi&ld')
- self.execute('SET F2 filed_under F1 WHERE F1 eid %(f1)s, F2 eid %(f2)s',
- {'f1' : f1.eid, 'f2' : f2.eid})
- self.commit()
- self.assertEqual(f2.view('breadcrumbs'),
- '<a href="http://testing.fr/cubicweb/folder/%s" title="">chi&ld</a>' % f2.eid)
- childrset = f2.as_rset()
- ibc = self.vreg['ctxcomponents'].select('breadcrumbs', self.request(), rset=childrset)
- l = []
- ibc.render(l.append)
- self.assertEqual(''.join(l),
- """<span id="breadcrumbs" class="pathbar"> > <a href="http://testing.fr/cubicweb/Folder">Folder_plural</a> > <a href="http://testing.fr/cubicweb/folder/%s" title="">par&ent</a> > 
+ with self.admin_access.web_request() as req:
+ f1 = req.create_entity('Folder', name=u'par&ent')
+ f2 = req.create_entity('Folder', name=u'chi&ld')
+ req.cnx.execute('SET F2 filed_under F1 WHERE F1 eid %(f1)s, F2 eid %(f2)s',
+ {'f1' : f1.eid, 'f2' : f2.eid})
+ req.cnx.commit()
+ self.assertEqual(f2.view('breadcrumbs'),
+ '<a href="http://testing.fr/cubicweb/folder/%s" title="">chi&ld</a>' % f2.eid)
+ childrset = f2.as_rset()
+ ibc = self.vreg['ctxcomponents'].select('breadcrumbs', req, rset=childrset)
+ l = []
+ ibc.render(l.append)
+ self.assertEqual(''.join(l),
+ """<span id="breadcrumbs" class="pathbar"> > <a href="http://testing.fr/cubicweb/Folder">Folder_plural</a> > <a href="http://testing.fr/cubicweb/folder/%s" title="">par&ent</a> > 
<a href="http://testing.fr/cubicweb/folder/%s" title="">chi&ld</a></span>""" % (f1.eid, f2.eid))
if __name__ == '__main__':
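The folder names above deliberately contain an ampersand, so the expected breadcrumb HTML must carry the escaped form. Cubicweb renders titles through logilab.mtconverter's xml_escape; the stdlib escape shown below behaves the same way on these inputs:

    # The '&' in u'chi&ld' must come out as '&amp;' in the rendered page.
    from xml.sax.saxutils import escape
    assert escape(u'chi&ld') == u'chi&amp;ld'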
--- a/web/test/unittest_controller.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/test/unittest_controller.py Tue Jun 10 09:49:45 2014 +0200
@@ -28,30 +28,32 @@
class BaseControllerTC(testlib.CubicWebTC):
def test_parse_datetime_ok(self):
- ctrl = self.vreg['controllers'].select('view', self.request())
- pd = ctrl._cw.parse_datetime
- self.assertIsInstance(pd('2006/06/24 12:18'), datetime)
- self.assertIsInstance(pd('2006/06/24'), date)
- self.assertIsInstance(pd('2006/06/24 12:18', 'Datetime'), datetime)
- self.assertIsInstance(pd('2006/06/24', 'Datetime'), datetime)
- self.assertIsInstance(pd('2006/06/24', 'Date'), date)
- self.assertIsInstance(pd('12:18', 'Time'), time)
+ with self.admin_access.web_request() as req:
+ ctrl = self.vreg['controllers'].select('view', req)
+ pd = ctrl._cw.parse_datetime
+ self.assertIsInstance(pd('2006/06/24 12:18'), datetime)
+ self.assertIsInstance(pd('2006/06/24'), date)
+ self.assertIsInstance(pd('2006/06/24 12:18', 'Datetime'), datetime)
+ self.assertIsInstance(pd('2006/06/24', 'Datetime'), datetime)
+ self.assertIsInstance(pd('2006/06/24', 'Date'), date)
+ self.assertIsInstance(pd('12:18', 'Time'), time)
def test_parse_datetime_ko(self):
- ctrl = self.vreg['controllers'].select('view', self.request())
- pd = ctrl._cw.parse_datetime
- self.assertRaises(ValueError,
- pd, '2006/06/24 12:188', 'Datetime')
- self.assertRaises(ValueError,
- pd, '2006/06/240', 'Datetime')
- self.assertRaises(ValueError,
- pd, '2006/06/24 12:18', 'Date')
- self.assertRaises(ValueError,
- pd, '2006/24/06', 'Date')
- self.assertRaises(ValueError,
- pd, '2006/06/240', 'Date')
- self.assertRaises(ValueError,
- pd, '12:188', 'Time')
+ with self.admin_access.web_request() as req:
+ ctrl = self.vreg['controllers'].select('view', req)
+ pd = ctrl._cw.parse_datetime
+ self.assertRaises(ValueError,
+ pd, '2006/06/24 12:188', 'Datetime')
+ self.assertRaises(ValueError,
+ pd, '2006/06/240', 'Datetime')
+ self.assertRaises(ValueError,
+ pd, '2006/06/24 12:18', 'Date')
+ self.assertRaises(ValueError,
+ pd, '2006/24/06', 'Date')
+ self.assertRaises(ValueError,
+ pd, '2006/06/240', 'Date')
+ self.assertRaises(ValueError,
+ pd, '12:188', 'Time')
if __name__ == '__main__':
unittest_main()
--- a/web/test/unittest_facet.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/test/unittest_facet.py Tue Jun 10 09:49:45 2014 +0200
@@ -4,21 +4,20 @@
class BaseFacetTC(CubicWebTC):
- def prepare_rqlst(self, rql='CWUser X', mainvar='X',
+ def prepare_rqlst(self, req, rql='CWUser X', mainvar='X',
expected_baserql='Any X WHERE X is CWUser',
expected_preparedrql='DISTINCT Any WHERE X is CWUser'):
- req = self.request()
- rset = self.execute(rql)
+ rset = req.cnx.execute(rql)
rqlst = rset.syntax_tree().copy()
filtered_variable, baserql = facet.init_facets(rset, rqlst.children[0],
mainvar=mainvar)
self.assertEqual(filtered_variable.name, mainvar)
self.assertEqual(baserql, expected_baserql)
self.assertEqual(rqlst.as_string(), expected_preparedrql)
- return req, rset, rqlst, filtered_variable
+ return rset, rqlst, filtered_variable
- def _in_group_facet(self, cls=facet.RelationFacet, no_relation=False):
- req, rset, rqlst, filtered_variable = self.prepare_rqlst()
+ def _in_group_facet(self, req, cls=facet.RelationFacet, no_relation=False):
+ rset, rqlst, filtered_variable = self.prepare_rqlst(req)
cls.no_relation = no_relation
f = cls(req, rset=rset, select=rqlst.children[0],
filtered_variable=filtered_variable)
@@ -26,285 +25,328 @@
f.rtype = 'in_group'
f.role = 'subject'
f.target_attr = 'name'
- guests, managers = [eid for eid, in self.execute('CWGroup G ORDERBY GN '
- 'WHERE G name GN, G name IN ("guests", "managers")')]
- groups = [eid for eid, in self.execute('CWGroup G ORDERBY GN '
- 'WHERE G name GN, G name IN ("guests", "managers")')]
+ guests, managers = [eid for eid, in req.cnx.execute('CWGroup G ORDERBY GN '
+ 'WHERE G name GN, G name IN ("guests", "managers")')]
+ groups = [eid for eid, in req.cnx.execute('CWGroup G ORDERBY GN '
+ 'WHERE G name GN, G name IN ("guests", "managers")')]
return f, groups
def test_relation_simple(self):
- f, (guests, managers) = self._in_group_facet()
- self.assertEqual(f.vocabulary(),
- [(u'guests', guests), (u'managers', managers)])
- # ensure rqlst is left unmodified
- self.assertEqual(f.select.as_string(), 'DISTINCT Any WHERE X is CWUser')
- #rqlst = rset.syntax_tree()
- self.assertEqual(f.possible_values(),
- [str(guests), str(managers)])
- # ensure rqlst is left unmodified
- self.assertEqual(f.select.as_string(), 'DISTINCT Any WHERE X is CWUser')
- f._cw.form[f.__regid__] = str(guests)
- f.add_rql_restrictions()
- # selection is cluttered because rqlst has been prepared for facet (it
- # is not in real life)
- self.assertEqual(f.select.as_string(),
- 'DISTINCT Any WHERE X is CWUser, X in_group D, D eid %s' % guests)
+ with self.admin_access.web_request() as req:
+ f, (guests, managers) = self._in_group_facet(req)
+ self.assertEqual(f.vocabulary(),
+ [(u'guests', guests), (u'managers', managers)])
+ # ensure rqlst is left unmodified
+ self.assertEqual(f.select.as_string(), 'DISTINCT Any WHERE X is CWUser')
+ #rqlst = rset.syntax_tree()
+ self.assertEqual(f.possible_values(),
+ [str(guests), str(managers)])
+ # ensure rqlst is left unmodified
+ self.assertEqual(f.select.as_string(), 'DISTINCT Any WHERE X is CWUser')
+ f._cw.form[f.__regid__] = str(guests)
+ f.add_rql_restrictions()
+ # selection is cluttered because rqlst has been prepared for facet (it
+ # is not in real life)
+ self.assertEqual(f.select.as_string(),
+ 'DISTINCT Any WHERE X is CWUser, X in_group D, D eid %s' % guests)
def test_relation_multiple_and(self):
- f, (guests, managers) = self._in_group_facet()
- f._cw.form[f.__regid__] = [str(guests), str(managers)]
- f._cw.form[f.__regid__ + '_andor'] = 'AND'
- f.add_rql_restrictions()
- self.assertEqual(f.select.as_string(),
- 'DISTINCT Any WHERE X is CWUser, X in_group A, B eid %s, X in_group B, A eid %s' % (guests, managers))
+ with self.admin_access.web_request() as req:
+ f, (guests, managers) = self._in_group_facet(req)
+ f._cw.form[f.__regid__] = [str(guests), str(managers)]
+ f._cw.form[f.__regid__ + '_andor'] = 'AND'
+ f.add_rql_restrictions()
+ self.assertEqual(f.select.as_string(),
+ 'DISTINCT Any WHERE X is CWUser, X in_group A, B eid %s, X in_group B, A eid %s' % (guests, managers))
def test_relation_multiple_or(self):
- f, (guests, managers) = self._in_group_facet()
- f._cw.form[f.__regid__] = [str(guests), str(managers)]
- f._cw.form[f.__regid__ + '_andor'] = 'OR'
- f.add_rql_restrictions()
- self.assertEqual(f.select.as_string(),
- 'DISTINCT Any WHERE X is CWUser, X in_group A, A eid IN(%s, %s)' % (guests, managers))
+ with self.admin_access.web_request() as req:
+ f, (guests, managers) = self._in_group_facet(req)
+ f._cw.form[f.__regid__] = [str(guests), str(managers)]
+ f._cw.form[f.__regid__ + '_andor'] = 'OR'
+ f.add_rql_restrictions()
+ self.assertEqual(f.select.as_string(),
+ 'DISTINCT Any WHERE X is CWUser, X in_group A, A eid IN(%s, %s)' % (guests, managers))
def test_relation_optional_rel(self):
- req = self.request()
- rset = self.execute('Any X,GROUP_CONCAT(GN) GROUPBY X '
- 'WHERE X in_group G?, G name GN, NOT G name "users"')
- rqlst = rset.syntax_tree().copy()
- select = rqlst.children[0]
- filtered_variable, baserql = facet.init_facets(rset, select)
+ with self.admin_access.web_request() as req:
+ rset = req.cnx.execute('Any X,GROUP_CONCAT(GN) GROUPBY X '
+ 'WHERE X in_group G?, G name GN, NOT G name "users"')
+ rqlst = rset.syntax_tree().copy()
+ select = rqlst.children[0]
+ filtered_variable, baserql = facet.init_facets(rset, select)
- f = facet.RelationFacet(req, rset=rset,
- select=select,
- filtered_variable=filtered_variable)
- f.rtype = 'in_group'
- f.role = 'subject'
- f.target_attr = 'name'
- guests, managers = [eid for eid, in self.execute('CWGroup G ORDERBY GN '
- 'WHERE G name GN, G name IN ("guests", "managers")')]
- self.assertEqual(f.vocabulary(),
- [(u'guests', guests), (u'managers', managers)])
- # ensure rqlst is left unmodified
- self.assertEqual(rqlst.as_string(), 'DISTINCT Any WHERE X in_group G?, G name GN, NOT G name "users"')
- #rqlst = rset.syntax_tree()
- self.assertEqual(sorted(f.possible_values()),
- [str(guests), str(managers)])
- # ensure rqlst is left unmodified
- self.assertEqual(rqlst.as_string(), 'DISTINCT Any WHERE X in_group G?, G name GN, NOT G name "users"')
- req.form[f.__regid__] = str(guests)
- f.add_rql_restrictions()
- # selection is cluttered because rqlst has been prepared for facet (it
- # is not in real life)
- self.assertEqual(f.select.as_string(),
- 'DISTINCT Any WHERE X in_group G?, G name GN, NOT G name "users", X in_group D, D eid %s' % guests)
+ f = facet.RelationFacet(req, rset=rset,
+ select=select,
+ filtered_variable=filtered_variable)
+ f.rtype = 'in_group'
+ f.role = 'subject'
+ f.target_attr = 'name'
+ guests, managers = [eid for eid, in req.cnx.execute('CWGroup G ORDERBY GN '
+ 'WHERE G name GN, G name IN ("guests", "managers")')]
+ self.assertEqual(f.vocabulary(),
+ [(u'guests', guests), (u'managers', managers)])
+ # ensure rqlst is left unmodified
+ self.assertEqual(rqlst.as_string(), "DISTINCT Any WHERE X in_group G?, G name GN, NOT G name 'users'")
+ #rqlst = rset.syntax_tree()
+ self.assertEqual(sorted(f.possible_values()),
+ [str(guests), str(managers)])
+ # ensure rqlst is left unmodified
+ self.assertEqual(rqlst.as_string(), "DISTINCT Any WHERE X in_group G?, G name GN, NOT G name 'users'")
+ req.form[f.__regid__] = str(guests)
+ f.add_rql_restrictions()
+ # selection is cluttered because rqlst has been prepared for facet (it
+ # is not in real life)
+ self.assertEqual(f.select.as_string(),
+ "DISTINCT Any WHERE X in_group G?, G name GN, NOT G name 'users', X in_group D, D eid %s" % guests)
def test_relation_no_relation_1(self):
- f, (guests, managers) = self._in_group_facet(no_relation=True)
- self.assertEqual(f.vocabulary(),
- [(u'guests', guests), (u'managers', managers)])
- self.assertEqual(f.possible_values(),
- [str(guests), str(managers)])
- f._cw.create_entity('CWUser', login=u'hop', upassword='toto')
- self.assertEqual(f.vocabulary(),
- [(u'<no relation>', ''), (u'guests', guests), (u'managers', managers)])
- self.assertEqual(f.possible_values(),
- [str(guests), str(managers), ''])
- f._cw.form[f.__regid__] = ''
- f.add_rql_restrictions()
- self.assertEqual(f.select.as_string(),
- 'DISTINCT Any WHERE X is CWUser, NOT X in_group G')
+ with self.admin_access.web_request() as req:
+ f, (guests, managers) = self._in_group_facet(req, no_relation=True)
+ self.assertEqual(f.vocabulary(),
+ [(u'guests', guests), (u'managers', managers)])
+ self.assertEqual(f.possible_values(),
+ [str(guests), str(managers)])
+ f._cw.create_entity('CWUser', login=u'hop', upassword='toto')
+ self.assertEqual(f.vocabulary(),
+ [(u'<no relation>', ''), (u'guests', guests), (u'managers', managers)])
+ self.assertEqual(f.possible_values(),
+ [str(guests), str(managers), ''])
+ f._cw.form[f.__regid__] = ''
+ f.add_rql_restrictions()
+ self.assertEqual(f.select.as_string(),
+ 'DISTINCT Any WHERE X is CWUser, NOT X in_group G')
def test_relation_no_relation_2(self):
- f, (guests, managers) = self._in_group_facet(no_relation=True)
- f._cw.form[f.__regid__] = ['', guests]
- f.select.save_state()
- f.add_rql_restrictions()
- self.assertEqual(f.select.as_string(),
- 'DISTINCT Any WHERE X is CWUser, (NOT X in_group B) OR (X in_group A, A eid %s)' % guests)
- f.select.recover()
- self.assertEqual(f.select.as_string(),
- 'DISTINCT Any WHERE X is CWUser')
+ with self.admin_access.web_request() as req:
+ f, (guests, managers) = self._in_group_facet(req, no_relation=True)
+ f._cw.form[f.__regid__] = ['', guests]
+ f.select.save_state()
+ f.add_rql_restrictions()
+ self.assertEqual(f.select.as_string(),
+ 'DISTINCT Any WHERE X is CWUser, (NOT X in_group B) OR (X in_group A, A eid %s)' % guests)
+ f.select.recover()
+ self.assertEqual(f.select.as_string(),
+ 'DISTINCT Any WHERE X is CWUser')
def test_relationattribute(self):
- f, (guests, managers) = self._in_group_facet(cls=facet.RelationAttributeFacet)
- self.assertEqual(f.vocabulary(),
- [(u'guests', u'guests'), (u'managers', u'managers')])
- # ensure rqlst is left unmodified
- self.assertEqual(f.select.as_string(), 'DISTINCT Any WHERE X is CWUser')
- #rqlst = rset.syntax_tree()
- self.assertEqual(f.possible_values(),
- ['guests', 'managers'])
- # ensure rqlst is left unmodified
- self.assertEqual(f.select.as_string(), 'DISTINCT Any WHERE X is CWUser')
- f._cw.form[f.__regid__] = 'guests'
- f.add_rql_restrictions()
- # selection is cluttered because rqlst has been prepared for facet (it
- # is not in real life)
- self.assertEqual(f.select.as_string(),
- "DISTINCT Any WHERE X is CWUser, X in_group E, E name 'guests'")
+ with self.admin_access.web_request() as req:
+ f, (guests, managers) = self._in_group_facet(req, cls=facet.RelationAttributeFacet)
+ self.assertEqual(f.vocabulary(),
+ [(u'guests', u'guests'), (u'managers', u'managers')])
+ # ensure rqlst is left unmodified
+ self.assertEqual(f.select.as_string(), 'DISTINCT Any WHERE X is CWUser')
+ #rqlst = rset.syntax_tree()
+ self.assertEqual(f.possible_values(),
+ ['guests', 'managers'])
+ # ensure rqlst is left unmodified
+ self.assertEqual(f.select.as_string(), 'DISTINCT Any WHERE X is CWUser')
+ f._cw.form[f.__regid__] = 'guests'
+ f.add_rql_restrictions()
+ # selection is cluttered because rqlst has been prepared for facet (it
+ # is not in real life)
+ self.assertEqual(f.select.as_string(),
+ "DISTINCT Any WHERE X is CWUser, X in_group E, E name 'guests'")
def test_daterange(self):
- req, rset, rqlst, filtered_variable = self.prepare_rqlst()
- f = facet.DateRangeFacet(req, rset=rset,
- select=rqlst.children[0],
- filtered_variable=filtered_variable)
- f.rtype = 'creation_date'
- mind, maxd = self.execute('Any MIN(CD), MAX(CD) WHERE X is CWUser, X creation_date CD')[0]
- self.assertEqual(f.vocabulary(),
- [(str(mind), mind),
- (str(maxd), maxd)])
- # ensure rqlst is left unmodified
- self.assertEqual(rqlst.as_string(), 'DISTINCT Any WHERE X is CWUser')
- #rqlst = rset.syntax_tree()
- self.assertEqual(f.possible_values(),
- [str(mind), str(maxd)])
- # ensure rqlst is left unmodified
- self.assertEqual(rqlst.as_string(), 'DISTINCT Any WHERE X is CWUser')
- req.form['%s_inf' % f.__regid__] = str(datetime2ticks(mind))
- req.form['%s_sup' % f.__regid__] = str(datetime2ticks(mind))
- f.add_rql_restrictions()
- # selection is cluttered because rqlst has been prepared for facet (it
- # is not in real life)
- self.assertEqual(f.select.as_string(),
- 'DISTINCT Any WHERE X is CWUser, X creation_date >= "%s", '
- 'X creation_date <= "%s"'
- % (mind.strftime('%Y/%m/%d'),
- mind.strftime('%Y/%m/%d')))
+ with self.admin_access.web_request() as req:
+ rset, rqlst, filtered_variable = self.prepare_rqlst(req)
+ f = facet.DateRangeFacet(req, rset=rset,
+ select=rqlst.children[0],
+ filtered_variable=filtered_variable)
+ f.rtype = 'creation_date'
+ mind, maxd = req.cnx.execute('Any MIN(CD), MAX(CD) WHERE X is CWUser, X creation_date CD')[0]
+ self.assertEqual(f.vocabulary(),
+ [(str(mind), mind),
+ (str(maxd), maxd)])
+ # ensure rqlst is left unmodified
+ self.assertEqual(rqlst.as_string(), 'DISTINCT Any WHERE X is CWUser')
+ #rqlst = rset.syntax_tree()
+ self.assertEqual(f.possible_values(),
+ [str(mind), str(maxd)])
+ # ensure rqlst is left unmodified
+ self.assertEqual(rqlst.as_string(), 'DISTINCT Any WHERE X is CWUser')
+ req.form['%s_inf' % f.__regid__] = str(datetime2ticks(mind))
+ req.form['%s_sup' % f.__regid__] = str(datetime2ticks(mind))
+ f.add_rql_restrictions()
+ # selection is cluttered because rqlst has been prepared for facet (it
+ # is not in real life)
+ self.assertEqual(f.select.as_string(),
+ 'DISTINCT Any WHERE X is CWUser, X creation_date >= "%s", '
+ 'X creation_date <= "%s"'
+ % (mind.strftime('%Y/%m/%d'),
+ mind.strftime('%Y/%m/%d')))
def test_attribute(self):
- req, rset, rqlst, filtered_variable = self.prepare_rqlst()
- f = facet.AttributeFacet(req, rset=rset,
- select=rqlst.children[0],
- filtered_variable=filtered_variable)
- f.rtype = 'login'
- self.assertEqual(f.vocabulary(),
- [(u'admin', u'admin'), (u'anon', u'anon')])
- # ensure rqlst is left unmodified
- self.assertEqual(rqlst.as_string(), 'DISTINCT Any WHERE X is CWUser')
- #rqlst = rset.syntax_tree()
- self.assertEqual(f.possible_values(),
- ['admin', 'anon'])
- # ensure rqlst is left unmodified
- self.assertEqual(rqlst.as_string(), 'DISTINCT Any WHERE X is CWUser')
- req.form[f.__regid__] = 'admin'
- f.add_rql_restrictions()
- # selection is cluttered because rqlst has been prepared for facet (it
- # is not in real life)
- self.assertEqual(f.select.as_string(),
- "DISTINCT Any WHERE X is CWUser, X login 'admin'")
+ with self.admin_access.web_request() as req:
+ rset, rqlst, filtered_variable = self.prepare_rqlst(req)
+ f = facet.AttributeFacet(req, rset=rset,
+ select=rqlst.children[0],
+ filtered_variable=filtered_variable)
+ f.rtype = 'login'
+ self.assertEqual(f.vocabulary(),
+ [(u'admin', u'admin'), (u'anon', u'anon')])
+ # ensure rqlst is left unmodified
+ self.assertEqual(rqlst.as_string(), 'DISTINCT Any WHERE X is CWUser')
+ #rqlst = rset.syntax_tree()
+ self.assertEqual(f.possible_values(),
+ ['admin', 'anon'])
+ # ensure rqlst is left unmodified
+ self.assertEqual(rqlst.as_string(), 'DISTINCT Any WHERE X is CWUser')
+ req.form[f.__regid__] = 'admin'
+ f.add_rql_restrictions()
+ # selection is cluttered because rqlst has been prepared for facet (it
+ # is not in real life)
+ self.assertEqual(f.select.as_string(),
+ "DISTINCT Any WHERE X is CWUser, X login 'admin'")
def test_bitfield(self):
- req, rset, rqlst, filtered_variable = self.prepare_rqlst(
- 'CWAttribute X WHERE X ordernum XO',
- expected_baserql='Any X WHERE X ordernum XO, X is CWAttribute',
- expected_preparedrql='DISTINCT Any WHERE X ordernum XO, X is CWAttribute')
- f = facet.BitFieldFacet(req, rset=rset,
- select=rqlst.children[0],
- filtered_variable=filtered_variable)
- f.choices = [('un', 1,), ('deux', 2,)]
- f.rtype = 'ordernum'
- self.assertEqual(f.vocabulary(),
- [(u'deux', 2), (u'un', 1)])
- # ensure rqlst is left unmodified
- self.assertEqual(rqlst.as_string(), 'DISTINCT Any WHERE X ordernum XO, X is CWAttribute')
- #rqlst = rset.syntax_tree()
- self.assertEqual(f.possible_values(),
- ['2', '1'])
- # ensure rqlst is left unmodified
- self.assertEqual(rqlst.as_string(), 'DISTINCT Any WHERE X ordernum XO, X is CWAttribute')
- req.form[f.__regid__] = '3'
- f.add_rql_restrictions()
- # selection is cluttered because rqlst has been prepared for facet (it
- # is not in real life)
- self.assertEqual(f.select.as_string(),
- "DISTINCT Any WHERE X ordernum XO, X is CWAttribute, X ordernum C HAVING 3 = (C & 3)")
+ with self.admin_access.web_request() as req:
+ rset, rqlst, filtered_variable = self.prepare_rqlst(req,
+ 'CWAttribute X WHERE X ordernum XO',
+ expected_baserql='Any X WHERE X ordernum XO, X is CWAttribute',
+ expected_preparedrql='DISTINCT Any WHERE X ordernum XO, X is CWAttribute')
+ f = facet.BitFieldFacet(req, rset=rset,
+ select=rqlst.children[0],
+ filtered_variable=filtered_variable)
+ f.choices = [('un', 1,), ('deux', 2,)]
+ f.rtype = 'ordernum'
+ self.assertEqual(f.vocabulary(),
+ [(u'deux', 2), (u'un', 1)])
+ # ensure rqlst is left unmodified
+ self.assertEqual(rqlst.as_string(), 'DISTINCT Any WHERE X ordernum XO, X is CWAttribute')
+ #rqlst = rset.syntax_tree()
+ self.assertEqual(f.possible_values(),
+ ['2', '1'])
+ # ensure rqlst is left unmodified
+ self.assertEqual(rqlst.as_string(), 'DISTINCT Any WHERE X ordernum XO, X is CWAttribute')
+ req.form[f.__regid__] = '3'
+ f.add_rql_restrictions()
+ # selection is cluttered because rqlst has been prepared for facet (it
+ # is not in real life)
+ self.assertEqual(f.select.as_string(),
+ "DISTINCT Any WHERE X ordernum XO, X is CWAttribute, X ordernum C HAVING 3 = (C & 3)")
def test_bitfield_0_value(self):
- req, rset, rqlst, filtered_variable = self.prepare_rqlst(
- 'CWAttribute X WHERE X ordernum XO',
- expected_baserql='Any X WHERE X ordernum XO, X is CWAttribute',
- expected_preparedrql='DISTINCT Any WHERE X ordernum XO, X is CWAttribute')
- f = facet.BitFieldFacet(req, rset=rset,
- select=rqlst.children[0],
- filtered_variable=filtered_variable)
- f.choices = [('zero', 0,), ('un', 1,), ('deux', 2,)]
- f.rtype = 'ordernum'
- self.assertEqual(f.vocabulary(),
- [(u'deux', 2), (u'un', 1), (u'zero', 0)])
- self.assertEqual(f.possible_values(),
- ['2', '1', '0'])
- req.form[f.__regid__] = '0'
- f.add_rql_restrictions()
- self.assertEqual(f.select.as_string(),
- "DISTINCT Any WHERE X ordernum XO, X is CWAttribute, X ordernum C HAVING 0 = C")
+ with self.admin_access.web_request() as req:
+ rset, rqlst, filtered_variable = self.prepare_rqlst(req,
+ 'CWAttribute X WHERE X ordernum XO',
+ expected_baserql='Any X WHERE X ordernum XO, X is CWAttribute',
+ expected_preparedrql='DISTINCT Any WHERE X ordernum XO, X is CWAttribute')
+ f = facet.BitFieldFacet(req, rset=rset,
+ select=rqlst.children[0],
+ filtered_variable=filtered_variable)
+ f.choices = [('zero', 0,), ('un', 1,), ('deux', 2,)]
+ f.rtype = 'ordernum'
+ self.assertEqual(f.vocabulary(),
+ [(u'deux', 2), (u'un', 1), (u'zero', 0)])
+ self.assertEqual(f.possible_values(),
+ ['2', '1', '0'])
+ req.form[f.__regid__] = '0'
+ f.add_rql_restrictions()
+ self.assertEqual(f.select.as_string(),
+ "DISTINCT Any WHERE X ordernum XO, X is CWAttribute, X ordernum C HAVING 0 = C")
def test_rql_path_eid(self):
- req, rset, rqlst, filtered_variable = self.prepare_rqlst()
- class RPF(facet.RQLPathFacet):
- path = [('X created_by U'), ('U owned_by O'), ('O login OL')]
- filter_variable = 'O'
- label_variable = 'OL'
- f = RPF(req, rset=rset, select=rqlst.children[0],
- filtered_variable=filtered_variable)
- self.assertEqual(f.vocabulary(), [(u'admin', self.user().eid),])
- # ensure rqlst is left unmodified
- self.assertEqual(rqlst.as_string(), 'DISTINCT Any WHERE X is CWUser')
- #rqlst = rset.syntax_tree()
- self.assertEqual(f.possible_values(),
- [str(self.user().eid),])
- # ensure rqlst is left unmodified
- self.assertEqual(rqlst.as_string(), 'DISTINCT Any WHERE X is CWUser')
- req.form[f.__regid__] = '1'
- f.add_rql_restrictions()
- # selection is cluttered because rqlst has been prepared for facet (it
- # is not in real life)
- self.assertEqual(f.select.as_string(),
- "DISTINCT Any WHERE X is CWUser, X created_by F, F owned_by G, G eid 1")
+ with self.admin_access.web_request() as req:
+ rset, rqlst, filtered_variable = self.prepare_rqlst(req)
+ class RPF(facet.RQLPathFacet):
+ path = [('X created_by U'), ('U owned_by O'), ('O login OL')]
+ filter_variable = 'O'
+ label_variable = 'OL'
+ f = RPF(req, rset=rset, select=rqlst.children[0],
+ filtered_variable=filtered_variable)
+ self.assertEqual(f.vocabulary(), [(u'admin', req.user.eid),])
+ # ensure rqlst is left unmodified
+ self.assertEqual(rqlst.as_string(), 'DISTINCT Any WHERE X is CWUser')
+ #rqlst = rset.syntax_tree()
+ self.assertEqual(f.possible_values(),
+ [str(req.user.eid),])
+ # ensure rqlst is left unmodified
+ self.assertEqual(rqlst.as_string(), 'DISTINCT Any WHERE X is CWUser')
+ req.form[f.__regid__] = '1'
+ f.add_rql_restrictions()
+ # selection is cluttered because rqlst has been prepared for facet (it
+ # is not in real life)
+ self.assertEqual(f.select.as_string(),
+ "DISTINCT Any WHERE X is CWUser, X created_by F, F owned_by G, G eid 1")
def test_rql_path_eid_no_label(self):
- req, rset, rqlst, filtered_variable = self.prepare_rqlst()
- class RPF(facet.RQLPathFacet):
- path = [('X created_by U'), ('U owned_by O'), ('O login OL')]
- filter_variable = 'O'
- f = RPF(req, rset=rset, select=rqlst.children[0],
- filtered_variable=filtered_variable)
- self.assertEqual(f.vocabulary(), [(str(self.user().eid), self.user().eid),])
+ with self.admin_access.web_request() as req:
+ rset, rqlst, filtered_variable = self.prepare_rqlst(req)
+ class RPF(facet.RQLPathFacet):
+ path = [('X created_by U'), ('U owned_by O'), ('O login OL')]
+ filter_variable = 'O'
+ f = RPF(req, rset=rset, select=rqlst.children[0],
+ filtered_variable=filtered_variable)
+ self.assertEqual(f.vocabulary(), [(str(req.user.eid), req.user.eid),])
def test_rql_path_attr(self):
- req, rset, rqlst, filtered_variable = self.prepare_rqlst()
- class RPF(facet.RQLPathFacet):
- path = [('X created_by U'), ('U owned_by O'), ('O login OL')]
- filter_variable = 'OL'
- f = RPF(req, rset=rset, select=rqlst.children[0],
- filtered_variable=filtered_variable)
+ with self.admin_access.web_request() as req:
+ rset, rqlst, filtered_variable = self.prepare_rqlst(req)
+ class RPF(facet.RQLPathFacet):
+ path = [('X created_by U'), ('U owned_by O'), ('O login OL')]
+ filter_variable = 'OL'
+ f = RPF(req, rset=rset, select=rqlst.children[0],
+ filtered_variable=filtered_variable)
- self.assertEqual(f.vocabulary(), [(u'admin', 'admin'),])
- # ensure rqlst is left unmodified
- self.assertEqual(rqlst.as_string(), 'DISTINCT Any WHERE X is CWUser')
- self.assertEqual(f.possible_values(), ['admin',])
- # ensure rqlst is left unmodified
- self.assertEqual(rqlst.as_string(), 'DISTINCT Any WHERE X is CWUser')
- req.form[f.__regid__] = 'admin'
- f.add_rql_restrictions()
- # selection is cluttered because rqlst has been prepared for facet (it
- # is not in real life)
- self.assertEqual(f.select.as_string(),
- "DISTINCT Any WHERE X is CWUser, X created_by G, G owned_by H, H login 'admin'")
+ self.assertEqual(f.vocabulary(), [(u'admin', 'admin'),])
+ # ensure rqlst is left unmodified
+ self.assertEqual(rqlst.as_string(), 'DISTINCT Any WHERE X is CWUser')
+ self.assertEqual(f.possible_values(), ['admin',])
+ # ensure rqlst is left unmodified
+ self.assertEqual(rqlst.as_string(), 'DISTINCT Any WHERE X is CWUser')
+ req.form[f.__regid__] = 'admin'
+ f.add_rql_restrictions()
+ # selection is cluttered because rqlst has been prepared for facet (it
+ # is not in real life)
+ self.assertEqual(f.select.as_string(),
+ "DISTINCT Any WHERE X is CWUser, X created_by G, G owned_by H, H login 'admin'")
def test_rql_path_check_filter_label_variable(self):
- req, rset, rqlst, filtered_variable = self.prepareg_aggregat_rqlst()
- class RPF(facet.RQLPathFacet):
- path = [('X created_by U'), ('U owned_by O'), ('O login OL')]
- filter_variable = 'OL'
- label_variable = 'OL'
- self.assertRaises(AssertionError, RPF, req, rset=rset,
- select=rqlst.children[0],
- filtered_variable=filtered_variable)
+ with self.admin_access.web_request() as req:
+ rset, rqlst, filtered_variable = self.prepareg_aggregat_rqlst(req)
+ class RPF(facet.RQLPathFacet):
+ path = [('X created_by U'), ('U owned_by O'), ('O login OL')]
+ filter_variable = 'OL'
+ label_variable = 'OL'
+ self.assertRaises(AssertionError, RPF, req, rset=rset,
+ select=rqlst.children[0],
+ filtered_variable=filtered_variable)
+
- def prepareg_aggregat_rqlst(self):
- return self.prepare_rqlst(
+ def test_rqlpath_range(self):
+ with self.admin_access.web_request() as req:
+ rset, rqlst, filtered_variable = self.prepare_rqlst(req)
+ class RRF(facet.DateRangeRQLPathFacet):
+ path = [('X created_by U'), ('U owned_by O'), ('O creation_date OL')]
+ filter_variable = 'OL'
+ f = RRF(req, rset=rset, select=rqlst.children[0],
+ filtered_variable=filtered_variable)
+ mind, maxd = req.cnx.execute('Any MIN(CD), MAX(CD) WHERE X is CWUser, X created_by U, U owned_by O, O creation_date CD')[0]
+ self.assertEqual(f.vocabulary(), [(str(mind), mind),
+ (str(maxd), maxd)])
+ # ensure rqlst is left unmodified
+ self.assertEqual(rqlst.as_string(), 'DISTINCT Any WHERE X is CWUser')
+ self.assertEqual(f.possible_values(),
+ [str(mind), str(maxd)])
+ # ensure rqlst is left unmodified
+ self.assertEqual(rqlst.as_string(), 'DISTINCT Any WHERE X is CWUser')
+ req.form['%s_inf' % f.__regid__] = str(datetime2ticks(mind))
+ req.form['%s_sup' % f.__regid__] = str(datetime2ticks(mind))
+ f.add_rql_restrictions()
+ # selection is cluttered because rqlst has been prepared for facet (it
+ # is not in real life)
+ self.assertEqual(f.select.as_string(),
+ 'DISTINCT Any WHERE X is CWUser, X created_by G, G owned_by H, H creation_date >= "%s", '
+ 'H creation_date <= "%s"'
+ % (mind.strftime('%Y/%m/%d'),
+ mind.strftime('%Y/%m/%d')))
+
+ def prepareg_aggregat_rqlst(self, req):
+ return self.prepare_rqlst(req,
'Any 1, COUNT(X) WHERE X is CWUser, X creation_date XD, '
'X modification_date XM, Y creation_date YD, Y is CWGroup '
'HAVING DAY(XD)>=DAY(YD) AND DAY(XM)<=DAY(YD)', 'X',
@@ -317,47 +359,50 @@
def test_aggregat_query_cleanup_select(self):
- req, rset, rqlst, filtered_variable = self.prepareg_aggregat_rqlst()
- select = rqlst.children[0]
- facet.cleanup_select(select, filtered_variable=filtered_variable)
- self.assertEqual(select.as_string(),
- 'DISTINCT Any WHERE X is CWUser, X creation_date XD, '
- 'X modification_date XM, Y creation_date YD, Y is CWGroup '
- 'HAVING DAY(XD) >= DAY(YD), DAY(XM) <= DAY(YD)')
+ with self.admin_access.web_request() as req:
+ rset, rqlst, filtered_variable = self.prepareg_aggregat_rqlst(req)
+ select = rqlst.children[0]
+ facet.cleanup_select(select, filtered_variable=filtered_variable)
+ self.assertEqual(select.as_string(),
+ 'DISTINCT Any WHERE X is CWUser, X creation_date XD, '
+ 'X modification_date XM, Y creation_date YD, Y is CWGroup '
+ 'HAVING DAY(XD) >= DAY(YD), DAY(XM) <= DAY(YD)')
def test_aggregat_query_rql_path(self):
- req, rset, rqlst, filtered_variable = self.prepareg_aggregat_rqlst()
- class RPF(facet.RQLPathFacet):
- path = [('X created_by U'), ('U owned_by O'), ('O login OL')]
- filter_variable = 'OL'
- f = RPF(req, rset=rset, select=rqlst.children[0],
- filtered_variable=filtered_variable)
- self.assertEqual(f.vocabulary(), [(u'admin', u'admin')])
- self.assertEqual(f.possible_values(), ['admin'])
- req.form[f.__regid__] = 'admin'
- f.add_rql_restrictions()
- self.assertEqual(f.select.as_string(),
- "DISTINCT Any WHERE X is CWUser, X creation_date XD, "
- "X modification_date XM, Y creation_date YD, Y is CWGroup, "
- "X created_by G, G owned_by H, H login 'admin' "
- "HAVING DAY(XD) >= DAY(YD), DAY(XM) <= DAY(YD)")
+ with self.admin_access.web_request() as req:
+ rset, rqlst, filtered_variable = self.prepareg_aggregat_rqlst(req)
+ class RPF(facet.RQLPathFacet):
+ path = [('X created_by U'), ('U owned_by O'), ('O login OL')]
+ filter_variable = 'OL'
+ f = RPF(req, rset=rset, select=rqlst.children[0],
+ filtered_variable=filtered_variable)
+ self.assertEqual(f.vocabulary(), [(u'admin', u'admin')])
+ self.assertEqual(f.possible_values(), ['admin'])
+ req.form[f.__regid__] = 'admin'
+ f.add_rql_restrictions()
+ self.assertEqual(f.select.as_string(),
+ "DISTINCT Any WHERE X is CWUser, X creation_date XD, "
+ "X modification_date XM, Y creation_date YD, Y is CWGroup, "
+ "X created_by G, G owned_by H, H login 'admin' "
+ "HAVING DAY(XD) >= DAY(YD), DAY(XM) <= DAY(YD)")
def test_aggregat_query_attribute(self):
- req, rset, rqlst, filtered_variable = self.prepareg_aggregat_rqlst()
- f = facet.AttributeFacet(req, rset=rset,
- select=rqlst.children[0],
- filtered_variable=filtered_variable)
- f.rtype = 'login'
- self.assertEqual(f.vocabulary(),
- [(u'admin', u'admin'), (u'anon', u'anon')])
- self.assertEqual(f.possible_values(),
- ['admin', 'anon'])
- req.form[f.__regid__] = 'admin'
- f.add_rql_restrictions()
- self.assertEqual(f.select.as_string(),
- "DISTINCT Any WHERE X is CWUser, X creation_date XD, "
- "X modification_date XM, Y creation_date YD, Y is CWGroup, X login 'admin' "
- "HAVING DAY(XD) >= DAY(YD), DAY(XM) <= DAY(YD)")
+ with self.admin_access.web_request() as req:
+ rset, rqlst, filtered_variable = self.prepareg_aggregat_rqlst(req)
+ f = facet.AttributeFacet(req, rset=rset,
+ select=rqlst.children[0],
+ filtered_variable=filtered_variable)
+ f.rtype = 'login'
+ self.assertEqual(f.vocabulary(),
+ [(u'admin', u'admin'), (u'anon', u'anon')])
+ self.assertEqual(f.possible_values(),
+ ['admin', 'anon'])
+ req.form[f.__regid__] = 'admin'
+ f.add_rql_restrictions()
+ self.assertEqual(f.select.as_string(),
+ "DISTINCT Any WHERE X is CWUser, X creation_date XD, "
+ "X modification_date XM, Y creation_date YD, Y is CWGroup, X login 'admin' "
+ "HAVING DAY(XD) >= DAY(YD), DAY(XM) <= DAY(YD)")
if __name__ == '__main__':
from logilab.common.testlib import unittest_main
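Every facet test above walks the same lifecycle; summarized here as pseudo-usage (it needs a live CubicWeb registry and result set to actually run):

    # 1. prepare the syntax tree the facet will restrict
    #      rset = req.cnx.execute('CWUser X')
    #      rqlst = rset.syntax_tree().copy()
    #      filtered_variable, baserql = facet.init_facets(rset, rqlst.children[0])
    # 2. instantiate and configure a facet over it
    #      f = facet.RelationFacet(req, rset=rset, select=rqlst.children[0],
    #                              filtered_variable=filtered_variable)
    #      f.rtype, f.role = 'in_group', 'subject'
    # 3. inspect what the facet would display, without touching the tree
    #      f.vocabulary()       # [(label, value), ...]
    #      f.possible_values()  # values reachable from the current result set
    # 4. simulate a user selection and fold it back into the select
    #      req.form[f.__regid__] = chosen_value
    #      f.add_rql_restrictions()   # mutates f.select in place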
--- a/web/test/unittest_form.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/test/unittest_form.py Tue Jun 10 09:49:45 2014 +0200
@@ -35,131 +35,138 @@
class FieldsFormTC(CubicWebTC):
def test_form_field_format(self):
- form = FieldsForm(self.request(), None)
- self.assertEqual(StringField().format(form), 'text/html')
- self.execute('INSERT CWProperty X: X pkey "ui.default-text-format", X value "text/rest", X for_user U WHERE U login "admin"')
- self.commit()
- self.assertEqual(StringField().format(form), 'text/rest')
+ with self.admin_access.web_request() as req:
+ form = FieldsForm(req, None)
+ self.assertEqual(StringField().format(form), 'text/html')
+ req.cnx.execute('INSERT CWProperty X: X pkey "ui.default-text-format", X value "text/rest", X for_user U WHERE U login "admin"')
+ req.cnx.commit()
+ self.assertEqual(StringField().format(form), 'text/rest')
def test_process_posted(self):
class AForm(FieldsForm):
anint = IntField()
astring = StringField()
- form = AForm(self.request(anint='1', astring='2', _cw_fields='anint,astring'))
- self.assertEqual(form.process_posted(), {'anint': 1, 'astring': '2'})
- form = AForm(self.request(anint='1a', astring='2b', _cw_fields='anint,astring'))
- self.assertRaises(ValidationError, form.process_posted)
+ with self.admin_access.web_request(anint='1', astring='2', _cw_fields='anint,astring') as req:
+ form = AForm(req)
+ self.assertEqual(form.process_posted(), {'anint': 1, 'astring': '2'})
+ with self.admin_access.web_request(anint='1a', astring='2b', _cw_fields='anint,astring') as req:
+ form = AForm(req)
+ self.assertRaises(ValidationError, form.process_posted)
class EntityFieldsFormTC(CubicWebTC):
- def setUp(self):
- super(EntityFieldsFormTC, self).setUp()
- self.req = self.request()
- self.entity = self.user(self.req)
+ def test_form_field_choices(self):
+ with self.admin_access.web_request() as req:
+ b = req.create_entity('BlogEntry', title=u'di mascii code', content=u'a best-seller')
+ t = req.create_entity('Tag', name=u'x')
+ form1 = self.vreg['forms'].select('edition', req, entity=t)
+ choices = [reid for rview, reid in form1.field_by_name('tags', 'subject', t.e_schema).choices(form1)]
+ self.assertIn(unicode(b.eid), choices)
+ form2 = self.vreg['forms'].select('edition', req, entity=b)
+ choices = [reid for rview, reid in form2.field_by_name('tags', 'object', t.e_schema).choices(form2)]
+ self.assertIn(unicode(t.eid), choices)
- def test_form_field_choices(self):
- b = self.req.create_entity('BlogEntry', title=u'di mascii code', content=u'a best-seller')
- t = self.req.create_entity('Tag', name=u'x')
- form1 = self.vreg['forms'].select('edition', self.req, entity=t)
- choices = [reid for rview, reid in form1.field_by_name('tags', 'subject', t.e_schema).choices(form1)]
- self.assertIn(unicode(b.eid), choices)
- form2 = self.vreg['forms'].select('edition', self.req, entity=b)
- choices = [reid for rview, reid in form2.field_by_name('tags', 'object', t.e_schema).choices(form2)]
- self.assertIn(unicode(t.eid), choices)
+ b.cw_clear_all_caches()
+ t.cw_clear_all_caches()
+ req.cnx.execute('SET X tags Y WHERE X is Tag, Y is BlogEntry')
- b.cw_clear_all_caches()
- t.cw_clear_all_caches()
- self.execute('SET X tags Y WHERE X is Tag, Y is BlogEntry')
-
- choices = [reid for rview, reid in form1.field_by_name('tags', 'subject', t.e_schema).choices(form1)]
- self.assertIn(unicode(b.eid), choices)
- choices = [reid for rview, reid in form2.field_by_name('tags', 'object', t.e_schema).choices(form2)]
- self.assertIn(unicode(t.eid), choices)
+ choices = [reid for rview, reid in form1.field_by_name('tags', 'subject', t.e_schema).choices(form1)]
+ self.assertIn(unicode(b.eid), choices)
+ choices = [reid for rview, reid in form2.field_by_name('tags', 'object', t.e_schema).choices(form2)]
+ self.assertIn(unicode(t.eid), choices)
def test_form_field_choices_new_entity(self):
- e = self.vreg['etypes'].etype_class('CWUser')(self.request())
- form = self.vreg['forms'].select('edition', self.req, entity=e)
- unrelated = [rview for rview, reid in form.field_by_name('in_group', 'subject').choices(form)]
- # should be default groups but owners, i.e. managers, users, guests
- self.assertEqual(unrelated, [u'guests', u'managers', u'users'])
+ with self.admin_access.web_request() as req:
+ e = self.vreg['etypes'].etype_class('CWUser')(req)
+ form = self.vreg['forms'].select('edition', req, entity=e)
+ unrelated = [rview for rview, reid in form.field_by_name('in_group', 'subject').choices(form)]
+ # should be default groups but owners, i.e. managers, users, guests
+ self.assertEqual(unrelated, [u'guests', u'managers', u'users'])
def test_consider_req_form_params(self):
- e = self.vreg['etypes'].etype_class('CWUser')(self.request())
- e.eid = 'A'
- form = EntityFieldsForm(self.request(login=u'toto'), None, entity=e)
- field = StringField(name='login', role='subject', eidparam=True)
- form.append_field(field)
- form.build_context({})
- self.assertEqual(field.widget.values(form, field), (u'toto',))
+ with self.admin_access.web_request() as req:
+ e = self.vreg['etypes'].etype_class('CWUser')(req)
+ e.eid = 'A'
+ with self.admin_access.web_request(login=u'toto') as toto_req:
+ form = EntityFieldsForm(toto_req, None, entity=e)
+ field = StringField(name='login', role='subject', eidparam=True)
+ form.append_field(field)
+ form.build_context({})
+ self.assertEqual(field.widget.values(form, field), (u'toto',))
def test_linkto_field_duplication_inout(self):
- e = self.vreg['etypes'].etype_class('CWUser')(self.request())
- e.eid = 'A'
- e._cw = self.req
- geid = self.execute('CWGroup X WHERE X name "users"')[0][0]
- self.req.form['__linkto'] = 'in_group:%s:subject' % geid
- form = self.vreg['forms'].select('edition', self.req, entity=e)
- form.content_type = 'text/html'
- pageinfo = self._check_html(form.render(), form, template=None)
- inputs = pageinfo.find_tag('select', False)
- ok = False
- for selectnode in pageinfo.matching_nodes('select', name='from_in_group-subject:A'):
- for optionnode in selectnode:
- self.assertEqual(optionnode.get('value'), str(geid))
- self.assertEqual(ok, False)
- ok = True
- inputs = pageinfo.find_tag('input', False)
- self.assertFalse(list(pageinfo.matching_nodes('input', name='__linkto')))
+ with self.admin_access.web_request() as req:
+ e = self.vreg['etypes'].etype_class('CWUser')(req)
+ e.eid = 'A'
+ e._cw = req
+ geid = req.cnx.execute('CWGroup X WHERE X name "users"')[0][0]
+ req.form['__linkto'] = 'in_group:%s:subject' % geid
+ form = self.vreg['forms'].select('edition', req, entity=e)
+ form.content_type = 'text/html'
+ pageinfo = self._check_html(form.render(), form, template=None)
+ inputs = pageinfo.find_tag('select', False)
+ ok = False
+ for selectnode in pageinfo.matching_nodes('select', name='from_in_group-subject:A'):
+ for optionnode in selectnode:
+ self.assertEqual(optionnode.get('value'), str(geid))
+ self.assertEqual(ok, False)
+ ok = True
+ inputs = pageinfo.find_tag('input', False)
+ self.assertFalse(list(pageinfo.matching_nodes('input', name='__linkto')))
def test_reledit_composite_field(self):
- rset = self.execute('INSERT BlogEntry X: X title "cubicweb.org", X content "hop"')
- form = self.vreg['views'].select('reledit', self.request(),
- rset=rset, row=0, rtype='content')
- data = form.render(row=0, rtype='content', formid='base', action='edit_rtype')
- self.assertTrue('content_format' in data)
+ with self.admin_access.web_request() as req:
+ rset = req.execute('INSERT BlogEntry X: X title "cubicweb.org", X content "hop"')
+ form = self.vreg['views'].select('reledit', req,
+ rset=rset, row=0, rtype='content')
+ data = form.render(row=0, rtype='content', formid='base', action='edit_rtype')
+ self.assertIn('content_format', data)
# form tests ##############################################################
def test_form_inheritance(self):
- class CustomChangeStateForm(ChangeStateForm):
- hello = IntField(name='youlou')
- creation_date = DateTimeField(widget=DateTimePicker)
- form = CustomChangeStateForm(self.req, redirect_path='perdu.com',
- entity=self.entity)
- form.render(formvalues=dict(state=123, trcomment=u'',
- trcomment_format=u'text/plain'))
+ with self.admin_access.web_request() as req:
+ class CustomChangeStateForm(ChangeStateForm):
+ hello = IntField(name='youlou')
+ creation_date = DateTimeField(widget=DateTimePicker)
+ form = CustomChangeStateForm(req, redirect_path='perdu.com',
+ entity=req.user)
+ form.render(formvalues=dict(state=123, trcomment=u'',
+ trcomment_format=u'text/plain'))
def test_change_state_form(self):
- form = ChangeStateForm(self.req, redirect_path='perdu.com',
- entity=self.entity)
- form.render(formvalues=dict(state=123, trcomment=u'',
- trcomment_format=u'text/plain'))
+ with self.admin_access.web_request() as req:
+ form = ChangeStateForm(req, redirect_path='perdu.com',
+ entity=req.user)
+ form.render(formvalues=dict(state=123, trcomment=u'',
+ trcomment_format=u'text/plain'))
# fields tests ############################################################
- def _render_entity_field(self, name, form):
+ def _render_entity_field(self, req, name, form):
form.build_context({})
- renderer = FormRenderer(self.req)
+ renderer = FormRenderer(req)
return form.field_by_name(name, 'subject').render(form, renderer)
- def _test_richtextfield(self, expected):
+ def _test_richtextfield(self, req, expected):
class RTFForm(EntityFieldsForm):
description = RichTextField(eidparam=True, role='subject')
- state = self.vreg['etypes'].etype_class('State')(self.req)
+ state = self.vreg['etypes'].etype_class('State')(req)
state.eid = 'S'
- form = RTFForm(self.req, redirect_path='perdu.com', entity=state)
+ form = RTFForm(req, redirect_path='perdu.com', entity=state)
# make it think it can use fck editor anyway
form.field_by_name('description', 'subject').format = lambda form, field=None: 'text/html'
- self.assertMultiLineEqual(self._render_entity_field('description', form),
+ self.assertMultiLineEqual(self._render_entity_field(req, 'description', form),
expected % {'eid': state.eid})
def test_richtextfield_1(self):
- self.req.use_fckeditor = lambda: False
- self._test_richtextfield('''<select id="description_format-subject:%(eid)s" name="description_format-subject:%(eid)s" size="1" style="display: block" tabindex="1">
+ with self.admin_access.web_request() as req:
+ req.use_fckeditor = lambda: False
+ self._test_richtextfield(req, '''<select id="description_format-subject:%(eid)s" name="description_format-subject:%(eid)s" size="1" style="display: block" tabindex="1">
<option value="text/cubicweb-page-template">text/cubicweb-page-template</option>
<option selected="selected" value="text/html">text/html</option>
<option value="text/plain">text/plain</option>
@@ -168,8 +175,9 @@
def test_richtextfield_2(self):
- self.req.use_fckeditor = lambda: True
- self._test_richtextfield('<input name="description_format-subject:%(eid)s" type="hidden" value="text/html" /><textarea cols="80" cubicweb:type="wysiwyg" id="description-subject:%(eid)s" name="description-subject:%(eid)s" onkeyup="autogrow(this)" rows="2" tabindex="1"></textarea>')
+ with self.admin_access.web_request() as req:
+ req.use_fckeditor = lambda: True
+ self._test_richtextfield(req, '<input name="description_format-subject:%(eid)s" type="hidden" value="text/html" /><textarea cols="80" cubicweb:type="wysiwyg" id="description-subject:%(eid)s" name="description-subject:%(eid)s" onkeyup="autogrow(this)" rows="2" tabindex="1"></textarea>')
def test_filefield(self):
@@ -180,10 +188,11 @@
encoding_field=StringField(name='data_encoding', max_length=20,
eidparam=True, role='subject'),
eidparam=True, role='subject')
- file = self.req.create_entity('File', data_name=u"pouet.txt", data_encoding=u'UTF-8',
- data=Binary('new widgets system'))
- form = FFForm(self.req, redirect_path='perdu.com', entity=file)
- self.assertMultiLineEqual(self._render_entity_field('data', form),
+ with self.admin_access.web_request() as req:
+ file = req.create_entity('File', data_name=u"pouet.txt", data_encoding=u'UTF-8',
+ data=Binary('new widgets system'))
+ form = FFForm(req, redirect_path='perdu.com', entity=file)
+ self.assertMultiLineEqual(self._render_entity_field(req, 'data', form),
'''<input id="data-subject:%(eid)s" name="data-subject:%(eid)s" tabindex="1" type="file" value="" />
<a href="javascript: toggleVisibility('data-subject:%(eid)s-advanced')" title="show advanced fields"><img src="http://testing.fr/cubicweb/data/puce_down.png" alt="show advanced fields"/></a>
<div id="data-subject:%(eid)s-advanced" class="hidden">
@@ -203,10 +212,11 @@
encoding_field=StringField('data_encoding', max_length=20,
eidparam=True, role='subject'),
eidparam=True, role='subject')
- file = self.req.create_entity('File', data_name=u"pouet.txt", data_encoding=u'UTF-8',
- data=Binary('new widgets system'))
- form = EFFForm(self.req, redirect_path='perdu.com', entity=file)
- self.assertMultiLineEqual(self._render_entity_field('data', form),
+ with self.admin_access.web_request() as req:
+ file = req.create_entity('File', data_name=u"pouet.txt", data_encoding=u'UTF-8',
+ data=Binary('new widgets system'))
+ form = EFFForm(req, redirect_path='perdu.com', entity=file)
+ self.assertMultiLineEqual(self._render_entity_field(req, 'data', form),
'''<input id="data-subject:%(eid)s" name="data-subject:%(eid)s" tabindex="1" type="file" value="" />
<a href="javascript: toggleVisibility('data-subject:%(eid)s-advanced')" title="show advanced fields"><img src="http://testing.fr/cubicweb/data/puce_down.png" alt="show advanced fields"/></a>
<div id="data-subject:%(eid)s-advanced" class="hidden">
@@ -223,13 +233,14 @@
def test_passwordfield(self):
class PFForm(EntityFieldsForm):
upassword = PasswordField(eidparam=True, role='subject')
- form = PFForm(self.req, redirect_path='perdu.com', entity=self.entity)
- self.assertMultiLineEqual(self._render_entity_field('upassword', form),
- '''<input id="upassword-subject:%(eid)s" name="upassword-subject:%(eid)s" tabindex="1" type="password" value="" />
+ with self.admin_access.web_request() as req:
+ form = PFForm(req, redirect_path='perdu.com', entity=req.user)
+ self.assertMultiLineEqual(self._render_entity_field(req, 'upassword', form),
+ '''<input id="upassword-subject:%(eid)s" name="upassword-subject:%(eid)s" tabindex="1" type="password" value="" />
<br/>
<input name="upassword-subject-confirm:%(eid)s" tabindex="1" type="password" value="" />
 
-<span class="emphasis">confirm password</span>''' % {'eid': self.entity.eid})
+<span class="emphasis">confirm password</span>''' % {'eid': req.user.eid})
# def test_datefield(self):
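
Note on the pattern running through all of the test updates in this changeset: requests and connections are no longer created in setUp() and stored on self; each test opens them explicitly through context managers, so their lifetime is bounded by the test body. A minimal sketch of the new idiom, using only calls that appear in the hunks above (the login and assertion are illustrative):

    from cubicweb.devtools.testlib import CubicWebTC

    class ExampleTC(CubicWebTC):
        def test_example(self):
            # one request bound to an admin connection, released at block exit
            with self.admin_access.web_request() as req:
                self.create_user(req, login=u'bob', password='bob')
                req.cnx.commit()  # commit through the request's connection
            # a request authenticated as the freshly created user
            with self.new_access(u'bob').web_request() as req:
                self.assertEqual(u'bob', req.user.login)
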
--- a/web/test/unittest_formfields.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/test/unittest_formfields.py Tue Jun 10 09:49:45 2014 +0200
@@ -25,7 +25,7 @@
from cubicweb.devtools.testlib import CubicWebTC
from cubicweb.web.formwidgets import PasswordInput, TextArea, Select, Radio
from cubicweb.web.formfields import *
-from cubicweb.web.views.forms import EntityFieldsForm
+from cubicweb.web.views.forms import EntityFieldsForm, FieldsForm
from cubes.file.entities import File
@@ -37,14 +37,11 @@
class GuessFieldTC(CubicWebTC):
- def setUp(self):
- super(GuessFieldTC, self).setUp()
- self.req = self.request()
-
def test_state_fields(self):
- title_field = guess_field(schema['State'], schema['name'], req=self.req)
- self.assertIsInstance(title_field, StringField)
- self.assertEqual(title_field.required, True)
+ with self.admin_access.web_request() as req:
+ title_field = guess_field(schema['State'], schema['name'], req=req)
+ self.assertIsInstance(title_field, StringField)
+ self.assertEqual(title_field.required, True)
# synopsis_field = guess_field(schema['State'], schema['synopsis'])
# self.assertIsInstance(synopsis_field, StringField)
@@ -52,18 +49,20 @@
# self.assertEqual(synopsis_field.required, False)
# self.assertEqual(synopsis_field.help, 'an abstract for this state')
- description_field = guess_field(schema['State'], schema['description'], req=self.req)
- self.assertIsInstance(description_field, RichTextField)
- self.assertEqual(description_field.required, False)
- self.assertEqual(description_field.format_field, None)
+ with self.admin_access.web_request() as req:
+ description_field = guess_field(schema['State'], schema['description'], req=req)
+ self.assertIsInstance(description_field, RichTextField)
+ self.assertEqual(description_field.required, False)
+ self.assertEqual(description_field.format_field, None)
# description_format_field = guess_field(schema['State'], schema['description_format'])
# self.assertEqual(description_format_field, None)
- description_format_field = guess_field(schema['State'], schema['description_format'],
- req=self.req)
- self.assertEqual(description_format_field.internationalizable, True)
- self.assertEqual(description_format_field.sort, True)
+ with self.admin_access.web_request() as req:
+ description_format_field = guess_field(schema['State'], schema['description_format'],
+ req=req)
+ self.assertEqual(description_format_field.internationalizable, True)
+ self.assertEqual(description_format_field.sort, True)
# wikiid_field = guess_field(schema['State'], schema['wikiid'])
# self.assertIsInstance(wikiid_field, StringField)
@@ -71,25 +70,29 @@
def test_cwuser_fields(self):
- upassword_field = guess_field(schema['CWUser'], schema['upassword'], req=self.req)
- self.assertIsInstance(upassword_field, StringField)
- self.assertIsInstance(upassword_field.widget, PasswordInput)
- self.assertEqual(upassword_field.required, True)
+ with self.admin_access.web_request() as req:
+ upassword_field = guess_field(schema['CWUser'], schema['upassword'], req=req)
+ self.assertIsInstance(upassword_field, StringField)
+ self.assertIsInstance(upassword_field.widget, PasswordInput)
+ self.assertEqual(upassword_field.required, True)
- last_login_time_field = guess_field(schema['CWUser'], schema['last_login_time'], req=self.req)
- self.assertIsInstance(last_login_time_field, DateTimeField)
- self.assertEqual(last_login_time_field.required, False)
+ with self.admin_access.web_request() as req:
+ last_login_time_field = guess_field(schema['CWUser'], schema['last_login_time'], req=req)
+ self.assertIsInstance(last_login_time_field, DateTimeField)
+ self.assertEqual(last_login_time_field.required, False)
- in_group_field = guess_field(schema['CWUser'], schema['in_group'], req=self.req)
- self.assertIsInstance(in_group_field, RelationField)
- self.assertEqual(in_group_field.required, True)
- self.assertEqual(in_group_field.role, 'subject')
- self.assertEqual(in_group_field.help, 'groups grant permissions to the user')
+ with self.admin_access.web_request() as req:
+ in_group_field = guess_field(schema['CWUser'], schema['in_group'], req=req)
+ self.assertIsInstance(in_group_field, RelationField)
+ self.assertEqual(in_group_field.required, True)
+ self.assertEqual(in_group_field.role, 'subject')
+ self.assertEqual(in_group_field.help, 'groups grant permissions to the user')
- owned_by_field = guess_field(schema['CWUser'], schema['owned_by'], 'object', req=self.req)
- self.assertIsInstance(owned_by_field, RelationField)
- self.assertEqual(owned_by_field.required, False)
- self.assertEqual(owned_by_field.role, 'object')
+ with self.admin_access.web_request() as req:
+ owned_by_field = guess_field(schema['CWUser'], schema['owned_by'], 'object', req=req)
+ self.assertIsInstance(owned_by_field, RelationField)
+ self.assertEqual(owned_by_field.required, False)
+ self.assertEqual(owned_by_field.role, 'object')
def test_file_fields(self):
@@ -100,64 +103,85 @@
# data_name_field = guess_field(schema['File'], schema['data_name'])
# self.assertEqual(data_name_field, None)
- data_field = guess_field(schema['File'], schema['data'], req=self.req)
- self.assertIsInstance(data_field, FileField)
- self.assertEqual(data_field.required, True)
- self.assertIsInstance(data_field.format_field, StringField)
- self.assertIsInstance(data_field.encoding_field, StringField)
- self.assertIsInstance(data_field.name_field, StringField)
+ with self.admin_access.web_request() as req:
+ data_field = guess_field(schema['File'], schema['data'], req=req)
+ self.assertIsInstance(data_field, FileField)
+ self.assertEqual(data_field.required, True)
+ self.assertIsInstance(data_field.format_field, StringField)
+ self.assertIsInstance(data_field.encoding_field, StringField)
+ self.assertIsInstance(data_field.name_field, StringField)
def test_constraints_priority(self):
- salesterm_field = guess_field(schema['Salesterm'], schema['reason'], req=self.req)
- constraints = schema['reason'].rdef('Salesterm', 'String').constraints
- self.assertEqual([c.__class__ for c in constraints],
- [SizeConstraint, StaticVocabularyConstraint])
- self.assertIsInstance(salesterm_field, StringField)
- self.assertIsInstance(salesterm_field.widget, Select)
+ with self.admin_access.web_request() as req:
+ salesterm_field = guess_field(schema['Salesterm'], schema['reason'], req=req)
+ constraints = schema['reason'].rdef('Salesterm', 'String').constraints
+ self.assertEqual([c.__class__ for c in constraints],
+ [SizeConstraint, StaticVocabularyConstraint])
+ self.assertIsInstance(salesterm_field, StringField)
+ self.assertIsInstance(salesterm_field.widget, Select)
def test_bool_field_base(self):
- field = guess_field(schema['CWAttribute'], schema['indexed'], req=self.req)
- self.assertIsInstance(field, BooleanField)
- self.assertEqual(field.required, False)
- self.assertIsInstance(field.widget, Radio)
- self.assertEqual(field.vocabulary(mock(_cw=mock(_=unicode))),
- [(u'yes', '1'), (u'no', '')])
+ with self.admin_access.web_request() as req:
+ field = guess_field(schema['CWAttribute'], schema['indexed'], req=req)
+ self.assertIsInstance(field, BooleanField)
+ self.assertEqual(field.required, False)
+ self.assertIsInstance(field.widget, Radio)
+ self.assertEqual(field.vocabulary(mock(_cw=mock(_=unicode))),
+ [(u'yes', '1'), (u'no', '')])
def test_bool_field_explicit_choices(self):
- field = guess_field(schema['CWAttribute'], schema['indexed'],
- choices=[(u'maybe', '1'), (u'no', '')], req=self.req)
- self.assertIsInstance(field.widget, Radio)
- self.assertEqual(field.vocabulary(mock(req=mock(_=unicode))),
- [(u'maybe', '1'), (u'no', '')])
+ with self.admin_access.web_request() as req:
+ field = guess_field(schema['CWAttribute'], schema['indexed'],
+ choices=[(u'maybe', '1'), (u'no', '')], req=req)
+ self.assertIsInstance(field.widget, Radio)
+ self.assertEqual(field.vocabulary(mock(req=mock(_=unicode))),
+ [(u'maybe', '1'), (u'no', '')])
class MoreFieldsTC(CubicWebTC):
def test_rtf_format_field(self):
- req = self.request()
- req.use_fckeditor = lambda: False
- e = self.vreg['etypes'].etype_class('State')(req)
- form = EntityFieldsForm(req, entity=e)
- description_field = guess_field(schema['State'], schema['description'])
- description_format_field = description_field.get_format_field(form)
- self.assertEqual(description_format_field.internationalizable, True)
- self.assertEqual(description_format_field.sort, True)
- # unlike below, initial is bound to form.form_field_format
- self.assertEqual(description_format_field.value(form), 'text/html')
- self.execute('INSERT CWProperty X: X pkey "ui.default-text-format", X value "text/rest", X for_user U WHERE U login "admin"')
- self.commit()
- self.assertEqual(description_format_field.value(form), 'text/rest')
+ with self.admin_access.web_request() as req:
+ req.use_fckeditor = lambda: False
+ e = self.vreg['etypes'].etype_class('State')(req)
+ form = EntityFieldsForm(req, entity=e)
+ description_field = guess_field(schema['State'], schema['description'])
+ description_format_field = description_field.get_format_field(form)
+ self.assertEqual(description_format_field.internationalizable, True)
+ self.assertEqual(description_format_field.sort, True)
+ # unlike below, initial is bound to form.form_field_format
+ self.assertEqual(description_format_field.value(form), 'text/html')
+ req.cnx.execute('INSERT CWProperty X: X pkey "ui.default-text-format", X value "text/rest", X for_user U WHERE U login "admin"')
+ req.cnx.commit()
+ self.assertEqual(description_format_field.value(form), 'text/rest')
def test_property_key_field(self):
from cubicweb.web.views.cwproperties import PropertyKeyField
- req = self.request()
- field = PropertyKeyField(name='test')
- e = self.vreg['etypes'].etype_class('CWProperty')(req)
- renderer = self.vreg['formrenderers'].select('base', req)
- form = EntityFieldsForm(req, entity=e)
- form.formvalues = {}
- field.render(form, renderer)
+ with self.admin_access.web_request() as req:
+ field = PropertyKeyField(name='test')
+ e = self.vreg['etypes'].etype_class('CWProperty')(req)
+ renderer = self.vreg['formrenderers'].select('base', req)
+ form = EntityFieldsForm(req, entity=e)
+ form.formvalues = {}
+ field.render(form, renderer)
+
+
+class CompoundFieldTC(CubicWebTC):
+
+ def test_multipart(self):
+ """Ensures that compound forms have needs_multipart set if their
+ children require it"""
+ class AForm(FieldsForm):
+ comp = CompoundField([IntField(), StringField()])
+ with self.admin_access.web_request() as req:
+ aform = AForm(req, None)
+ self.assertFalse(aform.needs_multipart)
+ class MForm(FieldsForm):
+ comp = CompoundField([IntField(), FileField()])
+ with self.admin_access.web_request() as req:
+ mform = MForm(req, None)
+ self.assertTrue(mform.needs_multipart)
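
test_multipart pins down the behaviour this changeset adds for compound fields: a form must switch to multipart encoding as soon as any nested field requires it. A self-contained sketch of that propagation, with simplified stand-ins for the cubicweb.web.formfields classes (not the actual implementation):

    class Field(object):
        needs_multipart = False  # only file-like fields force multipart forms

    class IntField(Field):
        pass

    class FileField(Field):
        needs_multipart = True  # uploads require multipart/form-data

    class CompoundField(Field):
        def __init__(self, fields):
            self.fields = fields

        @property
        def needs_multipart(self):
            # a compound field needs multipart as soon as any child does
            return any(f.needs_multipart for f in self.fields)

    assert not CompoundField([IntField()]).needs_multipart
    assert CompoundField([IntField(), FileField()]).needs_multipart
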
class UtilsTC(TestCase):
--- a/web/test/unittest_http.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/test/unittest_http.py Tue Jun 10 09:49:45 2014 +0200
@@ -15,9 +15,13 @@
#
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+
+import contextlib
+
from logilab.common.testlib import TestCase, unittest_main, tag, Tags
from cubicweb.devtools.fake import FakeRequest
+from cubicweb.devtools.testlib import CubicWebTC
def _test_cache(hin, hout, method='GET'):
@@ -290,5 +294,165 @@
self.assertEqual(value, [DATE])
+alloworig = 'access-control-allow-origin'
+allowmethods = 'access-control-allow-methods'
+allowheaders = 'access-control-allow-headers'
+allowcreds = 'access-control-allow-credentials'
+exposeheaders = 'access-control-expose-headers'
+maxage = 'access-control-max-age'
+
+requestmethod = 'access-control-request-method'
+requestheaders = 'access-control-request-headers'
+
+class _BaseAccessHeadersTC(CubicWebTC):
+
+ @contextlib.contextmanager
+ def options(self, **options):
+ for k, values in options.items():
+ self.config.set_option(k, values)
+ try:
+ yield
+ finally:
+ for k in options:
+ self.config.set_option(k, '')
+ def check_no_cors(self, req):
+ self.assertEqual(None, req.get_response_header(alloworig))
+ self.assertEqual(None, req.get_response_header(allowmethods))
+ self.assertEqual(None, req.get_response_header(allowheaders))
+ self.assertEqual(None, req.get_response_header(allowcreds))
+ self.assertEqual(None, req.get_response_header(exposeheaders))
+ self.assertEqual(None, req.get_response_header(maxage))
+
+
+class SimpleAccessHeadersTC(_BaseAccessHeadersTC):
+
+ def test_noaccess(self):
+ with self.admin_access.web_request() as req:
+ data = self.app_handle_request(req)
+ self.check_no_cors(req)
+
+ def test_noorigin(self):
+ with self.options(**{alloworig: '*'}):
+ with self.admin_access.web_request() as req:
+ data = self.app_handle_request(req)
+ self.check_no_cors(req)
+
+ def test_origin_noaccess(self):
+ with self.admin_access.web_request() as req:
+ req.set_request_header('Origin', 'http://www.cubicweb.org')
+ data = self.app_handle_request(req)
+ self.check_no_cors(req)
+
+ def test_origin_noaccess_bad_host(self):
+ with self.options(**{alloworig: '*'}):
+ with self.admin_access.web_request() as req:
+ req.set_request_header('Origin', 'http://www.cubicweb.org')
+ # in these tests, base_url is http://testing.fr/cubicweb/
+ req.set_request_header('Host', 'badhost.net')
+ data = self.app_handle_request(req)
+ self.check_no_cors(req)
+
+ def test_explicit_origin_noaccess(self):
+ with self.options(**{alloworig: ['http://www.toto.org', 'http://othersite.fr']}):
+ with self.admin_access.web_request() as req:
+ req.set_request_header('Origin', 'http://www.cubicweb.org')
+ # in these tests, base_url is http://testing.fr/cubicweb/
+ req.set_request_header('Host', 'testing.fr')
+ data = self.app_handle_request(req)
+ self.check_no_cors(req)
+
+ def test_origin_access(self):
+ with self.options(**{alloworig: '*'}):
+ with self.admin_access.web_request() as req:
+ req.set_request_header('Origin', 'http://www.cubicweb.org')
+ # in these tests, base_url is http://testing.fr/cubicweb/
+ req.set_request_header('Host', 'testing.fr')
+ data = self.app_handle_request(req)
+ self.assertEqual('http://www.cubicweb.org',
+ req.get_response_header(alloworig))
+
+ def test_explicit_origin_access(self):
+ with self.options(**{alloworig: ['http://www.cubicweb.org', 'http://othersite.fr']}):
+ with self.admin_access.web_request() as req:
+ req.set_request_header('Origin', 'http://www.cubicweb.org')
+ # in these tests, base_url is http://testing.fr/cubicweb/
+ req.set_request_header('Host', 'testing.fr')
+ data = self.app_handle_request(req)
+ self.assertEqual('http://www.cubicweb.org',
+ req.get_response_header(alloworig))
+
+ def test_origin_access_headers(self):
+ with self.options(**{alloworig: '*',
+ exposeheaders: ['ExposeHead1', 'ExposeHead2'],
+ allowheaders: ['AllowHead1', 'AllowHead2'],
+ allowmethods: ['GET', 'POST', 'OPTIONS']}):
+ with self.admin_access.web_request() as req:
+ req.set_request_header('Origin', 'http://www.cubicweb.org')
+ # in these tests, base_url is http://testing.fr/cubicweb/
+ req.set_request_header('Host', 'testing.fr')
+ data = self.app_handle_request(req)
+ self.assertEqual('http://www.cubicweb.org',
+ req.get_response_header(alloworig))
+ self.assertEqual("true",
+ req.get_response_header(allowcreds))
+ self.assertEqual(['ExposeHead1', 'ExposeHead2'],
+ req.get_response_header(exposeheaders))
+ self.assertEqual(None, req.get_response_header(allowmethods))
+ self.assertEqual(None, req.get_response_header(allowheaders))
+
+
+class PreflightAccessHeadersTC(_BaseAccessHeadersTC):
+
+ def test_noaccess(self):
+ with self.admin_access.web_request(method='OPTIONS') as req:
+ data = self.app_handle_request(req)
+ self.check_no_cors(req)
+
+ def test_noorigin(self):
+ with self.options(**{alloworig: '*'}):
+ with self.admin_access.web_request(method='OPTIONS') as req:
+ data = self.app_handle_request(req)
+ self.check_no_cors(req)
+
+ def test_origin_noaccess(self):
+ with self.admin_access.web_request(method='OPTIONS') as req:
+ req.set_request_header('Origin', 'http://www.cubicweb.org')
+ data = self.app_handle_request(req)
+ self.check_no_cors(req)
+
+ def test_origin_noaccess_bad_host(self):
+ with self.options(**{alloworig: '*'}):
+ with self.admin_access.web_request(method='OPTIONS') as req:
+ req.set_request_header('Origin', 'http://www.cubicweb.org')
+ # in these tests, base_url is http://testing.fr/cubicweb/
+ req.set_request_header('Host', 'badhost.net')
+ data = self.app_handle_request(req)
+ self.check_no_cors(req)
+
+ def test_origin_access(self):
+ with self.options(**{alloworig: '*',
+ exposeheaders: ['ExposeHead1', 'ExposeHead2'],
+ allowheaders: ['AllowHead1', 'AllowHead2'],
+ allowmethods: ['GET', 'POST', 'OPTIONS']}):
+ with self.admin_access.web_request(method='OPTIONS') as req:
+ req.set_request_header('Origin', 'http://www.cubicweb.org')
+ # in these tests, base_url is http://testing.fr/cubicweb/
+ req.set_request_header('Host', 'testing.fr')
+ req.set_request_header(requestmethod, 'GET')
+
+ data = self.app_handle_request(req)
+ self.assertEqual(200, req.status_out)
+ self.assertEqual('http://www.cubicweb.org',
+ req.get_response_header(alloworig))
+ self.assertEqual("true",
+ req.get_response_header(allowcreds))
+ self.assertEqual(set(['GET', 'POST', 'OPTIONS']),
+ req.get_response_header(allowmethods))
+ self.assertEqual(set(['AllowHead1', 'AllowHead2']),
+ req.get_response_header(allowheaders))
+ self.assertEqual(None,
+ req.get_response_header(exposeheaders))
+
+
if __name__ == '__main__':
unittest_main()
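
The CORS tests above fix the expected behaviour: no access-control headers unless the request carries an Origin, the Host matches the instance's base_url, and the origin is allowed; simple responses expose headers, while preflight (OPTIONS) responses advertise allowed methods and headers instead. A condensed sketch of that decision table (the helper name and signature are illustrative, not the actual cubicweb.web code):

    def cors_response_headers(origin, host, base_host, allowed_origins,
                              preflight=False, methods=(), headers=(),
                              expose=()):
        """return CORS response headers, or {} when CORS stays disabled"""
        if not origin or host != base_host:
            return {}
        if allowed_origins != '*' and origin not in allowed_origins:
            return {}
        resp = {'access-control-allow-origin': origin,
                'access-control-allow-credentials': 'true'}
        if preflight:
            # a preflight answer advertises what the actual request may use
            resp['access-control-allow-methods'] = set(methods)
            resp['access-control-allow-headers'] = set(headers)
        elif expose:
            # an actual response advertises what the client may read
            resp['access-control-expose-headers'] = list(expose)
        return resp

    assert cors_response_headers(None, 'testing.fr', 'testing.fr', '*') == {}
    assert cors_response_headers('http://www.cubicweb.org', 'badhost.net',
                                 'testing.fr', '*') == {}
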
--- a/web/test/unittest_idownloadable.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/test/unittest_idownloadable.py Tue Jun 10 09:49:45 2014 +0200
@@ -21,7 +21,7 @@
from logilab.common.testlib import unittest_main
-from cubicweb.devtools.testlib import CubicWebTC
+from cubicweb.devtools.testlib import CubicWebTC, real_error_handling
from cubicweb import view
from cubicweb.predicates import is_instance
@@ -63,23 +63,22 @@
self.addCleanup(partial(self.vreg.unregister, IDownloadableUser))
def test_header_simple_case(self):
- req = self.request()
- req.form['vid'] = 'download'
- req.form['eid'] = str(req.user.eid)
- data = self.ctrl_publish(req, 'view')
- get = req.headers_out.getRawHeaders
- self.assertEqual(['attachment;filename="admin.txt"'],
- get('content-disposition'))
- self.assertEqual(['text/plain;charset=ascii'],
- get('content-type'))
- self.assertEqual('Babar is not dead!', data)
+ with self.admin_access.web_request() as req:
+ req.form['vid'] = 'download'
+ req.form['eid'] = str(req.user.eid)
+ data = self.ctrl_publish(req, 'view')
+ get = req.headers_out.getRawHeaders
+ self.assertEqual(['attachment;filename="admin.txt"'],
+ get('content-disposition'))
+ self.assertEqual(['text/plain;charset=ascii'],
+ get('content-type'))
+ self.assertEqual('Babar is not dead!', data)
def test_header_with_space(self):
- req = self.request()
- self.create_user(req, login=u'c c l a', password='babar')
- self.commit()
- with self.login(u'c c l a', password='babar'):
- req = self.request()
+ with self.admin_access.web_request() as req:
+ self.create_user(req, login=u'c c l a', password='babar')
+ req.cnx.commit()
+ with self.new_access(u'c c l a').web_request() as req:
req.form['vid'] = 'download'
req.form['eid'] = str(req.user.eid)
data = self.ctrl_publish(req,'view')
@@ -91,11 +90,10 @@
self.assertEqual('Babar is not dead!', data)
def test_header_with_space_and_comma(self):
- req = self.request()
- self.create_user(req, login=ur'c " l\ a', password='babar')
- self.commit()
- with self.login(ur'c " l\ a', password='babar'):
- req = self.request()
+ with self.admin_access.web_request() as req:
+ self.create_user(req, login=ur'c " l\ a', password='babar')
+ req.cnx.commit()
+ with self.new_access(ur'c " l\ a').web_request() as req:
req.form['vid'] = 'download'
req.form['eid'] = str(req.user.eid)
data = self.ctrl_publish(req,'view')
@@ -107,11 +105,10 @@
self.assertEqual('Babar is not dead!', data)
def test_header_unicode_filename(self):
- req = self.request()
- self.create_user(req, login=u'cécilia', password='babar')
- self.commit()
- with self.login(u'cécilia', password='babar'):
- req = self.request()
+ with self.admin_access.web_request() as req:
+ self.create_user(req, login=u'cécilia', password='babar')
+ req.cnx.commit()
+ with self.new_access(u'cécilia').web_request() as req:
req.form['vid'] = 'download'
req.form['eid'] = str(req.user.eid)
self.ctrl_publish(req,'view')
@@ -120,12 +117,11 @@
get('content-disposition'))
def test_header_unicode_long_filename(self):
- req = self.request()
name = u'Bèrte_hô_grand_nôm_ça_va_totallement_déborder_de_la_limite_là '
- self.create_user(req, login=name, password='babar')
- self.commit()
- with self.login(name, password='babar'):
- req = self.request()
+ with self.admin_access.web_request() as req:
+ self.create_user(req, login=name, password='babar')
+ req.cnx.commit()
+ with self.new_access(name).web_request() as req:
req.form['vid'] = 'download'
req.form['eid'] = str(req.user.eid)
self.ctrl_publish(req,'view')
@@ -137,20 +133,17 @@
def test_download_data_error(self):
self.vreg.register(BrokenIDownloadableGroup)
self.addCleanup(partial(self.vreg.unregister, BrokenIDownloadableGroup))
- req = self.request()
- req.form['vid'] = 'download'
- req.form['eid'] = str(req.execute('CWGroup X WHERE X name "managers"')[0][0])
- errhdlr = self.app.__dict__.pop('error_handler') # temporarily restore error handler
- try:
- data = self.app_handle_request(req)
- finally:
- self.app.error_handler = errhdlr
- get = req.headers_out.getRawHeaders
- self.assertEqual(['text/html;charset=UTF-8'],
- get('content-type'))
- self.assertEqual(None,
- get('content-disposition'))
- self.assertEqual(req.status_out, 500)
+ with self.admin_access.web_request() as req:
+ req.form['vid'] = 'download'
+ req.form['eid'] = str(req.execute('CWGroup X WHERE X name "managers"')[0][0])
+ with real_error_handling(self.app):
+ data = self.app_handle_request(req)
+ get = req.headers_out.getRawHeaders
+ self.assertEqual(['text/html;charset=UTF-8'],
+ get('content-type'))
+ self.assertEqual(None,
+ get('content-disposition'))
+ self.assertEqual(req.status_out, 500)
if __name__ == '__main__':
unittest_main()
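
test_download_data_error replaces the hand-rolled pop/restore of app.error_handler with the real_error_handling helper imported at the top of the file. Judging from the code it replaces, a minimal sketch of such a context manager (the devtools implementation may differ):

    from contextlib import contextmanager

    @contextmanager
    def real_error_handling(app):
        # drop the test stub so the class-level error handler applies again
        handler = app.__dict__.pop('error_handler')
        try:
            yield app
        finally:
            app.error_handler = handler
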
--- a/web/test/unittest_magicsearch.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/test/unittest_magicsearch.py Tue Jun 10 09:49:45 2014 +0200
@@ -19,6 +19,7 @@
"""Unit tests for cw.web.views.magicsearch"""
import sys
+from contextlib import contextmanager
from logilab.common.testlib import TestCase, unittest_main
@@ -50,110 +51,120 @@
class QueryTranslatorTC(CubicWebTC):
"""test suite for QueryTranslatorTC"""
- def setUp(self):
- super(QueryTranslatorTC, self).setUp()
- self.req = self.request()
- self.vreg.config.translations = {'en': (_translate, _ctxtranslate)}
- proc = self.vreg['components'].select('magicsearch', self.req)
- self.proc = [p for p in proc.processors if isinstance(p, QueryTranslator)][0]
+ @contextmanager
+ def proc(self):
+ with self.admin_access.web_request() as req:
+ self.vreg.config.translations = {'en': (_translate, _ctxtranslate)}
+ proc = self.vreg['components'].select('magicsearch', req)
+ proc = [p for p in proc.processors if isinstance(p, QueryTranslator)][0]
+ yield proc
def test_basic_translations(self):
"""tests basic translations (no ambiguities)"""
- rql = "Any C WHERE C is Adresse, P adel C, C adresse 'Logilab'"
- rql, = self.proc.preprocess_query(rql)
- self.assertEqual(rql, "Any C WHERE C is EmailAddress, P use_email C, C address 'Logilab'")
+ with self.proc() as proc:
+ rql = "Any C WHERE C is Adresse, P adel C, C adresse 'Logilab'"
+ rql, = proc.preprocess_query(rql)
+ self.assertEqual(rql, "Any C WHERE C is EmailAddress, P use_email C, C address 'Logilab'")
def test_ambiguous_translations(self):
"""tests possibly ambiguous translations"""
- rql = "Any P WHERE P adel C, C is EmailAddress, C nom 'Logilab'"
- rql, = self.proc.preprocess_query(rql)
- self.assertEqual(rql, "Any P WHERE P use_email C, C is EmailAddress, C alias 'Logilab'")
- rql = "Any P WHERE P is Utilisateur, P adel C, P nom 'Smith'"
- rql, = self.proc.preprocess_query(rql)
- self.assertEqual(rql, "Any P WHERE P is CWUser, P use_email C, P surname 'Smith'")
+ with self.proc() as proc:
+ rql = "Any P WHERE P adel C, C is EmailAddress, C nom 'Logilab'"
+ rql, = proc.preprocess_query(rql)
+ self.assertEqual(rql, "Any P WHERE P use_email C, C is EmailAddress, C alias 'Logilab'")
+ rql = "Any P WHERE P is Utilisateur, P adel C, P nom 'Smith'"
+ rql, = proc.preprocess_query(rql)
+ self.assertEqual(rql, "Any P WHERE P is CWUser, P use_email C, P surname 'Smith'")
class QSPreProcessorTC(CubicWebTC):
"""test suite for QSPreProcessor"""
- def setUp(self):
- super(QSPreProcessorTC, self).setUp()
+
+ @contextmanager
+ def proc(self):
self.vreg.config.translations = {'en': (_translate, _ctxtranslate)}
- self.req = self.request()
- proc = self.vreg['components'].select('magicsearch', self.req)
- self.proc = [p for p in proc.processors if isinstance(p, QSPreProcessor)][0]
- self.proc._cw = self.req
+ with self.admin_access.web_request() as req:
+ proc = self.vreg['components'].select('magicsearch', req)
+ proc = [p for p in proc.processors if isinstance(p, QSPreProcessor)][0]
+ proc._cw = req
+ yield proc
def test_entity_translation(self):
"""tests QSPreProcessor._get_entity_name()"""
- translate = self.proc._get_entity_type
- self.assertEqual(translate(u'EmailAddress'), "EmailAddress")
- self.assertEqual(translate(u'emailaddress'), "EmailAddress")
- self.assertEqual(translate(u'Adresse'), "EmailAddress")
- self.assertEqual(translate(u'adresse'), "EmailAddress")
- self.assertRaises(BadRQLQuery, translate, 'whatever')
+ with self.proc() as proc:
+ translate = proc._get_entity_type
+ self.assertEqual(translate(u'EmailAddress'), "EmailAddress")
+ self.assertEqual(translate(u'emailaddress'), "EmailAddress")
+ self.assertEqual(translate(u'Adresse'), "EmailAddress")
+ self.assertEqual(translate(u'adresse'), "EmailAddress")
+ self.assertRaises(BadRQLQuery, translate, 'whatever')
def test_attribute_translation(self):
"""tests QSPreProcessor._get_attribute_name"""
- translate = self.proc._get_attribute_name
- eschema = self.schema.eschema('CWUser')
- self.assertEqual(translate(u'prénom', eschema), "firstname")
- self.assertEqual(translate(u'nom', eschema), 'surname')
- eschema = self.schema.eschema('EmailAddress')
- self.assertEqual(translate(u'adresse', eschema), "address")
- self.assertEqual(translate(u'nom', eschema), 'alias')
- # should fail if the name is not an attribute for the given entity schema
- self.assertRaises(BadRQLQuery, translate, 'whatever', eschema)
- self.assertRaises(BadRQLQuery, translate, 'prénom', eschema)
+ with self.proc() as proc:
+ translate = proc._get_attribute_name
+ eschema = self.schema.eschema('CWUser')
+ self.assertEqual(translate(u'prénom', eschema), "firstname")
+ self.assertEqual(translate(u'nom', eschema), 'surname')
+ eschema = self.schema.eschema('EmailAddress')
+ self.assertEqual(translate(u'adresse', eschema), "address")
+ self.assertEqual(translate(u'nom', eschema), 'alias')
+ # should fail if the name is not an attribute for the given entity schema
+ self.assertRaises(BadRQLQuery, translate, 'whatever', eschema)
+ self.assertRaises(BadRQLQuery, translate, 'prénom', eschema)
def test_one_word_query(self):
"""tests the 'one word shortcut queries'"""
- transform = self.proc._one_word_query
- self.assertEqual(transform('123'),
- ('Any X WHERE X eid %(x)s', {'x': 123}, 'x'))
- self.assertEqual(transform('CWUser'),
- ('CWUser C',))
- self.assertEqual(transform('Utilisateur'),
- ('CWUser C',))
- self.assertEqual(transform('Adresse'),
- ('EmailAddress E',))
- self.assertEqual(transform('adresse'),
- ('EmailAddress E',))
- self.assertRaises(BadRQLQuery, transform, 'Workcases')
+ with self.proc() as proc:
+ transform = proc._one_word_query
+ self.assertEqual(transform('123'),
+ ('Any X WHERE X eid %(x)s', {'x': 123}, 'x'))
+ self.assertEqual(transform('CWUser'),
+ ('CWUser C',))
+ self.assertEqual(transform('Utilisateur'),
+ ('CWUser C',))
+ self.assertEqual(transform('Adresse'),
+ ('EmailAddress E',))
+ self.assertEqual(transform('adresse'),
+ ('EmailAddress E',))
+ self.assertRaises(BadRQLQuery, transform, 'Workcases')
def test_two_words_query(self):
"""tests the 'two words shortcut queries'"""
- transform = self.proc._two_words_query
- self.assertEqual(transform('CWUser', 'E'),
- ("CWUser E",))
- self.assertEqual(transform('CWUser', 'Smith'),
- ('CWUser C ORDERBY FTIRANK(C) DESC WHERE C has_text %(text)s', {'text': 'Smith'}))
- self.assertEqual(transform('utilisateur', 'Smith'),
- ('CWUser C ORDERBY FTIRANK(C) DESC WHERE C has_text %(text)s', {'text': 'Smith'}))
- self.assertEqual(transform(u'adresse', 'Logilab'),
- ('EmailAddress E ORDERBY FTIRANK(E) DESC WHERE E has_text %(text)s', {'text': 'Logilab'}))
- self.assertEqual(transform(u'adresse', 'Logi%'),
- ('EmailAddress E WHERE E alias LIKE %(text)s', {'text': 'Logi%'}))
- self.assertRaises(BadRQLQuery, transform, "pers", "taratata")
+ with self.proc() as proc:
+ transform = proc._two_words_query
+ self.assertEqual(transform('CWUser', 'E'),
+ ("CWUser E",))
+ self.assertEqual(transform('CWUser', 'Smith'),
+ ('CWUser C ORDERBY FTIRANK(C) DESC WHERE C has_text %(text)s', {'text': 'Smith'}))
+ self.assertEqual(transform('utilisateur', 'Smith'),
+ ('CWUser C ORDERBY FTIRANK(C) DESC WHERE C has_text %(text)s', {'text': 'Smith'}))
+ self.assertEqual(transform(u'adresse', 'Logilab'),
+ ('EmailAddress E ORDERBY FTIRANK(E) DESC WHERE E has_text %(text)s', {'text': 'Logilab'}))
+ self.assertEqual(transform(u'adresse', 'Logi%'),
+ ('EmailAddress E WHERE E alias LIKE %(text)s', {'text': 'Logi%'}))
+ self.assertRaises(BadRQLQuery, transform, "pers", "taratata")
def test_three_words_query(self):
"""tests the 'three words shortcut queries'"""
- transform = self.proc._three_words_query
- self.assertEqual(transform('utilisateur', u'prénom', 'cubicweb'),
- ('CWUser C WHERE C firstname %(text)s', {'text': 'cubicweb'}))
- self.assertEqual(transform('utilisateur', 'nom', 'cubicweb'),
- ('CWUser C WHERE C surname %(text)s', {'text': 'cubicweb'}))
- self.assertEqual(transform(u'adresse', 'nom', 'cubicweb'),
- ('EmailAddress E WHERE E alias %(text)s', {'text': 'cubicweb'}))
- self.assertEqual(transform('EmailAddress', 'nom', 'cubicweb'),
- ('EmailAddress E WHERE E alias %(text)s', {'text': 'cubicweb'}))
- self.assertEqual(transform('utilisateur', u'prénom', 'cubicweb%'),
- ('CWUser C WHERE C firstname LIKE %(text)s', {'text': 'cubicweb%'}))
- # expanded shortcuts
- self.assertEqual(transform('CWUser', 'use_email', 'Logilab'),
- ('CWUser C ORDERBY FTIRANK(C1) DESC WHERE C use_email C1, C1 has_text %(text)s', {'text': 'Logilab'}))
- self.assertEqual(transform('CWUser', 'use_email', '%Logilab'),
- ('CWUser C WHERE C use_email C1, C1 alias LIKE %(text)s', {'text': '%Logilab'}))
- self.assertRaises(BadRQLQuery, transform, 'word1', 'word2', 'word3')
+ with self.proc() as proc:
+ transform = proc._three_words_query
+ self.assertEqual(transform('utilisateur', u'prénom', 'cubicweb'),
+ ('CWUser C WHERE C firstname %(text)s', {'text': 'cubicweb'}))
+ self.assertEqual(transform('utilisateur', 'nom', 'cubicweb'),
+ ('CWUser C WHERE C surname %(text)s', {'text': 'cubicweb'}))
+ self.assertEqual(transform(u'adresse', 'nom', 'cubicweb'),
+ ('EmailAddress E WHERE E alias %(text)s', {'text': 'cubicweb'}))
+ self.assertEqual(transform('EmailAddress', 'nom', 'cubicweb'),
+ ('EmailAddress E WHERE E alias %(text)s', {'text': 'cubicweb'}))
+ self.assertEqual(transform('utilisateur', u'prénom', 'cubicweb%'),
+ ('CWUser C WHERE C firstname LIKE %(text)s', {'text': 'cubicweb%'}))
+ # expanded shortcuts
+ self.assertEqual(transform('CWUser', 'use_email', 'Logilab'),
+ ('CWUser C ORDERBY FTIRANK(C1) DESC WHERE C use_email C1, C1 has_text %(text)s', {'text': 'Logilab'}))
+ self.assertEqual(transform('CWUser', 'use_email', '%Logilab'),
+ ('CWUser C WHERE C use_email C1, C1 alias LIKE %(text)s', {'text': '%Logilab'}))
+ self.assertRaises(BadRQLQuery, transform, 'word1', 'word2', 'word3')
def test_quoted_queries(self):
"""tests how quoted queries are handled"""
@@ -163,12 +174,13 @@
(u'Utilisateur firstname "Jean Paul"', ('CWUser C WHERE C firstname %(text)s', {'text': 'Jean Paul'})),
(u'CWUser firstname "Jean Paul"', ('CWUser C WHERE C firstname %(text)s', {'text': 'Jean Paul'})),
]
- transform = self.proc._quoted_words_query
- for query, expected in queries:
- self.assertEqual(transform(query), expected)
- self.assertRaises(BadRQLQuery, transform, "unquoted rql")
- self.assertRaises(BadRQLQuery, transform, 'pers "Jean Paul"')
- self.assertRaises(BadRQLQuery, transform, 'CWUser firstname other "Jean Paul"')
+ with self.proc() as proc:
+ transform = proc._quoted_words_query
+ for query, expected in queries:
+ self.assertEqual(transform(query), expected)
+ self.assertRaises(BadRQLQuery, transform, "unquoted rql")
+ self.assertRaises(BadRQLQuery, transform, 'pers "Jean Paul"')
+ self.assertRaises(BadRQLQuery, transform, 'CWUser firstname other "Jean Paul"')
def test_process_query(self):
"""tests how queries are processed"""
@@ -178,24 +190,25 @@
(u'Utilisateur cubicweb', (u'CWUser C ORDERBY FTIRANK(C) DESC WHERE C has_text %(text)s', {'text': u'cubicweb'})),
(u'CWUser prénom cubicweb', (u'CWUser C WHERE C firstname %(text)s', {'text': 'cubicweb'},)),
]
- for query, expected in queries:
- self.assertEqual(self.proc.preprocess_query(query), expected)
- self.assertRaises(BadRQLQuery,
- self.proc.preprocess_query, 'Any X WHERE X is Something')
+ with self.proc() as proc:
+ for query, expected in queries:
+ self.assertEqual(proc.preprocess_query(query), expected)
+ self.assertRaises(BadRQLQuery,
+ proc.preprocess_query, 'Any X WHERE X is Something')
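
The magicsearch fixtures turn what used to be setUp() state into a proc() context manager: a request obtained from admin_access must be released, so it cannot survive the test as an instance attribute. The shared shape of these fixtures, in isolation (mixin and method names are made up):

    from contextlib import contextmanager

    class MagicSearchFixtureSketch(object):
        @contextmanager
        def magicsearch_processor(self, proc_class):
            # the request must not outlive the test, hence no setUp state
            with self.admin_access.web_request() as req:
                ms = self.vreg['components'].select('magicsearch', req)
                yield [p for p in ms.processors
                       if isinstance(p, proc_class)][0]
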
## Processor Chains tests ############################################
-
class ProcessorChainTC(CubicWebTC):
"""test suite for magic_search's processor chains"""
- def setUp(self):
- super(ProcessorChainTC, self).setUp()
+ @contextmanager
+ def proc(self):
self.vreg.config.translations = {'en': (_translate, _ctxtranslate)}
- self.req = self.request()
- self.proc = self.vreg['components'].select('magicsearch', self.req)
+ with self.admin_access.web_request() as req:
+ proc = self.vreg['components'].select('magicsearch', req)
+ yield proc
def test_main_preprocessor_chain(self):
"""tests QUERY_PROCESSOR"""
@@ -211,31 +224,34 @@
(u'Any P WHERE P is Utilisateur, P nom "Smith"',
('Any P WHERE P is CWUser, P surname "Smith"', None)),
]
- for query, expected in queries:
- rset = self.proc.process_query(query)
- self.assertEqual((rset.rql, rset.args), expected)
+ with self.proc() as proc:
+ for query, expected in queries:
+ rset = proc.process_query(query)
+ self.assertEqual((rset.rql, rset.args), expected)
def test_accentuated_fulltext(self):
"""we must be able to type accentuated characters in the search field"""
- rset = self.proc.process_query(u'écrire')
- self.assertEqual(rset.rql, "Any X ORDERBY FTIRANK(X) DESC WHERE X has_text %(text)s")
- self.assertEqual(rset.args, {'text': u'écrire'})
+ with self.proc() as proc:
+ rset = proc.process_query(u'écrire')
+ self.assertEqual(rset.rql, "Any X ORDERBY FTIRANK(X) DESC WHERE X has_text %(text)s")
+ self.assertEqual(rset.args, {'text': u'écrire'})
def test_explicit_component(self):
- self.assertRaises(RQLSyntaxError,
- self.proc.process_query, u'rql: CWUser E WHERE E noattr "Smith",')
- self.assertRaises(BadRQLQuery,
- self.proc.process_query, u'rql: CWUser E WHERE E noattr "Smith"')
- rset = self.proc.process_query(u'text: utilisateur Smith')
- self.assertEqual(rset.rql, 'Any X ORDERBY FTIRANK(X) DESC WHERE X has_text %(text)s')
- self.assertEqual(rset.args, {'text': u'utilisateur Smith'})
+ with self.proc() as proc:
+ self.assertRaises(RQLSyntaxError,
+ proc.process_query, u'rql: CWUser E WHERE E noattr "Smith",')
+ self.assertRaises(BadRQLQuery,
+ proc.process_query, u'rql: CWUser E WHERE E noattr "Smith"')
+ rset = proc.process_query(u'text: utilisateur Smith')
+ self.assertEqual(rset.rql, 'Any X ORDERBY FTIRANK(X) DESC WHERE X has_text %(text)s')
+ self.assertEqual(rset.args, {'text': u'utilisateur Smith'})
class RQLSuggestionsBuilderTC(CubicWebTC):
def suggestions(self, rql):
- req = self.request()
- rbs = self.vreg['components'].select('rql.suggestions', req)
- return rbs.build_suggestions(rql)
+ with self.admin_access.web_request() as req:
+ rbs = self.vreg['components'].select('rql.suggestions', req)
+ return rbs.build_suggestions(rql)
def test_no_restrictions_rql(self):
self.assertListEqual([], self.suggestions(''))
@@ -313,9 +329,10 @@
def test_attribute_value_rql(self):
# suggestions should contain any possible value for
# a given attribute (limited to 10)
- req = self.request()
- for i in xrange(15):
- req.create_entity('Personne', nom=u'n%s' % i, prenom=u'p%s' % i)
+ with self.admin_access.web_request() as req:
+ for i in xrange(15):
+ req.create_entity('Personne', nom=u'n%s' % i, prenom=u'p%s' % i)
+ req.cnx.commit()
self.assertListEqual(['Any X WHERE X is Personne, X nom "n0"',
'Any X WHERE X is Personne, X nom "n1"',
'Any X WHERE X is Personne, X nom "n10"',
--- a/web/test/unittest_propertysheet.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/test/unittest_propertysheet.py Tue Jun 10 09:49:45 2014 +0200
@@ -35,19 +35,19 @@
'a {bgcolor: #FFFFFF; size: 1%;}')
self.assertEqual(ps.process_resource(DATADIR, 'pouet.css'),
CACHEDIR)
- self.assertTrue('pouet.css' in ps._cache)
+ self.assertIn('pouet.css', ps._cache)
self.assertFalse(ps.need_reload())
os.utime(join(DATADIR, 'sheet1.py'), None)
- self.assertTrue('pouet.css' in ps._cache)
+ self.assertIn('pouet.css', ps._cache)
self.assertTrue(ps.need_reload())
- self.assertTrue('pouet.css' in ps._cache)
+ self.assertIn('pouet.css', ps._cache)
ps.reload()
- self.assertFalse('pouet.css' in ps._cache)
+ self.assertNotIn('pouet.css', ps._cache)
self.assertFalse(ps.need_reload())
ps.process_resource(DATADIR, 'pouet.css') # put in cache
os.utime(join(DATADIR, 'pouet.css'), None)
self.assertFalse(ps.need_reload())
- self.assertFalse('pouet.css' in ps._cache)
+ self.assertNotIn('pouet.css', ps._cache)
if __name__ == '__main__':
unittest_main()
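
For reference, the assertions above all revolve around mtime-based invalidation: touching a source sheet (sheet1.py) makes need_reload() true, reload() empties the cache, and touching a generated resource alone does not trigger a reload. Roughly, as a toy model (not the real PropertySheet class):

    import os
    import time

    class PropertySheetSketch(object):
        def __init__(self, sources):
            self._sources = sources  # python source sheets, e.g. sheet1.py
            self._cache = {}
            self._loaded_at = time.time()

        def need_reload(self):
            # only source sheets count; generated resources are served from
            # _cache until a source changes
            return any(os.stat(path).st_mtime > self._loaded_at
                       for path in self._sources)

        def reload(self):
            self._cache.clear()
            self._loaded_at = time.time()
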
--- a/web/test/unittest_reledit.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/test/unittest_reledit.py Tue Jun 10 09:49:45 2014 +0200
@@ -25,10 +25,11 @@
class ReleditMixinTC(object):
def setup_database(self):
- self.req = self.request()
- self.proj = self.req.create_entity('Project', title=u'cubicweb-world-domination')
- self.tick = self.req.create_entity('Ticket', title=u'write the code')
- self.toto = self.req.create_entity('Personne', nom=u'Toto')
+ with self.admin_access.client_cnx() as cnx:
+ self.proj = cnx.create_entity('Project', title=u'cubicweb-world-domination').eid
+ self.tick = cnx.create_entity('Ticket', title=u'write the code').eid
+ self.toto = cnx.create_entity('Personne', nom=u'Toto').eid
+ cnx.commit()
class ClickAndEditFormTC(ReleditMixinTC, CubicWebTC):
@@ -39,13 +40,16 @@
'composite_card11_2ttypes': """<not specified>""",
'concerns': """<not specified>"""}
- for rschema, ttypes, role in self.proj.e_schema.relation_definitions(includefinal=True):
- if rschema not in reledit:
- continue
- rtype = rschema.type
- self.assertMultiLineEqual(reledit[rtype] % {'eid': self.proj.eid},
- self.proj.view('reledit', rtype=rtype, role=role),
- rtype)
+ with self.admin_access.web_request() as req:
+ proj = req.entity_from_eid(self.proj)
+
+ for rschema, ttypes, role in proj.e_schema.relation_definitions(includefinal=True):
+ if rschema not in reledit:
+ continue
+ rtype = rschema.type
+ self.assertMultiLineEqual(reledit[rtype] % {'eid': self.proj},
+ proj.view('reledit', rtype=rtype, role=role),
+ rtype)
def test_default_forms(self):
self.skipTest('Need to check if this test should still run post reledit/doreledit merge')
@@ -175,8 +179,10 @@
def setup_database(self):
super(ClickAndEditFormUICFGTC, self).setup_database()
- self.tick.cw_set(concerns=self.proj)
- self.proj.cw_set(manager=self.toto)
+ with self.admin_access.client_cnx() as cnx:
+ cnx.execute('SET T concerns P WHERE T eid %(t)s, P eid %(p)s', {'t': self.tick, 'p': self.proj})
+ cnx.execute('SET P manager T WHERE P eid %(p)s, T eid %(t)s', {'p': self.proj, 't': self.toto})
+ cnx.commit()
def test_with_uicfg(self):
old_rctl = reledit_ctrl._tagdefs.copy()
@@ -198,13 +204,15 @@
'composite_card11_2ttypes': """<not specified>""",
'concerns': """<div id="concerns-object-%(eid)s-reledit" onmouseout="jQuery('#concerns-object-%(eid)s').addClass('hidden')" onmouseover="jQuery('#concerns-object-%(eid)s').removeClass('hidden')" class="releditField"><div id="concerns-object-%(eid)s-value" class="editableFieldValue"><a href="http://testing.fr/cubicweb/ticket/%(tick)s" title="">write the code</a></div><div id="concerns-object-%(eid)s" class="editableField hidden"><div id="concerns-object-%(eid)s-update" class="editableField" onclick="cw.reledit.loadInlineEditionForm('base', %(eid)s, 'concerns', 'object', 'concerns-object-%(eid)s', false, 'autolimited', 'edit_rtype');" title="click to edit this field"><img title="click to edit this field" src="http://testing.fr/cubicweb/data/pen_icon.png" alt="click to edit this field"/></div></div></div>"""
}
- for rschema, ttypes, role in self.proj.e_schema.relation_definitions(includefinal=True):
- if rschema not in reledit:
- continue
- rtype = rschema.type
- self.assertMultiLineEqual(reledit[rtype] % {'eid': self.proj.eid, 'toto': self.toto.eid, 'tick': self.tick.eid},
- self.proj.view('reledit', rtype=rtype, role=role),
- rtype)
+ with self.admin_access.web_request() as req:
+ proj = req.entity_from_eid(self.proj)
+ for rschema, ttypes, role in proj.e_schema.relation_definitions(includefinal=True):
+ if rschema not in reledit:
+ continue
+ rtype = rschema.type
+ self.assertMultiLineEqual(reledit[rtype] % {'eid': self.proj, 'toto': self.toto, 'tick': self.tick},
+ proj.view('reledit', rtype=rtype, role=role),
+ rtype)
reledit_ctrl.clear()
reledit_ctrl._tagdefs.update(old_rctl)
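
The reledit fixtures now keep eids instead of entity objects: an entity is bound to the connection that created it, and that connection is closed when the with block exits. Each test therefore re-fetches the entity inside its own request, along these lines (entity type and title are illustrative):

    class ExampleReleditTC(CubicWebTC):
        def setup_database(self):
            with self.admin_access.client_cnx() as cnx:
                # keep the eid: the entity would outlive its connection
                self.proj = cnx.create_entity('Project', title=u'p1').eid
                cnx.commit()

        def test_reledit_view(self):
            with self.admin_access.web_request() as req:
                proj = req.entity_from_eid(self.proj)  # rebind to this request
                proj.view('reledit', rtype='title', role='subject')
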
--- a/web/test/unittest_session.py Tue Jun 10 09:35:26 2014 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""unit tests for cubicweb.web.application
-
-:organization: Logilab
-:copyright: 2001-2011 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
-:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
-:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
-"""
-from cubicweb.devtools.testlib import CubicWebTC
-from cubicweb.web import InvalidSession
-
-class SessionTC(CubicWebTC):
-
- def test_session_expiration(self):
- sm = self.app.session_handler.session_manager
- # make as if the web session had been opened by the session manager
- sm._sessions[self.cnx.sessionid] = self.websession
- sessionid = self.websession.sessionid
- self.assertEqual(len(sm._sessions), 1)
- self.assertEqual(self.websession.sessionid, self.websession.cnx.sessionid)
- # fake the repo session is expiring
- self.repo.close(sessionid)
- try:
- # fake an incoming http query with sessionid in session cookie
- # don't use self.request() which try to call req.set_session
- req = self.requestcls(self.vreg)
- self.assertRaises(InvalidSession, sm.get_session, req, sessionid)
- self.assertEqual(len(sm._sessions), 0)
- finally:
- # avoid error in tearDown by telling this connection is closed...
- self.cnx._closed = True
-
-if __name__ == '__main__':
- from logilab.common.testlib import unittest_main
- unittest_main()
--- a/web/test/unittest_views_basecontrollers.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/test/unittest_views_basecontrollers.py Tue Jun 10 09:49:45 2014 +0200
@@ -33,7 +33,7 @@
from cubicweb.uilib import rql_for_eid
from cubicweb.web import INTERNAL_FIELD_VALUE, Redirect, RequestError, RemoteCallFailed
import cubicweb.server.session
-from cubicweb.server.session import Transaction as OldTransaction
+from cubicweb.server.session import Connection as OldConnection
from cubicweb.entities.authobjs import CWUser
from cubicweb.web.views.autoform import get_pending_inserts, get_pending_deletes
from cubicweb.web.views.basecontrollers import JSonController, xhtmlize, jsonize
@@ -53,11 +53,11 @@
class EditControllerTC(CubicWebTC):
def setUp(self):
CubicWebTC.setUp(self)
- self.assertTrue('users' in self.schema.eschema('CWGroup').get_groups('read'))
+ self.assertIn('users', self.schema.eschema('CWGroup').get_groups('read'))
def tearDown(self):
CubicWebTC.tearDown(self)
- self.assertTrue('users' in self.schema.eschema('CWGroup').get_groups('read'))
+ self.assertIn('users', self.schema.eschema('CWGroup').get_groups('read'))
def test_noparam_edit(self):
"""check behaviour of this controller without any form parameter
@@ -121,7 +121,7 @@
path, params = self.expect_redirect_handle_request(req, 'edit')
cnx.commit() # commit to check we don't get late validation error for instance
self.assertEqual(path, 'cwuser/user')
- self.assertFalse('vid' in params)
+ self.assertNotIn('vid', params)
def test_user_editing_itself_no_relation(self):
"""checking we can edit an entity without specifying some required
@@ -916,15 +916,15 @@
class UndoControllerTC(CubicWebTC):
def setUp(self):
- class Transaction(OldTransaction):
+ class Connection(OldConnection):
"""Force undo feature to be turned on in all case"""
undo_actions = property(lambda tx: True, lambda x, y:None)
- cubicweb.server.session.Transaction = Transaction
+ cubicweb.server.session.Connection = Connection
super(UndoControllerTC, self).setUp()
def tearDown(self):
super(UndoControllerTC, self).tearDown()
- cubicweb.server.session.Transaction = OldTransaction
+ cubicweb.server.session.Connection = OldConnection
def setup_database(self):
--- a/web/test/unittest_views_basetemplates.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/test/unittest_views_basetemplates.py Tue Jun 10 09:49:45 2014 +0200
@@ -18,16 +18,15 @@
from cubicweb.devtools.testlib import CubicWebTC
from cubicweb.devtools.htmlparser import XMLValidator
+from cubicweb.dbapi import DBAPISession
class LogFormTemplateTC(CubicWebTC):
def _login_labels(self):
valid = self.content_type_validators.get('text/html', XMLValidator)()
- req = self.request()
- req.cnx.anonymous_connection = True
- page = valid.parse_string(self.vreg['views'].main_template(self.request(), 'login'))
- req.cnx.anonymous_connection = False
+ req = self.requestcls(self.vreg, url='login')
+ page = valid.parse_string(self.vreg['views'].main_template(req, 'login'))
return page.find_tag('label')
def test_label(self):
--- a/web/test/unittest_views_navigation.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/test/unittest_views_navigation.py Tue Jun 10 09:49:45 2014 +0200
@@ -126,12 +126,12 @@
# req, rset=rset, view=view, context='navtop')
# # breadcrumbs should be in headers by default
# clsids = set(obj.id for obj in objs)
- # self.assertTrue('breadcrumbs' in clsids)
+ # self.assertIn('breadcrumbs', clsids)
# objs = self.vreg['ctxcomponents'].poss_visible_objects(
# req, rset=rset, view=view, context='navbottom')
# # breadcrumbs should _NOT_ be in footers by default
# clsids = set(obj.id for obj in objs)
- # self.assertFalse('breadcrumbs' in clsids)
+ # self.assertNotIn('breadcrumbs', clsids)
# self.execute('INSERT CWProperty P: P pkey "ctxcomponents.breadcrumbs.context", '
# 'P value "navbottom"')
# # breadcrumbs should now be in footers
@@ -140,12 +140,12 @@
# req, rset=rset, view=view, context='navbottom')
# clsids = [obj.id for obj in objs]
- # self.assertTrue('breadcrumbs' in clsids)
+ # self.assertIn('breadcrumbs', clsids)
# objs = self.vreg['ctxcomponents'].poss_visible_objects(
# req, rset=rset, view=view, context='navtop')
# clsids = [obj.id for obj in objs]
- # self.assertFalse('breadcrumbs' in clsids)
+ # self.assertNotIn('breadcrumbs', clsids)
if __name__ == '__main__':
--- a/web/test/unittest_web.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/test/unittest_web.py Tue Jun 10 09:49:45 2014 +0200
@@ -92,6 +92,16 @@
self.assertEqual(webreq.status_code, 200)
self.assertDictEqual(expect, loads(webreq.content))
+class LanguageTC(CubicWebServerTC):
+
+ def test_language_neg(self):
+ headers = {'Accept-Language': 'fr'}
+ webreq = self.web_request(headers=headers)
+ self.assertIn('lang="fr"', webreq.read())
+ headers = {'Accept-Language': 'en'}
+ webreq = self.web_request(headers=headers)
+ self.assertIn('lang="en"', webreq.read())
+
if __name__ == '__main__':
unittest_main()
--- a/web/test/unittest_webconfig.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/test/unittest_webconfig.py Tue Jun 10 09:49:45 2014 +0200
@@ -38,9 +38,9 @@
self.assertTrue(isinstance(ie_css, list))
def test_locate_resource(self):
- self.assertTrue('FILE_ICON' in self.config.uiprops)
+ self.assertIn('FILE_ICON', self.config.uiprops)
rname = self.config.uiprops['FILE_ICON'].replace(self.config.datadir_url, '')
- self.assertTrue('file' in self.config.locate_resource(rname)[0].split(os.sep))
+ self.assertIn('file', self.config.locate_resource(rname)[0].split(os.sep))
cubicwebcsspath = self.config.locate_resource('cubicweb.css')[0].split(os.sep)
self.assertTrue('web' in cubicwebcsspath or 'shared' in cubicwebcsspath) # 'shared' if tests under apycot
--- a/web/views/authentication.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/views/authentication.py Tue Jun 10 09:49:45 2014 +0200
@@ -105,9 +105,10 @@
class RepositoryAuthenticationManager(AbstractAuthenticationManager):
"""authenticate user associated to a request and check session validity"""
- def __init__(self, vreg):
- super(RepositoryAuthenticationManager, self).__init__(vreg)
- self.repo = vreg.config.repository(vreg)
+ def __init__(self, repo):
+ super(RepositoryAuthenticationManager, self).__init__(repo)
+ self.repo = repo
+ vreg = repo.vreg
self.log_queries = vreg.config['query-log-file']
self.authinforetrievers = sorted(vreg['webauth'].possible_objects(vreg),
key=lambda x: x.order)
@@ -138,13 +139,6 @@
# actual user login
if login and session.login != login:
raise InvalidSession('login mismatch')
- try:
- # calling cnx.user() check connection validity, raise
- # BadConnectionId on failure
- user = session.cnx.user(req)
- except BadConnectionId:
- raise InvalidSession('bad connection id')
- return user
def authenticate(self, req):
"""authenticate user using connection information found in the request,
@@ -160,28 +154,24 @@
except NoAuthInfo:
continue
try:
- cnx = self._authenticate(login, authinfo)
+ session = self._authenticate(login, authinfo)
except AuthenticationError:
retriever.cleanup_authentication_information(req)
continue # the next one may succeed
for retriever_ in self.authinforetrievers:
- retriever_.authenticated(retriever, req, cnx, login, authinfo)
- return cnx, login
+ retriever_.authenticated(retriever, req, session, login, authinfo)
+ return session, login
# false if no authentication info found, eg this is not an
# authentication failure
if 'login' in locals():
req.set_message(req._('authentication failure'))
login, authinfo = self.anoninfo
if login:
- cnx = self._authenticate(login, authinfo)
- cnx.anonymous_connection = True
- return cnx, login
+ session = self._authenticate(login, authinfo)
+ return session, login
raise AuthenticationError()
def _authenticate(self, login, authinfo):
- cnxprops = ConnectionProperties(close=False, log=self.log_queries)
- cnx = _repo_connect(self.repo, login, cnxprops=cnxprops, **authinfo)
- # decorate connection
- cnx.vreg = self.vreg
- return cnx
+ sessionid = self.repo.connect(login, **authinfo)
+ return self.repo._sessions[sessionid]
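
Note: with this change authentication hands back the server-side Session itself
instead of a decorated DB-API connection. A minimal sketch of the new contract,
assuming a running in-memory Repository `repo` (credentials are hypothetical):

    sessionid = repo.connect('admin', password='secret')  # raises AuthenticationError on failure
    session = repo._sessions[sessionid]  # server-side Session, not a dbapi Connection
    assert session.login == 'admin'
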
--- a/web/views/basecomponents.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/views/basecomponents.py Tue Jun 10 09:49:45 2014 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -97,8 +97,7 @@
context = _('header-left')
def render(self, w):
- w(u'<a href="%s"><img id="logo" src="%s" alt="logo"/></a>'
- % (self._cw.base_url(), self._cw.uiprops['LOGO']))
+ w(u'<a id="logo" href="%s"></a>' % self._cw.base_url())
class ApplicationName(HeaderComponent):
@@ -188,79 +187,18 @@
"""
__select__ = yes()
__regid__ = 'applmessages'
- # don't want user to hide this component using an cwproperty
+ # don't want user to hide this component using a cwproperty
cw_property_defs = {}
def call(self, msg=None):
if msg is None:
- msgs = []
- if self._cw.cnx:
- srcmsg = self._cw.get_shared_data('sources_error', pop=True, txdata=True)
- if srcmsg:
- msgs.append(srcmsg)
- reqmsg = self._cw.message # XXX don't call self._cw.message twice
- if reqmsg:
- msgs.append(reqmsg)
- else:
- msgs = [msg]
+ msg = self._cw.message # XXX don't call self._cw.message twice
self.w(u'<div id="appMsg" onclick="%s" class="%s">\n' %
- (toggle_action('appMsg'), (msgs and ' ' or 'hidden')))
- for msg in msgs:
- self.w(u'<div class="message" id="%s">%s</div>' % (self.domid, msg))
+ (toggle_action('appMsg'), (msg and ' ' or 'hidden')))
+ self.w(u'<div class="message" id="%s">%s</div>' % (self.domid, msg))
self.w(u'</div>')
-class EtypeRestrictionComponent(component.Component):
- """displays the list of entity types contained in the resultset
- to be able to filter accordingly.
- """
- __regid__ = 'etypenavigation'
- __select__ = multi_etypes_rset() | match_form_params(
- '__restrtype', '__restrtypes', '__restrrql')
- cw_property_defs = VISIBLE_PROP_DEF
- # don't want user to hide this component using an cwproperty
- site_wide = True
- visible = False # disabled by default
-
- def call(self):
- _ = self._cw._
- self.w(u'<div id="etyperestriction">')
- restrtype = self._cw.form.get('__restrtype')
- restrtypes = self._cw.form.get('__restrtypes', '').split(',')
- restrrql = self._cw.form.get('__restrrql')
- if not restrrql:
- rqlst = self.cw_rset.syntax_tree()
- restrrql = rqlst.as_string(self._cw.encoding, self.cw_rset.args)
- restrtypes = self.cw_rset.column_types(0)
- else:
- rqlst = parse(restrrql)
- html = []
- on_etype = False
- etypes = sorted((display_name(self._cw, etype).capitalize(), etype)
- for etype in restrtypes)
- for elabel, etype in etypes:
- if etype == restrtype:
- html.append(u'<span class="selected">%s</span>' % elabel)
- on_etype = True
- else:
- rqlst.save_state()
- for select in rqlst.children:
- select.add_type_restriction(select.selection[0].variable, etype)
- newrql = rqlst.as_string(self._cw.encoding, self.cw_rset.args)
- url = self._cw.build_url(rql=newrql, __restrrql=restrrql,
- __restrtype=etype, __restrtypes=','.join(restrtypes))
- html.append(u'<span><a href="%s">%s</a></span>' % (
- xml_escape(url), elabel))
- rqlst.recover()
- if on_etype:
- url = self._cw.build_url(rql=restrrql)
- html.insert(0, u'<span><a href="%s">%s</a></span>' % (
- url, _('Any')))
- else:
- html.insert(0, u'<span class="selected">%s</span>' % _('Any'))
- self.w(u' | '.join(html))
- self.w(u'</div>')
-
# contextual components ########################################################
--- a/web/views/basetemplates.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/views/basetemplates.py Tue Jun 10 09:49:45 2014 +0200
@@ -474,7 +474,7 @@
if target and target != '/':
url_args['postlogin_path'] = target
return self._cw.build_url('login', __secure__=True, **url_args)
- return super(LogForm, self).form_action()
+ return super(BaseLogForm, self).form_action()
class LogForm(BaseLogForm):
"""Simple login form that send username and password
--- a/web/views/cwsources.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/views/cwsources.py Tue Jun 10 09:49:45 2014 +0200
@@ -99,7 +99,7 @@
cellvids={1: 'editable-final'})
-MAPPED_SOURCE_TYPES = set( ('pyrorql', 'datafeed') )
+MAPPED_SOURCE_TYPES = set( ('datafeed',) )
class CWSourceMappingTab(EntityView):
__regid__ = 'cwsource-mapping'
@@ -117,21 +117,7 @@
'Any X, SCH, XO ORDERBY ET WHERE X options XO, X cw_for_source S, S eid %(s)s, '
'X cw_schema SCH, SCH is ET', {'s': entity.eid})
self.wview('table', rset, 'noresult')
- # self.w('<h3>%s</h3>' % _('Relations that should not be crossed'))
- # self.w('<p>%s</p>' % _(
- # 'By default, when a relation is not supported by a source, it is '
- # 'supposed that a local relation may point to an entity from the '
- # 'external source. Relations listed here won\'t have this '
- # '"crossing" behaviour.'))
- # self.wview('list', entity.related('cw_dont_cross'), 'noresult')
- # self.w('<h3>%s</h3>' % _('Relations that can be crossed'))
- # self.w('<p>%s</p>' % _(
- # 'By default, when a relation is supported by a source, it is '
- # 'supposed that a local relation can\'t point to an entity from the '
- # 'external source. Relations listed here may have this '
- # '"crossing" behaviour anyway.'))
- # self.wview('list', entity.related('cw_may_cross'), 'noresult')
- checker = MAPPING_CHECKERS.get(entity.type, MappingChecker)(entity)
+ checker = MappingChecker(entity)
checker.check()
if (checker.errors or checker.warnings or checker.infos):
self.w('<h2>%s</h2>' % _('Detected problems'))
@@ -215,49 +201,6 @@
pass
-class PyroRQLMappingChecker(MappingChecker):
- """pyrorql source mapping checker"""
-
- def init(self):
- self.dontcross = set()
- self.maycross = set()
- super(PyroRQLMappingChecker, self).init()
-
- def init_schemacfg(self, schemacfg):
- options = schemacfg.options or ()
- if 'dontcross' in options:
- self.dontcross.add(schemacfg.schema.name)
- else:
- super(PyroRQLMappingChecker, self).init_schemacfg(schemacfg)
- if 'maycross' in options:
- self.maycross.add(schemacfg.schema.name)
-
- def custom_check(self):
- error = self.errors.append
- info = self.infos.append
- for etype in self.sentities:
- eschema = self.schema[etype]
- for rschema, ttypes, role in eschema.relation_definitions():
- if rschema in META_RTYPES:
- continue
- if not rschema in self.srelations:
- if rschema not in self.dontcross:
- if role == 'subject' and rschema.inlined:
- error(_('inlined relation %(rtype)s of %(etype)s '
- 'should be supported') %
- {'rtype': rschema, 'etype': etype})
- elif (rschema not in self.seen and rschema not in self.maycross):
- info(_('you may want to specify something for %s') %
- rschema)
- self.seen.add(rschema)
- elif rschema in self.maycross and rschema.inlined:
- error(_('you should un-inline relation %s which is '
- 'supported and may be crossed ') % rschema)
-
-MAPPING_CHECKERS = {
- 'pyrorql': PyroRQLMappingChecker,
- }
-
class CWSourceImportsTab(EntityView):
__regid__ = 'cwsource-imports'
--- a/web/views/debug.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/views/debug.py Tue Jun 10 09:49:45 2014 +0200
@@ -97,7 +97,7 @@
w(u'<h2>%s</h2>' % _('Repository'))
w(u'<h3>%s</h3>' % _('resources usage'))
w(u'<table>')
- stats = repo.stats()
+ stats = self._cw.call_service('repo_stats')
for element in sorted(stats):
w(u'<tr><th align="left">%s</th><td>%s %s</td></tr>'
% (element, xml_escape(unicode(stats[element])),
@@ -105,7 +105,7 @@
w(u'</table>')
if req.cnx.is_repo_in_memory and req.user.is_in_group('managers'):
w(u'<h3>%s</h3>' % _('opened sessions'))
- sessions = repo._sessions.itervalues()
+ sessions = repo._sessions.values()
if sessions:
w(u'<ul>')
for session in sessions:
@@ -131,19 +131,9 @@
sessions = SESSION_MANAGER.current_sessions()
w(u'<h3>%s</h3>' % _('opened web sessions'))
if sessions:
- n_no_cnx_sessions = 0
w(u'<ul>')
for session in sessions:
- if not session.cnx:
- # We do not want to list all sessions without cnx
- # Their session ID are useless, hence we just count them
- n_no_cnx_sessions += 1
- continue
- try:
- last_usage_time = session.cnx.check()
- except BadConnectionId:
- w(u'<li>%s (INVALID)</li>' % session.sessionid)
- continue
+ last_usage_time = session.mtime
w(u'<li>%s (%s: %s)<br/>' % (
session.sessionid,
_('last usage'),
@@ -151,9 +141,6 @@
dict_to_html(w, session.data)
w(u'</li>')
w(u'</ul>')
- if n_no_cnx_sessions > 0:
- w(u'<h3>%s %s</h3>' % (n_no_cnx_sessions,
- _('web sessions without CNX')))
else:
w(u'<p>%s</p>' % _('no web sessions found'))
--- a/web/views/editcontroller.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/views/editcontroller.py Tue Jun 10 09:49:45 2014 +0200
@@ -252,15 +252,7 @@
formid = 'edition'
form = req.vreg['forms'].select(formid, req, entity=entity)
eid = form.actual_eid(entity.eid)
- try:
- editedfields = formparams['_cw_entity_fields']
- except KeyError:
- try:
- editedfields = formparams['_cw_edited_fields']
- warn('[3.13] _cw_edited_fields has been renamed _cw_entity_fields',
- DeprecationWarning)
- except KeyError:
- raise RequestError(req._('no edited fields specified for entity %s' % entity.eid))
+ editedfields = formparams['_cw_entity_fields']
form.formvalues = {} # init fields value cache
for field in form.iter_modified_fields(editedfields, entity):
self.handle_formfield(form, field, rqlquery)
--- a/web/views/formrenderers.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/views/formrenderers.py Tue Jun 10 09:49:45 2014 +0200
@@ -202,6 +202,8 @@
attrs.setdefault('class', form.cssclass)
if form.cwtarget:
attrs.setdefault('cubicweb:target', form.cwtarget)
+ if not form.autocomplete:
+ attrs.setdefault('autocomplete', 'off')
return '<form %s>' % uilib.sgml_attributes(attrs)
def close_form(self, form, values):
--- a/web/views/forms.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/views/forms.py Tue Jun 10 09:49:45 2014 +0200
@@ -111,6 +111,9 @@
:attr:`fieldsets_in_order`
sequence of fieldset names, to control order
+ :attr:`autocomplete`
+ set to False to add autocomplete="off" to the form open tag
+
**Generic methods**
.. automethod:: cubicweb.web.form.Form.field_by_name(name, role=None)
@@ -160,6 +163,7 @@
form_buttons = None
form_renderer_id = 'default'
fieldsets_in_order = None
+ autocomplete = True
@property
def needs_multipart(self):
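
Note: with the new class attribute a form can opt out of browser autofill; a
minimal sketch (MyForm and its regid are hypothetical):

    from cubicweb.web.views.forms import FieldsForm

    class MyForm(FieldsForm):
        __regid__ = 'my-form'
        autocomplete = False  # renderer emits autocomplete="off" in the <form> tag
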
--- a/web/views/management.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/views/management.py Tue Jun 10 09:49:45 2014 +0200
@@ -181,7 +181,7 @@
__select__ = none_rset() & match_user_groups('users', 'managers')
def call(self):
- stats = self._cw.vreg.config.repository(None).stats()
+ stats = self._cw.call_service('repo_stats')
results = []
for element in stats:
results.append(u'%s %s' % (element, stats[element]))
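
Note: repository statistics are now fetched through the service API instead of
grabbing the repository object directly; any request object can do the same
(py2 print, matching the codebase; `req` is any CubicWeb request):

    stats = req.call_service('repo_stats')
    for key in sorted(stats):
        print key, stats[key]
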
--- a/web/views/sessions.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/views/sessions.py Tue Jun 10 09:49:45 2014 +0200
@@ -21,11 +21,14 @@
__docformat__ = "restructuredtext en"
+from time import time
+
from cubicweb import (RepositoryError, Unauthorized, AuthenticationError,
BadConnectionId)
from cubicweb.web import InvalidSession, Redirect
from cubicweb.web.application import AbstractSessionManager
from cubicweb.dbapi import ProgrammingError, DBAPISession
+from cubicweb import repoapi
class InMemoryRepositorySessionManager(AbstractSessionManager):
@@ -53,72 +56,60 @@
if sessionid not in self._sessions:
raise InvalidSession()
session = self._sessions[sessionid]
- if session.cnx:
- try:
- user = self.authmanager.validate_session(req, session)
- except InvalidSession:
- # invalid session
- self.close_session(session)
- raise
- # associate the connection to the current request
- req.set_session(session, user)
+ try:
+ user = self.authmanager.validate_session(req, session)
+ except InvalidSession:
+ self.close_session(session)
+ raise
+ if session.closed:
+ self.close_session(session)
+ raise InvalidSession()
return session
- def open_session(self, req, allow_no_cnx=True):
+ def open_session(self, req):
"""open and return a new session for the given request. The session is
also bound to the request.
raise :exc:`cubicweb.AuthenticationError` if authentication failed
(no authentication info found or wrong user/password)
"""
- try:
- cnx, login = self.authmanager.authenticate(req)
- except AuthenticationError:
- if allow_no_cnx:
- session = DBAPISession(None)
- else:
- raise
- else:
- session = DBAPISession(cnx, login)
+ session, login = self.authmanager.authenticate(req)
self._sessions[session.sessionid] = session
- # associate the connection to the current request
- req.set_session(session)
+ session.mtime = time()
return session
- def postlogin(self, req):
- """postlogin: the user has been authenticated, redirect to the original
- page (index by default) with a welcome message
+ def postlogin(self, req, session):
+ """postlogin: the user have been related to a session
+
+ Both req and session are passed to this function because actually
+ linking the request to the session is not yet done and not the
+ responsability of this object.
"""
# Update last connection date
# XXX: this should be in a post login hook in the repository, but there
# we can't differentiate an actual login from automatic session
# reopening. Is it actually a problem?
if 'last_login_time' in req.vreg.schema:
- self._update_last_login_time(req)
- req.set_message(req._('welcome %s!') % req.user.login)
+ self._update_last_login_time(session)
+ req.set_message(req._('welcome %s!') % session.user.login)
- def _update_last_login_time(self, req):
+ def _update_last_login_time(self, session):
# XXX should properly detect missing permission / non writeable source
# and avoid "except (RepositoryError, Unauthorized)" below
try:
- req.execute('SET X last_login_time NOW WHERE X eid %(x)s',
- {'x' : req.user.eid})
- req.cnx.commit()
+ cnx = repoapi.ClientConnection(session)
+ with cnx:
+ cnx.execute('SET X last_login_time NOW WHERE X eid %(x)s',
+ {'x' : session.user.eid})
+ cnx.commit()
except (RepositoryError, Unauthorized):
- req.cnx.rollback()
- except Exception:
- req.cnx.rollback()
- raise
+ pass
def close_session(self, session):
"""close session on logout or on invalid session detected (expired out,
corrupted...)
"""
self.info('closing http session %s' % session.sessionid)
- del self._sessions[session.sessionid]
- if session.cnx:
- try:
- session.cnx.close()
- except (ProgrammingError, BadConnectionId): # expired on the repository side
- pass
- session.cnx = None
+ self._sessions.pop(session.sessionid, None)
+ if not session.closed:
+ session.repo.close(session.sessionid)
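
Note: _update_last_login_time above shows the new repoapi pattern for running
RQL against a server-side session; a minimal standalone sketch (`session` is an
existing Session object):

    from cubicweb import repoapi

    cnx = repoapi.ClientConnection(session)
    with cnx:  # opens the connection and guarantees it is closed afterwards
        cnx.execute('SET X last_login_time NOW WHERE X eid %(x)s',
                    {'x': session.user.eid})
        cnx.commit()
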
--- a/web/views/staticcontrollers.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/views/staticcontrollers.py Tue Jun 10 09:49:45 2014 +0200
@@ -27,6 +27,7 @@
import hashlib
import mimetypes
import threading
+import tempfile
from time import mktime
from datetime import datetime, timedelta
from logging import getLogger
@@ -145,32 +146,34 @@
def concat_cached_filepath(self, paths):
filepath = self.build_filepath(paths)
if not self._up_to_date(filepath, paths):
- tmpfile = filepath + '.tmp'
- try:
- with self.lock:
- if self._up_to_date(filepath, paths):
- # first check could have raced with some other thread
- # updating the file
- return filepath
- with open(tmpfile, 'wb') as f:
- for path in paths:
- dirpath, rid = self._resource(path)
- if rid is None:
- # In production mode log an error, do not return a 404
- # XXX the erroneous content is cached anyway
- self.logger.error('concatenated data url error: %r file '
- 'does not exist', path)
- if self.config.debugmode:
- raise NotFound(path)
- else:
- with open(osp.join(dirpath, rid), 'rb') as source:
- for line in source:
- f.write(line)
- f.write('\n')
+ with self.lock:
+ if self._up_to_date(filepath, paths):
+ # first check could have raced with some other thread
+ # updating the file
+ return filepath
+ fd, tmpfile = tempfile.mkstemp(dir=os.path.dirname(filepath))
+ try:
+ f = os.fdopen(fd, 'wb')
+ for path in paths:
+ dirpath, rid = self._resource(path)
+ if rid is None:
+ # In production mode log an error, do not return a 404
+ # XXX the erroneous content is cached anyway
+ self.logger.error('concatenated data url error: %r file '
+ 'does not exist', path)
+ if self.config.debugmode:
+ raise NotFound(path)
+ else:
+ with open(osp.join(dirpath, rid), 'rb') as source:
+ for line in source:
+ f.write(line)
+ f.write('\n')
+ f.close()
+ except:
+ os.remove(tmpfile)
+ raise
+ else:
os.rename(tmpfile, filepath)
- except:
- os.remove(tmpfile)
- raise
return filepath
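
Note: the concat cache now builds the file under a tempfile.mkstemp() name and
renames it into place, so concurrent readers never observe a half-written
file. A standalone sketch of the same pattern (atomic_write and its arguments
are hypothetical):

    import os
    import tempfile

    def atomic_write(filepath, chunks):
        # the temporary file must live in the target directory so that
        # os.rename() stays on one filesystem and is atomic
        fd, tmpfile = tempfile.mkstemp(dir=os.path.dirname(filepath))
        try:
            with os.fdopen(fd, 'wb') as f:
                for chunk in chunks:
                    f.write(chunk)
        except:
            os.remove(tmpfile)
            raise
        else:
            os.rename(tmpfile, filepath)
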
--- a/web/views/urlpublishing.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/views/urlpublishing.py Tue Jun 10 09:49:45 2014 +0200
@@ -34,7 +34,7 @@
3. :class:`cubicweb.web.views.urlpublishing.URLRewriteEvaluator`
selects all urlrewriter components, sorts them according to their
- priorty, call their ``rewrite()`` method, the first one that
+ priority, calls their ``rewrite()`` method; the first one that
doesn't raise a ``KeyError`` wins. This is where the
:mod:`cubicweb.web.views.urlrewrite` and
:class:`cubicweb.web.views.urlrewrite.SimpleReqRewriter` comes into
--- a/web/webconfig.py Tue Jun 10 09:35:26 2014 +0200
+++ b/web/webconfig.py Tue Jun 10 09:49:45 2014 +0200
@@ -111,14 +111,6 @@
'group': 'web', 'level': 3,
}),
# web configuration
- ('ui-cube',
- {'type' : 'string',
- 'default': None,
- 'help': 'the name of the UI cube that will be loaded before all other '\
- 'cubes. Setting this value to None will instruct cubicweb not to load '\
- 'any extra cube.',
- 'group': 'web', 'level': 3,
- }),
('https-url',
{'type' : 'string',
'default': None,
@@ -247,6 +239,36 @@
'help': 'The static data resource directory path.',
'group': 'web', 'level': 2,
}),
+ ('access-control-allow-origin',
+ {'type' : 'csv',
+ 'default': (),
+ 'help':('comma-separated list of allowed origin domains or "*" for any domain'),
+ 'group': 'web', 'level': 2,
+ }),
+ ('access-control-allow-methods',
+ {'type' : 'csv',
+ 'default': (),
+ 'help': ('comma-separated list of allowed HTTP methods'),
+ 'group': 'web', 'level': 2,
+ }),
+ ('access-control-max-age',
+ {'type' : 'int',
+ 'default': None,
+ 'help': ('maximum delay in seconds the browser may cache a preflight response'),
+ 'group': 'web', 'level': 2,
+ }),
+ ('access-control-expose-headers',
+ {'type' : 'csv',
+ 'default': (),
+ 'help':('comma-separated list of response headers that browsers are allowed to expose to client-side scripts'),
+ 'group': 'web', 'level': 2,
+ }),
+ ('access-control-allow-headers',
+ {'type' : 'csv',
+ 'default': (),
+ 'help':('comma-separated list of request headers clients may send in the actual cross-origin request'),
+ 'group': 'web', 'level': 2,
+ }),
))
def __init__(self, *args, **kwargs):
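
Note: the five new access-control-* options belong to the 'web' group, i.e.
the [WEB] section of the instance's all-in-one.conf (section name inferred
from the 'group' key above; all values are illustrative, not defaults):

    [WEB]
    access-control-allow-origin=https://example.org
    access-control-allow-methods=GET,POST
    access-control-max-age=3600
    access-control-expose-headers=X-Custom-Header
    access-control-allow-headers=X-Requested-With,Content-Type
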
@@ -274,7 +296,7 @@
try:
return self.__repo
except AttributeError:
- from cubicweb.dbapi import get_repository
+ from cubicweb.repoapi import get_repository
repo = get_repository(config=self, vreg=vreg)
self.__repo = repo
return repo
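
Note: repository access for the web side now goes through cubicweb.repoapi; a
minimal sketch, assuming `config` is an instance configuration:

    from cubicweb.repoapi import get_repository

    repo = get_repository(config=config)  # in-memory repository, cached on the config
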
--- a/wsgi/__init__.py Tue Jun 10 09:35:26 2014 +0200
+++ b/wsgi/__init__.py Tue Jun 10 09:49:45 2014 +0200
@@ -29,7 +29,7 @@
from email import message, message_from_string
from Cookie import SimpleCookie
from StringIO import StringIO
-from cgi import parse_header, parse_qsl
+from cgi import parse_header
from pprint import pformat as _pformat
@@ -40,13 +40,6 @@
except Exception:
return u'<could not parse>'
-def qs2dict(qs):
- """transforms a query string into a regular python dict"""
- result = {}
- for key, value in parse_qsl(qs, True):
- result.setdefault(key, []).append(value)
- return result
-
def normalize_header(header):
"""returns a normalized header name
@@ -70,31 +63,3 @@
break
fdst.write(buf)
size -= len(buf)
-
-def parse_file_upload(header_dict, post_data):
- """This is adapted FROM DJANGO"""
- raw_message = '\r\n'.join('%s:%s' % pair for pair in header_dict.iteritems())
- raw_message += '\r\n\r\n' + post_data
- msg = message_from_string(raw_message)
- post, files = {}, {}
- for submessage in msg.get_payload():
- name_dict = parse_header(submessage['Content-Disposition'])[1]
- key = name_dict['name']
- # name_dict is something like {'name': 'file', 'filename': 'test.txt'} for file uploads
- # or {'name': 'blah'} for POST fields
- # We assume all uploaded files have a 'filename' set.
- if 'filename' in name_dict:
- assert type([]) != type(submessage.get_payload()), "Nested MIME messages are not supported"
- if not name_dict['filename'].strip():
- continue
- # IE submits the full path, so trim everything but the basename.
- # (We can't use os.path.basename because that uses the server's
- # directory separator, which may not be the same as the
- # client's one.)
- filename = name_dict['filename'][name_dict['filename'].rfind("\\")+1:]
- mimetype = 'Content-Type' in submessage and submessage['Content-Type'] or None
- content = StringIO(submessage.get_payload())
- files[key] = [filename, mimetype, content]
- else:
- post.setdefault(key, []).append(submessage.get_payload())
- return post, files
--- a/wsgi/handler.py Tue Jun 10 09:35:26 2014 +0200
+++ b/wsgi/handler.py Tue Jun 10 09:49:45 2014 +0200
@@ -97,7 +97,7 @@
"""
def __init__(self, config):
- self.appli = CubicWebPublisher(config)
+ self.appli = CubicWebPublisher(config.repository(), config)
self.config = config
self.base_url = self.config['base-url']
self.url_rewriter = self.appli.vreg['components'].select_or_none('urlrewriter')
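
Note: CubicWebPublisher now takes the repository explicitly. A hypothetical
bootstrap matching the new two-argument signature ('myinstance' is a
placeholder instance name):

    from cubicweb.cwconfig import CubicWebConfiguration
    from cubicweb.web.application import CubicWebPublisher

    config = CubicWebConfiguration.config_for('myinstance')
    appli = CubicWebPublisher(config.repository(), config)
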
--- a/wsgi/request.py Tue Jun 10 09:35:26 2014 +0200
+++ b/wsgi/request.py Tue Jun 10 09:49:45 2014 +0200
@@ -27,14 +27,12 @@
from StringIO import StringIO
from urllib import quote
+import tempfile
+from urlparse import parse_qs
-from logilab.common.decorators import cached
-
+from cubicweb.multipart import copy_file, parse_form_data
from cubicweb.web.request import CubicWebRequestBase
-from cubicweb.wsgi import (pformat, qs2dict, safe_copyfileobj, parse_file_upload,
- normalize_header)
-from cubicweb.web.http_headers import Headers
-
+from cubicweb.wsgi import pformat, normalize_header
class CubicWebWsgiRequest(CubicWebRequestBase):
@@ -45,7 +42,20 @@
self.environ = environ
self.path = environ['PATH_INFO']
self.method = environ['REQUEST_METHOD'].upper()
- self.content = environ['wsgi.input']
+
+ # content_length "may be empty or absent"
+ try:
+ length = int(environ['CONTENT_LENGTH'])
+ except (KeyError, ValueError):
+ length = 0
+ # wsgi.input is not seekable; spool the body into a seekable buffer (memory for small bodies, a temporary file otherwise)
+ if length < 100000:
+ self.content = StringIO()
+ else:
+ self.content = tempfile.TemporaryFile()
+ copy_file(environ['wsgi.input'], self.content, maxread=length)
+ self.content.seek(0, 0)
+ environ['wsgi.input'] = self.content
headers_in = dict((normalize_header(k[5:]), v) for k, v in self.environ.items()
if k.startswith('HTTP_'))
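
Note: spooling wsgi.input into a seekable buffer is what later allows
get_posted_data() to parse the body and then rewind it. A condensed sketch of
the logic above (make_seekable_body is a hypothetical helper; the 100000-byte
threshold comes from the code):

    import tempfile
    from StringIO import StringIO

    def make_seekable_body(environ, threshold=100000):
        try:
            length = int(environ['CONTENT_LENGTH'])
        except (KeyError, ValueError):  # header may be absent or empty
            length = 0
        body = StringIO() if length < threshold else tempfile.TemporaryFile()
        remaining = length
        while remaining > 0:  # copy at most `length` bytes from wsgi.input
            buf = environ['wsgi.input'].read(min(remaining, 64 * 1024))
            if not buf:
                break
            body.write(buf)
            remaining -= len(buf)
        body.seek(0)
        environ['wsgi.input'] = body  # downstream parsers can now seek and re-read
        return body
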
@@ -55,10 +65,11 @@
super(CubicWebWsgiRequest, self).__init__(vreg, https, post,
headers= headers_in)
if files is not None:
- for key, (name, _, stream) in files.iteritems():
- if name is not None:
- name = unicode(name, self.encoding)
- self.form[key] = (name, stream)
+ for key, part in files.iteritems():
+ name = None
+ if part.filename is not None:
+ name = unicode(part.filename, self.encoding)
+ self.form[key] = (name, part.file)
def __repr__(self):
# Since this is called as part of error handling, we need to be very
@@ -122,32 +133,11 @@
def get_posted_data(self):
# The WSGI spec says 'QUERY_STRING' may be absent.
- post = qs2dict(self.environ.get('QUERY_STRING', ''))
+ post = parse_qs(self.environ.get('QUERY_STRING', ''))
files = None
if self.method == 'POST':
- if self.environ.get('CONTENT_TYPE', '').startswith('multipart'):
- header_dict = dict((normalize_header(k[5:]), v)
- for k, v in self.environ.items()
- if k.startswith('HTTP_'))
- header_dict['Content-Type'] = self.environ.get('CONTENT_TYPE', '')
- post_, files = parse_file_upload(header_dict, self.raw_post_data)
- post.update(post_)
- else:
- post.update(qs2dict(self.raw_post_data))
+ forms, files = parse_form_data(self.environ, strict=True,
+ mem_limit=self.vreg.config['max-post-length'])
+ post.update(forms)
+ self.content.seek(0, 0)
return post, files
-
- @property
- @cached
- def raw_post_data(self):
- buf = StringIO()
- try:
- # CONTENT_LENGTH might be absent if POST doesn't have content at all (lighttpd)
- content_length = int(self.environ.get('CONTENT_LENGTH', 0))
- except ValueError: # if CONTENT_LENGTH was empty string or not an integer
- content_length = 0
- if content_length > 0:
- safe_copyfileobj(self.environ['wsgi.input'], buf,
- size=content_length)
- postdata = buf.getvalue()
- buf.close()
- return postdata
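
Note: POST bodies are now parsed by the bundled cubicweb.multipart module; a
sketch of the contract used above, assuming it follows the multipart library
it is derived from (`environ` has a seekable wsgi.input as shown earlier):

    from cubicweb.multipart import parse_form_data

    forms, files = parse_form_data(environ, strict=True,
                                   mem_limit=100 * 1024 * 1024)
    for key in files:
        part = files[key]  # MultipartPart with .filename and .file attributes
        print part.filename, part.file  # py2 print, matching the codebase
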