--- a/.hgignore Mon Feb 17 11:13:27 2014 +0100
+++ b/.hgignore Mon Feb 17 15:32:50 2014 +0100
@@ -18,3 +18,4 @@
^doc/html/
^doc/doctrees/
^doc/book/en/devweb/js_api/
+data/pgdb/
--- a/__init__.py Mon Feb 17 11:13:27 2014 +0100
+++ b/__init__.py Mon Feb 17 15:32:50 2014 +0100
@@ -242,3 +242,15 @@
errors[rname(*key)] = errors.pop(key)
return ValidationError(getattr(entity, 'eid', entity), errors,
substitutions, i18nvalues)
+
+
+# exceptions ##################################################################
+
+class ProgrammingError(Exception): #DatabaseError):
+    """Exception raised for errors caused by the program, e.g. operating on a
+    closed connection, a table not found, a syntax error in a query, a wrong
+    number of parameters specified, etc.
+    """
+
--- a/_exceptions.py Mon Feb 17 11:13:27 2014 +0100
+++ b/_exceptions.py Mon Feb 17 15:32:50 2014 +0100
@@ -76,13 +76,6 @@
"""the eid is not defined in the system tables"""
msg = 'No entity with eid %s in the repository'
-class ETypeNotSupportedBySources(RepositoryError, InternalError):
- """no source support an entity type"""
- msg = 'No source supports %r entity\'s type'
-
-class MultiSourcesError(RepositoryError, InternalError):
- """usually due to bad multisources configuration or rql query"""
-
class UniqueTogetherError(RepositoryError):
"""raised when a unique_together constraint caused an IntegrityError"""
def __init__(self, session, **kwargs):
--- a/cwctl.py Mon Feb 17 11:13:27 2014 +0100
+++ b/cwctl.py Mon Feb 17 15:32:50 2014 +0100
@@ -781,7 +781,8 @@
if self.config.fs_only or toupgrade:
for cube, fromversion, toversion in toupgrade:
print '-> migration needed from %s to %s for %s' % (fromversion, toversion, cube)
- mih.migrate(vcconf, reversed(toupgrade), self.config)
+ with mih.cnx:
+ mih.migrate(vcconf, reversed(toupgrade), self.config)
else:
print '-> no data migration needed for instance %s.' % appid
# rewrite main configuration file
@@ -912,13 +913,14 @@
def _handle_networked(self, appuri):
""" returns migration context handler & shutdown function """
from cubicweb import AuthenticationError
- from cubicweb.dbapi import connect
+ from cubicweb.repoapi import connect, get_repository
from cubicweb.server.utils import manager_userpasswd
from cubicweb.server.migractions import ServerMigrationHelper
while True:
try:
login, pwd = manager_userpasswd(msg=None)
- cnx = connect(appuri, login=login, password=pwd, mulcnx=False)
+ repo = get_repository(appuri)
+ cnx = connect(repo, login=login, password=pwd, mulcnx=False)
except AuthenticationError as ex:
print ex
except (KeyboardInterrupt, EOFError):
@@ -948,15 +950,16 @@
else:
mih, shutdown_callback = self._handle_networked(appuri)
try:
- if args:
- # use cmdline parser to access left/right attributes only
- # remember that usage requires instance appid as first argument
- scripts, args = self.cmdline_parser.largs[1:], self.cmdline_parser.rargs
- for script in scripts:
- mih.cmd_process_script(script, scriptargs=args)
- mih.commit()
- else:
- mih.interactive_shell()
+ with mih.cnx:
+ if args:
+ # use cmdline parser to access left/right attributes only
+ # remember that usage requires instance appid as first argument
+ scripts, args = self.cmdline_parser.largs[1:], self.cmdline_parser.rargs
+ for script in scripts:
+ mih.cmd_process_script(script, scriptargs=args)
+ mih.commit()
+ else:
+ mih.interactive_shell()
finally:
shutdown_callback()
--- a/cwvreg.py Mon Feb 17 11:13:27 2014 +0100
+++ b/cwvreg.py Mon Feb 17 15:32:50 2014 +0100
@@ -701,7 +701,7 @@
def solutions(self, req, rqlst, args):
def type_from_eid(eid, req=req):
- return req.describe(eid)[0]
+ return req.entity_metas(eid)['type']
return self.rqlhelper.compute_solutions(rqlst, {'eid': type_from_eid}, args)
def parse(self, req, rql, args=None):
--- a/dataimport.py Mon Feb 17 11:13:27 2014 +0100
+++ b/dataimport.py Mon Feb 17 15:32:50 2014 +0100
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -938,7 +938,7 @@
def drop_indexes(self, etype):
"""Drop indexes for a given entity type"""
if etype not in self.indexes_etypes:
- cu = self.session.cnxset['system']
+ cu = self.session.cnxset.cu
def index_to_attr(index):
"""turn an index name to (database) attribute name"""
return index.replace(etype.lower(), '').replace('idx', '').strip('_')
@@ -971,7 +971,6 @@
self._storage_handler = self.system_source._storage_handler
self.preprocess_entity = self.system_source.preprocess_entity
self.sqlgen = self.system_source.sqlgen
- self.copy_based_source = self.system_source.copy_based_source
self.uri = self.system_source.uri
self.eid = self.system_source.eid
# Directory to write temporary files
@@ -1125,9 +1124,8 @@
if extid is not None:
assert isinstance(extid, str)
extid = b64encode(extid)
- uri = 'system' if source.copy_based_source else source.uri
attrs = {'type': entity.cw_etype, 'eid': entity.eid, 'extid': extid,
- 'source': uri, 'asource': source.uri, 'mtime': datetime.utcnow()}
+ 'source': 'system', 'asource': source.uri}
self._handle_insert_entity_sql(session, self.sqlgen.insert('entities', attrs), attrs)
# insert core relations: is, is_instance_of and cw_source
try:
--- a/dbapi.py Mon Feb 17 11:13:27 2014 +0100
+++ b/dbapi.py Mon Feb 17 15:32:50 2014 +0100
@@ -34,13 +34,13 @@
from urlparse import urlparse
from logilab.common.logging_ext import set_log_methods
-from logilab.common.decorators import monkeypatch
+from logilab.common.decorators import monkeypatch, cachedproperty
from logilab.common.deprecation import deprecated
-from cubicweb import ETYPE_NAME_MAP, ConnectionError, AuthenticationError,\
- cwvreg, cwconfig
+from cubicweb import (ETYPE_NAME_MAP, AuthenticationError, ProgrammingError,
+ cwvreg, cwconfig)
+from cubicweb.repoapi import get_repository
from cubicweb.req import RequestSessionBase
-from cubicweb.utils import parse_repo_uri
_MARKER = object()
@@ -91,53 +91,7 @@
self.close_on_del = close
-def _get_inmemory_repo(config, vreg=None):
- from cubicweb.server.repository import Repository
- from cubicweb.server.utils import TasksManager
- return Repository(config, TasksManager(), vreg=vreg)
-
-def get_repository(uri=None, config=None, vreg=None):
- """get a repository for the given URI or config/vregistry (in case we're
- loading the repository for a client, eg web server, configuration).
-
- The returned repository may be an in-memory repository or a proxy object
- using a specific RPC method, depending on the given URI (pyro or zmq).
- """
- if uri is None:
- return _get_inmemory_repo(config, vreg)
-
- protocol, hostport, appid = parse_repo_uri(uri)
-
- if protocol == 'inmemory':
- # me may have been called with a dummy 'inmemory://' uri ...
- return _get_inmemory_repo(config, vreg)
-
- if protocol == 'pyroloc': # direct connection to the instance
- from logilab.common.pyro_ext import get_proxy
- uri = uri.replace('pyroloc', 'PYRO')
- return get_proxy(uri)
-
- if protocol == 'pyro': # connection mediated through the pyro ns
- from logilab.common.pyro_ext import ns_get_proxy
- path = appid.strip('/')
- if not path:
- raise ConnectionError(
- "can't find instance name in %s (expected to be the path component)"
- % uri)
- if '.' in path:
- nsgroup, nsid = path.rsplit('.', 1)
- else:
- nsgroup = 'cubicweb'
- nsid = path
- return ns_get_proxy(nsid, defaultnsgroup=nsgroup, nshost=hostport)
-
- if protocol.startswith('zmqpickle-'):
- from cubicweb.zmqclient import ZMQRepositoryClient
- return ZMQRepositoryClient(uri)
- else:
- raise ConnectionError('unknown protocol: `%s`' % protocol)
-
-
+@deprecated('[3.19] the dbapi is deprecated. Have a look at the new repoapi.')
def _repo_connect(repo, login, **kwargs):
"""Constructor to create a new connection to the given CubicWeb repository.
@@ -327,17 +281,17 @@
else:
# these args are initialized after a connection is
# established
- self.session = None
+ self.session = DBAPISession(None)
self.cnx = self.user = _NeedAuthAccessMock()
self.set_default_language(vreg)
- def from_controller(self):
- return 'view'
+ def get_option_value(self, option, foreid=None):
+ if foreid is not None:
+ warn('[3.19] foreid argument is deprecated', DeprecationWarning,
+ stacklevel=2)
+ return self.cnx.get_option_value(option)
- def get_option_value(self, option, foreid=None):
- return self.cnx.get_option_value(option, foreid)
-
- def set_session(self, session, user=None):
+ def set_session(self, session):
"""method called by the session handler when the user is authenticated
or an anonymous connection is open
"""
@@ -345,11 +299,8 @@
if session.cnx:
self.cnx = session.cnx
self.execute = session.cnx.cursor(self).execute
- if user is None:
- user = self.cnx.user(self)
- if user is not None:
- self.user = user
- self.set_entity_cache(user)
+ self.user = self.cnx.user(self)
+ self.set_entity_cache(self.user)
def execute(self, *args, **kwargs): # pylint: disable=E0202
"""overriden when session is set. By default raise authentication error
@@ -371,8 +322,8 @@
# server-side service call #################################################
- def call_service(self, regid, async=False, **kwargs):
- return self.cnx.call_service(regid, async, **kwargs)
+ def call_service(self, regid, **kwargs):
+ return self.cnx.call_service(regid, **kwargs)
# entities cache management ###############################################
@@ -407,20 +358,18 @@
# server session compat layer #############################################
- def describe(self, eid, asdict=False):
+ def entity_metas(self, eid):
"""return a tuple (type, sourceuri, extid) for the entity with id <eid>"""
- return self.cnx.describe(eid, asdict)
+ return self.cnx.entity_metas(eid)
def source_defs(self):
"""return the definition of sources used by the repository."""
return self.cnx.source_defs()
- @deprecated('[3.17] do not use hijack_user. create new Session object')
- def hijack_user(self, user):
- """return a fake request/session using specified user"""
- req = DBAPIRequest(self.vreg)
- req.set_session(self.session, user)
- return req
+ @deprecated('[3.19] use .entity_metas(eid) instead')
+ def describe(self, eid, asdict=False):
+ """return a tuple (type, sourceuri, extid) for the entity with id <eid>"""
+ return self.cnx.describe(eid, asdict)
# these are overridden by set_log_methods below
# only defining here to prevent pylint from complaining
@@ -429,16 +378,6 @@
set_log_methods(DBAPIRequest, getLogger('cubicweb.dbapi'))
-# exceptions ##################################################################
-
-class ProgrammingError(Exception): #DatabaseError):
- """Exception raised for errors that are related to the database's operation
- and not necessarily under the control of the programmer, e.g. an unexpected
- disconnect occurs, the data source name is not found, a transaction could
- not be processed, a memory allocation error occurred during processing,
- etc.
- """
-
# cursor / connection objects ##################################################
@@ -531,7 +470,6 @@
# make exceptions available through the connection object
ProgrammingError = ProgrammingError
# attributes that may be overriden per connection instance
- anonymous_connection = False
cursor_class = Cursor
vreg = None
_closed = None
@@ -557,6 +495,13 @@
return False
return isinstance(self._repo, Repository)
+    @property # could be a cached property but we want to prevent assignment to
+    #          catch potential programming errors.
+ def anonymous_connection(self):
+ login = self._repo.user_info(self.sessionid)[1]
+ anon_login = self.vreg.config.get('anonymous-user')
+ return login == anon_login
+
def __repr__(self):
if self.anonymous_connection:
return '<Connection %s (anonymous)>' % self.sessionid
@@ -583,8 +528,8 @@
# server-side service call #################################################
@check_not_closed
- def call_service(self, regid, async=False, **kwargs):
- return self._repo.call_service(self.sessionid, regid, async, **kwargs)
+ def call_service(self, regid, **kwargs):
+ return self._repo.call_service(self.sessionid, regid, **kwargs)
# connection initialization methods ########################################
@@ -641,11 +586,11 @@
def request(self):
if self._web_request:
- from cubicweb.web.request import CubicWebRequestBase
- req = CubicWebRequestBase(self.vreg, False)
+ from cubicweb.web.request import DBAPICubicWebRequestBase
+ req = DBAPICubicWebRequestBase(self.vreg, False)
req.get_header = lambda x, default=None: default
- req.set_session = lambda session, user=None: DBAPIRequest.set_session(
- req, session, user)
+ req.set_session = lambda session: DBAPIRequest.set_session(
+ req, session)
req.relative_path = lambda includeparams=True: ''
else:
req = DBAPIRequest(self.vreg)
@@ -720,22 +665,40 @@
@check_not_closed
def get_option_value(self, option, foreid=None):
- """Return the value for `option` in the configuration. If `foreid` is
- specified, the actual repository to which this entity belongs is
- dereferenced and the option value retrieved from it.
+ """Return the value for `option` in the configuration.
+
+ `foreid` argument is deprecated and now useless (as of 3.19).
"""
- return self._repo.get_option_value(option, foreid)
+ if foreid is not None:
+ warn('[3.19] foreid argument is deprecated', DeprecationWarning,
+ stacklevel=2)
+ return self._repo.get_option_value(option)
+
@check_not_closed
+ def entity_metas(self, eid):
+        """return a dictionary containing meta-data for the entity with id <eid>"""
+ return self._repo.entity_metas(self.sessionid, eid, **self._txid())
+
+ @deprecated('[3.19] use .entity_metas(eid) instead')
+ @check_not_closed
def describe(self, eid, asdict=False):
- metas = self._repo.describe(self.sessionid, eid, **self._txid())
- if len(metas) == 3: # backward compat
- metas = list(metas)
- metas.append(metas[1])
+ try:
+ metas = self._repo.entity_metas(self.sessionid, eid, **self._txid())
+ except AttributeError:
+ metas = self._repo.describe(self.sessionid, eid, **self._txid())
+ # talking to pre 3.19 repository
+ if len(metas) == 3: # even older backward compat
+ metas = list(metas)
+ metas.append(metas[1])
+ if asdict:
+ return dict(zip(('type', 'source', 'extid', 'asource'), metas))
+ return metas[:-1]
if asdict:
- return dict(zip(('type', 'source', 'extid', 'asource'), metas))
- # XXX :-1 for cw compat, use asdict=True for full information
- return metas[:-1]
+            metas['asource'] = metas['source'] # XXX pre 3.19 client compat
+ return metas
+ return metas['type'], metas['source'], metas['extid']
+
# db-api like interface ####################################################
--- a/devtools/__init__.py Mon Feb 17 11:13:27 2014 +0100
+++ b/devtools/__init__.py Mon Feb 17 15:32:50 2014 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -25,8 +25,11 @@
import shutil
import pickle
import glob
+import random
+import subprocess
import warnings
import tempfile
+import getpass
from hashlib import sha1 # pylint: disable=E0611
from datetime import timedelta
from os.path import (abspath, join, exists, split, isabs, isdir)
@@ -86,6 +89,13 @@
'password': u'gingkow',
},
}
+DEFAULT_PSQL_SOURCES = DEFAULT_SOURCES.copy()
+DEFAULT_PSQL_SOURCES['system'] = DEFAULT_SOURCES['system'].copy()
+DEFAULT_PSQL_SOURCES['system']['db-driver'] = 'postgres'
+DEFAULT_PSQL_SOURCES['system']['db-host'] = '/tmp'
+DEFAULT_PSQL_SOURCES['system']['db-port'] = str(random.randrange(5432, 2**16))
+DEFAULT_PSQL_SOURCES['system']['db-user'] = unicode(getpass.getuser())
+DEFAULT_PSQL_SOURCES['system']['db-password'] = None
def turn_repo_off(repo):
""" Idea: this is less costly than a full re-creation of the repo object.
@@ -120,8 +130,7 @@
repo._type_source_cache = {}
repo._extid_cache = {}
repo.querier._rql_cache = {}
- for source in repo.sources:
- source.reset_caches()
+ repo.system_source.reset_caches()
repo._needs_refresh = False
@@ -130,6 +139,8 @@
read_instance_schema = False
init_repository = True
skip_db_create_and_restore = False
+ default_sources = DEFAULT_SOURCES
+
def __init__(self, appid='data', apphome=None, log_threshold=logging.CRITICAL+10):
# must be set before calling parent __init__
if apphome is None:
@@ -191,20 +202,20 @@
sourcefile = super(TestServerConfiguration, self).sources_file()
return sourcefile
- def sources(self):
+ def read_sources_file(self):
"""By default, we run tests with the sqlite DB backend. One may use its
own configuration by just creating a 'sources' file in the test
- directory from wich tests are launched or by specifying an alternative
+ directory from which tests are launched or by specifying an alternative
sources file using self.sourcefile.
"""
try:
- sources = super(TestServerConfiguration, self).sources()
+ sources = super(TestServerConfiguration, self).read_sources_file()
except ExecutionError:
sources = {}
if not sources:
- sources = DEFAULT_SOURCES
+ sources = self.default_sources
if 'admin' not in sources:
- sources['admin'] = DEFAULT_SOURCES['admin']
+ sources['admin'] = self.default_sources['admin']
return sources
# web config methods needed here for cases when we use this config as a web
@@ -245,6 +256,10 @@
self.sourcefile = sourcefile
+class PostgresApptestConfiguration(ApptestConfiguration):
+ default_sources = DEFAULT_PSQL_SOURCES
+
+
class RealDatabaseConfiguration(ApptestConfiguration):
"""configuration class for tests to run on a real database.
@@ -268,7 +283,6 @@
skip_db_create_and_restore = True
read_instance_schema = True # read schema from database
-
# test database handling #######################################################
DEFAULT_EMPTY_DB_ID = '__default_empty_db__'
@@ -390,7 +404,7 @@
"""return Connection object on the current repository"""
from cubicweb.dbapi import _repo_connect
repo = self.get_repo()
- sources = self.config.sources()
+ sources = self.config.read_sources_file()
login = unicode(sources['admin']['login'])
password = sources['admin']['password'] or 'xxx'
cnx = _repo_connect(repo, login, password=password)
@@ -411,8 +425,7 @@
@property
def system_source(self):
- sources = self.config.sources()
- return sources['system']
+ return self.config.system_source_config
@property
def dbname(self):
@@ -521,6 +534,22 @@
class PostgresTestDataBaseHandler(TestDataBaseHandler):
DRIVER = 'postgres'
+ __CTL = set()
+
+ @classmethod
+ def killall(cls):
+ for datadir in cls.__CTL:
+ subprocess.call(['pg_ctl', 'stop', '-D', datadir, '-m', 'fast'])
+
+ def __init__(self, config):
+ super(PostgresTestDataBaseHandler, self).__init__(config)
+ datadir = join(self.config.apphome, 'pgdb')
+ if not exists(datadir):
+ subprocess.check_call(['initdb', '-D', datadir, '-E', 'utf-8', '--locale=C'])
+ port = self.system_source['db-port']
+ subprocess.check_call(['pg_ctl', 'start', '-w', '-D', datadir, '-o', '-h "" -k /tmp -p %s' % port])
+ self.__CTL.add(datadir)
+
@property
@cached
def helper(self):
@@ -693,8 +722,8 @@
def absolute_dbfile(self):
"""absolute path of current database file"""
dbfile = join(self._ensure_test_backup_db_dir(),
- self.config.sources()['system']['db-name'])
- self.config.sources()['system']['db-name'] = dbfile
+ self.system_source['db-name'])
+ self.system_source['db-name'] = dbfile
return dbfile
def process_cache_entry(self, directory, dbname, db_id, entry):
@@ -733,6 +762,7 @@
import atexit
atexit.register(SQLiteTestDataBaseHandler._cleanup_all_tmpdb)
+atexit.register(PostgresTestDataBaseHandler.killall)
def install_sqlite_patch(querier):
@@ -824,8 +854,7 @@
handler = HCACHE.get(config)
if handler is not None:
return handler
- sources = config.sources()
- driver = sources['system']['db-driver']
+ driver = config.system_source_config['db-driver']
key = (driver, config)
handlerkls = HANDLERS.get(driver, None)
if handlerkls is not None:
--- a/devtools/devctl.py Mon Feb 17 11:13:27 2014 +0100
+++ b/devtools/devctl.py Mon Feb 17 15:32:50 2014 +0100
@@ -776,13 +776,19 @@
'short': "i", 'metavar': "<types>",
'help':'coma separated list of entity types to include in view',
}),
+ ('show-etype',
+ {'type':'string', 'default':'',
+ 'metavar': '<etype>',
+ 'help':'show graph of this etype and its neighbours'
+ }),
]
def run(self, args):
from subprocess import Popen
from tempfile import NamedTemporaryFile
from logilab.common.textutils import splitstrip
- from yams import schema2dot, BASE_TYPES
+ from logilab.common.graph import GraphGenerator, DotBackend
+ from yams import schema2dot as s2d, BASE_TYPES
from cubicweb.schema import (META_RTYPES, SCHEMA_TYPES, SYSTEM_RTYPES,
WORKFLOW_TYPES, INTERNAL_TYPES)
cubes = splitstrip(args[0])
@@ -801,7 +807,22 @@
skiptypes |= set(('CWUser', 'CWGroup', 'EmailAddress'))
skiptypes |= set(self['exclude-type'].split(','))
skiptypes -= set(self['include-type'].split(','))
- schema2dot.schema2dot(schema, out, skiptypes=skiptypes)
+
+ if not self['show-etype']:
+ s2d.schema2dot(schema, out, skiptypes=skiptypes)
+ else:
+ etype = self['show-etype']
+ visitor = s2d.OneHopESchemaVisitor(schema[etype], skiptypes=skiptypes)
+ propshdlr = s2d.SchemaDotPropsHandler(visitor)
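+            # NB: 'additionnal_param' (sic) is the keyword expected by DotBackend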
+            backend = DotBackend('schema', 'BT',
+                                 ratio='compress', size=None,
+                                 renderer='dot',
+                                 additionnal_param={'overlap': 'false',
+                                                    'splines': 'true',
+                                                    'sep': '0.2'})
+            generator = GraphGenerator(backend)
+ generator.generate(visitor, propshdlr, out)
+
if viewer:
p = Popen((viewer, out))
p.wait()
--- a/devtools/fake.py Mon Feb 17 11:13:27 2014 +0100
+++ b/devtools/fake.py Mon Feb 17 15:32:50 2014 +0100
@@ -24,7 +24,7 @@
from cubicweb.req import RequestSessionBase
from cubicweb.cwvreg import CWRegistryStore
-from cubicweb.web.request import CubicWebRequestBase
+from cubicweb.web.request import ConnectionCubicWebRequestBase
from cubicweb.devtools import BASE_URL, BaseApptestConfiguration
@@ -53,7 +53,7 @@
return {'system': {'db-driver': 'sqlite'}}
-class FakeRequest(CubicWebRequestBase):
+class FakeRequest(ConnectionCubicWebRequestBase):
"""test implementation of an cubicweb request object"""
def __init__(self, *args, **kwargs):
@@ -169,7 +169,6 @@
self.config = config or FakeConfig()
self.vreg = vreg or CWRegistryStore(self.config, initlog=False)
self.vreg.schema = schema
- self.sources = []
def internal_session(self):
return FakeSession(self)
@@ -188,9 +187,6 @@
source.after_entity_insertion(session, extid, entity)
return eid
- def eid2extid(self, source, eid, session=None):
- return self.eids[eid]
-
class FakeSource(object):
dbhelper = get_db_helper('sqlite')
--- a/devtools/httptest.py Mon Feb 17 11:13:27 2014 +0100
+++ b/devtools/httptest.py Mon Feb 17 15:32:50 2014 +0100
@@ -104,7 +104,7 @@
reactor.addSystemEventTrigger('after', 'startup', semaphore.release)
t = threading.Thread(target=safe_run, name='cubicweb_test_web_server',
- args=(self.config, self.vreg, True))
+ args=(self.config, True), kwargs={'repo': self.repo})
self.web_thread = t
t.start()
semaphore.acquire()
--- a/devtools/repotest.py Mon Feb 17 11:13:27 2014 +0100
+++ b/devtools/repotest.py Mon Feb 17 15:32:50 2014 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -141,7 +141,7 @@
from rql import RQLHelper
-from cubicweb.devtools.fake import FakeRepo, FakeSession
+from cubicweb.devtools.fake import FakeRepo, FakeConfig, FakeSession
from cubicweb.server import set_debug, debugged
from cubicweb.server.querier import QuerierHelper
from cubicweb.server.session import Session
@@ -159,7 +159,7 @@
raise SkipTest(str(ex))
def setUp(self):
- self.repo = FakeRepo(self.schema)
+ self.repo = FakeRepo(self.schema, config=FakeConfig(apphome=self.datadir))
self.repo.system_source = mock_object(dbdriver=self.backend)
self.rqlhelper = RQLHelper(self.schema, special_relations={'eid': 'uid',
'has_text': 'fti'},
@@ -262,8 +262,8 @@
u = self.repo._build_user(self.session, self.session.user.eid)
u._groups = set(groups)
s = Session(u, self.repo)
- s._tx.cnxset = self.cnxset
- s._tx.ctx_count = 1
+ s._cnx.cnxset = self.cnxset
+ s._cnx.ctx_count = 1
# register session to ensure it gets closed
self._dumb_sessions.append(s)
return s
@@ -277,42 +277,23 @@
class BasePlannerTC(BaseQuerierTC):
- newsources = ()
def setup(self):
- clear_cache(self.repo, 'rel_type_sources')
- clear_cache(self.repo, 'rel_type_sources')
- clear_cache(self.repo, 'can_cross_relation')
- clear_cache(self.repo, 'is_multi_sources_relation')
# XXX source_defs
self.o = self.repo.querier
self.session = self.repo._sessions.values()[0]
self.cnxset = self.session.set_cnxset()
self.schema = self.o.schema
- self.sources = self.o._repo.sources
- self.system = self.sources[-1]
+ self.system = self.repo.system_source
do_monkey_patch()
self._dumb_sessions = [] # by hi-jacked parent setup
self.repo.vreg.rqlhelper.backend = 'postgres' # so FTIRANK is considered
- self.newsources = []
-
- def add_source(self, sourcecls, uri):
- source = sourcecls(self.repo, {'uri': uri, 'type': 'whatever'})
- if not source.copy_based_source:
- self.sources.append(source)
- self.newsources.append(source)
- self.repo.sources_by_uri[uri] = source
- setattr(self, uri, source)
def tearDown(self):
- for source in self.newsources:
- if not source.copy_based_source:
- self.sources.remove(source)
- del self.repo.sources_by_uri[source.uri]
undo_monkey_patch()
for session in self._dumb_sessions:
- if session._tx.cnxset is not None:
- session._tx.cnxset = None
+ if session._cnx.cnxset is not None:
+ session._cnx.cnxset = None
session.close()
def _prepare_plan(self, rql, kwargs=None):
@@ -324,7 +305,8 @@
select.solutions.sort()
else:
rqlst.solutions.sort()
- return self.o.plan_factory(rqlst, kwargs, self.session)
+ with self.session.ensure_cnx_set:
+ return self.o.plan_factory(rqlst, kwargs, self.session)
# monkey patch some methods to get predicatable results #######################
@@ -350,7 +332,6 @@
from cubicweb.server.querier import ExecutionPlan
_orig_check_permissions = ExecutionPlan._check_permissions
-_orig_init_temp_table = ExecutionPlan.init_temp_table
def _check_permissions(*args, **kwargs):
res, restricted = _orig_check_permissions(*args, **kwargs)
@@ -360,15 +341,6 @@
def _dummy_check_permissions(self, rqlst):
return {(): rqlst.solutions}, set()
-def _init_temp_table(self, table, selection, solution):
- if self.tablesinorder is None:
- tablesinorder = self.tablesinorder = {}
- else:
- tablesinorder = self.tablesinorder
- if not table in tablesinorder:
- tablesinorder[table] = 'table%s' % len(tablesinorder)
- return _orig_init_temp_table(self, table, selection, solution)
-
from cubicweb.server import rqlannotation
_orig_select_principal = rqlannotation._select_principal
@@ -381,16 +353,6 @@
return _orig_select_principal(scope, relations,
_sort=lambda rels: sorted(rels, key=sort_key))
-try:
- from cubicweb.server.msplanner import PartPlanInformation
-except ImportError:
- class PartPlanInformation(object):
- def merge_input_maps(self, *args, **kwargs):
- pass
- def _choose_term(self, sourceterms):
- pass
-_orig_merge_input_maps = PartPlanInformation.merge_input_maps
-_orig_choose_term = PartPlanInformation._choose_term
def _merge_input_maps(*args, **kwargs):
return sorted(_orig_merge_input_maps(*args, **kwargs))
@@ -410,12 +372,6 @@
return x.value
return _orig_choose_term(self, source, DumbOrderedDict2(sourceterms, get_key))
-from cubicweb.server.sources.pyrorql import PyroRQLSource
-_orig_syntax_tree_search = PyroRQLSource.syntax_tree_search
-
-def _syntax_tree_search(*args, **kwargs):
- return deepcopy(_orig_syntax_tree_search(*args, **kwargs))
-
def _ordered_iter_relations(stinfo):
return sorted(_orig_iter_relations(stinfo), key=lambda x:x.r_type)
@@ -425,17 +381,9 @@
rqlrewrite.RQLRewriter.build_variantes = _build_variantes
ExecutionPlan._check_permissions = _check_permissions
ExecutionPlan.tablesinorder = None
- ExecutionPlan.init_temp_table = _init_temp_table
- PartPlanInformation.merge_input_maps = _merge_input_maps
- PartPlanInformation._choose_term = _choose_term
- PyroRQLSource.syntax_tree_search = _syntax_tree_search
def undo_monkey_patch():
rqlrewrite.iter_relations = _orig_iter_relations
rqlrewrite.RQLRewriter.insert_snippets = _orig_insert_snippets
rqlrewrite.RQLRewriter.build_variantes = _orig_build_variantes
ExecutionPlan._check_permissions = _orig_check_permissions
- ExecutionPlan.init_temp_table = _orig_init_temp_table
- PartPlanInformation.merge_input_maps = _orig_merge_input_maps
- PartPlanInformation._choose_term = _orig_choose_term
- PyroRQLSource.syntax_tree_search = _orig_syntax_tree_search
--- a/devtools/test/unittest_testlib.py Mon Feb 17 11:13:27 2014 +0100
+++ b/devtools/test/unittest_testlib.py Mon Feb 17 15:32:50 2014 +0100
@@ -189,5 +189,62 @@
self.assertIn(AnAppobject, self.vreg['hip']['hop'])
self.assertNotIn(AnAppobject, self.vreg['hip']['hop'])
+ def test_login(self):
+ """Calling login should not break self.session hook control"""
+ self.hook_executed = False
+ babar = self.create_user(self.request(), 'babar')
+ self.commit()
+
+ from cubicweb.server import hook
+ from cubicweb.predicates import is_instance
+
+ class MyHook(hook.Hook):
+ __regid__ = 'whatever'
+ __select__ = hook.Hook.__select__ & is_instance('CWProperty')
+ category = 'test-hook'
+ events = ('after_add_entity',)
+ test = self
+
+ def __call__(self):
+ self.test.hook_executed = True
+
+ self.login('babar')
+ with self.temporary_appobjects(MyHook):
+ with self.session.allow_all_hooks_but('test-hook'):
+ req = self.request()
+ prop = req.create_entity('CWProperty', pkey=u'ui.language', value=u'en')
+ self.commit()
+ self.assertFalse(self.hook_executed)
+
+
+class RepoAccessTC(CubicWebTC):
+ def test_repo_connection(self):
+ acc = self.new_access('admin')
+ with acc.repo_cnx() as cnx:
+ rset = cnx.execute('Any X WHERE X is CWUser')
+ self.assertTrue(rset)
+
+ def test_client_connection(self):
+ acc = self.new_access('admin')
+ with acc.client_cnx() as cnx:
+ rset = cnx.execute('Any X WHERE X is CWUser')
+ self.assertTrue(rset)
+
+ def test_web_request(self):
+ acc = self.new_access('admin')
+ with acc.web_request(elephant='babar') as req:
+ rset = req.execute('Any X WHERE X is CWUser')
+ self.assertTrue(rset)
+ self.assertEqual('babar', req.form['elephant'])
+
+ def test_close(self):
+ acc = self.new_access('admin')
+ acc.close()
+
+ def test_admin_access(self):
+ with self.admin_access.client_cnx() as cnx:
+ self.assertEqual('admin', cnx.user.login)
+
+
if __name__ == '__main__':
unittest_main()
--- a/devtools/testlib.py Mon Feb 17 11:13:27 2014 +0100
+++ b/devtools/testlib.py Mon Feb 17 15:32:50 2014 +0100
@@ -39,12 +39,13 @@
from logilab.common.deprecation import deprecated, class_deprecated
from logilab.common.shellutils import getlogin
-from cubicweb import ValidationError, NoSelectableObject
-from cubicweb import cwconfig, dbapi, devtools, web, server
+from cubicweb import ValidationError, NoSelectableObject, AuthenticationError
+from cubicweb import cwconfig, dbapi, devtools, web, server, repoapi
from cubicweb.utils import json
from cubicweb.sobjects import notification
from cubicweb.web import Redirect, application
from cubicweb.server.hook import SendMailOp
+from cubicweb.server.session import Session
from cubicweb.devtools import SYSTEM_ENTITIES, SYSTEM_RELATIONS, VIEW_VALIDATORS
from cubicweb.devtools import fake, htmlparser, DEFAULT_EMPTY_DB_ID
from cubicweb.utils import json
@@ -169,15 +170,85 @@
return getattr(self.cnx, attrname)
def __enter__(self):
- return self.cnx.__enter__()
+ # already open
+ return self.cnx
def __exit__(self, exctype, exc, tb):
try:
return self.cnx.__exit__(exctype, exc, tb)
finally:
- self.cnx.close()
self.testcase.restore_connection()
+# RepoAccess utility ###########################################################
+
+class RepoAccess(object):
+    """A helper to easily create objects to access the repo as a specific user
+
+    Each RepoAccess has its own session.
+
+    A repo access can create three types of objects:
+
+    .. automethod:: cubicweb.devtools.testlib.RepoAccess.repo_cnx
+    .. automethod:: cubicweb.devtools.testlib.RepoAccess.client_cnx
+    .. automethod:: cubicweb.devtools.testlib.RepoAccess.web_request
+
+    The RepoAccess needs to be closed to destroy the associated Session.
+    TestCase usually takes care of this aspect for the user.
+
+    .. automethod:: cubicweb.devtools.testlib.RepoAccess.close
+ """
+
+ def __init__(self, repo, login, requestcls):
+ self._repo = repo
+ self._login = login
+ self.requestcls = requestcls
+ # opening session
+ #
+        # XXX this very hackish code should be cleaned up and moved onto the repo.
+ with repo.internal_cnx() as cnx:
+ rset = cnx.execute('CWUser U WHERE U login %(u)s', {'u': login})
+ user = rset.get_entity(0, 0)
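+            # prefetch and cache the user's groups and properties while the cnx is open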
+ user.groups
+ user.properties
+ self._session = Session(user, repo)
+ repo._sessions[self._session.sessionid] = self._session
+ self._session.user._cw = self._session
+
+    @contextmanager
+ def repo_cnx(self):
+ """Context manager returning a server side connection for the user"""
+ with self._session.new_cnx() as cnx:
+ yield cnx
+
+    @contextmanager
+ def client_cnx(self):
+ """Context manager returning a client side connection for the user"""
+ with repoapi.ClientConnection(self._session) as cnx:
+ yield cnx
+
+    @contextmanager
+ def web_request(self, url=None, headers={}, **kwargs):
+ """Context manager returning a web request pre-linked to a client cnx
+
+ To commit and rollback use::
+
+ req.cnx.commit()
+            req.cnx.rollback()
+ """
+ req = self.requestcls(self._repo.vreg, url=url, headers=headers, form=kwargs)
+ clt_cnx = repoapi.ClientConnection(self._session)
+ req.set_cnx(clt_cnx)
+ with clt_cnx:
+ yield req
+
+ def close(self):
+ """Close the session associated to the RepoAccess"""
+ if self._session is not None:
+ self._repo.close(self._session.sessionid)
+ self._session = None
+
+
+
# base class for cubicweb tests requiring a full cw environments ###############
class CubicWebTC(TestCase):
@@ -198,21 +269,197 @@
"""
appid = 'data'
configcls = devtools.ApptestConfiguration
- reset_schema = reset_vreg = False # reset schema / vreg between tests
tags = TestCase.tags | Tags('cubicweb', 'cw_repo')
test_db_id = DEFAULT_EMPTY_DB_ID
_cnxs = set() # establised connection
- _cnx = None # current connection
+    # connections are kept around for leak detection purposes
+
+ def __init__(self, *args, **kwargs):
+ self._admin_session = None
+ self._admin_clt_cnx = None
+ self._current_session = None
+ self._current_clt_cnx = None
+ self.repo = None
+ self._open_access = set()
+ super(CubicWebTC, self).__init__(*args, **kwargs)
+
+ # repository connection handling ###########################################
+ def new_access(self, login):
+ """provide a new RepoAccess object for a given user
+
+ The access is automatically closed at the end of the test."""
+ access = RepoAccess(self.repo, login, self.requestcls)
+ self._open_access.add(access)
+ return access
+
+ def _close_access(self):
+ while self._open_access:
+ self._open_access.pop().close()
+
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
+ def set_cnx(self, cnx):
+ """ """
+ # XXX we want to deprecate this
+ assert getattr(cnx, '_session', None) is not None
+ if cnx is self._admin_clt_cnx:
+ self._pop_custom_cnx()
+ else:
+ self._cnxs.add(cnx) # register the cnx to make sure it is removed
+ self._current_session = cnx._session
+ self._current_clt_cnx = cnx
- # Too much complicated stuff. the class doesn't need to bear the repo anymore
- @classmethod
- def set_cnx(cls, cnx):
- cls._cnxs.add(cnx)
- cls._cnx = cnx
+ @property
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
+ def cnx(self):
+ # XXX we want to deprecate this
+ clt_cnx = self._current_clt_cnx
+ if clt_cnx is None:
+ clt_cnx = self._admin_clt_cnx
+ return clt_cnx
+
+ def _close_cnx(self):
+ """ensure that all cnx used by a test have been closed"""
+ for cnx in list(self._cnxs):
+ if cnx._open and not cnx._session.closed:
+ cnx.rollback()
+ cnx.close()
+ self._cnxs.remove(cnx)
+
+ @property
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
+ def session(self):
+ """return current server side session"""
+ # XXX We want to use a srv_connection instead and deprecate this
+ # property
+ session = self._current_session
+ if session is None:
+ session = self._admin_session
+            # bypass all sanity checks to use the same repo cnx in the
+            # session; we can't call set_cnx as the Connection is not
+            # managed by the session.
+ session._Session__threaddata.cnx = self._admin_clt_cnx._cnx
+ else:
+ session._Session__threaddata.cnx = self.cnx._cnx
+ session.set_cnxset()
+ return session
+
+ @property
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
+ def websession(self):
+ return self.session
@property
- def cnx(self):
- return self.__class__._cnx
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
+ def adminsession(self):
+ """return current server side session (using default manager account)"""
+ return self._admin_session
+
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
+ def login(self, login, **kwargs):
+ """return a connection for the given login/password"""
+ __ = kwargs.pop('autoclose', True) # not used anymore
+ if login == self.admlogin:
+ # undo any previous login, if we're not used as a context manager
+ self.restore_connection()
+ return self.cnx
+ else:
+ if not kwargs:
+ kwargs['password'] = str(login)
+ clt_cnx = repoapi.connect(self.repo, login, **kwargs)
+ self.set_cnx(clt_cnx)
+ clt_cnx.__enter__()
+ return TestCaseConnectionProxy(self, clt_cnx)
+
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
+ def restore_connection(self):
+ self._pop_custom_cnx()
+
+ def _pop_custom_cnx(self):
+ if self._current_clt_cnx is not None:
+ if self._current_clt_cnx._open:
+ self._current_clt_cnx.close()
+ if not self._current_session.closed:
+ self.repo.close(self._current_session.sessionid)
+ self._current_clt_cnx = None
+ self._current_session = None
+
+    #XXX this doesn't need to be a classmethod anymore
+ def _init_repo(self):
+ """init the repository and connection to it.
+ """
+ # setup configuration for test
+ self.init_config(self.config)
+        # get or restore a working db.
+ db_handler = devtools.get_test_db_handler(self.config)
+ db_handler.build_db_cache(self.test_db_id, self.pre_setup_database)
+
+ db_handler.restore_database(self.test_db_id)
+ self.repo = db_handler.get_repo(startup=True)
+ # get an admin session (without actual login)
+ login = unicode(db_handler.config.default_admin_config['login'])
+ self.admin_access = self.new_access(login)
+ self._admin_session = self.admin_access._session
+ self._admin_clt_cnx = repoapi.ClientConnection(self._admin_session)
+ self._cnxs.add(self._admin_clt_cnx)
+ self._admin_clt_cnx.__enter__()
+ self.config.repository = lambda x=None: self.repo
+
+ # db api ##################################################################
+
+ @nocoverage
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
+ def cursor(self, req=None):
+ if req is not None:
+ return req.cnx
+ else:
+ return self.cnx
+
+ @nocoverage
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
+ def execute(self, rql, args=None, req=None):
+ """executes <rql>, builds a resultset, and returns a couple (rset, req)
+ where req is a FakeRequest
+ """
+ req = req or self.request(rql=rql)
+ return req.execute(unicode(rql), args)
+
+ @nocoverage
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
+ def commit(self):
+ try:
+ return self.cnx.commit()
+ finally:
+ self.session.set_cnxset() # ensure cnxset still set after commit
+
+ @nocoverage
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
+ def rollback(self):
+ try:
+ self.cnx.rollback()
+ except dbapi.ProgrammingError:
+ pass # connection closed
+ finally:
+            self.session.set_cnxset() # ensure cnxset still set after rollback
+
+ requestcls = fake.FakeRequest
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
+ def request(self, rollbackfirst=False, url=None, headers={}, **kwargs):
+ """return a web ui request"""
+ if rollbackfirst:
+ self.cnx.rollback()
+ req = self.requestcls(self.vreg, url=url, headers=headers, form=kwargs)
+ req.set_cnx(self.cnx)
+ return req
+
+ # server side db api #######################################################
+
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
+ def sexecute(self, rql, args=None):
+ self.session.set_cnxset()
+ return self.session.execute(rql, args)
+
+
+ # config management ########################################################
@classproperty
def config(cls):
@@ -237,7 +484,7 @@
Otherwise, consider to use a different :class:`ApptestConfiguration`
defined in the `configcls` class attribute"""
- source = config.sources()['system']
+ source = config.system_source_config
cls.admlogin = unicode(source['db-user'])
cls.admpassword = source['db-password']
# uncomment the line below if you want rql queries to be logged
@@ -260,32 +507,10 @@
except Exception: # not in server only configuration
pass
- #XXX this doesn't need to a be classmethod anymore
- @classmethod
- def _init_repo(cls):
- """init the repository and connection to it.
- """
- # setup configuration for test
- cls.init_config(cls.config)
- # get or restore and working db.
- db_handler = devtools.get_test_db_handler(cls.config)
- db_handler.build_db_cache(cls.test_db_id, cls.pre_setup_database)
+ @property
+ def vreg(self):
+ return self.repo.vreg
- cls.repo, cnx = db_handler.get_repo_and_cnx(cls.test_db_id)
- # no direct assignation to cls.cnx anymore.
- # cnx is now an instance property that use a class protected attributes.
- cls.set_cnx(cnx)
- cls.vreg = cls.repo.vreg
- cls.websession = dbapi.DBAPISession(cnx, cls.admlogin)
- cls._orig_cnx = (cnx, cls.websession)
- cls.config.repository = lambda x=None: cls.repo
-
- def _close_cnx(self):
- for cnx in list(self._cnxs):
- if not cnx._closed:
- cnx.rollback()
- cnx.close()
- self._cnxs.remove(cnx)
# global resources accessors ###############################################
@@ -294,18 +519,6 @@
"""return the application schema"""
return self.vreg.schema
- @property
- def session(self):
- """return current server side session (using default manager account)"""
- session = self.repo._sessions[self.cnx.sessionid]
- session.set_cnxset()
- return session
-
- @property
- def adminsession(self):
- """return current server side session (using default manager account)"""
- return self.repo._sessions[self._orig_cnx[0].sessionid]
-
def shell(self):
"""return a shell session object"""
from cubicweb.server.migractions import ServerMigrationHelper
@@ -345,6 +558,14 @@
def tearDown(self):
# XXX hack until logilab.common.testlib is fixed
+ if self._admin_clt_cnx is not None:
+ if self._admin_clt_cnx._open:
+ self._admin_clt_cnx.close()
+ self._admin_clt_cnx = None
+ if self._admin_session is not None:
+ if not self._admin_session.closed:
+ self.repo.close(self._admin_session.sessionid)
+ self._admin_session = None
while self._cleanups:
cleanup, args, kwargs = self._cleanups.pop(-1)
cleanup(*args, **kwargs)
@@ -373,8 +594,7 @@
def user(self, req=None):
"""return the application schema"""
if req is None:
- req = self.request()
- return self.cnx.user(req)
+ return self.request().user
else:
return req.user
@@ -392,7 +612,7 @@
groups = login
login = req
assert not isinstance(self, type)
- req = self._orig_cnx[0].request()
+ req = self._admin_clt_cnx
if password is None:
password = login.encode('utf8')
user = req.create_entity('CWUser', login=unicode(login),
@@ -411,65 +631,6 @@
req.cnx.commit()
return user
- def login(self, login, **kwargs):
- """return a connection for the given login/password"""
- if login == self.admlogin:
- self.restore_connection()
- # definitly don't want autoclose when used as a context manager
- return self.cnx
- autoclose = kwargs.pop('autoclose', True)
- if not kwargs:
- kwargs['password'] = str(login)
- self.set_cnx(dbapi._repo_connect(self.repo, unicode(login), **kwargs))
- self.websession = dbapi.DBAPISession(self.cnx)
- if login == self.vreg.config.anonymous_user()[0]:
- self.cnx.anonymous_connection = True
- if autoclose:
- return TestCaseConnectionProxy(self, self.cnx)
- return self.cnx
-
- def restore_connection(self):
- if not self.cnx is self._orig_cnx[0]:
- if not self.cnx._closed:
- self.cnx.close()
- cnx, self.websession = self._orig_cnx
- self.set_cnx(cnx)
-
- # db api ##################################################################
-
- @nocoverage
- def cursor(self, req=None):
- return self.cnx.cursor(req or self.request())
-
- @nocoverage
- def execute(self, rql, args=None, req=None):
- """executes <rql>, builds a resultset, and returns a couple (rset, req)
- where req is a FakeRequest
- """
- req = req or self.request(rql=rql)
- return req.execute(unicode(rql), args)
-
- @nocoverage
- def commit(self):
- try:
- return self.cnx.commit()
- finally:
- self.session.set_cnxset() # ensure cnxset still set after commit
-
- @nocoverage
- def rollback(self):
- try:
- self.cnx.rollback()
- except dbapi.ProgrammingError:
- pass # connection closed
- finally:
- self.session.set_cnxset() # ensure cnxset still set after commit
-
- # server side db api #######################################################
-
- def sexecute(self, rql, args=None):
- self.session.set_cnxset()
- return self.session.execute(rql, args)
# other utilities #########################################################
@@ -652,21 +813,12 @@
@cached
def app(self):
"""return a cubicweb publisher"""
- publisher = application.CubicWebPublisher(self.config, vreg=self.vreg)
+ publisher = application.CubicWebPublisher(self.repo, self.config)
def raise_error_handler(*args, **kwargs):
raise
publisher.error_handler = raise_error_handler
return publisher
- requestcls = fake.FakeRequest
- def request(self, rollbackfirst=False, url=None, headers={}, **kwargs):
- """return a web ui request"""
- req = self.requestcls(self.vreg, url=url, headers=headers, form=kwargs)
- if rollbackfirst:
- self.websession.cnx.rollback()
- req.set_session(self.websession)
- return req
-
def remote_call(self, fname, *args):
"""remote json call simulation"""
dump = json.dumps
@@ -784,33 +936,29 @@
def init_authentication(self, authmode, anonuser=None):
self.set_auth_mode(authmode, anonuser)
- req = self.request(url='login')
- origsession = req.session
- req.session = req.cnx = None
- del req.execute # get back to class implementation
+ req = self.requestcls(self.vreg, url='login')
sh = self.app.session_handler
authm = sh.session_manager.authmanager
authm.anoninfo = self.vreg.config.anonymous_user()
authm.anoninfo = authm.anoninfo[0], {'password': authm.anoninfo[1]}
# not properly cleaned between tests
self.open_sessions = sh.session_manager._sessions = {}
- return req, origsession
+ return req, self.websession
def assertAuthSuccess(self, req, origsession, nbsessions=1):
sh = self.app.session_handler
- self.app.connect(req)
- session = req.session
+ session = self.app.get_session(req)
+ clt_cnx = repoapi.ClientConnection(session)
+ req.set_cnx(clt_cnx)
self.assertEqual(len(self.open_sessions), nbsessions, self.open_sessions)
self.assertEqual(session.login, origsession.login)
self.assertEqual(session.anonymous_session, False)
def assertAuthFailure(self, req, nbsessions=0):
- self.app.connect(req)
- self.assertIsInstance(req.session, dbapi.DBAPISession)
- self.assertEqual(req.session.cnx, None)
- self.assertIsInstance(req.cnx, (dbapi._NeedAuthAccessMock, NoneType))
- # + 1 since we should still have session without connection set
- self.assertEqual(len(self.open_sessions), nbsessions + 1)
+ with self.assertRaises(AuthenticationError):
+ self.app.get_session(req)
+ # +0 since we do not track the opened session
+ self.assertEqual(len(self.open_sessions), nbsessions)
clear_cache(req, 'get_authorization')
# content validation #######################################################
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc/3.19.rst Mon Feb 17 15:32:50 2014 +0100
@@ -0,0 +1,145 @@
+What's new in CubicWeb 3.19?
+============================
+
+Behaviour Changes
+-----------------
+
+* The anonymous property of Session and Connection is now computed from the
+  related user login. If it matches the ``anonymous-user`` in the config, the
+  connection is anonymous. Beware that the ``anonymous-user`` config is web
+  specific. Therefore, no session may be anonymous in a repository-only setup.
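+
+Concretely, the check mirrors the new ``anonymous_connection`` property; a
+minimal sketch, assuming a server-side ``session`` and the instance ``config``
+at hand::
+
+    anon_login = config.get('anonymous-user')
+    is_anonymous = (session.user.login == anon_login)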
+
+
+New Repository Access API
+-------------------------
+
+Connection replaces Session
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A new explicit Connection object replaces Session as the main repository entry
+point. Connection holds all the necessary methods to be used server-side
+(``execute``, ``commit``, ``rollback``, ``call_service``, ``entity_from_eid``,
+etc.). One obtains a new Connection object using ``session.new_cnx()``.
+Connection objects need to have an explicit begin and end. Use them as a context
+manager to never miss an end::
+
+ with session.new_cnx() as cnx:
+        cnx.execute('INSERT Elephant E, E name "Babar"')
+        cnx.commit()
+        cnx.execute('INSERT Elephant E, E name "Celeste"')
+        cnx.commit()
+ # Once you get out of the "with" clause, the connection is closed.
+
+Using the same Connection object in multiple threads will give you access to
+the same Transaction. However, Connection objects are not thread safe (use
+them across threads at your own risk).
+
+``repository.internal_session`` is deprecated in favor of
+``repository.internal_cnx``. Note that internal connections are now `safe` by default,
+i.e. the integrity hooks are enabled.
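+
+A sketch of the replacement, assuming a ``repo`` object at hand::
+
+    with repo.internal_cnx() as cnx:
+        cnx.execute('INSERT Elephant E, E name "Celeste"')
+        cnx.commit()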
+
+Backward compatibility is preserved on Session.
+
+
+dbapi vs repoapi
+~~~~~~~~~~~~~~~~
+
+A new API has been introduced to replace the dbapi. It is called `repoapi`.
+
+There are three relevant functions for now, put together in the sketch after
+this list:
+
+* ``repoapi.get_repository`` returns a Repository object either from a
+ URI when used as ``repoapi.get_repository(uri)`` or from a config
+ when used as ``repoapi.get_repository(config=config)``.
+
+* ``repoapi.connect(repo, login, **credentials)`` returns a ClientConnection
+ associated with the user identified by the credentials. The
+ ClientConnection is associated with its own Session that is closed
+ when the ClientConnection is closed. A ClientConnection is a
+ Connection-like object to be used client side.
+
+* ``repoapi.anonymous_cnx(repo)`` returns a ClientConnection associated
+ with the anonymous user if described in the config.
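+
+A minimal sketch putting these together (the ``admin`` login and password are
+placeholders)::
+
+    from cubicweb import repoapi
+
+    repo = repoapi.get_repository(config=config)
+    with repoapi.connect(repo, 'admin', password='admin') as cnx:
+        rset = cnx.execute('Any X WHERE X is CWUser')
+        cnx.commit()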
+
+
+repoapi.ClientConnection replaces dbapi.Connection and company
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+On the client/web side, the Request is now using a ``repoapi.ClientConnection``
+instead of a ``dbapi.connection``. The ``ClientConnection`` has multiple backward
+compatible methods to make it look like a ``dbapi.Cursor`` and ``dbapi.Connection``.
+
+Sessions used on the web side are now the same as the ones used on the server
+side. Some backward compatibility methods have been installed on the server
+side Session to ease the transition.
+
+The authentication stack has been altered to use the ``repoapi`` instead of
+the ``dbapi``. Cubes adding new elements to this stack are likely to break.
+
+
+New API in tests
+~~~~~~~~~~~~~~~~
+
+All current methods and attributes used to access the repo on ``CubicWebTC`` are
+deprecated. You may now use a ``RepoAccess`` object. A ``RepoAccess`` object is
+linked to a new ``Session`` for a specified user. It is able to create
+``Connection``, ``ClientConnection`` and web side requests linked to this
+session::
+
+ access = self.new_access('babar') # create a new RepoAccess for user babar
+ with access.repo_cnx() as cnx:
+ # some work with server side cnx
+ cnx.execute(...)
+ cnx.commit()
+ cnx.execute(...)
+ cnx.commit()
+
+ with access.client_cnx() as cnx:
+ # some work with client side cnx
+ cnx.execute(...)
+ cnx.commit()
+
+ with access.web_request(elephant='babar') as req:
+        # some work with a web request
+ elephant_name = req.form['elephant']
+ req.execute(...)
+ req.cnx.commit()
+
+By default ``testcase.admin_access`` contains a ``RepoAccess`` object for the
+default admin session.
+
+
+API changes
+-----------
+
+* ``RepositorySessionManager.postlogin`` is now called with two arguments,
+  request and session, and this now happens before the session is linked to
+  the request.
+
+* ``SessionManager`` and ``AuthenticationManager`` now take a repo object at
+ initialization time instead of a vreg.
+
+* The ``async`` argument of ``_cw.call_service`` has been dropped. All calls are
+ now synchronous. The zmq notification bus looks like a good replacement for
+ most async use cases.
+
+* ``repo.stats()`` is now deprecated. The same information is available through
+  a service (``_cw.call_service('repo_stats')``); see the sketch after this list.
+
+* ``repo.gc_stats()`` is now deprecated. The same information is available through
+ a service (``_cw.call_service('repo_gc_stats')``).
+
+* ``request.set_session`` no longer takes an optional ``user`` argument.
+
+* CubicWebTC does not have repo and cnx as class attributes anymore. They are
+ standard instance attributes. ``set_cnx`` and ``_init_repo`` class methods
+ become instance methods.
+
+* ``set_cnxset`` and ``free_cnxset`` are deprecated. cnxsets are now
+  automatically managed.
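+
+A sketch of the service-based replacements for ``repo.stats()`` and
+``repo.gc_stats()``, assuming a connection ``cnx`` at hand::
+
+    stats = cnx.call_service('repo_stats')
+    gc_stats = cnx.call_service('repo_gc_stats')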
+
+
+Deprecated Code Drops
+----------------------
+
+* The ``session.hijack_user`` mechanism has been dropped.
--- a/entities/authobjs.py Mon Feb 17 11:13:27 2014 +0100
+++ b/entities/authobjs.py Mon Feb 17 15:32:50 2014 +0100
@@ -166,6 +166,17 @@
dc_long_title = name
+ def __call__(self, *args, **kwargs):
+        """ugly hack for compatibility between dbapi and repo api
+
+        In the dbapi, Connection and Session have a ``user`` method to
+        generate a user for a request. In the repo api, Connection and Session
+        have a user attribute inherited from the SessionRequestBase prototype.
+        This ugly hack allows us not to break users of the user method.
+
+ XXX Deprecate me ASAP"""
+ return self
+
from logilab.common.deprecation import class_renamed
EUser = class_renamed('EUser', CWUser)
EGroup = class_renamed('EGroup', CWGroup)
--- a/entities/lib.py Mon Feb 17 11:13:27 2014 +0100
+++ b/entities/lib.py Mon Feb 17 15:32:50 2014 +0100
@@ -18,6 +18,7 @@
"""entity classes for optional library entities"""
__docformat__ = "restructuredtext en"
+from warnings import warn
from urlparse import urlsplit, urlunsplit
from datetime import datetime
@@ -130,6 +131,13 @@
__regid__ = 'CWCache'
fetch_attrs, cw_fetch_order = fetch_config(['name'])
+ def __init__(self, *args, **kwargs):
+ warn('[3.19] CWCache entity type is going away soon. '
+ 'Other caching mechanisms can be used more reliably '
+ 'to the same effect.',
+ DeprecationWarning)
+ super(CWCache, self).__init__(*args, **kwargs)
+
def touch(self):
self._cw.execute('SET X timestamp %(t)s WHERE X eid %(x)s',
{'t': datetime.now(), 'x': self.eid})
--- a/entity.py Mon Feb 17 11:13:27 2014 +0100
+++ b/entity.py Mon Feb 17 15:32:50 2014 +0100
@@ -633,11 +633,9 @@
@cached
def cw_metainformation(self):
- res = self._cw.describe(self.eid, asdict=True)
- # use 'asource' and not 'source' since this is the actual source,
- # while 'source' is the physical source (where it's stored)
- res['source'] = self._cw.source_defs()[res.pop('asource')]
- return res
+ metas = self._cw.entity_metas(self.eid)
+ metas['source'] = self._cw.source_defs()[metas['source']]
+ return metas
def cw_check_perm(self, action):
self.e_schema.check_perm(self._cw, action, eid=self.eid)
--- a/etwist/server.py Mon Feb 17 11:13:27 2014 +0100
+++ b/etwist/server.py Mon Feb 17 15:32:50 2014 +0100
@@ -57,12 +57,12 @@
class CubicWebRootResource(resource.Resource):
- def __init__(self, config, vreg=None):
+ def __init__(self, config, repo):
resource.Resource.__init__(self)
self.config = config
# instantiate publisher here and not in init_publisher to get some
# checks done before daemonization (eg versions consistency)
- self.appli = CubicWebPublisher(config, vreg=vreg)
+ self.appli = CubicWebPublisher(repo, config)
self.base_url = config['base-url']
self.https_url = config['https-url']
global MAX_POST_LENGTH
@@ -271,12 +271,20 @@
LOGGER = getLogger('cubicweb.twisted')
set_log_methods(CubicWebRootResource, LOGGER)
-def run(config, vreg=None, debug=None):
+def run(config, debug=None, repo=None):
+ # repo may be passed during tests.
+ #
+ # Tests have already created a repo object, so we should not create a
+ # new one. Explicitly passing the repo object avoids relying on the
+ # fragile config.repository() cache. We could imagine making repo a
+ # mandatory argument and receiving it directly from the starting
+ # command.
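+ #
+ # e.g. from a test (a sketch): run(config, debug=True, repo=self.repo)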
if debug is not None:
config.debugmode = debug
config.check_writeable_uid_directory(config.appdatahome)
# create the site
- root_resource = CubicWebRootResource(config, vreg=vreg)
+ if repo is None:
+ repo = config.repository()
+ root_resource = CubicWebRootResource(config, repo)
website = server.Site(root_resource)
# serve it via standard HTTP on port set in the configuration
port = config['port'] or 8080
--- a/hooks/__init__.py Mon Feb 17 11:13:27 2014 +0100
+++ b/hooks/__init__.py Mon Feb 17 15:32:50 2014 +0100
@@ -39,10 +39,6 @@
session.system_sql(
'DELETE FROM transactions WHERE tx_time < %(time)s',
{'time': mindate})
- # cleanup deleted entities
- session.system_sql(
- 'DELETE FROM deleted_entities WHERE dtime < %(time)s',
- {'time': mindate})
session.commit()
finally:
session.close()
@@ -57,12 +53,10 @@
def __call__(self):
def update_feeds(repo):
- # don't iter on repo.sources which doesn't include copy based
- # sources (the one we're looking for)
- # take a list to avoid iterating on a dictionary which size may
+ # take a list to avoid iterating on a dictionary whose size may
# change
- for source in list(repo.sources_by_eid.values()):
- if (not source.copy_based_source
+ for uri, source in list(repo.sources_by_uri.iteritems()):
+ if (uri == 'system'
or not repo.config.source_enabled(source)
or not source.config['synchronize']):
continue
@@ -83,8 +77,8 @@
def __call__(self):
def expire_dataimports(repo=self.repo):
- for source in repo.sources_by_eid.itervalues():
- if (not source.copy_based_source
+ for uri, source in repo.sources_by_uri.iteritems():
+ if (uri == 'system'
or not repo.config.source_enabled(source)):
continue
session = repo.internal_session()
--- a/hooks/integrity.py Mon Feb 17 11:13:27 2014 +0100
+++ b/hooks/integrity.py Mon Feb 17 15:32:50 2014 +0100
@@ -85,7 +85,7 @@
if rtype in pendingrtypes:
continue
if not session.execute(self.base_rql % rtype, {'x': eid}):
- etype = session.describe(eid)[0]
+ etype = session.entity_metas(eid)['type']
msg = _('at least one relation %(rtype)s is required on '
'%(etype)s (%(eid)s)')
raise validation_error(eid, {(rtype, self.role): msg},
@@ -325,7 +325,7 @@
for eid, rtype in self.get_data():
# don't do anything if the entity is being deleted
if eid not in pendingeids:
- etype = session.describe(eid)[0]
+ etype = session.entity_metas(eid)['type']
key = (etype, rtype)
if key not in eids_by_etype_rtype:
eids_by_etype_rtype[key] = [str(eid)]
--- a/hooks/metadata.py Mon Feb 17 11:13:27 2014 +0100
+++ b/hooks/metadata.py Mon Feb 17 15:32:50 2014 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -158,12 +158,9 @@
entity = self.entity
extid = entity.cw_metainformation()['extid']
repo._type_source_cache[entity.eid] = (
- entity.cw_etype, self.newsource.uri, None, self.newsource.uri)
- if self.oldsource.copy_based_source:
- uri = 'system'
- else:
- uri = self.oldsource.uri
- repo._extid_cache[(extid, uri)] = -entity.eid
+ entity.cw_etype, None, self.newsource.uri)
+ repo._extid_cache[extid] = -entity.eid
+
class ChangeEntitySourceDeleteHook(MetaDataHook):
"""support for moving an entity from an external source by watching 'Any
@@ -197,16 +194,6 @@
syssource = newsource.repo_source
oldsource = self._cw.entity_from_eid(schange[self.eidfrom])
entity = self._cw.entity_from_eid(self.eidfrom)
- # copy entity if necessary
- if not oldsource.repo_source.copy_based_source:
- entity.complete(skip_bytes=False, skip_pwd=False)
- if not entity.creation_date:
- entity.cw_attr_cache['creation_date'] = datetime.now()
- if not entity.modification_date:
- entity.cw_attr_cache['modification_date'] = datetime.now()
- entity.cw_attr_cache['cwuri'] = u'%s%s' % (self._cw.base_url(), entity.eid)
- entity.cw_edited = EditedEntity(entity, **entity.cw_attr_cache)
- syssource.add_entity(self._cw, entity)
# we don't want the moved entity to be reimported later. To
# distinguish this state, the trick is to change the associated
# record in the 'entities' system table with eid=-eid while leaving
@@ -217,8 +204,7 @@
self._cw.system_sql('UPDATE entities SET eid=-eid WHERE eid=%(eid)s',
{'eid': self.eidfrom})
attrs = {'type': entity.cw_etype, 'eid': entity.eid, 'extid': None,
- 'source': 'system', 'asource': 'system',
- 'mtime': datetime.now()}
+ 'asource': 'system'}
self._cw.system_sql(syssource.sqlgen.insert('entities', attrs), attrs)
# register an operation to update repository/sources caches
ChangeEntitySourceUpdateCaches(self._cw, entity=entity,
--- a/hooks/security.py Mon Feb 17 11:13:27 2014 +0100
+++ b/hooks/security.py Mon Feb 17 15:32:50 2014 +0100
@@ -79,8 +79,8 @@
def precommit_event(self):
session = self.session
for action, rschema, eidfrom, eidto in self.get_data():
- rdef = rschema.rdef(session.describe(eidfrom)[0],
- session.describe(eidto)[0])
+ rdef = rschema.rdef(session.entity_metas(eidfrom)['type'],
+ session.entity_metas(eidto)['type'])
rdef.check_perm(session, action, fromeid=eidfrom, toeid=eidto)
@@ -135,8 +135,8 @@
if (self.eidfrom, self.rtype, self.eidto) in nocheck:
return
rschema = self._cw.repo.schema[self.rtype]
- rdef = rschema.rdef(self._cw.describe(self.eidfrom)[0],
- self._cw.describe(self.eidto)[0])
+ rdef = rschema.rdef(self._cw.entity_metas(self.eidfrom)['type'],
+ self._cw.entity_metas(self.eidto)['type'])
rdef.check_perm(self._cw, 'add', fromeid=self.eidfrom, toeid=self.eidto)
@@ -154,8 +154,8 @@
CheckRelationPermissionOp.get_instance(self._cw).add_data(
('add', rschema, self.eidfrom, self.eidto) )
else:
- rdef = rschema.rdef(self._cw.describe(self.eidfrom)[0],
- self._cw.describe(self.eidto)[0])
+ rdef = rschema.rdef(self._cw.entity_metas(self.eidfrom)['type'],
+ self._cw.entity_metas(self.eidto)['type'])
rdef.check_perm(self._cw, 'add', fromeid=self.eidfrom, toeid=self.eidto)
@@ -168,7 +168,7 @@
if (self.eidfrom, self.rtype, self.eidto) in nocheck:
return
rschema = self._cw.repo.schema[self.rtype]
- rdef = rschema.rdef(self._cw.describe(self.eidfrom)[0],
- self._cw.describe(self.eidto)[0])
+ rdef = rschema.rdef(self._cw.entity_metas(self.eidfrom)['type'],
+ self._cw.entity_metas(self.eidto)['type'])
rdef.check_perm(self._cw, 'delete', fromeid=self.eidfrom, toeid=self.eidto)
--- a/hooks/syncschema.py Mon Feb 17 11:13:27 2014 +0100
+++ b/hooks/syncschema.py Mon Feb 17 15:32:50 2014 +0100
@@ -81,7 +81,7 @@
# create index before alter table which may expectingly fail during test
# (sqlite) while index creation should never fail (test for index existence
# is done by the dbhelper)
- session.cnxset.source('system').create_index(session, table, column)
+ session.repo.system_source.create_index(session, table, column)
session.info('added index on %s(%s)', table, column)
@@ -196,7 +196,7 @@
clear_cache(eschema, 'ordered_relations')
def postcommit_event(self):
- rebuildinfered = self.session.data.get('rebuild-infered', True)
+ rebuildinfered = self.session.get_shared_data('rebuild-infered', True)
repo = self.session.repo
# commit event should not raise error, while set_schema has chances to
# do so because it triggers full vreg reloading
@@ -244,7 +244,7 @@
description=entity.description)
eschema = schema.add_entity_type(etype)
# create the necessary table
- tablesql = y2sql.eschema2sql(session.cnxset.source('system').dbhelper,
+ tablesql = y2sql.eschema2sql(session.repo.system_source.dbhelper,
eschema, prefix=SQL_PREFIX)
for sql in tablesql.split(';'):
if sql.strip():
@@ -287,18 +287,16 @@
self.session.vreg.schema.rename_entity_type(oldname, newname)
# we need sql to operate physical changes on the system database
sqlexec = self.session.system_sql
- dbhelper= self.session.cnxset.source('system').dbhelper
+ dbhelper = self.session.repo.system_source.dbhelper
sql = dbhelper.sql_rename_table(SQL_PREFIX+oldname,
SQL_PREFIX+newname)
sqlexec(sql)
self.info('renamed table %s to %s', oldname, newname)
sqlexec('UPDATE entities SET type=%(newname)s WHERE type=%(oldname)s',
{'newname': newname, 'oldname': oldname})
- for eid, (etype, uri, extid, auri) in self.session.repo._type_source_cache.items():
+ for eid, (etype, extid, auri) in self.session.repo._type_source_cache.items():
if etype == oldname:
- self.session.repo._type_source_cache[eid] = (newname, uri, extid, auri)
- sqlexec('UPDATE deleted_entities SET type=%(newname)s WHERE type=%(oldname)s',
- {'newname': newname, 'oldname': oldname})
+ self.session.repo._type_source_cache[eid] = (newname, extid, auri)
# XXX transaction records
def precommit_event(self):
@@ -434,7 +432,7 @@
# update the in-memory schema first
rdefdef = self.init_rdef(**props)
# then make necessary changes to the system source database
- syssource = session.cnxset.source('system')
+ syssource = session.repo.system_source
attrtype = y2sql.type_from_constraints(
syssource.dbhelper, rdefdef.object, rdefdef.constraints)
# XXX should be moved somehow into lgdb: sqlite doesn't support to
@@ -609,7 +607,7 @@
self.oldvalues = dict( (attr, getattr(rdef, attr)) for attr in self.values)
rdef.update(self.values)
# then make necessary changes to the system source database
- syssource = session.cnxset.source('system')
+ syssource = session.repo.system_source
if 'indexed' in self.values:
syssource.update_rdef_indexed(session, rdef)
self.indexed_changed = True
@@ -627,7 +625,7 @@
# revert changes on in memory schema
self.rdef.update(self.oldvalues)
# revert changes on database
- syssource = self.session.cnxset.source('system')
+ syssource = self.session.repo.system_source
if self.indexed_changed:
syssource.update_rdef_indexed(self.session, self.rdef)
if self.null_allowed_changed:
@@ -655,7 +653,7 @@
rdef.constraints.remove(self.oldcstr)
# then update database: alter the physical schema on size/unique
# constraint changes
- syssource = session.cnxset.source('system')
+ syssource = session.repo.system_source
cstrtype = self.oldcstr.type()
if cstrtype == 'SizeConstraint':
syssource.update_rdef_column(session, rdef)
@@ -671,7 +669,7 @@
if self.oldcstr is not None:
self.rdef.constraints.append(self.oldcstr)
# revert changes on database
- syssource = self.session.cnxset.source('system')
+ syssource = self.session.repo.system_source
if self.size_cstr_changed:
syssource.update_rdef_column(self.session, self.rdef)
if self.unique_changed:
@@ -702,7 +700,7 @@
rdef.constraints.append(newcstr)
# then update database: alter the physical schema on size/unique
# constraint changes
- syssource = session.cnxset.source('system')
+ syssource = session.repo.system_source
if cstrtype == 'SizeConstraint' and (oldcstr is None or
oldcstr.max != newcstr.max):
syssource.update_rdef_column(session, rdef)
@@ -721,7 +719,7 @@
entity = self.entity
table = '%s%s' % (prefix, entity.constraint_of[0].name)
cols = ['%s%s' % (prefix, r.name) for r in entity.relations]
- dbhelper = session.cnxset.source('system').dbhelper
+ dbhelper = session.repo.system_source.dbhelper
sqls = dbhelper.sqls_create_multicol_unique_index(table, cols, entity.name)
for sql in sqls:
session.system_sql(sql)
@@ -741,7 +739,7 @@
session = self.session
prefix = SQL_PREFIX
table = '%s%s' % (prefix, self.entity.type)
- dbhelper = session.cnxset.source('system').dbhelper
+ dbhelper = session.repo.system_source.dbhelper
cols = ['%s%s' % (prefix, c) for c in self.cols]
sqls = dbhelper.sqls_drop_multicol_unique_index(table, cols, self.cstrname)
for sql in sqls:
@@ -1182,7 +1180,7 @@
def __call__(self):
action = self.rtype.split('_', 1)[0]
- if self._cw.describe(self.eidto)[0] == 'CWGroup':
+ if self._cw.entity_metas(self.eidto)['type'] == 'CWGroup':
MemSchemaPermissionAdd(self._cw, action=action, eid=self.eidfrom,
group_eid=self.eidto)
else: # RQLExpression
@@ -1203,7 +1201,7 @@
if self._cw.deleted_in_transaction(self.eidfrom):
return
action = self.rtype.split('_', 1)[0]
- if self._cw.describe(self.eidto)[0] == 'CWGroup':
+ if self._cw.entity_metas(self.eidto)['type'] == 'CWGroup':
MemSchemaPermissionDel(self._cw, action=action, eid=self.eidfrom,
group_eid=self.eidto)
else: # RQLExpression
--- a/hooks/syncsession.py Mon Feb 17 11:13:27 2014 +0100
+++ b/hooks/syncsession.py Mon Feb 17 15:32:50 2014 +0100
@@ -229,7 +229,7 @@
def __call__(self):
session = self._cw
eidfrom = self.eidfrom
- if not session.describe(eidfrom)[0] == 'CWProperty':
+ if not session.entity_metas(eidfrom)['type'] == 'CWProperty':
return
key, value = session.execute('Any K,V WHERE P eid %(x)s,P pkey K,P value V',
{'x': eidfrom})[0]
--- a/hooks/syncsources.py Mon Feb 17 11:13:27 2014 +0100
+++ b/hooks/syncsources.py Mon Feb 17 15:32:50 2014 +0100
@@ -93,10 +93,7 @@
def precommit_event(self):
source = self.session.repo.sources_by_uri[self.oldname]
- if source.copy_based_source:
- sql = 'UPDATE entities SET asource=%(newname)s WHERE asource=%(oldname)s'
- else:
- sql = 'UPDATE entities SET source=%(newname)s, asource=%(newname)s WHERE source=%(oldname)s'
+ sql = 'UPDATE entities SET asource=%(newname)s WHERE asource=%(oldname)s'
self.session.system_sql(sql, {'oldname': self.oldname,
'newname': self.newname})
@@ -109,11 +106,6 @@
repo.sources_by_uri[self.newname] = source
repo._type_source_cache.clear()
clear_cache(repo, 'source_defs')
- if not source.copy_based_source:
- repo._extid_cache.clear()
- repo._clear_planning_caches()
- for cnxset in repo.cnxsets:
- cnxset.source_cnxs[self.oldname] = cnxset.source_cnxs.pop(self.oldname)
class SourceUpdatedHook(SourceHook):
--- a/hooks/test/unittest_syncschema.py Mon Feb 17 11:13:27 2014 +0100
+++ b/hooks/test/unittest_syncschema.py Mon Feb 17 15:32:50 2014 +0100
@@ -30,7 +30,6 @@
del SchemaModificationHooksTC.schema_eids
class SchemaModificationHooksTC(CubicWebTC):
- reset_schema = True
def setUp(self):
super(SchemaModificationHooksTC, self).setUp()
@@ -39,8 +38,8 @@
def index_exists(self, etype, attr, unique=False):
self.session.set_cnxset()
- dbhelper = self.session.cnxset.source('system').dbhelper
- sqlcursor = self.session.cnxset['system']
+ dbhelper = self.repo.system_source.dbhelper
+ sqlcursor = self.session.cnxset.cu
return dbhelper.index_exists(sqlcursor, SQL_PREFIX + etype, SQL_PREFIX + attr, unique=unique)
def _set_perms(self, eid):
@@ -60,8 +59,8 @@
def test_base(self):
schema = self.repo.schema
self.session.set_cnxset()
- dbhelper = self.session.cnxset.source('system').dbhelper
- sqlcursor = self.session.cnxset['system']
+ dbhelper = self.repo.system_source.dbhelper
+ sqlcursor = self.session.cnxset.cu
self.assertFalse(schema.has_entity('Societe2'))
self.assertFalse(schema.has_entity('concerne2'))
# schema should be update on insertion (after commit)
@@ -201,8 +200,8 @@
def test_uninline_relation(self):
self.session.set_cnxset()
- dbhelper = self.session.cnxset.source('system').dbhelper
- sqlcursor = self.session.cnxset['system']
+ dbhelper = self.repo.system_source.dbhelper
+ sqlcursor = self.session.cnxset.cu
self.assertTrue(self.schema['state_of'].inlined)
try:
self.execute('SET X inlined FALSE WHERE X name "state_of"')
@@ -226,8 +225,8 @@
def test_indexed_change(self):
self.session.set_cnxset()
- dbhelper = self.session.cnxset.source('system').dbhelper
- sqlcursor = self.session.cnxset['system']
+ dbhelper = self.repo.system_source.dbhelper
+ sqlcursor = self.session.cnxset.cu
try:
self.execute('SET X indexed FALSE WHERE X relation_type R, R name "name"')
self.assertTrue(self.schema['name'].rdef('Workflow', 'String').indexed)
@@ -245,8 +244,8 @@
def test_unique_change(self):
self.session.set_cnxset()
- dbhelper = self.session.cnxset.source('system').dbhelper
- sqlcursor = self.session.cnxset['system']
+ dbhelper = self.repo.system_source.dbhelper
+ sqlcursor = self.session.cnxset.cu
try:
self.execute('INSERT CWConstraint X: X cstrtype CT, DEF constrained_by X '
'WHERE CT name "UniqueConstraint", DEF relation_type RT, DEF from_entity E,'
--- a/hooks/workflow.py Mon Feb 17 11:13:27 2014 +0100
+++ b/hooks/workflow.py Mon Feb 17 15:32:50 2014 +0100
@@ -32,12 +32,8 @@
nocheck = session.transaction_data.setdefault('skip-security', set())
nocheck.add((x, 'in_state', oldstate))
nocheck.add((x, 'in_state', newstate))
- # delete previous state first unless in_state isn't stored in the system
- # source
- fromsource = session.describe(x)[1]
- if fromsource == 'system' or \
- not session.repo.sources_by_uri[fromsource].support_relation('in_state'):
- session.delete_relation(x, 'in_state', oldstate)
+ # delete previous state first
+ session.delete_relation(x, 'in_state', oldstate)
session.add_relation(x, 'in_state', newstate)
--- a/misc/migration/3.10.0_Any.py Mon Feb 17 11:13:27 2014 +0100
+++ b/misc/migration/3.10.0_Any.py Mon Feb 17 15:32:50 2014 +0100
@@ -1,6 +1,6 @@
from cubicweb.server.session import hooks_control
-for uri, cfg in config.sources().items():
+for uri, cfg in config.read_sources_file().items():
if uri in ('system', 'admin'):
continue
repo.sources_by_uri[uri] = repo.get_source(cfg['adapter'], uri, cfg.copy())
@@ -18,7 +18,7 @@
'WHERE s.cw_name=e.type')
commit()
-for uri, cfg in config.sources().items():
+for uri, cfg in config.read_sources_file().items():
if uri in ('system', 'admin'):
continue
repo.sources_by_uri.pop(uri)
--- a/misc/migration/3.11.0_Any.py Mon Feb 17 11:13:27 2014 +0100
+++ b/misc/migration/3.11.0_Any.py Mon Feb 17 15:32:50 2014 +0100
@@ -9,77 +9,3 @@
add_attribute('CWSource', 'url')
add_attribute('CWSource', 'parser')
add_attribute('CWSource', 'latest_retrieval')
-
-try:
- from cubicweb.server.sources.pyrorql import PyroRQLSource
-except ImportError:
- pass
-else:
-
- from os.path import join
- # function to read old python mapping file
- def load_mapping_file(source):
- mappingfile = source.config['mapping-file']
- mappingfile = join(source.repo.config.apphome, mappingfile)
- mapping = {}
- execfile(mappingfile, mapping)
- for junk in ('__builtins__', '__doc__'):
- mapping.pop(junk, None)
- mapping.setdefault('support_relations', {})
- mapping.setdefault('dont_cross_relations', set())
- mapping.setdefault('cross_relations', set())
- # do some basic checks of the mapping content
- assert 'support_entities' in mapping, \
- 'mapping file should at least define support_entities'
- assert isinstance(mapping['support_entities'], dict)
- assert isinstance(mapping['support_relations'], dict)
- assert isinstance(mapping['dont_cross_relations'], set)
- assert isinstance(mapping['cross_relations'], set)
- unknown = set(mapping) - set( ('support_entities', 'support_relations',
- 'dont_cross_relations', 'cross_relations') )
- assert not unknown, 'unknown mapping attribute(s): %s' % unknown
- # relations that are necessarily not crossed
- for rtype in ('is', 'is_instance_of', 'cw_source'):
- assert rtype not in mapping['dont_cross_relations'], \
- '%s relation should not be in dont_cross_relations' % rtype
- assert rtype not in mapping['support_relations'], \
- '%s relation should not be in support_relations' % rtype
- return mapping
- # for now, only pyrorql sources have a mapping
- for source in repo.sources_by_uri.itervalues():
- if not isinstance(source, PyroRQLSource):
- continue
- sourceentity = session.entity_from_eid(source.eid)
- mapping = load_mapping_file(source)
- # write mapping as entities
- print 'migrating map for', source
- for etype, write in mapping['support_entities'].items():
- create_entity('CWSourceSchemaConfig',
- cw_for_source=sourceentity,
- cw_schema=session.entity_from_eid(schema[etype].eid),
- options=write and u'write' or None,
- ask_confirm=False)
- for rtype, write in mapping['support_relations'].items():
- options = []
- if write:
- options.append(u'write')
- if rtype in mapping['cross_relations']:
- options.append(u'maycross')
- create_entity('CWSourceSchemaConfig',
- cw_for_source=sourceentity,
- cw_schema=session.entity_from_eid(schema[rtype].eid),
- options=u':'.join(options) or None,
- ask_confirm=False)
- for rtype in mapping['dont_cross_relations']:
- create_entity('CWSourceSchemaConfig',
- cw_for_source=source,
- cw_schema=session.entity_from_eid(schema[rtype].eid),
- options=u'dontcross',
- ask_confirm=False)
- # latest update time cwproperty is now a source attribute (latest_retrieval)
- pkey = u'sources.%s.latest-update-time' % source.uri
- rset = session.execute('Any V WHERE X is CWProperty, X value V, X pkey %(k)s',
- {'k': pkey})
- timestamp = int(rset[0][0])
- sourceentity.cw_set(latest_retrieval=datetime.fromtimestamp(timestamp))
- session.execute('DELETE CWProperty X WHERE X pkey %(k)s', {'k': pkey})
--- a/misc/migration/3.14.4_Any.py Mon Feb 17 11:13:27 2014 +0100
+++ b/misc/migration/3.14.4_Any.py Mon Feb 17 15:32:50 2014 +0100
@@ -4,8 +4,7 @@
rdefdef = schema['CWSource'].rdef('name')
attrtype = y2sql.type_from_constraints(dbhelper, rdefdef.object, rdefdef.constraints).split()[0]
-cursor = session.cnxset['system']
+cursor = session.cnxset.cu
sql('UPDATE entities SET asource = source WHERE asource is NULL')
dbhelper.change_col_type(cursor, 'entities', 'asource', attrtype, False)
dbhelper.change_col_type(cursor, 'entities', 'source', attrtype, False)
-dbhelper.change_col_type(cursor, 'deleted_entities', 'source', attrtype, False)
--- a/misc/migration/3.16.0_Any.py Mon Feb 17 11:13:27 2014 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,13 +0,0 @@
-sync_schema_props_perms('EmailAddress')
-
-for source in rql('CWSource X WHERE X type "pyrorql"').entities():
- sconfig = source.dictconfig
- nsid = sconfig.pop('pyro-ns-id', config.appid)
- nshost = sconfig.pop('pyro-ns-host', '')
- nsgroup = sconfig.pop('pyro-ns-group', ':cubicweb')
- if nsgroup:
- nsgroup += '.'
- source.cw_set(url=u'pyro://%s/%s%s' % (nshost, nsgroup, nsid))
- source.update_config(skip_unknown=True, **sconfig)
-
-commit()
--- a/misc/migration/3.17.11_Any.py Mon Feb 17 11:13:27 2014 +0100
+++ b/misc/migration/3.17.11_Any.py Mon Feb 17 15:32:50 2014 +0100
@@ -2,6 +2,6 @@
('transactions', 'tx_time'),
('tx_entity_actions', 'tx_uuid'),
('tx_relation_actions', 'tx_uuid')]:
- session.cnxset.source('system').create_index(session, table, column)
+ repo.system_source.create_index(session, table, column)
commit()
--- a/misc/migration/3.18.0_Any.py Mon Feb 17 11:13:27 2014 +0100
+++ b/misc/migration/3.18.0_Any.py Mon Feb 17 15:32:50 2014 +0100
@@ -1,4 +1,4 @@
-driver = config.sources()['system']['db-driver']
+driver = config.system_source_config['db-driver']
if not (driver == 'postgres' or driver.startswith('sqlserver')):
import sys
print >>sys.stderr, 'This migration is not supported for backends other than sqlserver or postgres (yet).'
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/misc/migration/3.19.0_Any.py Mon Feb 17 15:32:50 2014 +0100
@@ -0,0 +1,4 @@
+sql('DROP TABLE "deleted_entities"')
+sql('ALTER TABLE "entities" DROP COLUMN "mtime"')
+sql('ALTER TABLE "entities" DROP COLUMN "source"')
+
--- a/misc/migration/3.8.5_Any.py Mon Feb 17 11:13:27 2014 +0100
+++ b/misc/migration/3.8.5_Any.py Mon Feb 17 15:32:50 2014 +0100
@@ -1,5 +1,5 @@
def migrate_varchar_to_nvarchar():
- dbdriver = config.sources()['system']['db-driver']
+ dbdriver = config.system_source_config['db-driver']
if dbdriver != "sqlserver2005":
return
--- a/misc/migration/bootstrapmigration_repository.py Mon Feb 17 11:13:27 2014 +0100
+++ b/misc/migration/bootstrapmigration_repository.py Mon Feb 17 15:32:50 2014 +0100
@@ -223,11 +223,11 @@
if applcubicwebversion < (3, 2, 2) and cubicwebversion >= (3, 2, 1):
from base64 import b64encode
- for table in ('entities', 'deleted_entities'):
- for eid, extid in sql('SELECT eid, extid FROM %s WHERE extid is NOT NULL'
- % table, ask_confirm=False):
- sql('UPDATE %s SET extid=%%(extid)s WHERE eid=%%(eid)s' % table,
- {'extid': b64encode(extid), 'eid': eid}, ask_confirm=False)
+ for eid, extid in sql('SELECT eid, extid FROM entities '
+ 'WHERE extid is NOT NULL',
+ ask_confirm=False):
+ sql('UPDATE entities SET extid=%(extid)s WHERE eid=%(eid)s',
+ {'extid': b64encode(extid), 'eid': eid}, ask_confirm=False)
commit()
if applcubicwebversion < (3, 2, 0) and cubicwebversion >= (3, 2, 0):
--- a/misc/scripts/cwuser_ldap2system.py Mon Feb 17 11:13:27 2014 +0100
+++ b/misc/scripts/cwuser_ldap2system.py Mon Feb 17 15:32:50 2014 +0100
@@ -1,7 +1,7 @@
import base64
from cubicweb.server.utils import crypt_password
-dbdriver = config.sources()['system']['db-driver']
+dbdriver = config.system_source_config['db-driver']
from logilab.database import get_db_helper
dbhelper = get_db_helper(driver)
--- a/misc/scripts/drop_external_entities.py Mon Feb 17 11:13:27 2014 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,23 +0,0 @@
-from cubicweb import UnknownEid
-source, = __args__
-
-sql("DELETE FROM entities WHERE type='Int'")
-
-ecnx = session.cnxset.connection(source)
-for e in rql('Any X WHERE X cw_source S, S name %(name)s', {'name': source}).entities():
- meta = e.cw_metainformation()
- assert meta['source']['uri'] == source
- try:
- suri = ecnx.describe(meta['extid'])[1]
- except UnknownEid:
- print 'cant describe', e.cw_etype, e.eid, meta
- continue
- if suri != 'system':
- try:
- print 'deleting', e.cw_etype, e.eid, suri, e.dc_title().encode('utf8')
- repo.delete_info(session, e, suri, scleanup=e.eid)
- except UnknownEid:
- print ' cant delete', e.cw_etype, e.eid, meta
-
-
-commit()
--- a/misc/scripts/ldap_change_base_dn.py Mon Feb 17 11:13:27 2014 +0100
+++ b/misc/scripts/ldap_change_base_dn.py Mon Feb 17 15:32:50 2014 +0100
@@ -6,7 +6,7 @@
print
print 'you should not have updated your sources file yet'
-olddn = repo.config.sources()[uri]['user-base-dn']
+olddn = repo.sources_by_uri[uri].config['user-base-dn']
assert olddn != newdn
--- a/misc/scripts/repair_file_1-9_migration.py Mon Feb 17 11:13:27 2014 +0100
+++ b/misc/scripts/repair_file_1-9_migration.py Mon Feb 17 15:32:50 2014 +0100
@@ -15,11 +15,11 @@
from cubicweb import cwconfig, dbapi
from cubicweb.server.session import hooks_control
-sourcescfg = repo.config.sources()
+defaultadmin = repo.config.default_admin_config
backupcfg = cwconfig.instance_configuration(backupinstance)
backupcfg.repairing = True
-backuprepo, backupcnx = dbapi.in_memory_repo_cnx(backupcfg, sourcescfg['admin']['login'],
- password=sourcescfg['admin']['password'],
+backuprepo, backupcnx = dbapi.in_memory_repo_cnx(backupcfg, defaultadmin['login'],
+ password=defaultadmin['password'],
host='localhost')
backupcu = backupcnx.cursor()
--- a/pytestconf.py Mon Feb 17 11:13:27 2014 +0100
+++ b/pytestconf.py Mon Feb 17 15:32:50 2014 +0100
@@ -43,6 +43,6 @@
if not cls.repo.shutting_down:
cls.repo.shutdown()
del cls.repo
- for clsattr in ('cnx', '_orig_cnx', 'config', '_config', 'vreg', 'schema'):
+ for clsattr in ('cnx', 'config', '_config', 'vreg', 'schema'):
if clsattr in cls.__dict__:
delattr(cls, clsattr)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/repoapi.py Mon Feb 17 15:32:50 2014 +0100
@@ -0,0 +1,354 @@
+# copyright 2013-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""Official API to access the content of a repository
+"""
+from logilab.common.deprecation import deprecated
+
+from cubicweb.utils import parse_repo_uri
+from cubicweb import ConnectionError, ProgrammingError, AuthenticationError
+from uuid import uuid4
+from contextlib import contextmanager
+from cubicweb.req import RequestSessionBase
+from functools import wraps
+
+### private functions for specific methods ###################################
+
+def _get_inmemory_repo(config, vreg=None):
+ from cubicweb.server.repository import Repository
+ from cubicweb.server.utils import TasksManager
+ return Repository(config, TasksManager(), vreg=vreg)
+
+
+### public API ######################################################
+
+def get_repository(uri=None, config=None, vreg=None):
+ """get a repository for the given URI or config/vregistry (in case we're
+ loading the repository for a client, eg web server, configuration).
+
+ The returned repository may be an in-memory repository or a proxy object
+ using a specific RPC method, depending on the given URI (pyro or zmq).
+ """
+ if uri is None:
+ return _get_inmemory_repo(config, vreg)
+
+ protocol, hostport, appid = parse_repo_uri(uri)
+
+ if protocol == 'inmemory':
+ # we may have been called with a dummy 'inmemory://' uri ...
+ return _get_inmemory_repo(config, vreg)
+
+ if protocol == 'pyroloc': # direct connection to the instance
+ from logilab.common.pyro_ext import get_proxy
+ uri = uri.replace('pyroloc', 'PYRO')
+ return get_proxy(uri)
+
+ if protocol == 'pyro': # connection mediated through the pyro ns
+ from logilab.common.pyro_ext import ns_get_proxy
+ path = appid.strip('/')
+ if not path:
+ raise ConnectionError(
+ "can't find instance name in %s (expected to be the path component)"
+ % uri)
+ if '.' in path:
+ nsgroup, nsid = path.rsplit('.', 1)
+ else:
+ nsgroup = 'cubicweb'
+ nsid = path
+ return ns_get_proxy(nsid, defaultnsgroup=nsgroup, nshost=hostport)
+
+ if protocol.startswith('zmqpickle-'):
+ from cubicweb.zmqclient import ZMQRepositoryClient
+ return ZMQRepositoryClient(uri)
+ else:
+ raise ConnectionError('unknown protocol: `%s`' % protocol)
+
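+# Examples of accepted URIs (a sketch; hosts, ports and ids are
+# placeholders):
+#
+#   'inmemory://'                  (requires the config argument)
+#   'pyro://nshost/appid'          (through the pyro name server)
+#   'pyroloc://host:port/appid'    (direct pyro connection)
+#   'zmqpickle-tcp://host:port'
+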
+def connect(repo, login, **kwargs):
+ """Take credential and return associated ClientConnection.
+
+ The ClientConnection is associated to a new Session object that will be
+ closed when the ClientConnection is closed.
+
+ raise AuthenticationError if the credential are invalid."""
+ sessionid = repo.connect(login, **kwargs)
+ session = repo._get_session(sessionid)
+ # XXX autoclose_session should probably be handled on the session
+ # directly; this is something to consider once we have a proper
+ # server-side Connection.
+ return ClientConnection(session, autoclose_session=True)
+
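+# A minimal usage sketch; ``config`` is assumed to be an instance
+# configuration, and the login/password are placeholders:
+#
+#   repo = get_repository('inmemory://', config=config)
+#   with connect(repo, 'admin', password='admin') as cnx:
+#       rset = cnx.execute('Any X WHERE X is CWUser')
+#       cnx.commit()
+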
+def anonymous_cnx(repo):
+ """return a ClientConnection for Anonymous user.
+
+ The ClientConnection is associated to a new Session object that will be
+ closed when the ClientConnection is closed.
+
+ raises an AuthenticationError if anonymous usage is not allowed
+ """
+ anoninfo = getattr(repo.config, 'anonymous_user', lambda: None)()
+ if anoninfo is None: # no anonymous user
+ raise AuthenticationError('anonymous access is not authorized')
+ anon_login, anon_password = anoninfo
+ # use vreg's repository cache
+ return connect(repo, anon_login, password=anon_password)
+
+def _srv_cnx_func(name):
+ """Decorate ClientConnection method blindly forward to Connection
+ THIS TRANSITIONAL PURPOSE
+
+ will be dropped when we have standalone connection"""
+ def proxy(clt_cnx, *args, **kwargs):
+ # the ``with`` dance is transitional. We do not have a standalone
+ # Connection yet, so we use this trick to ensure the session has the
+ # proper cnx loaded. This can be simplified once we have a standalone
+ # Connection object.
+ if not clt_cnx._open:
+ raise ProgrammingError('Closed client connection')
+ return getattr(clt_cnx._cnx, name)(*args, **kwargs)
+ return proxy
+
+def _open_only(func):
+ """decorator for ClientConnection method that check it is open"""
+ @wraps(func)
+ def check_open(clt_cnx, *args, **kwargs):
+ if not clt_cnx._open:
+ raise ProgrammingError('Closed client connection')
+ return func(clt_cnx, *args, **kwargs)
+ return check_open
+
+
+class ClientConnection(RequestSessionBase):
+ """A Connection object to be used Client side.
+
+ This object is aimed to be used client side (so potential communication
+ with the repo through RPC) and aims to offer some compatibility with the
+ cubicweb.dbapi.Connection interface.
+
+ The autoclose_session parameter informs the connection that this session
+ has been opened explicitly and only for this client connection. The
+ connection will close the session on exit.
+ """
+ # make exceptions available through the connection object
+ ProgrammingError = ProgrammingError
+ # attributes that may be overridden per connection instance
+ anonymous_connection = False # XXX really needed ?
+ is_repo_in_memory = True # BC, always true
+
+ def __init__(self, session, autoclose_session=False):
+ self._session = session # XXX there is no real reason to keep the
+ # session around; functions still using it
+ # should be rewritten and migrated.
+ self._cnx = None
+ self._open = None
+ self._web_request = False
+ self.vreg = session.vreg
+ self._set_user(session.user)
+ self._autoclose_session = autoclose_session
+
+ def __enter__(self):
+ assert self._open is None
+ self._open = True
+ self._cnx = self._session.new_cnx()
+ self._cnx.__enter__()
+ self._cnx.ctx_count += 1
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self._open = False
+ self._cnx.ctx_count -= 1
+ self._cnx.__exit__(exc_type, exc_val, exc_tb)
+ self._cnx = None
+ if self._autoclose_session:
+ # we have to call repo.close to ensure the repo properly forgets the
+ # session; calling session.close() is not enough :-(
+ self._session.repo.close(self._session.id)
+
+
+ # begin silly BC
+ @property
+ def _closed(self):
+ return not self._open
+
+ def close(self):
+ if self._open:
+ self.__exit__(None, None, None)
+
+ def __repr__(self):
+ # XXX we probably want to reference the user of the session here
+ if self._open is None:
+ return '<ClientConnection (not open yet)>'
+ elif not self._open:
+ return '<ClientConnection (closed)>'
+ elif self.anonymous_connection:
+ return '<ClientConnection %s (anonymous)>' % self._cnx.connectionid
+ else:
+ return '<ClientConnection %s>' % self._cnx.connectionid
+ # end silly BC
+
+ # Main Connection purpose in life #########################################
+
+ call_service = _srv_cnx_func('call_service')
+
+ @_open_only
+ def execute(self, *args, **kwargs):
+ # the ``with`` dance is transitional. We do not have a standalone
+ # Connection yet, so we use this trick to ensure the session has the
+ # proper cnx loaded. This can be simplified once we have a standalone
+ # Connection object.
+ rset = self._cnx.execute(*args, **kwargs)
+ rset.req = self
+ # XXX keep the same behavior as the old dbapi
+ # otherwise multiple tests break.
+ # The little internet kitten is very sad about this situation.
+ rset._rqlst = None
+ return rset
+
+ commit = _srv_cnx_func('commit')
+ rollback = _srv_cnx_func('rollback')
+
+ # session data methods #####################################################
+
+ get_shared_data = _srv_cnx_func('get_shared_data')
+ set_shared_data = _srv_cnx_func('set_shared_data')
+
+ # meta-data accessors ######################################################
+
+ @_open_only
+ def source_defs(self):
+ """Return the definition of sources used by the repository."""
+ return self._session.repo.source_defs()
+
+ @_open_only
+ def get_schema(self):
+ """Return the schema currently used by the repository."""
+ return self._session.repo.get_schema()
+
+ @_open_only
+ def get_option_value(self, option):
+ """Return the value for `option` in the configuration."""
+ return self._session.repo.get_option_value(option)
+
+ entity_metas = _srv_cnx_func('entity_metas')
+ describe = _srv_cnx_func('describe') # XXX deprecated in 3.19
+
+ # undo support ############################################################
+
+ @_open_only
+ def undoable_transactions(self, ueid=None, req=None, **actionfilters):
+ """Return a list of undoable transaction objects by the connection's
+ user, ordered by descendant transaction time.
+
+ Managers may filter according to the user (eid) who performed the
+ transaction, using the `ueid` argument. Others will only see their own
+ transactions.
+
+ Additional filtering capabilities are provided by the following named
+ arguments:
+
+ * `etype` to get only transactions creating/updating/deleting entities
+ of the given type
+
+ * `eid` to get only transactions applied to entity of the given eid
+
+ * `action` to get only transactions doing the given action (action in
+ 'C', 'U', 'D', 'A', 'R'). If `etype` is given, action can only be
+ 'C', 'U' or 'D'.
+
+ * `public`: when additional filtering is provided, actions are by
+ default only searched among 'public' actions, unless a `public`
+ argument is given and set to false.
+ """
+ # the ``with`` dance is transitional. We do not have a standalone
+ # Connection yet, so we use this trick to ensure the session has the
+ # proper cnx loaded. This can be simplified once we have a standalone
+ # Connection object.
+ source = self._cnx.repo.system_source
+ txinfos = source.undoable_transactions(self._cnx, ueid, **actionfilters)
+ for txinfo in txinfos:
+ txinfo.req = req or self # XXX mostly wrong
+ return txinfos
+
+ @_open_only
+ def transaction_info(self, txuuid, req=None):
+ """Return transaction object for the given uid.
+
+ raise `NoSuchTransaction` if not found or if session's user is not
+ allowed (eg not in managers group and the transaction doesn't belong to
+ him).
+ """
+ # the ``with`` dance is transitional. We do not have a standalone
+ # Connection yet, so we use this trick to ensure the session has the
+ # proper cnx loaded. This can be simplified once we have a standalone
+ # Connection object.
+ txinfo = self._cnx.repo.system_source.tx_info(self._cnx, txuuid)
+ if req:
+ txinfo.req = req
+ else:
+ txinfo.cnx = self
+ return txinfo
+
+ @_open_only
+ def transaction_actions(self, txuuid, public=True):
+ """Return an ordered list of action effectued during that transaction.
+
+ If public is true, return only 'public' actions, eg not ones triggered
+ under the cover by hooks, else return all actions.
+
+ raise `NoSuchTransaction` if the transaction is not found or if
+ session's user is not allowed (eg not in managers group and the
+ transaction doesn't belong to him).
+ """
+ # the ``with`` dance is transitional. We do not have a standalone
+ # Connection yet, so we use this trick to ensure the session has the
+ # proper cnx loaded. This can be simplified once we have a standalone
+ # Connection object.
+ return self._cnx.repo.system_source.tx_actions(self._cnx, txuuid, public)
+
+ @_open_only
+ def undo_transaction(self, txuuid):
+ """Undo the given transaction. Return potential restoration errors.
+
+ raise `NoSuchTransaction` if not found or if the session's user is not
+ allowed (eg not in the managers group and the transaction doesn't
+ belong to them).
+ """
+ # the ``with`` dance is transitional. We do not have a standalone
+ # Connection yet, so we use this trick to ensure the session has the
+ # proper cnx loaded. This can be simplified once we have a standalone
+ # Connection object.
+ return self._cnx.repo.system_source.undo_transaction(self._cnx, txuuid)
+
+ @deprecated('[3.19] This is a repoapi.ClientConnection object not a dbapi one')
+ def request(self):
+ return self
+
+ @deprecated('[3.19] This is a repoapi.ClientConnection object not a dbapi one')
+ def cursor(self):
+ return self
+
+ @property
+ @deprecated('[3.19] This is a repoapi.ClientConnection object not a dbapi one')
+ def sessionid(self):
+ return self._session.id
+
+ @property
+ @deprecated('[3.19] This is a repoapi.ClientConnection object not a dbapi one')
+ def connection(self):
+ return self
+
+ @property
+ @deprecated('[3.19] This is a repoapi.ClientConnection object not a dbapi one')
+ def _repo(self):
+ return self._session.repo
--- a/req.py Mon Feb 17 11:13:27 2014 +0100
+++ b/req.py Mon Feb 17 15:32:50 2014 +0100
@@ -75,6 +75,23 @@
self.local_perm_cache = {}
self._ = unicode
+ def _set_user(self, orig_user):
+ """set the user for this req_session_base
+
+ A special method is needed to ensure the linked user is linked to the
+ connection too.
+ """
+ # cnx validity is checked by the call to .user_info
+ rset = self.eid_rset(orig_user.eid, 'CWUser')
+ user_cls = self.vreg['etypes'].etype_class('CWUser')
+ user = user_cls(self, rset, row=0, groups=orig_user.groups,
+ properties=orig_user.properties)
+ user.cw_attr_cache['login'] = orig_user.login # cache login
+ self.user = user
+ self.set_entity_cache(user)
+ self.set_language(user.prefered_language())
+
+
def set_language(self, lang):
"""install i18n configuration for `lang` translation.
@@ -86,7 +103,7 @@
self._ = self.__ = gettext
self.pgettext = pgettext
- def get_option_value(self, option, foreid=None):
+ def get_option_value(self, option):
raise NotImplementedError
def property_value(self, key):
@@ -94,7 +111,9 @@
user specific value if any, else using site value
"""
if self.user:
- return self.user.property_value(key)
+ val = self.user.property_value(key)
+ if val is not None:
+ return val
return self.vreg.property_value(key)
def etype_rset(self, etype, size=1):
@@ -114,7 +133,7 @@
"""
eid = int(eid)
if etype is None:
- etype = self.describe(eid)[0]
+ etype = self.entity_metas(eid)['type']
rset = ResultSet([(eid,)], 'Any X WHERE X eid %(x)s', {'x': eid},
[(etype,)])
rset.req = self
@@ -224,6 +243,11 @@
- cubes.blog.mycache
- etc.
"""
+ warn('[3.19] .get_cache will disappear soon. '
+ 'Distributed caching mechanisms are being introduced instead. '
+ 'Other caching mechanisms can be used more reliably '
+ 'to the same effect.',
+ DeprecationWarning)
if cachename in CACHE_REGISTRY:
cache = CACHE_REGISTRY[cachename]
else:
@@ -253,24 +277,20 @@
"""
# use *args since we don't want first argument to be "anonymous" to
# avoid potential clash with kwargs
+ method = None
if args:
assert len(args) == 1, 'only 0 or 1 non-named-argument expected'
method = args[0]
- else:
- method = None
+ if method is None:
+ method = 'view'
# XXX I (adim) think that if method is passed explicitly, we should
# not try to process it and directly call req.build_url()
- if method is None:
- if self.from_controller() == 'view' and not '_restpath' in kwargs:
- method = self.relative_path(includeparams=False) or 'view'
- else:
- method = 'view'
base_url = kwargs.pop('base_url', None)
if base_url is None:
secure = kwargs.pop('__secure__', None)
base_url = self.base_url(secure=secure)
if '_restpath' in kwargs:
- assert method == 'view', method
+ assert method == 'view', repr(method)
path = kwargs.pop('_restpath')
else:
path = method
--- a/schema.py Mon Feb 17 11:13:27 2014 +0100
+++ b/schema.py Mon Feb 17 15:32:50 2014 +0100
@@ -812,20 +812,20 @@
assert not ('fromeid' in kwargs or 'toeid' in kwargs), kwargs
assert action in ('read', 'update')
if 'eid' in kwargs:
- subjtype = _cw.describe(kwargs['eid'])[0]
+ subjtype = _cw.entity_metas(kwargs['eid'])['type']
else:
subjtype = objtype = None
else:
assert not 'eid' in kwargs, kwargs
assert action in ('read', 'add', 'delete')
if 'fromeid' in kwargs:
- subjtype = _cw.describe(kwargs['fromeid'])[0]
+ subjtype = _cw.entity_metas(kwargs['fromeid'])['type']
elif 'frometype' in kwargs:
subjtype = kwargs.pop('frometype')
else:
subjtype = None
if 'toeid' in kwargs:
- objtype = _cw.describe(kwargs['toeid'])[0]
+ objtype = _cw.entity_metas(kwargs['toeid'])['type']
elif 'toetype' in kwargs:
objtype = kwargs.pop('toetype')
else:
--- a/server/__init__.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/__init__.py Mon Feb 17 15:32:50 2014 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -31,8 +31,6 @@
from logilab.common.modutils import LazyObject
from logilab.common.textutils import splitstrip
from logilab.common.registry import yes
-from logilab import database
-
from yams import BASE_GROUPS
from cubicweb import CW_SOFTWARE_ROOT
@@ -204,7 +202,7 @@
with the minimal set of entities (ie at least the schema, base groups and
a initial user)
"""
- from cubicweb.dbapi import in_memory_repo_cnx
+ from cubicweb.repoapi import get_repository, connect
from cubicweb.server.repository import Repository
from cubicweb.server.utils import manager_userpasswd
from cubicweb.server.sqlutils import sqlexec, sqlschema, sql_drop_all_user_tables
@@ -218,7 +216,7 @@
# only enable the system source at initialization time
repo = Repository(config, vreg=vreg)
schema = repo.schema
- sourcescfg = config.sources()
+ sourcescfg = config.read_sources_file()
source = sourcescfg['system']
driver = source['db-driver']
sqlcnx = repo.system_source.get_connection()
@@ -257,49 +255,47 @@
sqlcursor.close()
sqlcnx.commit()
sqlcnx.close()
- session = repo.internal_session()
- # insert entity representing the system source
- ssource = session.create_entity('CWSource', type=u'native', name=u'system')
- repo.system_source.eid = ssource.eid
- session.execute('SET X cw_source X WHERE X eid %(x)s', {'x': ssource.eid})
- # insert base groups and default admin
- print '-> inserting default user and default groups.'
- try:
- login = unicode(sourcescfg['admin']['login'])
- pwd = sourcescfg['admin']['password']
- except KeyError:
- if interactive:
- msg = 'enter login and password of the initial manager account'
- login, pwd = manager_userpasswd(msg=msg, confirm=True)
- else:
- login, pwd = unicode(source['db-user']), source['db-password']
- # sort for eid predicatability as expected in some server tests
- for group in sorted(BASE_GROUPS):
- session.create_entity('CWGroup', name=unicode(group))
- admin = create_user(session, login, pwd, 'managers')
- session.execute('SET X owned_by U WHERE X is IN (CWGroup,CWSource), U eid %(u)s',
- {'u': admin.eid})
- session.commit()
- session.close()
+ with repo.internal_cnx() as cnx:
+ # insert entity representing the system source
+ ssource = cnx.create_entity('CWSource', type=u'native', name=u'system')
+ repo.system_source.eid = ssource.eid
+ cnx.execute('SET X cw_source X WHERE X eid %(x)s', {'x': ssource.eid})
+ # insert base groups and default admin
+ print '-> inserting default user and default groups.'
+ try:
+ login = unicode(sourcescfg['admin']['login'])
+ pwd = sourcescfg['admin']['password']
+ except KeyError:
+ if interactive:
+ msg = 'enter login and password of the initial manager account'
+ login, pwd = manager_userpasswd(msg=msg, confirm=True)
+ else:
+ login, pwd = unicode(source['db-user']), source['db-password']
+ # sort for eid predictability as expected in some server tests
+ for group in sorted(BASE_GROUPS):
+ cnx.create_entity('CWGroup', name=unicode(group))
+ admin = create_user(cnx, login, pwd, 'managers')
+ cnx.execute('SET X owned_by U WHERE X is IN (CWGroup,CWSource), U eid %(u)s',
+ {'u': admin.eid})
+ cnx.commit()
repo.shutdown()
# reloging using the admin user
config._cubes = None # avoid assertion error
- repo, cnx = in_memory_repo_cnx(config, login, password=pwd)
- repo.system_source.eid = ssource.eid # redo this manually
- assert len(repo.sources) == 1, repo.sources
- handler = config.migration_handler(schema, interactive=False,
- cnx=cnx, repo=repo)
- # install additional driver specific sql files
- handler.cmd_install_custom_sql_scripts()
- for cube in reversed(config.cubes()):
- handler.cmd_install_custom_sql_scripts(cube)
- # serialize the schema
- initialize_schema(config, schema, handler)
- # yoo !
- cnx.commit()
- repo.system_source.init_creating()
- cnx.commit()
- cnx.close()
+ repo = get_repository(config=config)
+ with connect(repo, login, password=pwd) as cnx:
+ repo.system_source.eid = ssource.eid # redo this manually
+ handler = config.migration_handler(schema, interactive=False,
+ cnx=cnx, repo=repo)
+ # install additional driver specific sql files
+ handler.cmd_install_custom_sql_scripts()
+ for cube in reversed(config.cubes()):
+ handler.cmd_install_custom_sql_scripts(cube)
+ # serialize the schema
+ initialize_schema(config, schema, handler)
+ # yoo !
+ cnx.commit()
+ repo.system_source.init_creating()
+ cnx.commit()
repo.shutdown()
# restore initial configuration
config.creating = False
@@ -312,13 +308,13 @@
def initialize_schema(config, schema, mhandler, event='create'):
from cubicweb.server.schemaserial import serialize_schema
- session = mhandler.session
+ cnx = mhandler.cnx
cubes = config.cubes()
# deactivate every hooks but those responsible to set metadata
# so, NO INTEGRITY CHECKS are done, to have quicker db creation.
# Active integrity is kept else we may pb such as two default
# workflows for one entity type.
- with session.deny_all_hooks_but('metadata', 'activeintegrity'):
+ with cnx._cnx.deny_all_hooks_but('metadata', 'activeintegrity'):
# execute cubicweb's pre<event> script
mhandler.cmd_exec_event_script('pre%s' % event)
# execute cubes pre<event> script if any
@@ -327,8 +323,7 @@
# execute instance's pre<event> script (useful in tests)
mhandler.cmd_exec_event_script('pre%s' % event, apphome=True)
# enter instance'schema into the database
- session.set_cnxset()
- serialize_schema(session, schema)
+ serialize_schema(cnx, schema)
# execute cubicweb's post<event> script
mhandler.cmd_exec_event_script('post%s' % event)
# execute cubes'post<event> script if any
@@ -353,6 +348,4 @@
SOURCE_TYPES = {'native': LazyObject('cubicweb.server.sources.native', 'NativeSQLSource'),
'datafeed': LazyObject('cubicweb.server.sources.datafeed', 'DataFeedSource'),
'ldapfeed': LazyObject('cubicweb.server.sources.ldapfeed', 'LDAPFeedSource'),
- 'pyrorql': LazyObject('cubicweb.server.sources.pyrorql', 'PyroRQLSource'),
- 'zmqrql': LazyObject('cubicweb.server.sources.zmqrql', 'ZMQRQLSource'),
}
--- a/server/checkintegrity.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/checkintegrity.py Mon Feb 17 15:32:50 2014 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -39,24 +39,12 @@
"""return true if the eid is a valid eid"""
if eid in eids:
return eids[eid]
- sqlcursor.execute('SELECT type, source FROM entities WHERE eid=%s' % eid)
+ sqlcursor.execute('SELECT type FROM entities WHERE eid=%s' % eid)
try:
- etype, source = sqlcursor.fetchone()
+ etype = sqlcursor.fetchone()[0]
except Exception:
eids[eid] = False
return False
- if source and source != 'system':
- try:
- # insert eid *and* etype to attempt checking entity has not been
- # replaced by another subsquently to a restore of an old dump
- if session.execute('Any X WHERE X is %s, X eid %%(x)s' % etype,
- {'x': eid}):
- eids[eid] = True
- return True
- except Exception: # TypeResolverError, Unauthorized...
- pass
- eids[eid] = False
- return False
if etype not in session.vreg.schema:
eids[eid] = False
return False
@@ -99,7 +87,7 @@
# deactivate modification_date hook since we don't want them
# to be updated due to the reindexation
repo = session.repo
- cursor = session.cnxset['system']
+ cursor = session.cnxset.cu
dbhelper = session.repo.system_source.dbhelper
if not dbhelper.has_fti_table(cursor):
print 'no text index table'
@@ -193,7 +181,7 @@
notify_fixed(fix)
# source in entities, but no relation cw_source
applcwversion = session.repo.get_versions().get('cubicweb')
- if applcwversion >= (3,13,1): # entities.asource appeared in 3.13.1
+ if applcwversion >= (3, 13, 1): # entities.asource appeared in 3.13.1
cursor = session.system_sql('SELECT e.eid FROM entities as e, cw_CWSource as s '
'WHERE s.cw_name=e.asource AND '
'NOT EXISTS(SELECT 1 FROM cw_source_relation as cs '
--- a/server/cwzmq.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/cwzmq.py Mon Feb 17 15:32:50 2014 +0100
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# copyright 2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2012-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -17,17 +17,17 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-from threading import Thread
import cPickle
import traceback
+from threading import Thread
+from logging import getLogger
import zmq
from zmq.eventloop import ioloop
import zmq.eventloop.zmqstream
-from logging import getLogger
from cubicweb import set_log_methods
-from cubicweb.server.server import QuitEvent
+from cubicweb.server.server import QuitEvent, Finished
ctx = zmq.Context()
--- a/server/hook.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/hook.py Mon Feb 17 15:32:50 2014 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -257,8 +257,8 @@
from logilab.common.decorators import classproperty, cached
from logilab.common.deprecation import deprecated, class_renamed
from logilab.common.logging_ext import set_log_methods
-from logilab.common.registry import (Predicate, NotPredicate, OrPredicate,
- objectify_predicate, yes)
+from logilab.common.registry import (NotPredicate, OrPredicate,
+ objectify_predicate)
from cubicweb import RegistryNotFound, server
from cubicweb.cwvreg import CWRegistry, CWRegistryStore
@@ -460,10 +460,10 @@
if kwargs.get('rtype') not in self.expected:
return 0
if self.frometypes is not None and \
- req.describe(kwargs['eidfrom'])[0] not in self.frometypes:
+ req.entity_metas(kwargs['eidfrom'])['type'] not in self.frometypes:
return 0
if self.toetypes is not None and \
- req.describe(kwargs['eidto'])[0] not in self.toetypes:
+ req.entity_metas(kwargs['eidto'])['type'] not in self.toetypes:
return 0
return 1
@@ -604,7 +604,7 @@
def __call__(self):
assert self.main_rtype
for eid in (self.eidfrom, self.eidto):
- etype = self._cw.describe(eid)[0]
+ etype = self._cw.entity_metas(eid)['type']
if self.main_rtype not in self._cw.vreg.schema.eschema(etype).subjrels:
return
if self.rtype in self.subject_relations:
@@ -640,7 +640,7 @@
skip_object_relations = ()
def __call__(self):
- eschema = self._cw.vreg.schema.eschema(self._cw.describe(self.eidfrom)[0])
+ eschema = self._cw.vreg.schema.eschema(self._cw.entity_metas(self.eidfrom)['type'])
execute = self._cw.execute
for rel in self.subject_relations:
if rel in eschema.subjrels and not rel in self.skip_subject_relations:
@@ -664,7 +664,7 @@
events = ('after_delete_relation',)
def __call__(self):
- eschema = self._cw.vreg.schema.eschema(self._cw.describe(self.eidfrom)[0])
+ eschema = self._cw.vreg.schema.eschema(self._cw.entity_metas(self.eidfrom)['type'])
execute = self._cw.execute
for rel in self.subject_relations:
if rel in eschema.subjrels and not rel in self.skip_subject_relations:
--- a/server/ldaputils.py Mon Feb 17 11:13:27 2014 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,360 +0,0 @@
-# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
-# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
-#
-# This file is part of CubicWeb.
-#
-# CubicWeb is free software: you can redistribute it and/or modify it under the
-# terms of the GNU Lesser General Public License as published by the Free
-# Software Foundation, either version 2.1 of the License, or (at your option)
-# any later version.
-#
-# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
-# details.
-#
-# You should have received a copy of the GNU Lesser General Public License along
-# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""cubicweb utilities for ldap sources
-
-Part of the code is coming form Zope's LDAPUserFolder
-
-Copyright (c) 2004 Jens Vagelpohl.
-All Rights Reserved.
-
-This software is subject to the provisions of the Zope Public License,
-Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
-THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
-WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
-FOR A PARTICULAR PURPOSE.
-"""
-
-from __future__ import division # XXX why?
-
-from datetime import datetime
-
-import ldap
-from ldap.ldapobject import ReconnectLDAPObject
-from ldap.filter import filter_format
-from ldapurl import LDAPUrl
-
-from cubicweb import ValidationError, AuthenticationError, Binary
-from cubicweb.server import utils
-from cubicweb.server.sources import ConnectionWrapper
-
-_ = unicode
-
-# search scopes
-BASE = ldap.SCOPE_BASE
-ONELEVEL = ldap.SCOPE_ONELEVEL
-SUBTREE = ldap.SCOPE_SUBTREE
-
-# map ldap protocol to their standard port
-PROTO_PORT = {'ldap': 389,
- 'ldaps': 636,
- 'ldapi': None,
- }
-
-
-class LDAPSourceMixIn(object):
- """a mix-in for LDAP based source"""
- options = (
- ('auth-mode',
- {'type' : 'choice',
- 'default': 'simple',
- 'choices': ('simple', 'cram_md5', 'digest_md5', 'gssapi'),
- 'help': 'authentication mode used to authenticate user to the ldap.',
- 'group': 'ldap-source', 'level': 3,
- }),
- ('auth-realm',
- {'type' : 'string',
- 'default': None,
- 'help': 'realm to use when using gssapi/kerberos authentication.',
- 'group': 'ldap-source', 'level': 3,
- }),
-
- ('data-cnx-dn',
- {'type' : 'string',
- 'default': '',
- 'help': 'user dn to use to open data connection to the ldap (eg used \
-to respond to rql queries). Leave empty for anonymous bind',
- 'group': 'ldap-source', 'level': 1,
- }),
- ('data-cnx-password',
- {'type' : 'string',
- 'default': '',
- 'help': 'password to use to open data connection to the ldap (eg used to respond to rql queries). Leave empty for anonymous bind.',
- 'group': 'ldap-source', 'level': 1,
- }),
-
- ('user-base-dn',
- {'type' : 'string',
- 'default': '',
- 'help': 'base DN to look up users; disables the user import mechanism if unset',
- 'group': 'ldap-source', 'level': 1,
- }),
- ('user-scope',
- {'type' : 'choice',
- 'default': 'ONELEVEL',
- 'choices': ('BASE', 'ONELEVEL', 'SUBTREE'),
- 'help': 'user search scope (valid values: "BASE", "ONELEVEL", "SUBTREE")',
- 'group': 'ldap-source', 'level': 1,
- }),
- ('user-classes',
- {'type' : 'csv',
- 'default': ('top', 'posixAccount'),
- 'help': 'classes of user (with Active Directory, you want to say "user" here)',
- 'group': 'ldap-source', 'level': 1,
- }),
- ('user-filter',
- {'type': 'string',
- 'default': '',
- 'help': 'additional filters to be set in the ldap query to find valid users',
- 'group': 'ldap-source', 'level': 2,
- }),
- ('user-login-attr',
- {'type' : 'string',
- 'default': 'uid',
- 'help': 'attribute used as login on authentication (with Active Directory, you want to use "sAMAccountName" here)',
- 'group': 'ldap-source', 'level': 1,
- }),
- ('user-default-group',
- {'type' : 'csv',
- 'default': ('users',),
- 'help': 'name of a group in which ldap users will be placed by default. \
-You can set multiple groups by separating them with a comma.',
- 'group': 'ldap-source', 'level': 1,
- }),
- ('user-attrs-map',
- {'type' : 'named',
- 'default': {'uid': 'login', 'gecos': 'email', 'userPassword': 'upassword'},
- 'help': 'map from ldap user attributes to cubicweb attributes (with Active Directory, you want to use sAMAccountName:login,mail:email,givenName:firstname,sn:surname)',
- 'group': 'ldap-source', 'level': 1,
- }),
-
- )
-
- _conn = None
-
- def _entity_update(self, source_entity):
- super(LDAPSourceMixIn, self)._entity_update(source_entity)
- if self.urls:
- if len(self.urls) > 1:
- raise ValidationError(source_entity.eid, {'url': _('can only have one url')})
- try:
- protocol, hostport = self.urls[0].split('://')
- except ValueError:
- raise ValidationError(source_entity.eid, {'url': _('badly formatted url')})
- if protocol not in PROTO_PORT:
- raise ValidationError(source_entity.eid, {'url': _('unsupported protocol')})
-
- def update_config(self, source_entity, typedconfig):
- """update configuration from source entity. `typedconfig` is config
- properly typed with defaults set
- """
- super(LDAPSourceMixIn, self).update_config(source_entity, typedconfig)
- self.authmode = typedconfig['auth-mode']
- self._authenticate = getattr(self, '_auth_%s' % self.authmode)
- self.cnx_dn = typedconfig['data-cnx-dn']
- self.cnx_pwd = typedconfig['data-cnx-password']
- self.user_base_dn = str(typedconfig['user-base-dn'])
- self.user_base_scope = globals()[typedconfig['user-scope']]
- self.user_login_attr = typedconfig['user-login-attr']
- self.user_default_groups = typedconfig['user-default-group']
- self.user_attrs = {'dn': 'eid', 'modifyTimestamp': 'modification_date'}
- self.user_attrs.update(typedconfig['user-attrs-map'])
- self.user_rev_attrs = dict((v, k) for k, v in self.user_attrs.iteritems())
- self.base_filters = [filter_format('(%s=%s)', ('objectClass', o))
- for o in typedconfig['user-classes']]
- if typedconfig['user-filter']:
- self.base_filters.append(typedconfig['user-filter'])
- self._conn = None
-
- def connection_info(self):
- assert len(self.urls) == 1, self.urls
- protocol, hostport = self.urls[0].split('://')
- if protocol != 'ldapi' and not ':' in hostport:
- hostport = '%s:%s' % (hostport, PROTO_PORT[protocol])
- return protocol, hostport
-
- def get_connection(self):
- """open and return a connection to the source"""
- if self._conn is None:
- try:
- self._connect()
- except Exception:
- self.exception('unable to connect to ldap')
- return ConnectionWrapper(self._conn)
-
- def authenticate(self, session, login, password=None, **kwargs):
- """return CWUser eid for the given login/password if this account is
- defined in this source, else raise `AuthenticationError`
-
- two queries are needed since passwords are stored hashed, so we have
- to fetch the salt first
- """
- self.info('ldap authenticate %s', login)
- if not password:
- # On Windows + ADAM this would have succeeded (!!!)
- # You get Authenticated as: 'NT AUTHORITY\ANONYMOUS LOGON'.
- # we really really don't want that
- raise AuthenticationError()
- searchfilter = [filter_format('(%s=%s)', (self.user_login_attr, login))]
- searchfilter.extend(self.base_filters)
- searchstr = '(&%s)' % ''.join(searchfilter)
- # first search the user
- try:
- user = self._search(session, self.user_base_dn,
- self.user_base_scope, searchstr)[0]
- except (IndexError, ldap.SERVER_DOWN):
- # no such user
- raise AuthenticationError()
- # check password by establishing an (unused) connection
- try:
- self._connect(user, password)
- except ldap.LDAPError as ex:
- # Something went wrong, most likely bad credentials
- self.info('while trying to authenticate %s: %s', user, ex)
- raise AuthenticationError()
- except Exception:
- self.error('while trying to authenticate %s', user, exc_info=True)
- raise AuthenticationError()
- eid = self.repo.extid2eid(self, user['dn'], 'CWUser', session, {})
- if eid < 0:
- # user has been moved away from this source
- raise AuthenticationError()
- return eid
-
- def _connect(self, user=None, userpwd=None):
- protocol, hostport = self.connection_info()
- self.info('connecting %s://%s as %s', protocol, hostport,
- user and user['dn'] or 'anonymous')
- # don't require server certificate when using ldaps (this
- # enables self-signed certs)
- ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
- url = LDAPUrl(urlscheme=protocol, hostport=hostport)
- conn = ReconnectLDAPObject(url.initializeUrl())
- # Set the protocol version - version 3 is preferred
- try:
- conn.set_option(ldap.OPT_PROTOCOL_VERSION, ldap.VERSION3)
- except ldap.LDAPError: # Invalid protocol version, fall back safely
- conn.set_option(ldap.OPT_PROTOCOL_VERSION, ldap.VERSION2)
- # Deny auto-chasing of referrals to be safe, we handle them instead
- # Required for AD
- try:
- conn.set_option(ldap.OPT_REFERRALS, 0)
- except ldap.LDAPError: # Cannot set referrals, so do nothing
- pass
- #conn.set_option(ldap.OPT_NETWORK_TIMEOUT, conn_timeout)
- #conn.timeout = op_timeout
- # Now bind with the credentials given. Let exceptions propagate out.
- if user is None:
- # no user specified, we want to initialize the 'data' connection,
- assert self._conn is None
- self._conn = conn
- # XXX always use simple bind for data connection
- if not self.cnx_dn:
- conn.simple_bind_s(self.cnx_dn, self.cnx_pwd)
- else:
- self._authenticate(conn, {'dn': self.cnx_dn}, self.cnx_pwd)
- else:
- # user specified, we want to check user/password, no need to return
- # the connection which will be thrown out
- self._authenticate(conn, user, userpwd)
- return conn
-
- def _auth_simple(self, conn, user, userpwd):
- conn.simple_bind_s(user['dn'], userpwd)
-
- def _auth_cram_md5(self, conn, user, userpwd):
- from ldap import sasl
- auth_token = sasl.cram_md5(user['dn'], userpwd)
- conn.sasl_interactive_bind_s('', auth_token)
-
- def _auth_digest_md5(self, conn, user, userpwd):
- from ldap import sasl
- auth_token = sasl.digest_md5(user['dn'], userpwd)
- conn.sasl_interactive_bind_s('', auth_token)
-
- def _auth_gssapi(self, conn, user, userpwd):
- # print XXX not proper sasl/gssapi
- import kerberos
- if not kerberos.checkPassword(user[self.user_login_attr], userpwd):
- raise Exception('BAD login / password')
- #from ldap import sasl
- #conn.sasl_interactive_bind_s('', sasl.gssapi())
-
- def _search(self, session, base, scope,
- searchstr='(objectClass=*)', attrs=()):
- """make an ldap query"""
- self.debug('ldap search %s %s %s %s %s', self.uri, base, scope,
- searchstr, list(attrs))
- # XXX for now, we do not have connections set support for LDAP, so
- # this is always self._conn
- cnx = self.get_connection().cnx #session.cnxset.connection(self.uri).cnx
- if cnx is None:
- # can't connect to server
- msg = session._("can't connect to source %s, some data may be missing")
- session.set_shared_data('sources_error', msg % self.uri, txdata=True)
- return []
- try:
- res = cnx.search_s(base, scope, searchstr, attrs)
- except ldap.PARTIAL_RESULTS:
- res = cnx.result(all=0)[1]
- except ldap.NO_SUCH_OBJECT:
- self.info('ldap NO SUCH OBJECT %s %s %s', base, scope, searchstr)
- self._process_no_such_object(session, base)
- return []
- # except ldap.REFERRAL as e:
- # cnx = self.handle_referral(e)
- # try:
- # res = cnx.search_s(base, scope, searchstr, attrs)
- # except ldap.PARTIAL_RESULTS:
- # res_type, res = cnx.result(all=0)
- result = []
- for rec_dn, rec_dict in res:
- # When used against Active Directory, "rec_dict" may not
- # be a dictionary in some cases (instead, it can be a list)
- #
- # An example of a useless "res" entry that can be ignored
- # from AD is
- # (None, ['ldap://ForestDnsZones.PORTAL.LOCAL/DC=ForestDnsZones,DC=PORTAL,DC=LOCAL'])
- # This appears to be some sort of internal referral, but
- # we can't handle it, so we need to skip over it.
- try:
- items = rec_dict.iteritems()
- except AttributeError:
- continue
- else:
- itemdict = self._process_ldap_item(rec_dn, items)
- result.append(itemdict)
- self.debug('ldap built results %s', len(result))
- return result
-
- def _process_ldap_item(self, dn, iterator):
- """Turn an ldap received item into a proper dict."""
- itemdict = {'dn': dn}
- for key, value in iterator:
- if self.user_attrs.get(key) == 'upassword': # XXX better password detection
- value = value[0].encode('utf-8')
- # we only support ldap_salted_sha1 for ldap sources, see: server/utils.py
- if not value.startswith('{SSHA}'):
- value = utils.crypt_password(value)
- itemdict[key] = Binary(value)
- elif self.user_attrs.get(key) == 'modification_date':
- itemdict[key] = datetime.strptime(value[0], '%Y%m%d%H%M%SZ')
- else:
- value = [unicode(val, 'utf-8', 'replace') for val in value]
- if len(value) == 1:
- itemdict[key] = value = value[0]
- else:
- itemdict[key] = value
- return itemdict
-
- def _process_no_such_object(self, session, dn):
- """Some search return NO_SUCH_OBJECT error, handle this (usually because
- an object whose dn is no more existent in ldap as been encountered).
-
- Do nothing by default, let sub-classes handle that.
- """
--- a/server/migractions.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/migractions.py Mon Feb 17 15:32:50 2014 +0100
@@ -53,15 +53,11 @@
PURE_VIRTUAL_RTYPES,
CubicWebRelationSchema, order_eschemas)
from cubicweb.cwvreg import CW_EVENT_MANAGER
-from cubicweb.dbapi import get_repository, _repo_connect
+from cubicweb import repoapi
from cubicweb.migration import MigrationHelper, yes
-from cubicweb.server import hook
-try:
- from cubicweb.server import SOURCE_TYPES, schemaserial as ss
- from cubicweb.server.utils import manager_userpasswd
- from cubicweb.server.sqlutils import sqlexec, SQL_PREFIX
-except ImportError: # LAX
- pass
+from cubicweb.server import hook, schemaserial as ss
+from cubicweb.server.utils import manager_userpasswd
+from cubicweb.server.sqlutils import sqlexec, SQL_PREFIX
def mock_object(**params):
@@ -102,7 +98,7 @@
# no config on shell to a remote instance
if config is not None and (cnx or connect):
repo = self.repo
- self.session.data['rebuild-infered'] = False
+ self.session.set_shared_data('rebuild-infered', False)
# register a hook to clear our group_mapping cache and the
# self._synchronized set when some group is added or updated
ClearGroupMap.mih = self
@@ -129,7 +125,7 @@
@cached
def repo_connect(self):
- self.repo = get_repository(config=self.config)
+ self.repo = repoapi.get_repository(config=self.config)
return self.repo
def cube_upgraded(self, cube, version):
@@ -187,18 +183,18 @@
open(backupfile,'w').close() # kinda lock
os.chmod(backupfile, 0600)
# backup
+ source = repo.system_source
tmpdir = tempfile.mkdtemp()
try:
failed = False
- for source in repo.sources:
- try:
- source.backup(osp.join(tmpdir, source.uri), self.confirm, format=format)
- except Exception as ex:
- print '-> error trying to backup %s [%s]' % (source.uri, ex)
- if not self.confirm('Continue anyway?', default='n'):
- raise SystemExit(1)
- else:
- failed = True
+ try:
+ source.backup(osp.join(tmpdir, source.uri), self.confirm, format=format)
+ except Exception as ex:
+ print '-> error trying to backup %s [%s]' % (source.uri, ex)
+ if not self.confirm('Continue anyway?', default='n'):
+ raise SystemExit(1)
+ else:
+ failed = True
with open(osp.join(tmpdir, 'format.txt'), 'w') as format_file:
format_file.write('%s\n' % format)
with open(osp.join(tmpdir, 'versions.txt'), 'w') as version_file:
@@ -217,8 +213,7 @@
finally:
shutil.rmtree(tmpdir)
- def restore_database(self, backupfile, drop=True, systemonly=True,
- askconfirm=True, format='native'):
+ def restore_database(self, backupfile, drop=True, askconfirm=True, format='native'):
# check
if not osp.exists(backupfile):
raise ExecutionError("Backup file %s doesn't exist" % backupfile)
@@ -247,15 +242,13 @@
format = written_format
self.config.init_cnxset_pool = False
repo = self.repo_connect()
- for source in repo.sources:
- if systemonly and source.uri != 'system':
- continue
- try:
- source.restore(osp.join(tmpdir, source.uri), self.confirm, drop, format)
- except Exception as exc:
- print '-> error trying to restore %s [%s]' % (source.uri, exc)
- if not self.confirm('Continue anyway?', default='n'):
- raise SystemExit(1)
+ source = repo.system_source
+ try:
+ source.restore(osp.join(tmpdir, source.uri), self.confirm, drop, format)
+ except Exception as exc:
+ print '-> error trying to restore %s [%s]' % (source.uri, exc)
+ if not self.confirm('Continue anyway?', default='n'):
+ raise SystemExit(1)
shutil.rmtree(tmpdir)
# call hooks
repo.init_cnxset_pool()
@@ -268,15 +261,14 @@
try:
return self._cnx
except AttributeError:
- sourcescfg = self.repo.config.sources()
try:
- login = sourcescfg['admin']['login']
- pwd = sourcescfg['admin']['password']
+ login = self.repo.config.default_admin_config['login']
+ pwd = self.repo.config.default_admin_config['password']
except KeyError:
login, pwd = manager_userpasswd()
while True:
try:
- self._cnx = _repo_connect(self.repo, login, password=pwd)
+ self._cnx = repoapi.connect(self.repo, login, password=pwd)
if not 'managers' in self._cnx.user(self.session).groups:
print 'migration needs an account in the managers group'
else:
@@ -292,7 +284,7 @@
print 'aborting...'
sys.exit(0)
self.session.keep_cnxset_mode('transaction')
- self.session.data['rebuild-infered'] = False
+ self.session.set_shared_data('rebuild-infered', False)
return self._cnx
@property
@@ -335,7 +327,7 @@
'schema': self.repo.get_schema(),
'cnx': self.cnx,
'fsschema': self.fs_schema,
- 'session' : self.session,
+ 'session' : self.cnx._cnx,
'repo' : self.repo,
})
return context
@@ -343,12 +335,12 @@
@cached
def group_mapping(self):
"""cached group mapping"""
- return ss.group_mapping(self._cw)
+ return ss.group_mapping(self.cnx)
@cached
def cstrtype_mapping(self):
"""cached constraint types mapping"""
- return ss.cstrtype_mapping(self._cw)
+ return ss.cstrtype_mapping(self.cnx)
def cmd_exec_event_script(self, event, cube=None, funcname=None,
*args, **kwargs):
@@ -817,7 +809,7 @@
groupmap = self.group_mapping()
cstrtypemap = self.cstrtype_mapping()
# register the entity into CWEType
- execute = self._cw.execute
+ execute = self.cnx.execute
ss.execschemarql(execute, eschema, ss.eschema2rql(eschema, groupmap))
# add specializes relation if needed
specialized = eschema.specializes()
@@ -998,7 +990,7 @@
hook.CleanupDeletedEidsCacheOp.get_instance(session).union(thispending)
# and don't forget to remove record from system tables
entities = [session.entity_from_eid(eid, rdeftype) for eid in thispending]
- self.repo.system_source.delete_info_multi(session, entities, 'system')
+ self.repo.system_source.delete_info_multi(session, entities)
self.sqlexec('DELETE FROM cw_%s WHERE cw_from_entity=%%(eid)s OR '
'cw_to_entity=%%(eid)s' % rdeftype,
{'eid': oldeid}, ask_confirm=False)
@@ -1046,7 +1038,7 @@
"""
reposchema = self.repo.schema
rschema = self.fs_schema.rschema(rtype)
- execute = self._cw.execute
+ execute = self.cnx.execute
if rtype in reposchema:
print 'warning: relation type %s is already known, skip addition' % (
rtype)
@@ -1118,7 +1110,7 @@
subjtype, rtype, objtype)
return
rdef = self._get_rdef(rschema, subjtype, objtype)
- ss.execschemarql(self._cw.execute, rdef,
+ ss.execschemarql(self.cnx.execute, rdef,
ss.rdef2rql(rdef, self.cstrtype_mapping(),
self.group_mapping()))
if commit:
@@ -1345,14 +1337,6 @@
# other data migration commands ###########################################
- @property
- def _cw(self):
- session = self.session
- if session is not None:
- session.set_cnxset()
- return session
- return self.cnx.request()
-
def cmd_storage_changed(self, etype, attribute):
"""migrate entities to a custom storage. The new storage is expected to
be set, it will be temporarily removed for the migration.
@@ -1376,14 +1360,14 @@
def cmd_create_entity(self, etype, commit=False, **kwargs):
"""add a new entity of the given type"""
- entity = self._cw.create_entity(etype, **kwargs)
+ entity = self.cnx.create_entity(etype, **kwargs)
if commit:
self.commit()
return entity
def cmd_find_entities(self, etype, **kwargs):
"""find entities of the given type and attribute values"""
- return self._cw.find_entities(etype, **kwargs)
+ return self.cnx.find_entities(etype, **kwargs)
def cmd_find_one_entity(self, etype, **kwargs):
"""find one entity of the given type and attribute values.
@@ -1391,7 +1375,7 @@
raise :exc:`cubicweb.req.FindEntityError` if can not return one and only
one entity.
"""
- return self._cw.find_one_entity(etype, **kwargs)
+ return self.cnx.find_one_entity(etype, **kwargs)
def cmd_update_etype_fti_weight(self, etype, weight):
if self.repo.system_source.dbdriver == 'postgres':
@@ -1450,7 +1434,7 @@
"""
if not ask_confirm or self.confirm('Execute sql: %s ?' % sql):
try:
- cu = self.session.system_sql(sql, args)
+ cu = self.cnx._cnx.system_sql(sql, args)
except Exception:
ex = sys.exc_info()[1]
if self.confirm('Error: %s\nabort?' % ex, pdb=True):
@@ -1468,7 +1452,7 @@
if not isinstance(rql, (tuple, list)):
rql = ( (rql, kwargs), )
res = None
- execute = self._cw.execute
+ execute = self.cnx.execute
for rql, kwargs in rql:
if kwargs:
msg = '%s (%s)' % (rql, kwargs)
@@ -1504,7 +1488,7 @@
self.sqlexec(sql, ask_confirm=False)
dbhelper = self.repo.system_source.dbhelper
sqltype = dbhelper.TYPE_MAPPING[newtype]
- cursor = self.session.cnxset[self.repo.system_source.uri]
+ cursor = self.session.cnxset.cu
dbhelper.change_col_type(cursor, 'cw_%s' % etype, 'cw_%s' % attr, sqltype, allownull)
if commit:
self.commit()
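
The migration helper now obtains its repository and connection through
cubicweb.repoapi rather than cubicweb.dbapi, as the hunks above show. A
minimal sketch of the new flow; the config object and credentials are
assumptions for illustration:

    from cubicweb import repoapi

    def open_migration_cnx(config, login, password):
        # repoapi.get_repository() replaces dbapi.get_repository()
        repo = repoapi.get_repository(config=config)
        # repoapi.connect() replaces dbapi._repo_connect()
        cnx = repoapi.connect(repo, login, password=password)
        return repo, cnx
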
--- a/server/msplanner.py Mon Feb 17 11:13:27 2014 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1822 +0,0 @@
-# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
-# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
-#
-# This file is part of CubicWeb.
-#
-# CubicWeb is free software: you can redistribute it and/or modify it under the
-# terms of the GNU Lesser General Public License as published by the Free
-# Software Foundation, either version 2.1 of the License, or (at your option)
-# any later version.
-#
-# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
-# details.
-#
-# You should have received a copy of the GNU Lesser General Public License along
-# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""plan execution of rql queries on multiple sources
-
-the best way to understand what we are trying to achieve here is to read
-the unit-tests in unittest_msplanner.py
-
-
-What you need to know
-~~~~~~~~~~~~~~~~~~~~~
-1. The system source is expected to support every entity and relation type
-
-2. Given "X relation Y":
-
- * if the relation and the X and Y types are supported by the external
- source, we suppose by default that X and Y should both come from the same
- source as the relation. You can specify otherwise by adding the relation
- to the "cross_relations" set in the source's mapping file; in that case,
- we'll consider that we can also find in the system source some relation
- between X and Y coming from different sources.
-
- * if "relation" isn't supported by the external source but X or Y
- types (or both) are, we suppose by default that can find in the system
- source some relation where X and/or Y come from the external source. You
- can specify otherwise by adding relation into the "dont_cross_relations"
- set in the source's mapping file and it that case, we'll consider that we
- can only find in the system source some relation between X and Y coming
- the system source.
-
-
-Implementation
-~~~~~~~~~~~~~~
-XXX explain algorithm
-
-
-Examples of multi-source query execution
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-For a system source and an ldap user source (only CWUser and its attributes
-are supported, no groups or such):
-
-:CWUser X:
-1. fetch CWUser X from both sources and return concatenation of results
-
-:CWUser X WHERE X in_group G, G name 'users':
-* catch 1
- 1. fetch CWUser X from both sources, store concatenation of results into a
- temporary table
- 2. return the result of TMP X WHERE X in_group G, G name 'users' from the
- system source
-* catch 2
- 1. return the result of CWUser X WHERE X in_group G, G name 'users' from system
- source, that's enough (optimization of the sql querier will avoid join on
- CWUser, so we will directly get local eids)
-
-:CWUser X,L WHERE X in_group G, X login L, G name 'users':
-1. fetch Any X,L WHERE X is CWUser, X login L from both sources, store
- concatenation of results into a temporary table
-2. return the result of Any X, L WHERE X is TMP, X login L, X in_group G,
- G name 'users' from the system source
-
-
-:Any X WHERE X owned_by Y:
-* catch 1
- 1. fetch CWUser X from both sources, store concatenation of results into a
- temporary table
- 2. return the result of Any X WHERE X owned_by Y, Y is TMP from the system
- source
-* catch 2
- 1. return the result of Any X WHERE X owned_by Y from system source, that's
- enough (optimization of the sql querier will avoid join on CWUser, so we
- will directly get local eids)
-"""
-
-__docformat__ = "restructuredtext en"
-
-from itertools import imap, ifilterfalse
-
-from logilab.common.compat import any
-from logilab.common.decorators import cached
-from logilab.common.deprecation import deprecated
-
-from rql import BadRQLQuery
-from rql.stmts import Union, Select
-from rql.nodes import (VariableRef, Comparison, Relation, Constant, Variable,
- Not, Exists, SortTerm, Function)
-
-from cubicweb import server
-from cubicweb.utils import make_uid
-from cubicweb.rqlrewrite import add_types_restriction, cleanup_solutions
-from cubicweb.server.ssplanner import SSPlanner, OneFetchStep
-from cubicweb.server.mssteps import *
-
-Variable._ms_table_key = lambda x: x.name
-Relation._ms_table_key = lambda x: x.r_type
-# str() Constant.value to ensure generated table name won't be unicode
-Constant._ms_table_key = lambda x: str(x.value)
-
-Variable._ms_may_be_processed = lambda x, terms, linkedterms: any(
- t for t in terms if t in linkedterms.get(x, ()))
-Relation._ms_may_be_processed = lambda x, terms, linkedterms: all(
- getattr(hs, 'variable', hs) in terms for hs in x.get_variable_parts())
-
-def ms_scope(term):
- rel = None
- scope = term.scope
- if isinstance(term, Variable) and len(term.stinfo['relations']) == 1:
- rel = iter(term.stinfo['relations']).next().relation()
- elif isinstance(term, Constant):
- rel = term.relation()
- elif isinstance(term, Relation):
- rel = term
- if rel is not None and (
- rel.r_type != 'identity' and rel.scope is scope
- and isinstance(rel.parent, Exists) and rel.parent.neged(strict=True)):
- return scope.parent.scope
- return scope
-
-def need_intersect(select, getrschema):
- for rel in select.iget_nodes(Relation):
- if isinstance(rel.parent, Exists) and rel.parent.neged(strict=True) and not rel.is_types_restriction():
- rschema = getrschema(rel.r_type)
- if not rschema.final:
- # if one of the relation's variable is ambiguous but not
- # invariant, an intersection will be necessary
- for vref in rel.get_nodes(VariableRef):
- var = vref.variable
- if (var.valuable_references() == 1
- and len(var.stinfo['possibletypes']) > 1):
- return True
- return False
-
-def neged_relation(rel):
- parent = rel.parent
- return isinstance(parent, Not) or (isinstance(parent, Exists) and
- isinstance(parent.parent, Not))
-
-def need_source_access_relation(vargraph):
- if not vargraph:
- return False
- # check vargraph contains some relation other than the identity relation
- # test the key's nature since it may be a variable name (we don't care
- # about those) or a 2-tuple (var1, var2) associated with the relation to
- # traverse to go from var1 to var2
- return any(key for key, val in vargraph.iteritems()
- if isinstance(key, tuple) and val != 'identity')
-
-def need_aggr_step(select, sources, stepdefs=None):
- """return True if a temporary table is necessary to store some partial
- results to execute the given query
- """
- if len(sources) == 1:
- # can do everything at once with a single source
- return False
- if select.orderby or select.groupby or select.has_aggregat:
- # if more than one source, we need a temp table to deal with sort /
- # groups / aggregat if :
- # * the rqlst won't be split (in the other case the last query
- # using partial temporary table can do sort/groups/aggregat without
- # the need for a later AggrStep)
- # * the rqlst is split into multiple steps and there is more than one
- # final step
- if stepdefs is None:
- return True
- has_one_final = False
- fstepsolindices = set()
- for stepdef in stepdefs:
- if stepdef[-1]:
- if has_one_final or frozenset(stepdef[2]) != fstepsolindices:
- return True
- has_one_final = True
- else:
- fstepsolindices.update(stepdef[2])
- return False
-
-def select_group_sort(select): # XXX something similar done in rql2sql
- # add variables used in groups and sort terms to the selection
- # if necessary
- if select.groupby:
- for vref in select.groupby:
- if not vref in select.selection:
- select.append_selected(vref.copy(select))
- for sortterm in select.orderby:
- for vref in sortterm.iget_nodes(VariableRef):
- if not vref in select.get_selected_variables():
- # we can't directly insert sortterm.term because it references
- # a variable of the select before the copy.
- # XXX if constant terms are used to define sort, their value
- # may necessitate a decay
- select.append_selected(vref.copy(select))
- if select.groupby and not vref in select.groupby:
- select.add_group_var(vref.copy(select))
-
-def allequals(solutions):
- """return true if all solutions are identical"""
- sol = solutions.next()
- noconstsol = None
- for sol_ in solutions:
- if sol_ != sol:
- return False
- return True
-
-# XXX move functions below to rql ##############################################
-
-def is_ancestor(n1, n2):
- """return True if n2 is a parent scope of n1"""
- p = n1.parent
- while p is not None:
- if p is n2:
- return True
- p = p.parent
- return False
-
-def copy_node(newroot, node, subparts=()):
- newnode = node.__class__(*node.initargs(newroot))
- for part in subparts:
- newnode.append(part)
- return newnode
-
-def used_in_outer_scope(var, scope):
- """return true if the variable is used in an outer scope of the given scope
- """
- for rel in var.stinfo['relations']:
- rscope = ms_scope(rel)
- if not rscope is scope and is_ancestor(scope, rscope):
- return True
- return False
-
-################################################################################
-
-class PartPlanInformation(object):
- """regroups necessary information to execute some part of a "global" rql
- query ("global" means as received by the querier, which may result in
- several internal queries, e.g. parts, due to security insertions). Actually
- a PPI is created for each subquery and for each query in a union.
-
- It exposes as well some methods helping in executing this part on a
- multi-sources repository, modifying its internal structure during the
- process.
-
- :attr plan:
- the execution plan
- :attr rqlst:
- the original rql syntax tree handled by this part
-
- :attr needsplit:
- bool telling if the query has to be split into multiple steps for
- execution or if it can be executed at once
-
- :attr temptable:
- a SQL temporary table name or None, if necessary to handle aggregate /
- sorting for this part of the query
-
- :attr finaltable:
- a SQL table name or None, if results for this part of the query should be
- written into a temporary table (usually shared by multiple PPI)
-
- :attr sourcesterms:
- a dictionary {source : {term: set([solution index, ])}} telling for each
- source which terms are supported for which solutions. A "term" may be
- either a rql Variable, Constant or Relation node.
- """
- def __init__(self, plan, rqlst, rqlhelper=None):
- self.plan = plan
- self.rqlst = rqlst
- self.needsplit = False
- self.temptable = None
- self.finaltable = None
- # shortcuts
- self._schema = plan.schema
- self._session = plan.session
- self._repo = self._session.repo
- self._solutions = rqlst.solutions
- self._solindices = range(len(self._solutions))
- self.system_source = self._repo.system_source
- # source : {term: [solution index, ]}
- self.sourcesterms = self._sourcesterms = {}
- # source : {relation: set(child variable and constant)}
- self._crossrelations = {}
- # term : set(sources)
- self._discarded_sources = {}
- # dictionary of variables and constants which are linked to each other
- # using a non final relation supported by multiple sources (crossed or
- # not).
- self._linkedterms = {}
- # processing
- termssources = self._compute_sourcesterms()
- self._remove_invalid_sources(termssources)
- self._compute_needsplit()
- # after initialisation, .sourcesterms contains the same thing as
- # ._sourcesterms though during plan construction, ._sourcesterms will
- # be modified while .sourcesterms will be kept unmodified
- self.sourcesterms = {}
- for k, v in self._sourcesterms.iteritems():
- self.sourcesterms[k] = {}
- for k2, v2 in v.iteritems():
- self.sourcesterms[k][k2] = v2.copy()
- # cleanup linked var
- for var, linkedrelsinfo in self._linkedterms.iteritems():
- self._linkedterms[var] = frozenset(x[0] for x in linkedrelsinfo)
- # map output of a step to input of a following step
- self._inputmaps = {}
- # record input map conflicts to resolve them on final step generation
- self._conflicts = []
- if rqlhelper is not None: # else test
- self._insert_identity_variable = rqlhelper._annotator.rewrite_shared_optional
- if server.DEBUG & server.DBG_MS:
- print 'sourcesterms:'
- self._debug_sourcesterms()
-
- def _debug_sourcesterms(self):
- for source in self._sourcesterms:
- print '-', source
- for term, sols in self._sourcesterms[source].items():
- print ' -', term, id(term), ':', sols
-
- def copy_solutions(self, solindices):
- return [self._solutions[solidx].copy() for solidx in solindices]
-
- @property
- @cached
- def part_sources(self):
- if self._sourcesterms:
- return tuple(sorted(self._sourcesterms))
- return (self.system_source,)
-
- @property
- @cached
- def _sys_source_set(self):
- return frozenset((self.system_source, solindex)
- for solindex in self._solindices)
-
- @cached
- def _norel_support_set(self, relation):
- """return a set of (source, solindex) where source doesn't support the
- relation
- """
- return frozenset((source, solidx) for source in self._repo.sources
- for solidx in self._solindices
- if not ((source.support_relation(relation.r_type))
- or relation.r_type in source.dont_cross_relations))
-
- def _compute_sourcesterms(self):
- """compute for each term (variable, rewritten constant, relation) and
- for each solution in the rqlst which sources support them
- """
- repo = self._repo
- eschema = self._schema.eschema
- sourcesterms = self._sourcesterms
- # find for each source which variable/solution are supported
- for varname, varobj in self.rqlst.defined_vars.items():
- # if variable has an eid specified, we can get its source directly
- # NOTE: use uidrel and not constnode to deal with "X eid IN(1,2,3,4)"
- if varobj.stinfo['uidrel'] is not None:
- rel = varobj.stinfo['uidrel']
- hasrel = len(varobj.stinfo['relations']) > 1
- for const in rel.children[1].get_nodes(Constant):
- eid = const.eval(self.plan.args)
- source = self._session.source_from_eid(eid)
- if (source is self.system_source
- or (hasrel and varobj._q_invariant and
- not any(source.support_relation(r.r_type)
- for r in varobj.stinfo['relations']
- if not r is rel))):
- self._set_source_for_term(self.system_source, varobj)
- else:
- self._set_source_for_term(source, varobj)
- continue
- rels = varobj.stinfo['relations']
- if not rels and varobj.stinfo['typerel'] is None:
- # (rare) case where the variable has no type specified nor
- # relation accessed, e.g. "Any MAX(X)"
- self._set_source_for_term(self.system_source, varobj)
- continue
- for i, sol in enumerate(self._solutions):
- vartype = sol[varname]
- # skip final variable
- if eschema(vartype).final:
- break
- for source in repo.sources:
- if source.support_entity(vartype):
- # the source support the entity type, though we will
- # actually have to fetch from it only if
- # * the variable isn't invariant
- # * at least one supported relation specified
- if not varobj._q_invariant or \
- any(imap(source.support_relation,
- (r.r_type for r in rels if r.r_type not in ('identity', 'eid')))):
- sourcesterms.setdefault(source, {}).setdefault(varobj, set()).add(i)
- # if variable is not invariant and is used by a relation
- # not supported by this source, we'll have to split the
- # query
- if not varobj._q_invariant and any(ifilterfalse(
- source.support_relation, (r.r_type for r in rels))):
- self.needsplit = True
- # add source for rewritten constants to sourcesterms
- self._const_vars = {}
- for vconsts in self.rqlst.stinfo['rewritten'].itervalues():
- # remember those consts come from the same variable
- for const in vconsts:
- self._const_vars[const] = vconsts
- source = self._session.source_from_eid(const.eval(self.plan.args))
- if source is self.system_source:
- for const in vconsts:
- self._set_source_for_term(source, const)
- elif not self._sourcesterms:
- for const in vconsts:
- self._set_source_for_term(source, const)
- elif source in self._sourcesterms:
- source_scopes = frozenset(ms_scope(t) for t in self._sourcesterms[source])
- for const in vconsts:
- if ms_scope(const) in source_scopes:
- self._set_source_for_term(source, const)
- # if system source is used, add every rewritten constant
- # to its supported terms even when associated entity
- # doesn't actually come from it, so that
- # allequals will return True as expected when computing
- # needsplit
- # check const is used in a relation restriction
- if const.relation() and self.system_source in sourcesterms:
- self._set_source_for_term(self.system_source, const)
- # add source for relations
- rschema = self._schema.rschema
- termssources = {}
- sourcerels = []
- for rel in self.rqlst.iget_nodes(Relation):
- # process non final relations only
- # note: don't try to get schema for 'is' relation (not available
- # during bootstrap)
- if rel.r_type == 'cw_source':
- sourcerels.append(rel)
- if not (rel.is_types_restriction() or rschema(rel.r_type).final):
- # nothing to do if relation is not supported by multiple sources
- # or if some source has it listed in its cross_relations
- # attribute
- #
- # XXX code below doesn't deal with the case where some source
- # allows relation crossing but another one doesn't
- relsources = [s for s in repo.rel_type_sources(rel.r_type)
- if s is self.system_source
- or s in self._sourcesterms]
- if len(relsources) < 2:
- # filter out sources being there because they have this
- # relation in their dont_cross_relations attribute
- relsources = [source for source in relsources
- if source.support_relation(rel.r_type)]
- if relsources:
- # this means the relation is using a variable inlined as
- # a constant and another unsupported variable, in which
- # case we put the relation in sourcesterms
- self._sourcesterms.setdefault(relsources[0], {})[rel] = set(self._solindices)
- continue
- lhs, rhs = rel.get_variable_parts()
- lhsv, rhsv = getattr(lhs, 'variable', lhs), getattr(rhs, 'variable', rhs)
- # update dictionary of sources supporting lhs and rhs vars
- if not lhsv in termssources:
- termssources[lhsv] = self._term_sources(lhs)
- if not rhsv in termssources:
- termssources[rhsv] = self._term_sources(rhs)
- self._handle_cross_relation(rel, relsources, termssources)
- self._linkedterms.setdefault(lhsv, set()).add((rhsv, rel))
- self._linkedterms.setdefault(rhsv, set()).add((lhsv, rel))
- # extract information from cw_source relation
- for srel in sourcerels:
- vref = srel.children[1].children[0]
- sourceeids, sourcenames = [], []
- if isinstance(vref, Constant):
- # simplified variable
- sourceeids = None, (vref.eval(self.plan.args),)
- var = vref
- else:
- var = vref.variable
- for rel in var.stinfo['relations'] - var.stinfo['rhsrelations']:
- # skip neged eid relation since it's the kind of query
- # generated when clearing the old value of a '?1' relation,
- # cw_source included. See
- # unittest_ldapuser.test_copy_to_system_source
- if rel.r_type == 'name' or \
- (rel.r_type == 'eid' and not rel.neged(strict=True)):
- if rel.r_type == 'eid':
- slist = sourceeids
- else:
- slist = sourcenames
- sources = [cst.eval(self.plan.args)
- for cst in rel.children[1].get_nodes(Constant)]
- if sources:
- if slist:
- # don't attempt to do anything
- sourcenames = sourceeids = None
- break
- slist[:] = (rel, sources)
- if sourceeids:
- rel, values = sourceeids
- sourcesdict = self._repo.sources_by_eid
- elif sourcenames:
- rel, values = sourcenames
- sourcesdict = self._repo.sources_by_uri
- else:
- sourcesdict = None
- if sourcesdict is not None:
- lhs = srel.children[0]
- try:
- sources = [sourcesdict[key] for key in values]
- except KeyError:
- raise BadRQLQuery('source conflict for term %s' % lhs.as_string())
- if isinstance(lhs, Constant):
- source = self._session.source_from_eid(lhs.eval(self.plan.args))
- if not source in sources:
- raise BadRQLQuery('source conflict for term %s' % lhs.as_string())
- else:
- lhs = getattr(lhs, 'variable', lhs)
- invariant = getattr(lhs, '_q_invariant', False)
- # XXX NOT NOT
- neged = srel.neged(traverse_scope=True) or (rel and rel.neged(strict=True))
- has_copy_based_source = False
- sources_ = []
- for source in sources:
- if source.copy_based_source:
- has_copy_based_source = True
- if not self.system_source in sources_:
- sources_.append(self.system_source)
- else:
- sources_.append(source)
- sources = sources_
- if neged:
- for source in sources:
- if invariant and source is self.system_source:
- continue
- self._remove_source_term(source, lhs)
- self._discarded_sources.setdefault(lhs, set()).add(source)
- usesys = self.system_source not in sources
- else:
- for source, terms in sourcesterms.items():
- if lhs in terms and not source in sources:
- if invariant and source is self.system_source:
- continue
- self._remove_source_term(source, lhs)
- self._discarded_sources.setdefault(lhs, set()).add(source)
- usesys = self.system_source in sources
- if rel is None or (len(var.stinfo['relations']) == 2 and
- not var.stinfo['selected']):
- self._remove_source_term(self.system_source, var)
- if not (has_copy_based_source or len(sources) > 1
- or usesys or invariant):
- if rel is None:
- srel.parent.remove(srel)
- else:
- self.rqlst.undefine_variable(var)
- self._remove_source_term(self.system_source, srel)
- return termssources
-
- def _handle_cross_relation(self, rel, relsources, termssources):
- for source in relsources:
- if rel.r_type in source.cross_relations:
- ssource = self.system_source
- crossvars = set(x.variable for x in rel.get_nodes(VariableRef))
- for const in rel.get_nodes(Constant):
- if source.uri != 'system' and not const in self._sourcesterms.get(source, ()):
- continue
- crossvars.add(const)
- self._crossrelations.setdefault(source, {})[rel] = crossvars
- if len(crossvars) < 2:
- # this means there is a constant in the relation which is
- # not supported by the source, so we can stop here
- continue
- self._sourcesterms.setdefault(ssource, {})[rel] = set(self._solindices)
- solindices = None
- for term in crossvars:
- if len(termssources[term]) == 1 and iter(termssources[term]).next()[0].uri == 'system':
- for ov in crossvars:
- if ov is not term and (isinstance(ov, Constant) or ov._q_invariant):
- ssset = frozenset((ssource,))
- self._remove_sources(ov, termssources[ov] - ssset)
- break
- if solindices is None:
- solindices = set(sol for s, sol in termssources[term]
- if s is source)
- else:
- solindices &= set(sol for s, sol in termssources[term]
- if s is source)
- else:
- self._sourcesterms.setdefault(source, {})[rel] = solindices
-
- def _remove_invalid_sources(self, termssources):
- """removes invalid sources from `sourcesterms` member according to
- traversed relations and their properties (which sources support them,
- can they cross sources, etc...)
- """
- for term in self._linkedterms:
- self._remove_sources_until_stable(term, termssources)
- if len(self._sourcesterms) > 1 and hasattr(self.plan.rqlst, 'main_relations'):
- # the querier doesn't annotate write queries, need to do it here
- self.plan.annotate_rqlst()
- # insert/update/delete queries, we may get extra information from
- # the main relation (eg relations to the left of the WHERE
- if self.plan.rqlst.TYPE == 'insert':
- inserted = dict((vref.variable, etype)
- for etype, vref in self.plan.rqlst.main_variables)
- else:
- inserted = {}
- repo = self._repo
- rschema = self._schema.rschema
- for rel in self.plan.rqlst.main_relations:
- if not rschema(rel.r_type).final:
- # nothing to do if relation is not supported by multiple sources
- if len(repo.rel_type_sources(rel.r_type)) < 2:
- continue
- lhs, rhs = rel.get_variable_parts()
- try:
- lhsv = self._extern_term(lhs, termssources, inserted)
- rhsv = self._extern_term(rhs, termssources, inserted)
- except KeyError:
- continue
- self._remove_term_sources(lhsv, rel, rhsv, termssources)
- self._remove_term_sources(rhsv, rel, lhsv, termssources)
-
- def _extern_term(self, term, termssources, inserted):
- var = term.variable
- if var.stinfo['constnode']:
- termv = var.stinfo['constnode']
- termssources[termv] = self._term_sources(termv)
- elif var in inserted:
- termv = var
- source = self._repo.locate_etype_source(inserted[var])
- termssources[termv] = set((source, solindex)
- for solindex in self._solindices)
- else:
- termv = self.rqlst.defined_vars[var.name]
- if not termv in termssources:
- termssources[termv] = self._term_sources(termv)
- return termv
-
- def _remove_sources_until_stable(self, term, termssources):
- sourcesterms = self._sourcesterms
- for oterm, rel in self._linkedterms.get(term, ()):
- tscope = ms_scope(term)
- otscope = ms_scope(oterm)
- rscope = ms_scope(rel)
- if not tscope is otscope and rscope.neged(strict=True):
- # can't get information from relation inside a NOT exists
- # where terms don't belong to the same scope
- continue
- need_ancestor_scope = False
- if not (tscope is rscope and otscope is rscope):
- if rel.ored():
- continue
- if rel.ored(traverse_scope=True):
- # if relation has some OR as parent, constraints should only
- # propagate from parent scope to child scope, nothing else
- need_ancestor_scope = True
- relsources = self._repo.rel_type_sources(rel.r_type)
- if neged_relation(rel) and (
- len(relsources) < 2
- or not isinstance(oterm, Variable)
- or oterm.valuable_references() != 1
- or any(sourcesterms[source][term] != sourcesterms[source][oterm]
- for source in relsources
- if term in sourcesterms.get(source, ())
- and oterm in sourcesterms.get(source, ()))):
- # a neged relation doesn't allow us to infer term sources unless
- # we're on a multisource relation for a term only used by this
- # relation (eg "Any X WHERE NOT X multisource_rel Y" where oterm is
- # Y)
- continue
- # compute invalid sources for terms and remove them
- if not need_ancestor_scope or is_ancestor(tscope, otscope):
- self._remove_term_sources(term, rel, oterm, termssources)
- if not need_ancestor_scope or is_ancestor(otscope, tscope):
- self._remove_term_sources(oterm, rel, term, termssources)
-
- def _remove_term_sources(self, term, rel, oterm, termssources):
- """remove invalid sources for term according to oterm's sources and the
- relation between those two terms.
- """
- norelsup = self._norel_support_set(rel)
- termsources = termssources[term]
- invalid_sources = termsources - (termssources[oterm] | norelsup)
- if invalid_sources and self._repo.can_cross_relation(rel.r_type):
- invalid_sources -= self._sys_source_set
- if invalid_sources and isinstance(term, Variable) \
- and self._need_ext_source_access(term, rel):
- # if the term is a not invariant variable, we should filter out
- # source where the relation is a cross relation from invalid
- # sources
- invalid_sources = frozenset((s, solidx) for s, solidx in invalid_sources
- if not (s in self._crossrelations and
- rel in self._crossrelations[s]))
- if invalid_sources:
- self._remove_sources(term, invalid_sources)
- discarded = self._discarded_sources.get(term)
- if discarded is not None and not any(x[0] for x in (termsources-invalid_sources)
- if not x[0] in discarded):
- raise BadRQLQuery('relation %s cannot be crossed but %s and %s should '
- 'come from different sources' %
- (rel.r_type, term.as_string(), oterm.as_string()))
- # if term is a rewritten const, we can apply the same changes to
- # all other consts inserted from the same original variable
- for const in self._const_vars.get(term, ()):
- if const is not term:
- self._remove_sources(const, invalid_sources)
- termsources -= invalid_sources
- self._remove_sources_until_stable(term, termssources)
- if isinstance(oterm, Constant):
- self._remove_sources(oterm, invalid_sources)
-
- def _compute_needsplit(self):
- """tell according to sourcesterms if the rqlst has to be splitted for
- execution among multiple sources
-
- the execution has to be split if
- * a source support an entity (non invariant) but doesn't support a
- relation on it
- * a source support an entity which is accessed by an optional relation
- * there is more than one source and either all sources' supported
- variable/solutions are not equivalent or multiple variables have to
- be fetched from some source
- """
- # NOTE: < 2 since may be 0 on queries such as Any X WHERE X eid 2
- if len(self._sourcesterms) < 2:
- self.needsplit = False
- # if this is not the system source but we have only constant terms
- # and no relation (other than eid), apply query on the system source
- #
- # testing for rqlst with nothing in vargraph nor defined_vars is the
- # simplest way to check the condition explained below
- if not self.system_source in self._sourcesterms and \
- not self.rqlst.defined_vars and \
- not need_source_access_relation(self.rqlst.vargraph):
- self._sourcesterms = {self.system_source: {}}
- elif not self.needsplit:
- if not allequals(self._sourcesterms.itervalues()):
- for source, terms in self._sourcesterms.iteritems():
- if source is self.system_source:
- continue
- if any(x for x in terms if not isinstance(x, Constant)):
- self.needsplit = True
- return
- self._sourcesterms = {self.system_source: {}}
- self.needsplit = False
- else:
- sample = self._sourcesterms.itervalues().next()
- if len(sample) > 1:
- for term in sample:
- # need split if unlinked variable
- if isinstance(term, Variable) and not term in self._linkedterms:
- self.needsplit = True
- break
- else:
- # need split if there are some cross relation on non
- # invariant variable or if the variable is used in
- # multi-sources relation
- if self._crossrelations:
- for reldict in self._crossrelations.itervalues():
- for rel, terms in reldict.iteritems():
- for term in terms:
- if isinstance(term, Variable) \
- and self._need_ext_source_access(term, rel):
- self.needsplit = True
- return
- else:
- # remove sources only accessing to constant nodes
- for source, terms in self._sourcesterms.items():
- if source is self.system_source:
- continue
- if not any(x for x in terms if not isinstance(x, Constant)):
- del self._sourcesterms[source]
- if len(self._sourcesterms) < 2:
- self.needsplit = False
-
- @cached
- def _need_ext_source_access(self, var, rel):
- if not var._q_invariant:
- return True
- if any(r for x, r in self._linkedterms[var]
- if not r is rel and self._repo.is_multi_sources_relation(r.r_type)):
- return True
- return False
-
- def _set_source_for_term(self, source, term):
- self._sourcesterms.setdefault(source, {})[term] = set(self._solindices)
-
- def _term_sources(self, term):
- """returns possible sources for terms `term`"""
- if isinstance(term, Constant):
- source = self._session.source_from_eid(term.eval(self.plan.args))
- return set((source, solindex) for solindex in self._solindices)
- else:
- var = getattr(term, 'variable', term)
- sources = [source for source, varobjs in self.sourcesterms.iteritems()
- if var in varobjs]
- return set((source, solindex) for source in sources
- for solindex in self.sourcesterms[source][var])
-
- def _remove_sources(self, term, sources):
- """removes invalid sources (`sources`) from `sourcesterms`
-
- :param sources: the list of sources to remove
- :param term: the analyzed term
- """
- sourcesterms = self._sourcesterms
- for source, solindex in sources:
- try:
- sourcesterms[source][term].remove(solindex)
- except KeyError:
- import rql.base as rqlb
- assert isinstance(term, (rqlb.BaseNode, Variable)), repr(term)
- continue # may occur with subquery column alias
- if not sourcesterms[source][term]:
- self._remove_source_term(source, term)
-
- def _remove_source_term(self, source, term):
- try:
- poped = self._sourcesterms[source].pop(term, None)
- except KeyError:
- pass
- else:
- if not self._sourcesterms[source]:
- del self._sourcesterms[source]
-
- def crossed_relation(self, source, relation):
- return relation in self._crossrelations.get(source, ())
-
- def part_steps(self):
- """precompute necessary part steps before generating actual rql for
- each step. This is necessary to know if an aggregate step will be
- necessary or not.
- """
- steps = []
- select = self.rqlst
- rschema = self._schema.rschema
- for source in self.part_sources:
- try:
- sourceterms = self._sourcesterms[source]
- except KeyError:
- continue # already processed
- while sourceterms:
- # take a term randomly, and all terms supporting the
- # same solutions
- term, solindices = self._choose_term(source, sourceterms)
- if source.uri == 'system':
- # ensure all variables are available for the last step
- # (missing one will be available from temporary tables
- # of previous steps)
- scope = select
- terms = scope.defined_vars.values() + scope.aliases.values()
- sourceterms.clear()
- sources = [source]
- else:
- scope = ms_scope(term)
- # find which sources support the same term and solutions
- sources = self._expand_sources(source, term, solindices)
- # now try to get as many terms as possible
- terms = self._expand_terms(term, sources, sourceterms,
- scope, solindices)
- if len(terms) == 1 and isinstance(terms[0], Constant):
- # we can't generate anything interesting with a single
- # constant term (will generate an empty "Any" query),
- # go to the next iteration directly!
- continue
- if not sourceterms:
- try:
- del self._sourcesterms[source]
- except KeyError:
- # XXX already cleaned
- pass
- # set of terms which should be additionally selected when
- # possible
- needsel = set()
- if not self._sourcesterms and scope is select:
- terms += scope.defined_vars.values() + scope.aliases.values()
- if isinstance(term, Relation) and len(sources) > 1:
- variants = set()
- partterms = [term]
- for vref in term.get_nodes(VariableRef):
- if not vref.variable._q_invariant:
- variants.add(vref.name)
- if len(variants) == 2:
- # we need an extra-step to fetch relations from each source
- # before a join with prefetched inputs
- # (see test_crossed_relation_noeid_needattr in
- # unittest_msplanner / unittest_multisources)
- lhs, rhs = term.get_variable_parts()
- steps.append( (sources, [term, getattr(lhs, 'variable', lhs),
- getattr(rhs, 'variable', rhs)],
- solindices, scope, variants, False) )
- sources = [self.system_source]
- final = True
- else:
- # suppose this is a final step until the contrary is proven
- final = scope is select
- # add attribute variables and mark variables which should be
- # additionally selected when possible
- for var in select.defined_vars.itervalues():
- if not var in terms:
- stinfo = var.stinfo
- for ovar, rtype in stinfo.get('attrvars', ()):
- if ovar in terms:
- needsel.add(var.name)
- terms.append(var)
- break
- else:
- needsel.add(var.name)
- final = False
- # check all relations are supported by the sources
- for rel in scope.iget_nodes(Relation):
- if rel.is_types_restriction():
- continue
- # take care not to overwrite the existing "source" identifier
- for _source in sources:
- if not _source.support_relation(rel.r_type) or (
- self.crossed_relation(_source, rel) and not rel in terms):
- for vref in rel.iget_nodes(VariableRef):
- needsel.add(vref.name)
- final = False
- break
- else:
- if not scope is select:
- self._exists_relation(rel, terms, needsel, source)
- # if relation is supported by all sources and some of
- # its lhs/rhs variable isn't in "terms", and the
- # other end *is* in "terms", mark it have to be
- # selected
- if source.uri != 'system' and not rschema(rel.r_type).final:
- lhs, rhs = rel.get_variable_parts()
- try:
- lhsvar = lhs.variable
- except AttributeError:
- lhsvar = lhs
- try:
- rhsvar = rhs.variable
- except AttributeError:
- rhsvar = rhs
- try:
- if lhsvar in terms and not rhsvar in terms:
- needsel.add(lhsvar.name)
- elif rhsvar in terms and not lhsvar in terms:
- needsel.add(rhsvar.name)
- except AttributeError:
- continue # not an attribute, no selection needed
- if final and source.uri != 'system':
- # check rewritten constants
- for vconsts in select.stinfo['rewritten'].itervalues():
- const = vconsts[0]
- eid = const.eval(self.plan.args)
- _source = self._session.source_from_eid(eid)
- if len(sources) > 1 or not _source in sources:
- # if there is some rewritten constant used by a non-
- # neged relation while there are some sources not
- # supporting the associated entity, this step can't
- # be final (unless the relation is explicitly in
- # `terms`, eg cross relations)
- for c in vconsts:
- rel = c.relation()
- if rel is None or not (rel in terms or neged_relation(rel)):
- final = False
- break
- break
- if final:
- self._cleanup_sourcesterms(sources, solindices)
- steps.append((sources, terms, solindices, scope, needsel, final)
- )
- if not steps[-1][-1]:
- # add a final step
- terms = select.defined_vars.values() + select.aliases.values()
- steps.append( ([self.system_source], terms, set(self._solindices),
- select, set(), True) )
- return steps
-
- def _exists_relation(self, rel, terms, needsel, source):
- rschema = self._schema.rschema(rel.r_type)
- lhs, rhs = rel.get_variable_parts()
- try:
- lhsvar, rhsvar = lhs.variable, rhs.variable
- except AttributeError:
- pass
- else:
- # supported relation with at least one end supported, check the
- # other end is in as well. If not, this usually means the
- # variable is referenced by an outer scope and should be substituted
- # using an 'identity' relation (else we'll get a conflict of
- # temporary tables)
- relscope = ms_scope(rel)
- lhsscope = ms_scope(lhsvar)
- rhsscope = ms_scope(rhsvar)
- if rhsvar in terms and not lhsvar in terms and lhsscope is lhsvar.stmt:
- self._identity_substitute(rel, lhsvar, terms, needsel, relscope)
- elif lhsvar in terms and not rhsvar in terms and rhsscope is rhsvar.stmt:
- self._identity_substitute(rel, rhsvar, terms, needsel, relscope)
- elif self.crossed_relation(source, rel):
- if lhsscope is not relscope:
- self._identity_substitute(rel, lhsvar, terms, needsel,
- relscope, lhsscope)
- if rhsscope is not relscope:
- self._identity_substitute(rel, rhsvar, terms, needsel,
- relscope, rhsscope)
-
- def _identity_substitute(self, relation, var, terms, needsel, exist,
- idrelscope=None):
- newvar = self._insert_identity_variable(exist, var, idrelscope)
- # ensure relation is using '=' operator, else we rely on a
- # sqlgenerator side effect (it won't insert an inequality operator
- # in this case)
- relation.children[1].operator = '='
- terms.append(newvar)
- needsel.add(newvar.name)
-
- def _choose_term(self, source, sourceterms):
- """pick one term among terms supported by a source, which will be used
- as a base to generate an execution step
- """
- secondchoice = None
- if len(self._sourcesterms) > 1:
- # first, return a non-invariant variable of a crossed relation, then
- # the crossed relation itself
- for term in sourceterms:
- if (isinstance(term, Relation)
- and self.crossed_relation(source, term)
- and not ms_scope(term) is self.rqlst):
- for vref in term.get_variable_parts():
- try:
- var = vref.variable
- except AttributeError:
- # Constant
- continue
- if ((len(var.stinfo['relations']) > 1 or var.stinfo['selected'])
- and var in sourceterms):
- return var, sourceterms.pop(var)
- return term, sourceterms.pop(term)
- # priority to variable from subscopes
- for term in sourceterms:
- if not ms_scope(term) is self.rqlst:
- if isinstance(term, Variable):
- return term, sourceterms.pop(term)
- secondchoice = term
- else:
- # priority to variable from outer scope
- for term in sourceterms:
- if ms_scope(term) is self.rqlst:
- if isinstance(term, Variable):
- return term, sourceterms.pop(term)
- secondchoice = term
- if secondchoice is not None:
- return secondchoice, sourceterms.pop(secondchoice)
- # priority to the variable with the fewest supported solutions and the
- # most valuable references. Add the variable name for test predictability
- variables = sorted([(var, sols) for (var, sols) in sourceterms.items()
- if isinstance(var, Variable)],
- key=lambda (v, s): (len(s), -v.valuable_references(), v.name))
- if variables:
- var = variables[0][0]
- return var, sourceterms.pop(var)
- # priority to constant
- for term in sourceterms:
- if isinstance(term, Constant):
- return term, sourceterms.pop(term)
- # whatever (relation)
- term = iter(sourceterms).next()
- return term, sourceterms.pop(term)
-
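When no crossed-relation or scope-based choice applies, the fallback above orders candidate variables by fewest supported solutions, then most valuable references, with the name as a deterministic tie-breaker. A self-contained sketch of that sort key (the Candidate class and numbers are illustrative stand-ins, not the real rql Variable API):

    class Candidate(object):
        def __init__(self, name, solutions, refs):
            self.name = name            # variable name, final tie-breaker
            self.solutions = solutions  # supported solution indices
            self.refs = refs            # "valuable references" count

    candidates = [Candidate('X', set([0, 1]), 3),
                  Candidate('Y', set([0]), 1),
                  Candidate('Z', set([0]), 2)]
    # fewest solutions first, then most references, then name
    candidates.sort(key=lambda c: (len(c.solutions), -c.refs, c.name))
    assert [c.name for c in candidates] == ['Z', 'Y', 'X']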
- def _expand_sources(self, selected_source, term, solindices):
- """return all sources supporting given term / solindices"""
- sources = [selected_source]
- sourcesterms = self._sourcesterms
- for source in list(sourcesterms):
- if source is selected_source:
- continue
- if not (term in sourcesterms[source] and
- solindices.issubset(sourcesterms[source][term])):
- continue
- sources.append(source)
- if source.uri != 'system' or not (isinstance(term, Variable) and not term in self._linkedterms):
- termsolindices = sourcesterms[source][term]
- termsolindices -= solindices
- if not termsolindices:
- del sourcesterms[source][term]
- if not sourcesterms[source]:
- del sourcesterms[source]
- return sources
-
- def _expand_terms(self, term, sources, sourceterms, scope, solindices):
- terms = [term]
- sources = sorted(sources)
- sourcesterms = self._sourcesterms
- linkedterms = self._linkedterms
- # the term has to belong to the same scope if more than
- # the system source remains
- if len(sourcesterms) > 1 and not scope is self.rqlst:
- candidates = (t for t in sourceterms if scope is ms_scope(t))
- else:
- candidates = sourceterms
- # we only want one unlinked term in each generated query
- candidates = [t for t in candidates
- if isinstance(t, (Constant, Relation)) or
- (solindices.issubset(sourceterms[t]) and t in linkedterms)]
- cross_rels = {}
- for source in sources:
- cross_rels.update(self._crossrelations.get(source, {}))
- exclude = {}
- for crossvars in cross_rels.itervalues():
- vars = [t for t in crossvars if isinstance(t, Variable)]
- try:
- exclude[vars[0]] = vars[1]
- exclude[vars[1]] = vars[0]
- except IndexError:
- pass
- accept_term = lambda x: (not any(s for s in sources
- if not x in sourcesterms.get(s, ()))
- and x._ms_may_be_processed(terms, linkedterms)
- and not exclude.get(x) in terms)
- if isinstance(term, Relation) and term in cross_rels:
- cross_terms = cross_rels.pop(term)
- base_accept_term = accept_term
- accept_term = lambda x: (base_accept_term(x) or x in cross_terms)
- for refed in cross_terms:
- if not refed in candidates:
- terms.append(refed)
- # repeat until no more terms can be added, since adding a new
- # term may allow another one to be added
- modified = True
- while modified and candidates:
- modified = False
- for term in candidates[:]:
- if isinstance(term, Constant):
- termsources = set(x[0] for x in self._term_sources(term))
- # ensure system source is there for constant
- if self.system_source in sources:
- termsources.add(self.system_source)
- if sorted(termsources) != sources:
- continue
- terms.append(term)
- candidates.remove(term)
- modified = True
- del sourceterms[term]
- elif accept_term(term):
- terms.append(term)
- candidates.remove(term)
- modified = True
- self._cleanup_sourcesterms(sources, solindices, term)
- return terms
-
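The `while modified` loop above is a fixed-point iteration: keep sweeping the remaining candidates, and stop only when a full pass adds nothing. The same shape in isolation, with a toy prerequisite table standing in for the real acceptance test:

    terms = ['A']
    prereq = {'B': 'A', 'C': 'B', 'D': 'Z'}   # candidate -> required term (toy data)
    candidates = ['C', 'B', 'D']
    modified = True
    while modified and candidates:
        modified = False
        for cand in candidates[:]:
            if prereq[cand] in terms:          # stand-in for accept_term()
                terms.append(cand)
                candidates.remove(cand)
                modified = True
    assert terms == ['A', 'B', 'C']            # 'D' never qualifies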
- def _cleanup_sourcesterms(self, sources, solindices, term=None):
- """remove solutions so we know they are already processed"""
- for source in sources:
- try:
- sourceterms = self._sourcesterms[source]
- except KeyError:
- continue
- if term is None:
- for term, termsolindices in sourceterms.items():
- if isinstance(term, Relation) and self.crossed_relation(source, term):
- continue
- termsolindices -= solindices
- if not termsolindices:
- del sourceterms[term]
- else:
- try:
- sourceterms[term] -= solindices
- if not sourceterms[term]:
- del sourceterms[term]
- except KeyError:
- pass
- #assert term in cross_terms
- if not sourceterms:
- del self._sourcesterms[source]
-
- def merge_input_maps(self, allsolindices, complete=True):
- """inputmaps is a dictionary with tuple of solution indices as key with
- an associated input map as value. This function compute for each
- solution its necessary input map and return them grouped
-
- ex:
- inputmaps = {(0, 1, 2): {'A': 't1.login1', 'U': 't1.C0', 'U.login': 't1.login1'},
- (1,): {'X': 't2.C0', 'T': 't2.C1'}}
- return : [([1], {'A': 't1.login1', 'U': 't1.C0', 'U.login': 't1.login1',
- 'X': 't2.C0', 'T': 't2.C1'}),
- ([0,2], {'A': 't1.login1', 'U': 't1.C0', 'U.login': 't1.login1'})]
- """
- if not self._inputmaps:
- return [(allsolindices, None)]
- _allsolindices = allsolindices.copy()
- mapbysol = {}
- # compute a single map for each solution
- for solindices, basemap in self._inputmaps.iteritems():
- for solindex in solindices:
- if not (complete or solindex in allsolindices):
- continue
- solmap = mapbysol.setdefault(solindex, {})
- solmap.update(basemap)
- try:
- _allsolindices.remove(solindex)
- except KeyError:
- continue # already removed
- # group results by identical input map
- result = []
- for solindex, solmap in mapbysol.iteritems():
- for solindices, commonmap in result:
- if commonmap == solmap:
- solindices.append(solindex)
- break
- else:
- result.append( ([solindex], solmap) )
- if _allsolindices:
- result.append( (list(_allsolindices), None) )
- return result
-
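A standalone sketch of the grouping described in the docstring, run on its example data (simplified: the `complete` flag and leftover-solution handling are omitted, and group order is not significant):

    def merge_maps(inputmaps):
        mapbysol = {}
        # compute a single merged map per solution index
        for solindices, basemap in inputmaps.items():
            for solindex in solindices:
                mapbysol.setdefault(solindex, {}).update(basemap)
        # group solution indices sharing an identical map
        result = []
        for solindex, solmap in mapbysol.items():
            for grouped, commonmap in result:
                if commonmap == solmap:
                    grouped.append(solindex)
                    break
            else:
                result.append(([solindex], solmap))
        return result

    inputmaps = {(0, 1, 2): {'A': 't1.login1', 'U': 't1.C0', 'U.login': 't1.login1'},
                 (1,): {'X': 't2.C0', 'T': 't2.C1'}}
    groups = merge_maps(inputmaps)
    # two groups: solution 1 with the merged map, solutions 0 and 2 with t1's map
    assert len(groups) == 2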
- def build_final_part(self, select, solindices, inputmap, sources,
- insertedvars):
- solutions = [self._solutions[i] for i in solindices]
- if self._conflicts and inputmap:
- for varname, mappedto in self._conflicts:
- var = select.defined_vars[varname]
- newvar = select.make_variable()
- # XXX should use var.scope but scope hasn't been computed yet
- select.add_relation(var, 'identity', newvar)
- for sol in solutions:
- sol[newvar.name] = sol[varname]
- inputmap[newvar.name] = mappedto
- rqlst = self.plan.finalize(select, solutions, insertedvars)
- if self.temptable is None and self.finaltable is None:
- return OneFetchStep(self.plan, rqlst, sources, inputmap=inputmap)
- table = self.temptable or self.finaltable
- return FetchStep(self.plan, rqlst, sources, table, True, inputmap)
-
- def build_non_final_part(self, select, solindices, sources, insertedvars,
- table):
- """non final step, will have to store results in a temporary table"""
- inputmapkey = tuple(sorted(solindices))
- solutions = [self._solutions[i] for i in solindices]
- # XXX be smarter vs rql comparison
- idx_key = (select.as_string(), inputmapkey,
- tuple(sorted(sources)), tuple(sorted(insertedvars)))
- try:
- # if a similar step has already been processed, simply backport its
- # input map
- step = self.plan.ms_steps_idx[idx_key]
- except KeyError:
- # processing needed
- rqlst = self.plan.finalize(select, solutions, insertedvars)
- step = FetchStep(self.plan, rqlst, sources, table, False)
- self.plan.ms_steps_idx[idx_key] = step
- self.plan.add_step(step)
- # update input map for following steps, according to processed solutions
- inputmap = self._inputmaps.setdefault(inputmapkey, {})
- for varname, mapping in step.outputmap.iteritems():
- if varname in inputmap and not '.' in varname and \
- not (mapping == inputmap[varname] or
- self._schema.eschema(solutions[0][varname]).final):
- self._conflicts.append((varname, inputmap[varname]))
- inputmap.update(step.outputmap)
-
-
-@deprecated('[3.18] old multi-source system will go away in the next version')
-class MSPlanner(SSPlanner):
- """MultiSourcesPlanner: build execution plan for rql queries
-
- decompose the RQL query according to the sources' schemas
- """
-
- def build_select_plan(self, plan, rqlst):
- """build execution plan for a SELECT RQL query
-
- the rqlst should not be tagged at this point
- """
- # preprocess deals with security insertion and returns a new syntax tree
- # which has to be executed to fulfill the query: according
- # to the permissions for each variable's type, different rql queries
- # may have to be executed
- plan.preprocess(rqlst)
- if server.DEBUG & server.DBG_MS:
- print '-'*80
- print 'PLANNING', rqlst
- ppis = [PartPlanInformation(plan, select, self.rqlhelper)
- for select in rqlst.children]
- plan.ms_steps_idx = {}
- steps = self._union_plan(plan, ppis)
- if server.DEBUG & server.DBG_MS:
- from pprint import pprint
- for step in plan.steps:
- pprint(step.test_repr())
- pprint(steps[0].test_repr())
- return steps
-
- def _ppi_subqueries(self, ppi):
- # part plan info for subqueries
- plan = ppi.plan
- inputmap = {}
- for subquery in ppi.rqlst.with_[:]:
- sppis = [PartPlanInformation(plan, select)
- for select in subquery.query.children]
- for sppi in sppis:
- if sppi.needsplit or sppi.part_sources != ppi.part_sources:
- temptable = plan.make_temp_table_name('T%s' % make_uid(id(subquery)))
- sstep = self._union_plan(plan, sppis, temptable)[0]
- break
- else:
- sstep = None
- if sstep is not None:
- ppi.rqlst.with_.remove(subquery)
- for i, colalias in enumerate(subquery.aliases):
- inputmap[colalias.name] = '%s.C%s' % (temptable, i)
- ppi.plan.add_step(sstep)
- return inputmap
-
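When a subquery is planned separately, its result lands in a temporary table and each column alias is rewired to a `<table>.C<index>` column, as above. The same mapping in isolation (alias and table names are illustrative):

    aliases = ['N', 'C']                       # subquery column aliases (toy)
    temptable = 'T_sub'
    inputmap = dict((name, '%s.C%s' % (temptable, i))
                    for i, name in enumerate(aliases))
    assert inputmap == {'N': 'T_sub.C0', 'C': 'T_sub.C1'}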
- def _union_plan(self, plan, ppis, temptable=None):
- tosplit, cango, allsources = [], {}, set()
- for planinfo in ppis:
- if planinfo.needsplit:
- tosplit.append(planinfo)
- else:
- cango.setdefault(planinfo.part_sources, []).append(planinfo)
- for source in planinfo.part_sources:
- allsources.add(source)
- # first add steps for query parts which don't need to be split
- steps = []
- for sources, cppis in cango.iteritems():
- byinputmap = {}
- for ppi in cppis:
- select = ppi.rqlst
- if sources != (ppi.system_source,):
- add_types_restriction(self.schema, select)
- # part plan info for subqueries
- inputmap = self._ppi_subqueries(ppi)
- aggrstep = need_aggr_step(select, sources)
- if aggrstep:
- atemptable = plan.make_temp_table_name('T%s' % make_uid(id(select)))
- sunion = Union()
- sunion.append(select)
- selected = select.selection[:]
- select_group_sort(select)
- step = AggrStep(plan, selected, select, atemptable, temptable)
- step.set_limit_offset(select.limit, select.offset)
- select.limit = None
- select.offset = 0
- fstep = FetchStep(plan, sunion, sources, atemptable, True, inputmap)
- step.children.append(fstep)
- steps.append(step)
- else:
- byinputmap.setdefault(tuple(inputmap.iteritems()), []).append(select)
- for inputmap, queries in byinputmap.iteritems():
- inputmap = dict(inputmap)
- sunion = Union()
- for select in queries:
- sunion.append(select)
- if temptable:
- steps.append(FetchStep(plan, sunion, sources, temptable, True, inputmap))
- else:
- steps.append(OneFetchStep(plan, sunion, sources, inputmap))
- # then add steps for split query parts
- for planinfo in tosplit:
- steps.append(self.split_part(planinfo, temptable))
- if len(steps) > 1:
- if temptable:
- step = UnionFetchStep(plan)
- else:
- step = UnionStep(plan)
- step.children = steps
- return (step,)
- return steps
-
- # internal methods for multisources decomposition #########################
-
- def split_part(self, ppi, temptable):
- ppi.finaltable = temptable
- plan = ppi.plan
- select = ppi.rqlst
- subinputmap = self._ppi_subqueries(ppi)
- stepdefs = ppi.part_steps()
- if need_aggr_step(select, ppi.part_sources, stepdefs):
- atemptable = plan.make_temp_table_name('T%s' % make_uid(id(select)))
- selection = select.selection[:]
- select_group_sort(select)
- else:
- atemptable = None
- selection = select.selection
- ppi.temptable = atemptable
- vfilter = TermsFiltererVisitor(self.schema, ppi)
- steps = []
- multifinal = len([x for x in stepdefs if x[-1]]) >= 2
- for sources, terms, solindices, scope, needsel, final in stepdefs:
- # extract an executable query using only the specified terms
- if sources[0].uri == 'system':
- # in this case we have to merge input maps before calling
- # filter, so already processed restrictions are correctly
- # removed
- solsinputmaps = ppi.merge_input_maps(
- solindices, complete=not (final and multifinal))
- for solindices, inputmap in solsinputmaps:
- minrqlst, insertedvars = vfilter.filter(
- sources, terms, scope, set(solindices), needsel, final)
- if inputmap is None:
- inputmap = subinputmap
- else:
- inputmap.update(subinputmap)
- steps.append(ppi.build_final_part(minrqlst, solindices, inputmap,
- sources, insertedvars))
- else:
- # this is a final part (i.e. retrieving results for the
- # original query part) if all terms / sources have been
- # processed or if this is the last shot for the used solutions
- minrqlst, insertedvars = vfilter.filter(
- sources, terms, scope, solindices, needsel, final)
- if final:
- solsinputmaps = ppi.merge_input_maps(
- solindices, complete=not (final and multifinal))
- if len(solsinputmaps) > 1:
- refrqlst = minrqlst
- for solindices, inputmap in solsinputmaps:
- if inputmap is None:
- inputmap = subinputmap
- else:
- inputmap.update(subinputmap)
- if len(solsinputmaps) > 1:
- minrqlst = refrqlst.copy()
- sources = sources[:]
- if inputmap and len(sources) > 1:
- sources.remove(ppi.system_source)
- steps.append(ppi.build_final_part(minrqlst, solindices, None,
- sources, insertedvars))
- steps.append(ppi.build_final_part(minrqlst, solindices, inputmap,
- [ppi.system_source], insertedvars))
- else:
- steps.append(ppi.build_final_part(minrqlst, solindices, inputmap,
- sources, insertedvars))
- else:
- table = plan.make_temp_table_name('T%s' % make_uid(id(select)))
- ppi.build_non_final_part(minrqlst, solindices, sources,
- insertedvars, table)
- # finally: join parts, deal with aggregates/groups/sorts if necessary
- if atemptable is not None:
- step = AggrStep(plan, selection, select, atemptable, temptable)
- step.children = steps
- elif len(steps) > 1:
- getrschema = self.schema.rschema
- if need_intersect(select, getrschema) or any(need_intersect(select, getrschema)
- for step in steps
- for select in step.union.children):
- if temptable:
- raise NotImplementedError('oops') # IntersectFetchStep(plan)
- else:
- step = IntersectStep(plan)
- else:
- if temptable:
- step = UnionFetchStep(plan)
- else:
- step = UnionStep(plan)
- step.children = steps
- else:
- step = steps[0]
- if select.limit is not None or select.offset:
- step.set_limit_offset(select.limit, select.offset)
- return step
-
-
-class UnsupportedBranch(Exception):
- pass
-
-
-class TermsFiltererVisitor(object):
- def __init__(self, schema, ppi):
- self.schema = schema
- self.ppi = ppi
- self.skip = {}
- self.hasaggrstep = self.ppi.temptable
- self.extneedsel = frozenset(vref.name for sortterm in ppi.rqlst.orderby
- for vref in sortterm.iget_nodes(VariableRef))
-
- def _rqlst_accept(self, rqlst, node, newroot, terms, setfunc=None):
- try:
- newrestr, node_ = node.accept(self, newroot, terms[:])
- except UnsupportedBranch:
- return rqlst
- if setfunc is not None and newrestr is not None:
- setfunc(newrestr)
- if not node_ is node:
- rqlst = node.parent
- return rqlst
-
- def filter(self, sources, terms, rqlst, solindices, needsel, final):
- if server.DEBUG & server.DBG_MS:
- print 'filter', final and 'final' or '', sources, terms, rqlst, solindices, needsel
- newroot = Select()
- self.sources = sorted(sources)
- self.terms = terms
- self.solindices = solindices
- self.final = final
- self._pending_vrefs = []
- # terms which appear in unsupported branches
- needsel |= self.extneedsel
- self.needsel = needsel
- # terms which appear in supported branches
- self.mayneedsel = set()
- # new inserted variables
- self.insertedvars = []
- # other structures (XXX document)
- self.mayneedvar, self.hasvar = {}, {}
- self.use_only_defined = False
- self.scopes = {rqlst: newroot}
- self.current_scope = rqlst
- if rqlst.where:
- rqlst = self._rqlst_accept(rqlst, rqlst.where, newroot, terms,
- newroot.set_where)
- if isinstance(rqlst, Select):
- self.use_only_defined = True
- if rqlst.groupby:
- groupby = []
- for node in rqlst.groupby:
- rqlst = self._rqlst_accept(rqlst, node, newroot, terms,
- groupby.append)
- if groupby:
- newroot.set_groupby(groupby)
- if rqlst.having:
- having = []
- for node in rqlst.having:
- rqlst = self._rqlst_accept(rqlst, node, newroot, terms,
- having.append)
- if having:
- newroot.set_having(having)
- if final and rqlst.orderby and not self.hasaggrstep:
- orderby = []
- for node in rqlst.orderby:
- rqlst = self._rqlst_accept(rqlst, node, newroot, terms,
- orderby.append)
- if orderby:
- newroot.set_orderby(orderby)
- elif rqlst.orderby:
- for sortterm in rqlst.orderby:
- if any(f for f in sortterm.iget_nodes(Function) if f.name == 'FTIRANK'):
- newnode, oldnode = sortterm.accept(self, newroot, terms)
- if newnode is not None:
- newroot.add_sort_term(newnode)
- self.process_selection(newroot, terms, rqlst)
- elif not newroot.where:
- # no restrictions have been copied, just select terms and add
- # type restriction (done later by add_types_restriction)
- for v in terms:
- if not isinstance(v, Variable):
- continue
- newroot.append_selected(VariableRef(newroot.get_variable(v.name)))
- solutions = self.ppi.copy_solutions(solindices)
- cleanup_solutions(newroot, solutions)
- newroot.set_possible_types(solutions)
- if final:
- if self.hasaggrstep:
- self.add_necessary_selection(newroot, self.mayneedsel & self.extneedsel)
- newroot.distinct = rqlst.distinct
- else:
- self.add_necessary_selection(newroot, self.mayneedsel & self.needsel)
- # insert vars to fetch constant values when needed
- for (varname, rschema), reldefs in self.mayneedvar.iteritems():
- for rel, ored in reldefs:
- if not (varname, rschema) in self.hasvar:
- self.hasvar[(varname, rschema)] = None # just to avoid further insertion
- cvar = newroot.make_variable()
- for sol in newroot.solutions:
- sol[cvar.name] = rschema.objects(sol[varname])[0]
- # if the current restriction is not used in an OR branch,
- # we can keep it, else we have to drop the constant
- # restriction (or we may miss some results)
- if not ored:
- rel = rel.copy(newroot)
- newroot.add_restriction(rel)
- # add a relation to link the variable
- newroot.remove_node(rel.children[1])
- cmp = Comparison('=')
- rel.append(cmp)
- cmp.append(VariableRef(cvar))
- self.insertedvars.append((varname, rschema, cvar.name))
- newroot.append_selected(VariableRef(newroot.get_variable(cvar.name)))
- # NOTE: even if the restriction is handled by this query, we have
- # to leave it in the original rqlst so that it appears anyway in
- # the "final" query, else we may change the meaning of the query
- # if there is a NOT somewhere:
- # 'NOT X relation Y, Y name "toto"' means X WHERE X isn't related
- # to any Y whose name is toto, while
- # 'NOT X relation Y' means X WHERE X has no 'relation' (whatever Y)
- elif ored:
- newroot.remove_node(rel)
- add_types_restriction(self.schema, rqlst, newroot, solutions)
- if server.DEBUG & server.DBG_MS:
- print '--->', newroot
- return newroot, self.insertedvars
-
- def visit_and(self, node, newroot, terms):
- subparts = []
- for i in xrange(len(node.children)):
- child = node.children[i]
- try:
- newchild, child_ = child.accept(self, newroot, terms)
- if not child_ is child:
- node = child_.parent
- if newchild is None:
- continue
- subparts.append(newchild)
- except UnsupportedBranch:
- continue
- if not subparts:
- return None, node
- if len(subparts) == 1:
- return subparts[0], node
- return copy_node(newroot, node, subparts), node
-
- visit_or = visit_and
-
- def _relation_supported(self, relation):
- rtype = relation.r_type
- for source in self.sources:
- if not source.support_relation(rtype) or (
- rtype in source.cross_relations and not relation in self.terms):
- return False
- if not self.final and not relation in self.terms:
- rschema = self.schema.rschema(relation.r_type)
- if not rschema.final:
- for term in relation.get_nodes((VariableRef, Constant)):
- term = getattr(term, 'variable', term)
- termsources = sorted(set(x[0] for x in self.ppi._term_sources(term)))
- if termsources and termsources != self.sources:
- return False
- return True
-
- def visit_relation(self, node, newroot, terms):
- if not node.is_types_restriction():
- if not node in terms and node in self.skip and self.solindices.issubset(self.skip[node]):
- return None, node
- if not self._relation_supported(node):
- raise UnsupportedBranch()
- # don't copy type restrictions unless this is the only supported relation
- # for the lhs variable, else they'll be reinserted later as needed (in
- # other cases we may copy a type restriction while the variable is not
- # actually used)
- elif not (node.neged(strict=True) or
- any(self._relation_supported(rel)
- for rel in node.children[0].variable.stinfo['relations'])):
- return self.visit_default(node, newroot, terms)
- else:
- raise UnsupportedBranch()
- rschema = self.schema.rschema(node.r_type)
- self._pending_vrefs = []
- try:
- res = self.visit_default(node, newroot, terms)[0]
- except Exception:
- # when a relation isn't supported, we should dereference potentially
- # introduced variable refs
- for vref in self._pending_vrefs:
- vref.unregister_reference()
- raise
- ored = node.ored()
- if rschema.final or rschema.inlined:
- vrefs = node.children[1].get_nodes(VariableRef)
- if not vrefs:
- if not ored:
- self.skip.setdefault(node, set()).update(self.solindices)
- else:
- self.mayneedvar.setdefault((node.children[0].name, rschema), []).append( (res, ored) )
- else:
- assert len(vrefs) == 1
- vref = vrefs[0]
- # XXX check operator ?
- self.hasvar[(node.children[0].name, rschema)] = vref
- if self._may_skip_attr_rel(rschema, node, vref, ored, terms, res):
- self.skip.setdefault(node, set()).update(self.solindices)
- elif not ored:
- self.skip.setdefault(node, set()).update(self.solindices)
- return res, node
-
- def _may_skip_attr_rel(self, rschema, rel, vref, ored, terms, res):
- var = vref.variable
- if ored:
- return False
- if var.name in self.extneedsel or var.stinfo['selected']:
- return False
- if not var in terms or used_in_outer_scope(var, self.current_scope):
- return False
- if any(v for v, _ in var.stinfo.get('attrvars', ()) if not v in terms):
- return False
- return True
-
- def visit_exists(self, node, newroot, terms):
- newexists = node.__class__()
- self.scopes = {node: newexists}
- subparts, node = self._visit_children(node, newroot, terms)
- if not subparts:
- return None, node
- newexists.set_where(subparts[0])
- return newexists, node
-
- def visit_not(self, node, newroot, terms):
- subparts, node = self._visit_children(node, newroot, terms)
- if not subparts:
- return None, node
- return copy_node(newroot, node, subparts), node
-
- def visit_group(self, node, newroot, terms):
- if not self.final:
- return None, node
- return self.visit_default(node, newroot, terms)
-
- def visit_variableref(self, node, newroot, terms):
- if self.use_only_defined:
- if not node.variable.name in newroot.defined_vars:
- raise UnsupportedBranch(node.name)
- elif not node.variable in terms:
- raise UnsupportedBranch(node.name)
- self.mayneedsel.add(node.name)
- # set scope so we can insert types restriction properly
- newvar = newroot.get_variable(node.name)
- newvar.stinfo['scope'] = self.scopes.get(node.variable.scope, newroot)
- vref = VariableRef(newvar)
- self._pending_vrefs.append(vref)
- return vref, node
-
- def visit_constant(self, node, newroot, terms):
- return copy_node(newroot, node), node
-
- def visit_comparison(self, node, newroot, terms):
- subparts, node = self._visit_children(node, newroot, terms)
- copy = copy_node(newroot, node, subparts)
- # ignore the comparison operator when fetching a non-final query
- if not self.final and isinstance(node.children[0], VariableRef):
- copy.operator = '='
- return copy, node
-
- def visit_function(self, node, newroot, terms):
- if node.name == 'FTIRANK':
- # FTIRANK is somewhat special... The rank function should be included in
- # the same query as the has_text relation, potentially added to the
- # selection for later usage
- if not self.hasaggrstep and self.final and node not in self.skip:
- return self.visit_default(node, newroot, terms)
- elif any(s for s in self.sources if s.uri != 'system'):
- return None, node
- # p = node.parent
- # while p is not None and not isinstance(p, SortTerm):
- # p = p.parent
- # if isinstance(p, SortTerm):
- if not self.hasaggrstep and self.final and node in self.skip:
- return Constant(self.skip[node], 'Int'), node
- # XXX only if not yet selected
- newroot.append_selected(node.copy(newroot))
- self.skip[node] = len(newroot.selection)
- return None, node
- return self.visit_default(node, newroot, terms)
-
- def visit_default(self, node, newroot, terms):
- subparts, node = self._visit_children(node, newroot, terms)
- return copy_node(newroot, node, subparts), node
-
- visit_mathexpression = visit_constant = visit_default
-
- def visit_sortterm(self, node, newroot, terms):
- subparts, node = self._visit_children(node, newroot, terms)
- if not subparts:
- return None, node
- return copy_node(newroot, node, subparts), node
-
- def _visit_children(self, node, newroot, terms):
- subparts = []
- for i in xrange(len(node.children)):
- child = node.children[i]
- newchild, child_ = child.accept(self, newroot, terms)
- if not child is child_:
- node = child_.parent
- if newchild is not None:
- subparts.append(newchild)
- return subparts, node
-
- def process_selection(self, newroot, terms, rqlst):
- if self.final:
- for term in rqlst.selection:
- newroot.append_selected(term.copy(newroot))
- for vref in term.get_nodes(VariableRef):
- self.needsel.add(vref.name)
- return
- for term in rqlst.selection:
- vrefs = term.get_nodes(VariableRef)
- if vrefs:
- supportedvars = []
- for vref in vrefs:
- var = vref.variable
- if var in terms:
- supportedvars.append(vref)
- continue
- else:
- self.needsel.add(vref.name)
- break
- else:
- for vref in vrefs:
- newroot.append_selected(vref.copy(newroot))
- supportedvars = []
- for vref in supportedvars:
- if not vref in newroot.get_selected_variables():
- newroot.append_selected(VariableRef(newroot.get_variable(vref.name)))
- elif term in self.terms:
- newroot.append_selected(term.copy(newroot))
-
- def add_necessary_selection(self, newroot, terms):
- selected = tuple(newroot.get_selected_variables())
- for varname in terms:
- var = newroot.defined_vars[varname]
- for vref in var.references():
- rel = vref.relation()
- if rel is None and vref in selected:
- # already selected
- break
- else:
- selvref = VariableRef(var)
- newroot.append_selected(selvref)
- if newroot.groupby:
- newroot.add_group_var(VariableRef(selvref.variable, noautoref=1))
--- a/server/mssteps.py Mon Feb 17 11:13:27 2014 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,309 +0,0 @@
-# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
-# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
-#
-# This file is part of CubicWeb.
-#
-# CubicWeb is free software: you can redistribute it and/or modify it under the
-# terms of the GNU Lesser General Public License as published by the Free
-# Software Foundation, either version 2.1 of the License, or (at your option)
-# any later version.
-#
-# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
-# details.
-#
-# You should have received a copy of the GNU Lesser General Public License along
-# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""Defines the diferent querier steps usable in plans.
-
-FIXME: this code needs refactoring. Some problems:
-* get data from the parent plan, the latest step, temporary table...
-* each step has its own members (this is not necessarily bad, but a bit messy
- for now)
-"""
-__docformat__ = "restructuredtext en"
-
-from rql.nodes import VariableRef, Variable, Function
-
-from cubicweb.server.ssplanner import (LimitOffsetMixIn, Step, OneFetchStep,
- varmap_test_repr, offset_result)
-
-AGGR_TRANSFORMS = {'COUNT':'SUM', 'MIN':'MIN', 'MAX':'MAX', 'SUM': 'SUM'}
-
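The transform table above encodes how an aggregate computed per source must be re-aggregated over the temporary table: a COUNT of the partial counts would count rows, their SUM gives the true total, while MIN/MAX/SUM simply re-apply themselves. Note there is no entry for AVG, which cannot be recombined from partial averages. Illustrative numbers:

    per_source_counts = [12, 5]       # COUNT(X) computed by each source
    assert sum(per_source_counts) == 17        # SUM of the counts, not COUNT
    per_source_mins = [3, 1]          # MIN(X) per source
    assert min(per_source_mins) == 1           # MIN re-applies cleanly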
-class remove_and_restore_clauses(object):
- def __init__(self, union, keepgroup):
- self.union = union
- self.keepgroup = keepgroup
- self.clauses = None
-
- def __enter__(self):
- self.clauses = clauses = []
- for select in self.union.children:
- if self.keepgroup:
- having, orderby = select.having, select.orderby
- select.having, select.orderby = (), ()
- clauses.append( (having, orderby) )
- else:
- groupby, having, orderby = select.groupby, select.having, select.orderby
- select.groupby, select.having, select.orderby = (), (), ()
- clauses.append( (groupby, having, orderby) )
-
- def __exit__(self, exctype, exc, traceback):
- for i, select in enumerate(self.union.children):
- if self.keepgroup:
- select.having, select.orderby = self.clauses[i]
- else:
- select.groupby, select.having, select.orderby = self.clauses[i]
-
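remove_and_restore_clauses follows the save/strip/restore pattern: clauses are blanked for the duration of the `with` block and put back on exit, even if an exception escapes. The same contract on a toy object (not the real rql Select class):

    class ToySelect(object):
        def __init__(self):
            self.having, self.orderby = ('H',), ('O',)

    class strip_clauses(object):
        def __init__(self, select):
            self.select = select
        def __enter__(self):
            # save, then blank out the clauses
            self.saved = (self.select.having, self.select.orderby)
            self.select.having, self.select.orderby = (), ()
        def __exit__(self, exctype, exc, traceback):
            # restore whatever was saved, even on error
            self.select.having, self.select.orderby = self.saved

    s = ToySelect()
    with strip_clauses(s):
        assert s.having == () and s.orderby == ()
    assert s.having == ('H',) and s.orderby == ('O',)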
-
-class FetchStep(OneFetchStep):
- """step consisting in fetching data from sources, and storing result in
- a temporary table
- """
- def __init__(self, plan, union, sources, table, keepgroup, inputmap=None):
- OneFetchStep.__init__(self, plan, union, sources)
- # temporary table to store step result
- self.table = table
- # should groupby clause be kept or not
- self.keepgroup = keepgroup
- # variables mapping to use as input
- self.inputmap = inputmap
- # output variable mapping
- srqlst = union.children[0] # sample select node
- # add additional information to the output mapping
- self.outputmap = plan.init_temp_table(table, srqlst.selection,
- srqlst.solutions[0])
- for vref in srqlst.selection:
- if not isinstance(vref, VariableRef):
- continue
- var = vref.variable
- if var.stinfo.get('attrvars'):
- for lhsvar, rtype in var.stinfo['attrvars']:
- if lhsvar.name in srqlst.defined_vars:
- key = '%s.%s' % (lhsvar.name, rtype)
- self.outputmap[key] = self.outputmap[var.name]
- else:
- rschema = self.plan.schema.rschema
- for rel in var.stinfo['rhsrelations']:
- if rschema(rel.r_type).inlined:
- lhsvar = rel.children[0]
- if lhsvar.name in srqlst.defined_vars:
- key = '%s.%s' % (lhsvar.name, rel.r_type)
- self.outputmap[key] = self.outputmap[var.name]
-
- def execute(self):
- """execute this step"""
- self.execute_children()
- plan = self.plan
- plan.create_temp_table(self.table)
- union = self.union
- with remove_and_restore_clauses(union, self.keepgroup):
- for source in self.sources:
- source.flying_insert(self.table, plan.session, union, plan.args,
- self.inputmap)
-
- def mytest_repr(self):
- """return a representation of this step suitable for test"""
- with remove_and_restore_clauses(self.union, self.keepgroup):
- try:
- inputmap = varmap_test_repr(self.inputmap, self.plan.tablesinorder)
- outputmap = varmap_test_repr(self.outputmap, self.plan.tablesinorder)
- except AttributeError:
- inputmap = self.inputmap
- outputmap = self.outputmap
- return (self.__class__.__name__,
- sorted((r.as_string(kwargs=self.plan.args), r.solutions)
- for r in self.union.children),
- sorted(self.sources), inputmap, outputmap)
-
-
-class AggrStep(LimitOffsetMixIn, Step):
- """step consisting in making aggregat from temporary data in the system
- source
- """
- def __init__(self, plan, selection, select, table, outputtable=None):
- Step.__init__(self, plan)
- # original selection
- self.selection = selection
- # original Select RQL tree
- self.select = select
- # table where are located temporary results
- self.table = table
- # optional table where to write results
- self.outputtable = outputtable
- if outputtable is not None:
- plan.init_temp_table(outputtable, selection, select.solutions[0])
-
- #self.inputmap = inputmap
-
- def mytest_repr(self):
- """return a representation of this step suitable for test"""
- try:
- # rely on a monkey patch (cf unittest_querier)
- table = self.plan.tablesinorder[self.table]
- outputtable = self.outputtable and self.plan.tablesinorder[self.outputtable]
- except AttributeError:
- # not monkey patched
- table = self.table
- outputtable = self.outputtable
- sql = self.get_sql().replace(self.table, table)
- return (self.__class__.__name__, sql, outputtable)
-
- def execute(self):
- """execute this step"""
- self.execute_children()
- sql = self.get_sql()
- if self.outputtable:
- self.plan.create_temp_table(self.outputtable)
- sql = 'INSERT INTO %s %s' % (self.outputtable, sql)
- self.plan.syssource.doexec(self.plan.session, sql, self.plan.args)
- else:
- return self.plan.sqlexec(sql, self.plan.args)
-
- def get_sql(self):
- self.inputmap = inputmap = self.children[-1].outputmap
- dbhelper = self.plan.syssource.dbhelper
- # get the select clause
- clause = []
- for i, term in enumerate(self.selection):
- try:
- var_name = inputmap[term.as_string()]
- except KeyError:
- var_name = 'C%s' % i
- if isinstance(term, Function):
- # we have to translate some aggregate functions
- # (for instance COUNT -> SUM)
- orig_name = term.name
- try:
- term.name = AGGR_TRANSFORMS[term.name]
- # backup and reduce children
- orig_children = term.children
- term.children = [VariableRef(Variable(var_name))]
- clause.append(term.accept(self))
- # restore the tree XXX necessary?
- term.name = orig_name
- term.children = orig_children
- except KeyError:
- clause.append(var_name)
- else:
- clause.append(var_name)
- for vref in term.iget_nodes(VariableRef):
- inputmap[vref.name] = var_name
- # XXX handle distinct with non selected sort term
- if self.select.distinct:
- sql = ['SELECT DISTINCT %s' % ', '.join(clause)]
- else:
- sql = ['SELECT %s' % ', '.join(clause)]
- sql.append("FROM %s" % self.table)
- # get the group/having clauses
- if self.select.groupby:
- clause = [inputmap[var.name] for var in self.select.groupby]
- grouped = set(var.name for var in self.select.groupby)
- sql.append('GROUP BY %s' % ', '.join(clause))
- else:
- grouped = None
- if self.select.having:
- clause = [term.accept(self) for term in self.select.having]
- sql.append('HAVING %s' % ', '.join(clause))
- # get the orderby clause
- if self.select.orderby:
- clause = []
- for sortterm in self.select.orderby:
- sqlterm = sortterm.term.accept(self)
- if sortterm.asc:
- clause.append(sqlterm)
- else:
- clause.append('%s DESC' % sqlterm)
- if grouped is not None:
- for vref in sortterm.iget_nodes(VariableRef):
- if not vref.name in grouped:
- sql[-1] += ', ' + self.inputmap[vref.name]
- grouped.add(vref.name)
- sql = dbhelper.sql_add_order_by(' '.join(sql),
- clause,
- None, False,
- self.limit or self.offset)
- else:
- sql = ' '.join(sql)
- clause = None
-
- sql = dbhelper.sql_add_limit_offset(sql, self.limit, self.offset, clause)
- return sql
-
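get_sql() builds the statement as a list of clause strings joined at the end, delegating ORDER BY and LIMIT/OFFSET to the db helper. A miniature of that clause assembly, with hypothetical table/column names:

    table, select_clause, groupby = 'T_x', ['C0', 'SUM(C1)'], ['C0']
    sql = ['SELECT %s' % ', '.join(select_clause), 'FROM %s' % table]
    if groupby:
        sql.append('GROUP BY %s' % ', '.join(groupby))
    assert ' '.join(sql) == 'SELECT C0, SUM(C1) FROM T_x GROUP BY C0'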
- def visit_function(self, function):
- """generate SQL name for a function"""
- try:
- return self.children[0].outputmap[str(function)]
- except KeyError:
- return '%s(%s)' % (function.name,
- ','.join(c.accept(self) for c in function.children))
-
- def visit_variableref(self, variableref):
- """get the sql name for a variable reference"""
- try:
- return self.inputmap[variableref.name]
- except KeyError: # XXX duh? explain
- return variableref.variable.name
-
- def visit_constant(self, constant):
- """generate SQL name for a constant"""
- assert constant.type == 'Int'
- return str(constant.value)
-
-
-class UnionStep(LimitOffsetMixIn, Step):
- """union results of child in-memory steps (e.g. OneFetchStep / AggrStep)"""
-
- def execute(self):
- """execute this step"""
- result = []
- limit = olimit = self.limit
- offset = self.offset
- assert offset != 0
- if offset is not None:
- limit = limit + offset
- for step in self.children:
- if limit is not None:
- if offset is None:
- limit = olimit - len(result)
- step.set_limit_offset(limit, None)
- result_ = step.execute()
- if offset is not None:
- offset, result_ = offset_result(offset, result_)
- result += result_
- if limit is not None:
- if len(result) >= olimit:
- return result[:olimit]
- return result
-
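UnionStep consumes limit/offset across its children: the offset eats into each child's rows until exhausted, then rows accumulate toward the limit. A hedged stand-in for the imported offset_result helper (the real one lives in ssplanner and may differ in detail):

    def offset_result(offset, rows):
        # consume `offset` rows; return the remaining offset and the kept rows
        if offset >= len(rows):
            return offset - len(rows), []
        return 0, rows[offset:]

    offset, part1 = offset_result(3, ['a', 'b'])            # skips both rows
    offset, part2 = offset_result(offset, ['c', 'd', 'e'])  # skips 'c'
    assert (offset, part1, part2) == (0, [], ['d', 'e'])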
- def mytest_repr(self):
- """return a representation of this step suitable for test"""
- return (self.__class__.__name__, self.limit, self.offset)
-
-
-class IntersectStep(UnionStep):
- """return intersection of results of child in-memory steps (e.g. OneFetchStep / AggrStep)"""
-
- def execute(self):
- """execute this step"""
- result = None
- for step in self.children:
- stepresult = frozenset(step.execute())
- result = stepresult if result is None else result & stepresult
- result = list(result or ())
- if self.offset:
- result = result[self.offset:]
- if self.limit:
- result = result[:self.limit]
- return result
-
-
-class UnionFetchStep(Step):
- """union results of child steps using temporary tables (e.g. FetchStep)"""
-
- def execute(self):
- """execute this step"""
- self.execute_children()
-
-
-__all__ = ('FetchStep', 'AggrStep', 'UnionStep', 'UnionFetchStep', 'IntersectStep')
--- a/server/pool.py Mon Feb 17 11:13:27 2014 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,160 +0,0 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
-# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
-#
-# This file is part of CubicWeb.
-#
-# CubicWeb is free software: you can redistribute it and/or modify it under the
-# terms of the GNU Lesser General Public License as published by the Free
-# Software Foundation, either version 2.1 of the License, or (at your option)
-# any later version.
-#
-# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
-# details.
-#
-# You should have received a copy of the GNU Lesser General Public License along
-# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""CubicWeb server connections set : the repository has a limited number of
-:class:`ConnectionsSet` (defined in configuration, default to 4). Each of them
-hold a connection for each source used by the repository.
-"""
-
-__docformat__ = "restructuredtext en"
-
-import sys
-
-class ConnectionsSet(object):
- """handle connections on a set of sources, at some point associated to a
- :class:`Session`
- """
-
- def __init__(self, sources):
- # dictionary of (source, connection), indexed by source uri
- self.source_cnxs = {}
- for source in sources:
- self.add_source(source)
- if not 'system' in self.source_cnxs:
- self.source_cnxs['system'] = self.source_cnxs[sources[0].uri]
- self._cursors = {}
-
- def __getitem__(self, uri):
- """subscription notation provide access to sources'cursors"""
- try:
- cursor = self._cursors[uri]
- except KeyError:
- cursor = self.source_cnxs[uri][1].cursor()
- if cursor is not None:
- # None possible on sources without cursor support such as ldap
- self._cursors[uri] = cursor
- return cursor
-
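__getitem__ is a lazy per-uri cursor cache that tolerates sources without cursor support. The same shape in isolation (toy factory callables, not the real source API):

    cursors = {}

    def get_cursor(uri, make_cursor):
        try:
            return cursors[uri]
        except KeyError:
            cursor = make_cursor()
            if cursor is not None:    # e.g. ldap-like sources return None
                cursors[uri] = cursor
            return cursor

    assert get_cursor('system', lambda: 'cu1') == 'cu1'
    assert get_cursor('system', lambda: 'cu2') == 'cu1'   # cached
    assert get_cursor('ldap', lambda: None) is None       # never cached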
- def add_source(self, source):
- assert not source.uri in self.source_cnxs
- self.source_cnxs[source.uri] = (source, source.get_connection())
-
- def remove_source(self, source):
- source, cnx = self.source_cnxs.pop(source.uri)
- cnx.close()
- self._cursors.pop(source.uri, None)
-
- def commit(self):
- """commit the current transaction for this user"""
- # FIXME: what happens if a commit fails?
- # we would need a two-phase commit or the like, but I don't know
- # how to do this using the db-api...
- for source, cnx in self.source_cnxs.itervalues():
- # let exceptions propagate
- cnx.commit()
-
- def rollback(self):
- """rollback the current transaction for this user"""
- for source, cnx in self.source_cnxs.itervalues():
- # catch exceptions, rollback other sources anyway
- try:
- cnx.rollback()
- except Exception:
- source.critical('rollback error', exc_info=sys.exc_info())
- # error on rollback, the connection is most probably in a really
- # bad state. Replace it with a new one.
- self.reconnect(source)
-
- def close(self, i_know_what_i_do=False):
- """close all connections in the set"""
- if i_know_what_i_do is not True: # unexpected closing safety belt
- raise RuntimeError('connections set shouldn\'t be closed')
- for cu in self._cursors.itervalues():
- try:
- cu.close()
- except Exception:
- continue
- for _, cnx in self.source_cnxs.itervalues():
- try:
- cnx.close()
- except Exception:
- continue
-
- # internals ###############################################################
-
- def cnxset_set(self):
- """connections set is being set on a session"""
- self.check_connections()
-
- def cnxset_freed(self):
- """connections set is being freed from a session"""
- for source, cnx in self.source_cnxs.itervalues():
- source.cnxset_freed(cnx)
-
- def sources(self):
- """return the source objects handled by this connections set"""
- # implementation details of flying insert require the system source
- # first
- yield self.source_cnxs['system'][0]
- for uri, (source, cnx) in self.source_cnxs.items():
- if uri == 'system':
- continue
- yield source
- #return [source_cnx[0] for source_cnx in self.source_cnxs.itervalues()]
-
- def source(self, uid):
- """return the source object with the given uri"""
- return self.source_cnxs[uid][0]
-
- def connection(self, uid):
- """return the connection on the source object with the given uri"""
- return self.source_cnxs[uid][1]
-
- def reconnect(self, source=None):
- """reopen a connection for this source or all sources if none specified
- """
- if source is None:
- sources = self.sources()
- else:
- sources = (source,)
- for source in sources:
- try:
- # properly close existing connection if any
- self.source_cnxs[source.uri][1].close()
- except Exception:
- pass
- source.info('trying to reconnect')
- self.source_cnxs[source.uri] = (source, source.get_connection())
- self._cursors.pop(source.uri, None)
-
- def check_connections(self):
- for source, cnx in self.source_cnxs.itervalues():
- newcnx = source.check_connection(cnx)
- if newcnx is not None:
- self.reset_connection(source, newcnx)
-
- def reset_connection(self, source, cnx):
- self.source_cnxs[source.uri] = (source, cnx)
- self._cursors.pop(source.uri, None)
-
-
-from cubicweb.server.hook import Operation, LateOperation, SingleLastOperation
-from logilab.common.deprecation import class_moved, class_renamed
-Operation = class_moved(Operation)
-PreCommitOperation = class_renamed('PreCommitOperation', Operation)
-LateOperation = class_moved(LateOperation)
-SingleLastOperation = class_moved(SingleLastOperation)
--- a/server/querier.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/querier.py Mon Feb 17 15:32:50 2014 +0100
@@ -36,7 +36,7 @@
from cubicweb.server.rqlannotation import SQLGenAnnotator, set_qdata
from cubicweb.server.ssplanner import READ_ONLY_RTYPES, add_types_restriction
from cubicweb.server.edition import EditedEntity
-
+from cubicweb.server.ssplanner import SSPlanner
ETYPE_PYOBJ_MAP[Binary] = 'Bytes'
@@ -64,16 +64,16 @@
if etype == 'Password':
raise Unauthorized('Password selection is not allowed (%s)' % var)
-def term_etype(session, term, solution, args):
+def term_etype(cnx, term, solution, args):
"""return the entity type for the given term (a VariableRef or a Constant
node)
"""
try:
return solution[term.name]
except AttributeError:
- return session.describe(term.eval(args))[0]
+ return cnx.entity_metas(term.eval(args))['type']
-def check_read_access(session, rqlst, solution, args):
+def check_read_access(cnx, rqlst, solution, args):
"""Check that the given user has credentials to access data read by the
query and return a dict defining necessary "local checks" (i.e. rql
expression in read permission defined in the schema) where no group grants
@@ -86,7 +86,7 @@
# when used as an external source by another repository.
# XXX what about local read security w/ those rewritten constants...
DBG = (server.DEBUG & server.DBG_SEC) and 'read' in server._SECURITY_CAPS
- schema = session.repo.schema
+ schema = cnx.repo.schema
if rqlst.where is not None:
for rel in rqlst.where.iget_nodes(Relation):
# XXX has_text may have specific perm ?
@@ -94,37 +94,37 @@
continue
rschema = schema.rschema(rel.r_type)
if rschema.final:
- eschema = schema.eschema(term_etype(session, rel.children[0],
+ eschema = schema.eschema(term_etype(cnx, rel.children[0],
solution, args))
rdef = eschema.rdef(rschema)
else:
- rdef = rschema.rdef(term_etype(session, rel.children[0],
+ rdef = rschema.rdef(term_etype(cnx, rel.children[0],
solution, args),
- term_etype(session, rel.children[1].children[0],
+ term_etype(cnx, rel.children[1].children[0],
solution, args))
- if not session.user.matching_groups(rdef.get_groups('read')):
+ if not cnx.user.matching_groups(rdef.get_groups('read')):
if DBG:
print ('check_read_access: %s %s does not match %s' %
- (rdef, session.user.groups, rdef.get_groups('read')))
+ (rdef, cnx.user.groups, rdef.get_groups('read')))
# XXX rqlexpr not allowed
raise Unauthorized('read', rel.r_type)
if DBG:
print ('check_read_access: %s %s matches %s' %
- (rdef, session.user.groups, rdef.get_groups('read')))
+ (rdef, cnx.user.groups, rdef.get_groups('read')))
localchecks = {}
# iterate on defined_vars and not on solutions to ignore column aliases
for varname in rqlst.defined_vars:
eschema = schema.eschema(solution[varname])
if eschema.final:
continue
- if not session.user.matching_groups(eschema.get_groups('read')):
+ if not cnx.user.matching_groups(eschema.get_groups('read')):
erqlexprs = eschema.get_rqlexprs('read')
if not erqlexprs:
ex = Unauthorized('read', solution[varname])
ex.var = varname
if DBG:
print ('check_read_access: %s %s %s %s' %
- (varname, eschema, session.user.groups, eschema.get_groups('read')))
+ (varname, eschema, cnx.user.groups, eschema.get_groups('read')))
raise ex
# don't insert security on variable only referenced by 'NOT X relation Y' or
# 'NOT EXISTS(X relation Y)'
@@ -144,23 +144,21 @@
class ExecutionPlan(object):
"""the execution model of a rql query, composed of querier steps"""
- def __init__(self, querier, rqlst, args, session):
+ def __init__(self, querier, rqlst, args, cnx):
# original rql syntax tree
self.rqlst = rqlst
self.args = args or {}
- # session executing the query
- self.session = session
+ # cnx executing the query
+ self.cnx = cnx
# quick reference to the system source
- self.syssource = session.cnxset.source('system')
+ self.syssource = cnx.repo.system_source
# execution steps
self.steps = []
- # index of temporary tables created during execution
- self.temp_tables = {}
# various resource accesors
self.querier = querier
self.schema = querier.schema
self.sqlannotate = querier.sqlgen_annotate
- self.rqlhelper = session.vreg.rqlhelper
+ self.rqlhelper = cnx.vreg.rqlhelper
def annotate_rqlst(self):
if not self.rqlst.annotated:
@@ -170,49 +168,15 @@
"""add a step to the plan"""
self.steps.append(step)
- def clean(self):
- """remove temporary tables"""
- self.syssource.clean_temp_data(self.session, self.temp_tables)
-
def sqlexec(self, sql, args=None):
- return self.syssource.sqlexec(self.session, sql, args)
+ return self.syssource.sqlexec(self.cnx, sql, args)
def execute(self):
"""execute a plan and return resulting rows"""
- try:
- for step in self.steps:
- result = step.execute()
- # the latest executed step contains the full query result
- return result
- finally:
- self.clean()
-
- def make_temp_table_name(self, table):
- """
- return a temp table name according to db backend
- """
- return self.syssource.make_temp_table_name(table)
-
-
- def init_temp_table(self, table, selected, sol):
- """initialize sql schema and variable map for a temporary table which
- will be used to store result for the given rqlst
- """
- try:
- outputmap, sqlschema, _ = self.temp_tables[table]
- update_varmap(outputmap, selected, table)
- except KeyError:
- sqlschema, outputmap = self.syssource.temp_table_def(selected, sol,
- table)
- self.temp_tables[table] = [outputmap, sqlschema, False]
- return outputmap
-
- def create_temp_table(self, table):
- """create a temporary table to store result for the given rqlst"""
- if not self.temp_tables[table][-1]:
- sqlschema = self.temp_tables[table][1]
- self.syssource.create_temp_table(self.session, table, sqlschema)
- self.temp_tables[table][-1] = True
+ for step in self.steps:
+ result = step.execute()
+ # the latest executed step contains the full query result
+ return result
def preprocess(self, union, security=True):
"""insert security when necessary then annotate rql st for sql generation
@@ -220,15 +184,15 @@
return rqlst to actually execute
"""
cached = None
- if security and self.session.read_security:
+ if security and self.cnx.read_security:
# ensure security is turned off when security is inserted,
# else we may loop forever...
- if self.session.transaction_data.get('security-rqlst-cache'):
+ if self.cnx.transaction_data.get('security-rqlst-cache'):
key = self.cache_key
else:
key = None
- if key is not None and key in self.session.transaction_data:
- cachedunion, args = self.session.transaction_data[key]
+ if key is not None and key in self.cnx.transaction_data:
+ cachedunion, args = self.cnx.transaction_data[key]
union.children[:] = []
for select in cachedunion.children:
union.append(select)
@@ -237,10 +201,10 @@
self.args = args
cached = True
else:
- with self.session.security_enabled(read=False):
+ with self.cnx.security_enabled(read=False):
noinvariant = self._insert_security(union)
if key is not None:
- self.session.transaction_data[key] = (union, self.args)
+ self.cnx.transaction_data[key] = (union, self.args)
else:
noinvariant = ()
if cached is None:
@@ -257,7 +221,7 @@
self._insert_security(subquery.query)
localchecks, restricted = self._check_permissions(select)
if any(localchecks):
- self.session.rql_rewriter.insert_local_checks(
+ self.cnx.rql_rewriter.insert_local_checks(
select, self.args, localchecks, restricted, noinvariant)
return noinvariant
@@ -279,12 +243,12 @@
Note rqlst should not have been simplified at this point.
"""
- session = self.session
+ cnx = self.cnx
msgs = []
# dict(varname: eid), allowing to check rql expressions for variables
# which have a known eid
varkwargs = {}
- if not session.transaction_data.get('security-rqlst-cache'):
+ if not cnx.transaction_data.get('security-rqlst-cache'):
for var in rqlst.defined_vars.itervalues():
if var.stinfo['constnode'] is not None:
eid = var.stinfo['constnode'].eval(self.args)
@@ -295,10 +259,10 @@
newsolutions = []
for solution in rqlst.solutions:
try:
- localcheck = check_read_access(session, rqlst, solution, self.args)
+ localcheck = check_read_access(cnx, rqlst, solution, self.args)
except Unauthorized as ex:
msg = 'remove %s from solutions since %s has no %s access to %s'
- msg %= (solution, session.user.login, ex.args[0], ex.args[1])
+ msg %= (solution, cnx.user.login, ex.args[0], ex.args[1])
msgs.append(msg)
LOGGER.info(msg)
else:
@@ -313,10 +277,10 @@
# if entity has been added in the current transaction, the
# user can read it whatever rql expressions are associated
# to its type
- if session.added_in_transaction(eid):
+ if cnx.added_in_transaction(eid):
continue
for rqlexpr in rqlexprs:
- if rqlexpr.check(session, eid):
+ if rqlexpr.check(cnx, eid):
break
else:
raise Unauthorized('No read access on %r with eid %i.' % (var, eid))
@@ -352,8 +316,8 @@
"""an execution model specific to the INSERT rql query
"""
- def __init__(self, querier, rqlst, args, session):
- ExecutionPlan.__init__(self, querier, rqlst, args, session)
+ def __init__(self, querier, rqlst, args, cnx):
+ ExecutionPlan.__init__(self, querier, rqlst, args, cnx)
# save originally selected variables, we may modify this
# dictionary for substitution (query parameters)
self.selected = rqlst.selection
@@ -451,17 +415,17 @@
if there are two entities matching U, the result set will look like
[(eidX1, eidY1), (eidX2, eidY2)]
"""
- session = self.session
- repo = session.repo
+ cnx = self.cnx
+ repo = cnx.repo
results = []
for row in self.e_defs:
- results.append([repo.glob_add_entity(session, edef)
+ results.append([repo.glob_add_entity(cnx, edef)
for edef in row])
return results
def insert_relation_defs(self):
- session = self.session
- repo = session.repo
+ cnx = self.cnx
+ repo = cnx.repo
edited_entities = {}
relations = {}
for subj, rtype, obj in self.relation_defs():
@@ -476,7 +440,7 @@
obj = obj.entity.eid
if repo.schema.rschema(rtype).inlined:
if subj not in edited_entities:
- entity = session.entity_from_eid(subj)
+ entity = cnx.entity_from_eid(subj)
edited = EditedEntity(entity)
edited_entities[subj] = edited
else:
@@ -487,9 +451,9 @@
relations[rtype].append((subj, obj))
else:
relations[rtype] = [(subj, obj)]
- repo.glob_add_relations(session, relations)
+ repo.glob_add_relations(cnx, relations)
for edited in edited_entities.itervalues():
- repo.glob_update_entity(session, edited)
+ repo.glob_update_entity(cnx, edited)
class QuerierHelper(object):
@@ -516,27 +480,14 @@
self.solutions = repo.vreg.solutions
rqlhelper = repo.vreg.rqlhelper
# set backend on the rql helper, will be used for function checking
- rqlhelper.backend = repo.config.sources()['system']['db-driver']
+ rqlhelper.backend = repo.config.system_source_config['db-driver']
self._parse = rqlhelper.parse
self._annotate = rqlhelper.annotate
# rql planner
- if len(repo.sources) < 2:
- from cubicweb.server.ssplanner import SSPlanner
- self._planner = SSPlanner(schema, rqlhelper)
- else:
- from cubicweb.server.msplanner import MSPlanner
- self._planner = MSPlanner(schema, rqlhelper)
+ self._planner = SSPlanner(schema, rqlhelper)
# sql generation annotator
self.sqlgen_annotate = SQLGenAnnotator(schema).annotate
- def set_planner(self):
- if len(self._repo.sources) < 2:
- from cubicweb.server.ssplanner import SSPlanner
- self._planner = SSPlanner(self.schema, self._repo.vreg.rqlhelper)
- else:
- from cubicweb.server.msplanner import MSPlanner
- self._planner = MSPlanner(self.schema, self._repo.vreg.rqlhelper)
-
def parse(self, rql, annotate=False):
"""return a rql syntax tree for the given rql"""
try:
@@ -544,13 +495,13 @@
except UnicodeError:
raise RQLSyntaxError(rql)
- def plan_factory(self, rqlst, args, session):
+ def plan_factory(self, rqlst, args, cnx):
"""create an execution plan for an INSERT RQL query"""
if rqlst.TYPE == 'insert':
- return InsertPlan(self, rqlst, args, session)
- return ExecutionPlan(self, rqlst, args, session)
+ return InsertPlan(self, rqlst, args, cnx)
+ return ExecutionPlan(self, rqlst, args, cnx)
- def execute(self, session, rql, args=None, build_descr=True):
+ def execute(self, cnx, rql, args=None, build_descr=True):
"""execute a rql query, return resulting rows and their description in
a `ResultSet` object
@@ -584,7 +535,7 @@
# if there are some, we need a better cache key, e.g. (rql +
# entity type of each eid)
try:
- cachekey = self._repo.querier_cache_key(session, rql,
+ cachekey = self._repo.querier_cache_key(cnx, rql,
args, eidkeys)
except UnknownEid:
# we want queries such as "Any X WHERE X eid 9999"
@@ -600,7 +551,7 @@
# which are eids. Notice that even if you don't need `eidkeys`, we
# have to compute solutions anyway (kept as annotation on the
# tree)
- eidkeys = self.solutions(session, rqlst, args)
+ eidkeys = self.solutions(cnx, rqlst, args)
except UnknownEid:
# we want queries such as "Any X WHERE X eid 9999" return an
# empty result instead of raising UnknownEid
@@ -608,19 +559,19 @@
if args and rql not in self._rql_ck_cache:
self._rql_ck_cache[rql] = eidkeys
if eidkeys:
- cachekey = self._repo.querier_cache_key(session, rql, args,
+ cachekey = self._repo.querier_cache_key(cnx, rql, args,
eidkeys)
self._rql_cache[cachekey] = rqlst
orig_rqlst = rqlst
if rqlst.TYPE != 'select':
- if session.read_security:
+ if cnx.read_security:
check_no_password_selected(rqlst)
- # write query, ensure session's mode is 'write' so connections won't
- # be released until commit/rollback
- session.mode = 'write'
+ # write query, ensure connection's mode is 'write' so connections
+ # won't be released until commit/rollback
+ cnx.mode = 'write'
cachekey = None
else:
- if session.read_security:
+ if cnx.read_security:
for select in rqlst.children:
check_no_password_selected(select)
# on select query, always copy the cached rqlst so we don't have to
@@ -634,7 +585,7 @@
cachekey += tuple(sorted([k for k, v in args.iteritems()
if v is None]))
# make an execution plan
- plan = self.plan_factory(rqlst, args, session)
+ plan = self.plan_factory(rqlst, args, cnx)
plan.cache_key = cachekey
self._planner.build_plan(plan)
# execute the plan
@@ -646,11 +597,11 @@
#
# notes:
# * we should not reset the connections set here, since we don't want the
- # session to loose it during processing
+ # connection to lose it during processing
# * don't rollback if we're in the commit process, will be handled
- # by the session
- if session.commit_state is None:
- session.commit_state = 'uncommitable'
+ # by the connection
+ if cnx.commit_state is None:
+ cnx.commit_state = 'uncommitable'
raise
# build a description for the results if necessary
descr = ()
@@ -665,14 +616,14 @@
descr = RepeatList(len(results), tuple(description))
else:
# hard, delegate the work :o)
- descr = manual_build_descr(session, rqlst, args, results)
+ descr = manual_build_descr(cnx, rqlst, args, results)
elif rqlst.TYPE == 'insert':
# on insert plan, some entities may have been auto-casted,
# so compute description manually even if there is only
# one solution
basedescr = [None] * len(plan.selected)
todetermine = zip(xrange(len(plan.selected)), repeat(False))
- descr = _build_descr(session, results, basedescr, todetermine)
+ descr = _build_descr(cnx, results, basedescr, todetermine)
# FIXME: get number of affected entities / relations on non
# selection queries ?
# return a result set object
@@ -688,7 +639,7 @@
set_log_methods(QuerierHelper, LOGGER)
-def manual_build_descr(tx, rqlst, args, result):
+def manual_build_descr(cnx, rqlst, args, result):
"""build a description for a given result by analysing each row
XXX could probably be done more efficiently during execution of query
@@ -712,11 +663,11 @@
basedescr.append(ttype)
if not todetermine:
return RepeatList(len(result), tuple(basedescr))
- return _build_descr(tx, result, basedescr, todetermine)
+ return _build_descr(cnx, result, basedescr, todetermine)
-def _build_descr(tx, result, basedescription, todetermine):
+def _build_descr(cnx, result, basedescription, todetermine):
description = []
- etype_from_eid = tx.describe
+ entity_metas = cnx.entity_metas
todel = []
for i, row in enumerate(result):
row_descr = basedescription[:]
@@ -730,9 +681,9 @@
row_descr[index] = etype_from_pyobj(value)
else:
try:
- row_descr[index] = etype_from_eid(value)[0]
+ row_descr[index] = entity_metas(value)['type']
except UnknownEid:
- tx.error('wrong eid %s in repository, you should '
+ cnx.error('wrong eid %s in repository, you should '
'db-check the database' % value)
todel.append(i)
break
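
(For readers tracking the API change: `describe` returned a positional tuple,
while `entity_metas` returns a mapping. A minimal before/after sketch, using
the names from this patch:)

    # 3.18 and earlier: positional access on the describe() tuple
    etype = tx.describe(eid)[0]
    # 3.19: keyed access on the entity_metas() dictionary
    etype = cnx.entity_metas(eid)['type']
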
--- a/server/repository.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/repository.py Mon Feb 17 15:32:50 2014 +0100
@@ -33,27 +33,24 @@
import Queue
from warnings import warn
from itertools import chain
-from os.path import join
-from datetime import datetime
from time import time, localtime, strftime
+from contextlib import contextmanager
from warnings import warn
from logilab.common.decorators import cached, clear_cache
-from logilab.common.compat import any
-from logilab.common import flatten
+from logilab.common.deprecation import deprecated
from yams import BadSchemaDefinition
from yams.schema import role_name
from rql import RQLSyntaxError
from rql.utils import rqlvar_maker
-from cubicweb import (CW_SOFTWARE_ROOT, CW_MIGRATION_MAP, QueryError,
+from cubicweb import (CW_MIGRATION_MAP, QueryError,
UnknownEid, AuthenticationError, ExecutionError,
- ETypeNotSupportedBySources, MultiSourcesError,
BadConnectionId, Unauthorized, ValidationError,
- RepositoryError, UniqueTogetherError, onevent)
+ UniqueTogetherError, onevent)
from cubicweb import cwvreg, schema, server
-from cubicweb.server import ShuttingDown, utils, hook, pool, querier, sources
+from cubicweb.server import ShuttingDown, utils, hook, querier, sources
from cubicweb.server.session import Session, InternalSession, InternalManager
from cubicweb.server.ssplanner import EditedEntity
@@ -187,14 +184,13 @@
self.shutting_down = False
# sources (additional sources info in the system database)
self.system_source = self.get_source('native', 'system',
- config.sources()['system'].copy())
- self.sources = [self.system_source]
+ config.system_source_config.copy())
self.sources_by_uri = {'system': self.system_source}
# querier helper, need to be created after sources initialization
self.querier = querier.QuerierHelper(self, self.schema)
- # cache eid -> (type, physical source, extid, actual source)
+ # cache eid -> (type, extid, actual source)
self._type_source_cache = {}
- # cache (extid, source uri) -> eid
+ # cache extid -> eid
self._extid_cache = {}
# open some connection sets
if config.init_cnxset_pool:
@@ -218,7 +214,7 @@
self._cnxsets_pool = Queue.Queue()
# 0. init a cnxset that will be used to fetch bootstrap information from
# the database
- self._cnxsets_pool.put_nowait(pool.ConnectionsSet(self.sources))
+ self._cnxsets_pool.put_nowait(self.system_source.wrapped_connection())
# 1. set used cubes
if config.creating or not config.read_instance_schema:
config.bootstrap_cubes()
@@ -249,8 +245,7 @@
if config.creating:
# call init_creating so that for instance native source can
# configurate tsearch according to postgres version
- for source in self.sources:
- source.init_creating()
+ self.system_source.init_creating()
else:
self.init_sources_from_database()
if 'CWProperty' in self.schema:
@@ -260,7 +255,7 @@
self._get_cnxset().close(True)
self.cnxsets = [] # list of available cnxsets (can't iterate on a Queue)
for i in xrange(config['connections-pool-size']):
- self.cnxsets.append(pool.ConnectionsSet(self.sources))
+ self.cnxsets.append(self.system_source.wrapped_connection())
self._cnxsets_pool.put_nowait(self.cnxsets[-1])
# internals ###############################################################
@@ -271,9 +266,9 @@
or not 'CWSource' in self.schema: # # 3.10 migration
self.system_source.init_creating()
return
- with self.internal_session() as session:
+ with self.internal_cnx() as cnx:
# FIXME: sources should be ordered (add_entity priority)
- for sourceent in session.execute(
+ for sourceent in cnx.execute(
'Any S, SN, SA, SC WHERE S is_instance_of CWSource, '
'S name SN, S type SA, S config SC').entities():
if sourceent.name == 'system':
@@ -281,14 +276,12 @@
self.sources_by_eid[sourceent.eid] = self.system_source
self.system_source.init(True, sourceent)
continue
- self.add_source(sourceent, add_to_cnxsets=False)
+ self.add_source(sourceent)
def _clear_planning_caches(self):
- for cache in ('source_defs', 'is_multi_sources_relation',
- 'can_cross_relation', 'rel_type_sources'):
- clear_cache(self, cache)
+ clear_cache(self, 'source_defs')
- def add_source(self, sourceent, add_to_cnxsets=True):
+ def add_source(self, sourceent):
source = self.get_source(sourceent.type, sourceent.name,
sourceent.host_config, sourceent.eid)
self.sources_by_eid[sourceent.eid] = source
@@ -299,14 +292,6 @@
# internal session, which is not possible until connections sets have been
# initialized)
source.init(True, sourceent)
- if not source.copy_based_source:
- warn('[3.18] old multi-source system will go away in the next version',
- DeprecationWarning)
- self.sources.append(source)
- self.querier.set_planner()
- if add_to_cnxsets:
- for cnxset in self.cnxsets:
- cnxset.add_source(source)
else:
source.init(False, sourceent)
self._clear_planning_caches()
@@ -314,11 +299,6 @@
def remove_source(self, uri):
source = self.sources_by_uri.pop(uri)
del self.sources_by_eid[source.eid]
- if self.config.source_enabled(source) and not source.copy_based_source:
- self.sources.remove(source)
- self.querier.set_planner()
- for cnxset in self.cnxsets:
- cnxset.remove_source(source)
self._clear_planning_caches()
def get_source(self, type, uri, source_config, eid=None):
@@ -336,8 +316,6 @@
else:
self.vreg._set_schema(schema)
self.querier.set_schema(schema)
- # don't use self.sources, we may want to give schema even to disabled
- # sources
for source in self.sources_by_uri.itervalues():
source.set_schema(schema)
self.schema = schema
@@ -347,9 +325,9 @@
from cubicweb.server.schemaserial import deserialize_schema
appschema = schema.CubicWebSchema(self.config.appid)
self.debug('deserializing db schema into %s %#x', appschema.name, id(appschema))
- with self.internal_session() as session:
+ with self.internal_cnx() as cnx:
try:
- deserialize_schema(appschema, session)
+ deserialize_schema(appschema, cnx)
except BadSchemaDefinition:
raise
except Exception as ex:
@@ -470,7 +448,7 @@
except ZeroDivisionError:
pass
- def check_auth_info(self, session, login, authinfo):
+ def check_auth_info(self, cnx, login, authinfo):
"""validate authentication, raise AuthenticationError on failure, return
associated CWUser's eid on success.
"""
@@ -479,70 +457,55 @@
for source in self.sources_by_uri.itervalues():
if self.config.source_enabled(source) and source.support_entity('CWUser'):
try:
- return source.authenticate(session, login, **authinfo)
+ with cnx.ensure_cnx_set:
+ return source.authenticate(cnx, login, **authinfo)
except AuthenticationError:
continue
else:
raise AuthenticationError('authentication failed with all sources')
- def authenticate_user(self, session, login, **authinfo):
+ def authenticate_user(self, cnx, login, **authinfo):
"""validate login / password, raise AuthenticationError on failure
return associated CWUser instance on success
"""
- eid = self.check_auth_info(session, login, authinfo)
- cwuser = self._build_user(session, eid)
+ eid = self.check_auth_info(cnx, login, authinfo)
+ cwuser = self._build_user(cnx, eid)
if self.config.consider_user_state and \
not cwuser.cw_adapt_to('IWorkflowable').state in cwuser.AUTHENTICABLE_STATES:
raise AuthenticationError('user is not in authenticable state')
return cwuser
- def _build_user(self, session, eid):
+ def _build_user(self, cnx, eid):
"""return a CWUser entity for user with the given eid"""
- cls = self.vreg['etypes'].etype_class('CWUser')
- st = cls.fetch_rqlst(session.user, ordermethod=None)
- st.add_eid_restriction(st.get_variable('X'), 'x', 'Substitute')
- rset = session.execute(st.as_string(), {'x': eid})
- assert len(rset) == 1, rset
- cwuser = rset.get_entity(0, 0)
- # pylint: disable=W0104
- # prefetch / cache cwuser's groups and properties. This is especially
- # useful for internal sessions to avoid security insertions
- cwuser.groups
- cwuser.properties
- return cwuser
+ with cnx.ensure_cnx_set:
+ cls = self.vreg['etypes'].etype_class('CWUser')
+ st = cls.fetch_rqlst(cnx.user, ordermethod=None)
+ st.add_eid_restriction(st.get_variable('X'), 'x', 'Substitute')
+ rset = cnx.execute(st.as_string(), {'x': eid})
+ assert len(rset) == 1, rset
+ cwuser = rset.get_entity(0, 0)
+ # pylint: disable=W0104
+ # prefetch / cache cwuser's groups and properties. This is especially
+ # useful for internal sessions to avoid security insertions
+ cwuser.groups
+ cwuser.properties
+ return cwuser
# public (dbapi) interface ################################################
+ @deprecated("[3.19] use _cw.call_service('repo_stats'")
def stats(self): # XXX restrict to managers session?
"""Return a dictionary containing some statistics about the repository
resources usage.
This is a public method, not requiring a session id.
+
+ This method is deprecated in favor of using _cw.call_service('repo_stats')
"""
- results = {}
- querier = self.querier
- source = self.system_source
- for size, maxsize, hits, misses, title in (
- (len(querier._rql_cache), self.config['rql-cache-size'],
- querier.cache_hit, querier.cache_miss, 'rqlt_st'),
- (len(source._cache), self.config['rql-cache-size'],
- source.cache_hit, source.cache_miss, 'sql'),
- ):
- results['%s_cache_size' % title] = '%s / %s' % (size, maxsize)
- results['%s_cache_hit' % title] = hits
- results['%s_cache_miss' % title] = misses
- results['%s_cache_hit_percent' % title] = (hits * 100) / (hits + misses)
- results['type_source_cache_size'] = len(self._type_source_cache)
- results['extid_cache_size'] = len(self._extid_cache)
- results['sql_no_cache'] = self.system_source.no_cache
- results['nb_open_sessions'] = len(self._sessions)
- results['nb_active_threads'] = threading.activeCount()
- looping_tasks = self._tasks_manager._looping_tasks
- results['looping_tasks'] = ', '.join(str(t) for t in looping_tasks)
- results['available_cnxsets'] = self._cnxsets_pool.qsize()
- results['threads'] = ', '.join(sorted(str(t) for t in threading.enumerate()))
- return results
+ with self.internal_cnx() as cnx:
+ return cnx.call_service('repo_stats')
+ @deprecated("[3.19] use _cw.call_service('repo_gc_stats'")
def gc_stats(self, nmax=20):
"""Return a dictionary containing some statistics about the repository
memory usage.
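
(In the hunk above, database access on the authentication path is now wrapped
in `cnx.ensure_cnx_set`. A hedged sketch of that pattern, assuming `cnx` is a
Connection and `source` a configured source object:)

    # ensure_cnx_set is used as a context manager: a connections set is
    # assumed to be bound to cnx for the duration of the block
    with cnx.ensure_cnx_set:
        eid = source.authenticate(cnx, login, password=password)
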
@@ -552,33 +515,8 @@
nmax is the max number of (most) referenced object returned as
the 'referenced' result
"""
-
- from cubicweb._gcdebug import gc_info
- from cubicweb.appobject import AppObject
- from cubicweb.rset import ResultSet
- from cubicweb.dbapi import Connection, Cursor
- from cubicweb.web.request import CubicWebRequestBase
- from rql.stmts import Union
-
- lookupclasses = (AppObject,
- Union, ResultSet,
- Connection, Cursor,
- CubicWebRequestBase)
- try:
- from cubicweb.server.session import Session, InternalSession
- lookupclasses += (InternalSession, Session)
- except ImportError:
- pass # no server part installed
-
- results = {}
- counters, ocounters, garbage = gc_info(lookupclasses,
- viewreferrersclasses=())
- values = sorted(counters.iteritems(), key=lambda x: x[1], reverse=True)
- results['lookupclasses'] = values
- values = sorted(ocounters.iteritems(), key=lambda x: x[1], reverse=True)[:nmax]
- results['referenced'] = values
- results['unreachable'] = len(garbage)
- return results
+ with self.internal_cnx() as cnx:
+ return cnx.call_service('repo_gc_stats', nmax=nmax)
def get_schema(self):
"""Return the instance schema.
@@ -601,31 +539,17 @@
return cubes
def get_option_value(self, option, foreid=None):
- """Return the value for `option` in the configuration. If `foreid` is
- specified, the actual repository to which this entity belongs is
- derefenced and the option value retrieved from it.
+ """Return the value for `option` in the configuration.
This is a public method, not requiring a session id.
+
+ The `foreid` argument is deprecated and ignored (as of 3.19).
"""
+ if foreid is not None:
+ warn('[3.19] foreid argument is deprecated', DeprecationWarning,
+ stacklevel=2)
# XXX we may want to check we don't give sensible information
- # XXX the only cube using 'foreid', apycot, stop used this, we probably
- # want to drop this argument
- if foreid is None:
- return self.config[option]
- _, sourceuri, extid, _ = self.type_and_source_from_eid(foreid)
- if sourceuri == 'system':
- return self.config[option]
- cnxset = self._get_cnxset()
- try:
- cnx = cnxset.connection(sourceuri)
- # needed to check connection is valid and usable by the current
- # thread
- newcnx = self.sources_by_uri[sourceuri].check_connection(cnx)
- if newcnx is not None:
- cnx = newcnx
- return cnx.get_option_value(option, extid)
- finally:
- self._free_cnxset(cnxset)
+ return self.config[option]
@cached
def get_versions(self, checkversions=False):
@@ -636,8 +560,8 @@
"""
from logilab.common.changelog import Version
vcconf = {}
- with self.internal_session() as session:
- for pk, version in session.execute(
+ with self.internal_cnx() as cnx:
+ for pk, version in cnx.execute(
'Any K,V WHERE P is CWProperty, P value V, P pkey K, '
'P pkey ~="system.version.%"', build_descr=False):
cube = pk.split('.')[-1]
@@ -675,11 +599,12 @@
This is a public method, not requiring a session id.
"""
- with self.internal_session() as session:
- # don't use session.execute, we don't want rset.req set
- return self.querier.execute(session, 'Any K,V WHERE P is CWProperty,'
- 'P pkey K, P value V, NOT P for_user U',
- build_descr=False)
+ with self.internal_cnx() as cnx:
+ with cnx.ensure_cnx_set:
+ # don't use cnx.execute, we don't want rset.req set
+ return self.querier.execute(cnx, 'Any K,V WHERE P is CWProperty,'
+ 'P pkey K, P value V, NOT P for_user U',
+ build_descr=False)
# XXX protect this method: anonymous should be allowed and registration
# plugged
@@ -757,9 +682,9 @@
"""
cnxprops = kwargs.pop('cnxprops', None)
# use an internal connection
- with self.internal_session() as session:
+ with self.internal_cnx() as cnx:
# try to get a user object
- user = self.authenticate_user(session, login, **kwargs)
+ user = self.authenticate_user(cnx, login, **kwargs)
session = Session(user, self, cnxprops)
if threading.currentThread() in self._pyro_sessions:
# assume no pyro client does one get_repository followed by
@@ -769,13 +694,13 @@
self._pyro_sessions[threading.currentThread()] = session
user._cw = user.cw_rset.req = session
user.cw_clear_relation_cache()
- self._sessions[session.id] = session
- self.info('opened session %s for user %s', session.id, login)
+ self._sessions[session.sessionid] = session
+ self.info('opened session %s for user %s', session.sessionid, login)
self.hm.call_hooks('session_open', session)
# commit session at this point in case write operation has been done
# during `session_open` hooks
session.commit()
- return session.id
+ return session.sessionid
def execute(self, sessionid, rqlstring, args=None, build_descr=True,
txid=None):
@@ -805,13 +730,35 @@
finally:
session.free_cnxset()
+ @deprecated('[3.19] use .entity_metas(sessionid, eid, txid) instead')
def describe(self, sessionid, eid, txid=None):
"""return a tuple `(type, physical source uri, extid, actual source
uri)` for the entity of the given `eid`
+
+ As of 3.19, physical source uri is always the system source.
"""
session = self._get_session(sessionid, setcnxset=True, txid=txid)
try:
- return self.type_and_source_from_eid(eid, session)
+ etype, extid, source = self.type_and_source_from_eid(eid, session)
+ return etype, source, extid, source
+ finally:
+ session.free_cnxset()
+
+ def entity_metas(self, sessionid, eid, txid=None):
+ """return a dictionary containing meta-datas for the entity of the given
+ `eid`. Available keys are:
+
+ * 'type', the entity's type name,
+
+ * 'source', the name of the source this entity comes from,
+
+ * 'extid', the identifier for this entity in its originating source, as
+ an encoded string or `None` for entities from the 'system' source.
+ """
+ session = self._get_session(sessionid, setcnxset=True, txid=txid)
+ try:
+ etype, extid, source = self.type_and_source_from_eid(eid, session)
+ return {'type': etype, 'source': source, 'extid': extid}
finally:
session.free_cnxset()
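
(A usage sketch for the new `entity_metas` repository method, assuming
`sessionid` was obtained from `repo.connect` and `eid` is a valid entity eid:)

    metas = repo.entity_metas(sessionid, eid)
    # expected keys, per the docstring above
    assert set(metas) == set(['type', 'source', 'extid'])
    print metas['type']    # e.g. 'CWUser'
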
@@ -848,7 +795,7 @@
self.debug('begin commit for session %s', sessionid)
try:
session = self._get_session(sessionid)
- session.set_tx(txid)
+ session.set_cnx(txid)
return session.commit()
except (ValidationError, Unauthorized):
raise
@@ -861,7 +808,7 @@
self.debug('begin rollback for session %s', sessionid)
try:
session = self._get_session(sessionid)
- session.set_tx(txid)
+ session.set_cnx(txid)
session.rollback()
except Exception:
self.exception('unexpected error')
@@ -883,33 +830,14 @@
del self._sessions[sessionid]
self.info('closed session %s for user %s', sessionid, session.user.login)
- def call_service(self, sessionid, regid, async, **kwargs):
+ def call_service(self, sessionid, regid, **kwargs):
"""
See :class:`cubicweb.dbapi.Connection.call_service`
and :class:`cubicweb.server.Service`
"""
+ # XXX lack a txid
session = self._get_session(sessionid)
- return self._call_service_with_session(session, regid, async, **kwargs)
-
- def _call_service_with_session(self, session, regid, async, **kwargs):
- if async:
- self.info('calling service %s asynchronously', regid)
- def task():
- session.set_cnxset()
- try:
- service = session.vreg['services'].select(regid, session, **kwargs)
- return service.call(**kwargs)
- finally:
- session.rollback() # free cnxset
- self.threaded_task(task)
- else:
- self.info('calling service %s synchronously', regid)
- session.set_cnxset()
- try:
- service = session.vreg['services'].select(regid, session, **kwargs)
- return service.call(**kwargs)
- finally:
- session.free_cnxset()
+ return session._cnx.call_service(regid, **kwargs)
def user_info(self, sessionid, props=None):
"""this method should be used by client to:
@@ -954,25 +882,6 @@
finally:
session.free_cnxset()
- # public (inter-repository) interface #####################################
-
- def entities_modified_since(self, etypes, mtime):
- """function designed to be called from an external repository which
- is using this one as a rql source for synchronization, and return a
- 3-uple containing :
- * the local date
- * list of (etype, eid) of entities of the given types which have been
- modified since the given timestamp (actually entities whose full text
- index content has changed)
- * list of (etype, eid) of entities of the given types which have been
- deleted since the given timestamp
- """
- with self.internal_session() as session:
- updatetime = datetime.utcnow()
- modentities, delentities = self.system_source.modified_entities(
- session, etypes, mtime)
- return updatetime, modentities, delentities
-
# session handling ########################################################
def close_sessions(self):
@@ -993,15 +902,19 @@
nbclosed = 0
for session in self._sessions.values():
if session.timestamp < mintime:
- self.close(session.id)
+ self.close(session.sessionid)
nbclosed += 1
return nbclosed
+ @deprecated("[3.19] use internal_cnx now\n"
+ "(Beware that integrity hook are now enabled by default)")
def internal_session(self, cnxprops=None, safe=False):
"""return a dbapi like connection/cursor using internal user which have
every rights on the repository. The `safe` argument is a boolean flag
telling if integrity hooks should be activated or not.
+ /!\ The `safe` argument is False by default.
+
*YOU HAVE TO* commit/rollback or close (rollback implicitly) the
session once the job's done, else you'll leak connections set up to the
time where no one is available, causing irremediable freeze...
@@ -1010,6 +923,22 @@
session.set_cnxset()
return session
+ @contextmanager
+ def internal_cnx(self):
+ """return a Connection using internal user which have
+ every rights on the repository. The `safe` argument is dropped. all
+ hook are enabled by default.
+
+ /!\ IN OPPOSITE OF THE OLDER INTERNAL_SESSION,
+ /!\ INTERNAL CONNECTION HAVE ALL HOOKS ENABLED.
+
+ This is to be used a context manager.
+ """
+ with InternalSession(self) as session:
+ with session.new_cnx() as cnx:
+ yield cnx
+
+
def _get_session(self, sessionid, setcnxset=False, txid=None,
checkshuttingdown=True):
"""return the session associated with the given session identifier"""
@@ -1020,7 +949,7 @@
except KeyError:
raise BadConnectionId('No such session %s' % sessionid)
if setcnxset:
- session.set_tx(txid) # must be done before set_cnxset
+ session.set_cnx(txid) # must be done before set_cnxset
session.set_cnxset()
return session
@@ -1029,8 +958,8 @@
# * correspondance between eid and local id (i.e. specific to a given source)
def type_and_source_from_eid(self, eid, session=None):
- """return a tuple `(type, physical source uri, extid, actual source
- uri)` for the entity of the given `eid`
+ """return a tuple `(type, extid, actual source uri)` for the entity of
+ the given `eid`
"""
try:
eid = int(eid)
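
(Callers unpacking this tuple must be adjusted, since the physical source uri,
now always 'system', is gone. Sketch:)

    # 3.18 and earlier: 4-uple including the physical source uri
    etype, uri, extid, auri = repo.type_and_source_from_eid(eid)
    # 3.19: 3-uple, the physical source is implicitly 'system'
    etype, extid, auri = repo.type_and_source_from_eid(eid)
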
@@ -1045,15 +974,13 @@
else:
free_cnxset = False
try:
- etype, uri, extid, auri = self.system_source.eid_type_source(
+ etype, extid, auri = self.system_source.eid_type_source(
session, eid)
finally:
if free_cnxset:
session.free_cnxset()
- self._type_source_cache[eid] = (etype, uri, extid, auri)
- if uri != 'system':
- self._extid_cache[(extid, uri)] = eid
- return etype, uri, extid, auri
+ self._type_source_cache[eid] = (etype, extid, auri)
+ return etype, extid, auri
def clear_caches(self, eids):
etcache = self._type_source_cache
@@ -1061,23 +988,18 @@
rqlcache = self.querier._rql_cache
for eid in eids:
try:
- etype, uri, extid, auri = etcache.pop(int(eid)) # may be a string in some cases
+ etype, extid, auri = etcache.pop(int(eid)) # may be a string in some cases
rqlcache.pop( ('%s X WHERE X eid %s' % (etype, eid),), None)
- extidcache.pop((extid, uri), None)
+ extidcache.pop(extid, None)
except KeyError:
etype = None
rqlcache.pop( ('Any X WHERE X eid %s' % eid,), None)
- for source in self.sources:
- source.clear_eid_cache(eid, etype)
+ self.system_source.clear_eid_cache(eid, etype)
def type_from_eid(self, eid, session=None):
"""return the type of the entity with id <eid>"""
return self.type_and_source_from_eid(eid, session)[0]
- def source_from_eid(self, eid, session=None):
- """return the source for the given entity's eid"""
- return self.sources_by_uri[self.type_and_source_from_eid(eid, session)[1]]
-
def querier_cache_key(self, session, rql, args, eidkeys):
cachekey = [rql]
for key in sorted(eidkeys):
@@ -1093,14 +1015,6 @@
args[key] = int(args[key])
return tuple(cachekey)
- def eid2extid(self, source, eid, session=None):
- """get local id from an eid"""
- etype, uri, extid, _ = self.type_and_source_from_eid(eid, session)
- if source.uri != uri:
- # eid not from the given source
- raise UnknownEid(eid)
- return extid
-
def extid2eid(self, source, extid, etype, session=None, insert=True,
complete=True, commit=True, sourceparams=None):
"""Return eid from a local id. If the eid is a negative integer, that
@@ -1125,20 +1039,18 @@
6. unless source's :attr:`should_call_hooks` tell otherwise,
'before_add_entity' hooks are called
"""
- uri = 'system' if source.copy_based_source else source.uri
- cachekey = (extid, uri)
try:
- return self._extid_cache[cachekey]
+ return self._extid_cache[extid]
except KeyError:
pass
free_cnxset = False
if session is None:
session = self.internal_session()
free_cnxset = True
- eid = self.system_source.extid2eid(session, uri, extid)
+ eid = self.system_source.extid2eid(session, extid)
if eid is not None:
- self._extid_cache[cachekey] = eid
- self._type_source_cache[eid] = (etype, uri, extid, source.uri)
+ self._extid_cache[extid] = eid
+ self._type_source_cache[eid] = (etype, extid, source.uri)
if free_cnxset:
session.free_cnxset()
return eid
@@ -1155,8 +1067,8 @@
free_cnxset = True
try:
eid = self.system_source.create_eid(session)
- self._extid_cache[cachekey] = eid
- self._type_source_cache[eid] = (etype, uri, extid, source.uri)
+ self._extid_cache[extid] = eid
+ self._type_source_cache[eid] = (etype, extid, source.uri)
entity = source.before_entity_insertion(
session, extid, etype, eid, sourceparams)
if source.should_call_hooks:
@@ -1177,11 +1089,11 @@
else:
# XXX do some cleanup manually so that the transaction has a
# chance to be committed, with simply this entity discarded
- self._extid_cache.pop(cachekey, None)
+ self._extid_cache.pop(extid, None)
self._type_source_cache.pop(eid, None)
if 'entity' in locals():
hook.CleanupDeletedEidsCacheOp.get_instance(session).add_data(entity.eid)
- self.system_source.delete_info_multi(session, [entity], uri)
+ self.system_source.delete_info_multi(session, [entity])
if source.should_call_hooks:
session._tx.pending_operations = pending_operations
raise
@@ -1194,32 +1106,22 @@
hook.CleanupNewEidsCacheOp.get_instance(session).add_data(entity.eid)
self.system_source.add_info(session, entity, source, extid, complete)
- def delete_info(self, session, entity, sourceuri, scleanup=None):
+ def delete_info(self, session, entity, sourceuri):
"""called by external source when some entity known by the system source
has been deleted in the external source
"""
# mark eid as being deleted in session info and setup cache update
# operation
hook.CleanupDeletedEidsCacheOp.get_instance(session).add_data(entity.eid)
- self._delete_info(session, entity, sourceuri, scleanup)
+ self._delete_info(session, entity, sourceuri)
- def _delete_info(self, session, entity, sourceuri, scleanup=None):
+ def _delete_info(self, session, entity, sourceuri):
"""delete system information on deletion of an entity:
* delete all remaining relations from/to this entity
-
- * call delete info on the system source which will transfer record from
- the entities table to the deleted_entities table
-
- When scleanup is specified, it's expected to be the source's eid, in
- which case we'll specify the target's relation source so that this
- source is ignored. E.g. we want to delete relations stored locally, as
- the deletion information comes from the external source, it's its
- responsability to have cleaned-up its own relations.
+ * call delete info on the system source
"""
pendingrtypes = session.transaction_data.get('pendingrtypes', ())
- if scleanup is not None:
- source = self.sources_by_eid[scleanup]
# delete remaining relations: if user can delete the entity, he can
# delete all its relations without security checking
with session.security_enabled(read=False, write=False):
@@ -1234,34 +1136,20 @@
rql = 'DELETE X %s Y WHERE X eid %%(x)s' % rtype
else:
rql = 'DELETE Y %s X WHERE X eid %%(x)s' % rtype
- if scleanup is not None:
- # if the relation can't be crossed, nothing to cleanup (we
- # would get a BadRQLQuery from the multi-sources planner).
- # This may still leave some junk if the mapping has changed
- # at some point, but one can still run db-check to catch
- # those
- if not source in self.can_cross_relation(rtype):
- continue
- # source cleaning: only delete relations stored locally
- # (here, scleanup
- rql += ', NOT (Y cw_source S, S eid %(seid)s)'
try:
- session.execute(rql, {'x': eid, 'seid': scleanup},
- build_descr=False)
+ session.execute(rql, {'x': eid}, build_descr=False)
except Exception:
if self.config.mode == 'test':
raise
self.exception('error while cascading delete for entity %s '
'from %s. RQL: %s', entity, sourceuri, rql)
- self.system_source.delete_info_multi(session, [entity], sourceuri)
+ self.system_source.delete_info_multi(session, [entity])
- def _delete_info_multi(self, session, entities, sourceuri, scleanup=None):
+ def _delete_info_multi(self, session, entities):
"""same as _delete_info but accepts a list of entities with
the same etype and belonging to the same source.
"""
pendingrtypes = session.transaction_data.get('pendingrtypes', ())
- if scleanup is not None:
- source = self.sources_by_eid[scleanup]
# delete remaining relations: if user can delete the entity, he can
# delete all its relations without security checking
with session.security_enabled(read=False, write=False):
@@ -1276,74 +1164,33 @@
rql = 'DELETE X %s Y WHERE X eid IN (%s)' % (rtype, in_eids)
else:
rql = 'DELETE Y %s X WHERE X eid IN (%s)' % (rtype, in_eids)
- if scleanup is not None:
- # if the relation can't be crossed, nothing to cleanup (we
- # would get a BadRQLQuery from the multi-sources planner).
- # This may still leave some junk if the mapping has changed
- # at some point, but one can still run db-check to catch
- # those
- if not source in self.can_cross_relation(rtype):
- continue
- # source cleaning: only delete relations stored locally
- rql += ', NOT (Y cw_source S, S eid %(seid)s)'
try:
- session.execute(rql, {'seid': scleanup}, build_descr=False)
+ session.execute(rql, build_descr=False)
except ValidationError:
raise
except Unauthorized:
- self.exception('Unauthorized exception while cascading delete for entity %s '
- 'from %s. RQL: %s.\nThis should not happen since security is disabled here.',
- entities, sourceuri, rql)
+ self.exception('Unauthorized exception while cascading delete for entity %s. '
+ 'RQL: %s.\nThis should not happen since security is disabled here.',
+ entities, rql)
raise
except Exception:
if self.config.mode == 'test':
raise
- self.exception('error while cascading delete for entity %s '
- 'from %s. RQL: %s', entities, sourceuri, rql)
- self.system_source.delete_info_multi(session, entities, sourceuri)
-
- def locate_relation_source(self, session, subject, rtype, object):
- subjsource = self.source_from_eid(subject, session)
- objsource = self.source_from_eid(object, session)
- if not subjsource is objsource:
- source = self.system_source
- if not (subjsource.may_cross_relation(rtype)
- and objsource.may_cross_relation(rtype)):
- raise MultiSourcesError(
- "relation %s can't be crossed among sources"
- % rtype)
- elif not subjsource.support_relation(rtype):
- source = self.system_source
- else:
- source = subjsource
- if not source.support_relation(rtype, True):
- raise MultiSourcesError(
- "source %s doesn't support write of %s relation"
- % (source.uri, rtype))
- return source
-
- def locate_etype_source(self, etype):
- for source in self.sources:
- if source.support_entity(etype, 1):
- return source
- else:
- raise ETypeNotSupportedBySources(etype)
+ self.exception('error while cascading delete for entity %s. RQL: %s',
+ entities, rql)
+ self.system_source.delete_info_multi(session, entities)
def init_entity_caches(self, session, entity, source):
"""add entity to session entities cache and repo's extid cache.
Return entity's ext id if the source isn't the system source.
"""
session.set_entity_cache(entity)
- suri = source.uri
- if suri == 'system':
+ if source.uri == 'system':
extid = None
else:
- if source.copy_based_source:
- suri = 'system'
extid = source.get_extid(entity)
- self._extid_cache[(str(extid), suri)] = entity.eid
- self._type_source_cache[entity.eid] = (entity.cw_etype, suri, extid,
- source.uri)
+ self._extid_cache[str(extid)] = entity.eid
+ self._type_source_cache[entity.eid] = (entity.cw_etype, extid, source.uri)
return extid
def glob_add_entity(self, session, edited):
@@ -1356,7 +1203,7 @@
entity._cw_is_saved = False # entity has an eid but is not yet saved
# init edited_attributes before calling before_add_entity hooks
entity.cw_edited = edited
- source = self.locate_etype_source(entity.cw_etype)
+ source = self.system_source
# allocate an eid to the entity before calling hooks
entity.eid = self.system_source.create_eid(session)
# set caches asap
@@ -1364,8 +1211,7 @@
if server.DEBUG & server.DBG_REPO:
print 'ADD entity', self, entity.cw_etype, entity.eid, edited
prefill_entity_caches(entity)
- if source.should_call_hooks:
- self.hm.call_hooks('before_add_entity', session, entity=entity)
+ self.hm.call_hooks('before_add_entity', session, entity=entity)
relations = preprocess_inlined_relations(session, entity)
edited.set_defaults()
if session.is_hook_category_activated('integrity'):
@@ -1379,14 +1225,13 @@
self.add_info(session, entity, source, extid, complete=False)
edited.saved = entity._cw_is_saved = True
# trigger after_add_entity after after_add_relation
- if source.should_call_hooks:
- self.hm.call_hooks('after_add_entity', session, entity=entity)
- # call hooks for inlined relations
- for attr, value in relations:
- self.hm.call_hooks('before_add_relation', session,
- eidfrom=entity.eid, rtype=attr, eidto=value)
- self.hm.call_hooks('after_add_relation', session,
- eidfrom=entity.eid, rtype=attr, eidto=value)
+ self.hm.call_hooks('after_add_entity', session, entity=entity)
+ # call hooks for inlined relations
+ for attr, value in relations:
+ self.hm.call_hooks('before_add_relation', session,
+ eidfrom=entity.eid, rtype=attr, eidto=value)
+ self.hm.call_hooks('after_add_relation', session,
+ eidfrom=entity.eid, rtype=attr, eidto=value)
return entity.eid
def glob_update_entity(self, session, edited):
@@ -1402,10 +1247,10 @@
session.set_entity_cache(entity)
orig_edited = getattr(entity, 'cw_edited', None)
entity.cw_edited = edited
+ source = self.system_source
try:
only_inline_rels, need_fti_update = True, False
relations = []
- source = self.source_from_eid(entity.eid, session)
for attr in list(edited):
if attr == 'eid':
continue
@@ -1421,18 +1266,17 @@
previous_value = previous_value[0][0] # got a result set
if previous_value == entity.cw_attr_cache[attr]:
previous_value = None
- elif source.should_call_hooks:
+ else:
hm.call_hooks('before_delete_relation', session,
eidfrom=entity.eid, rtype=attr,
eidto=previous_value)
relations.append((attr, edited[attr], previous_value))
- if source.should_call_hooks:
- # call hooks for inlined relations
- for attr, value, _t in relations:
- hm.call_hooks('before_add_relation', session,
- eidfrom=entity.eid, rtype=attr, eidto=value)
- if not only_inline_rels:
- hm.call_hooks('before_update_entity', session, entity=entity)
+ # call hooks for inlined relations
+ for attr, value, _t in relations:
+ hm.call_hooks('before_add_relation', session,
+ eidfrom=entity.eid, rtype=attr, eidto=value)
+ if not only_inline_rels:
+ hm.call_hooks('before_update_entity', session, entity=entity)
if session.is_hook_category_activated('integrity'):
edited.check()
try:
@@ -1443,25 +1287,24 @@
'IUserFriendlyError', session, entity=entity, exc=exc)
userhdlr.raise_user_exception()
self.system_source.update_info(session, entity, need_fti_update)
- if source.should_call_hooks:
- if not only_inline_rels:
- hm.call_hooks('after_update_entity', session, entity=entity)
- for attr, value, prevvalue in relations:
- # if the relation is already cached, update existant cache
- relcache = entity.cw_relation_cached(attr, 'subject')
- if prevvalue is not None:
- hm.call_hooks('after_delete_relation', session,
- eidfrom=entity.eid, rtype=attr, eidto=prevvalue)
- if relcache is not None:
- session.update_rel_cache_del(entity.eid, attr, prevvalue)
- del_existing_rel_if_needed(session, entity.eid, attr, value)
+ if not only_inline_rels:
+ hm.call_hooks('after_update_entity', session, entity=entity)
+ for attr, value, prevvalue in relations:
+ # if the relation is already cached, update existant cache
+ relcache = entity.cw_relation_cached(attr, 'subject')
+ if prevvalue is not None:
+ hm.call_hooks('after_delete_relation', session,
+ eidfrom=entity.eid, rtype=attr, eidto=prevvalue)
if relcache is not None:
- session.update_rel_cache_add(entity.eid, attr, value)
- else:
- entity.cw_set_relation_cache(attr, 'subject',
- session.eid_rset(value))
- hm.call_hooks('after_add_relation', session,
- eidfrom=entity.eid, rtype=attr, eidto=value)
+ session.update_rel_cache_del(entity.eid, attr, prevvalue)
+ del_existing_rel_if_needed(session, entity.eid, attr, value)
+ if relcache is not None:
+ session.update_rel_cache_add(entity.eid, attr, value)
+ else:
+ entity.cw_set_relation_cache(attr, 'subject',
+ session.eid_rset(value))
+ hm.call_hooks('after_add_relation', session,
+ eidfrom=entity.eid, rtype=attr, eidto=value)
finally:
if orig_edited is not None:
entity.cw_edited = orig_edited
@@ -1479,37 +1322,28 @@
eids = frozenset(eids)
eids = eids - op._container
op._container |= eids
- data_by_etype_source = {} # values are ([list of eids],
- # [list of extid],
- # [list of entities])
+ data_by_etype = {} # values are [list of entities]
#
# WARNING: the way this dictionary is populated is heavily optimized
# and does not use setdefault on purpose. Unless a new release
# of the Python interpreter advertises large perf improvements
# in setdefault, this should not be changed without profiling.
-
for eid in eids:
- etype, sourceuri, extid, _ = self.type_and_source_from_eid(eid, session)
+ etype = self.type_from_eid(eid, session)
# XXX should cache entity's cw_metainformation
entity = session.entity_from_eid(eid, etype)
try:
- data_by_etype_source[(etype, sourceuri)].append(entity)
+ data_by_etype[etype].append(entity)
except KeyError:
- data_by_etype_source[(etype, sourceuri)] = [entity]
- for (etype, sourceuri), entities in data_by_etype_source.iteritems():
+ data_by_etype[etype] = [entity]
+ source = self.system_source
+ for etype, entities in data_by_etype.iteritems():
if server.DEBUG & server.DBG_REPO:
print 'DELETE entities', etype, [entity.eid for entity in entities]
- source = self.sources_by_uri[sourceuri]
- if source.should_call_hooks:
- self.hm.call_hooks('before_delete_entity', session, entities=entities)
- if session.deleted_in_transaction(source.eid):
- # source is being deleted, think to give scleanup argument
- self._delete_info_multi(session, entities, sourceuri, scleanup=source.eid)
- else:
- self._delete_info_multi(session, entities, sourceuri)
+ self.hm.call_hooks('before_delete_entity', session, entities=entities)
+ self._delete_info_multi(session, entities)
source.delete_entities(session, entities)
- if source.should_call_hooks:
- self.hm.call_hooks('after_delete_entity', session, entities=entities)
+ self.hm.call_hooks('after_delete_entity', session, entities=entities)
# don't clear cache here, it is done in a hook on commit
def glob_add_relation(self, session, subject, rtype, object):
@@ -1521,7 +1355,8 @@
relations is a dictionary rtype: [(subj_eid, obj_eid), ...]
"""
- sources = {}
+ source = self.system_source
+ relations_by_rtype = {}
subjects_by_types = {}
objects_by_types = {}
activintegrity = session.is_hook_category_activated('activeintegrity')
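
(The `relations` dictionary keeps the shape described in the docstring above;
only the per-source grouping disappears. An illustrative value, with
hypothetical relation names and eids:)

    # eids are assumed to reference existing entities
    relations = {'in_group': [(usereid, groupeid)],
                 'knows': [(usereid, othereid), (usereid, thirdeid)]}
    repo.glob_add_relations(session, relations)
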
@@ -1530,12 +1365,6 @@
for subjeid, objeid in eids_subj_obj:
print 'ADD relation', subjeid, rtype, objeid
for subjeid, objeid in eids_subj_obj:
- source = self.locate_relation_source(session, subjeid, rtype, objeid)
- if source not in sources:
- relations_by_rtype = {}
- sources[source] = relations_by_rtype
- else:
- relations_by_rtype = sources[source]
if rtype in relations_by_rtype:
relations_by_rtype[rtype].append((subjeid, objeid))
else:
@@ -1569,35 +1398,30 @@
objects[objeid] = len(relations_by_rtype[rtype])
continue
objects[objeid] = len(relations_by_rtype[rtype])
- for source, relations_by_rtype in sources.iteritems():
- if source.should_call_hooks:
- for rtype, source_relations in relations_by_rtype.iteritems():
- self.hm.call_hooks('before_add_relation', session,
- rtype=rtype, eids_from_to=source_relations)
- for rtype, source_relations in relations_by_rtype.iteritems():
- source.add_relations(session, rtype, source_relations)
- rschema = self.schema.rschema(rtype)
- for subjeid, objeid in source_relations:
- session.update_rel_cache_add(subjeid, rtype, objeid, rschema.symmetric)
- if source.should_call_hooks:
- for rtype, source_relations in relations_by_rtype.iteritems():
- self.hm.call_hooks('after_add_relation', session,
- rtype=rtype, eids_from_to=source_relations)
+ for rtype, source_relations in relations_by_rtype.iteritems():
+ self.hm.call_hooks('before_add_relation', session,
+ rtype=rtype, eids_from_to=source_relations)
+ for rtype, source_relations in relations_by_rtype.iteritems():
+ source.add_relations(session, rtype, source_relations)
+ rschema = self.schema.rschema(rtype)
+ for subjeid, objeid in source_relations:
+ session.update_rel_cache_add(subjeid, rtype, objeid, rschema.symmetric)
+ for rtype, source_relations in relations_by_rtype.iteritems():
+ self.hm.call_hooks('after_add_relation', session,
+ rtype=rtype, eids_from_to=source_relations)
def glob_delete_relation(self, session, subject, rtype, object):
"""delete a relation from the repository"""
if server.DEBUG & server.DBG_REPO:
print 'DELETE relation', subject, rtype, object
- source = self.locate_relation_source(session, subject, rtype, object)
- if source.should_call_hooks:
- self.hm.call_hooks('before_delete_relation', session,
- eidfrom=subject, rtype=rtype, eidto=object)
+ source = self.system_source
+ self.hm.call_hooks('before_delete_relation', session,
+ eidfrom=subject, rtype=rtype, eidto=object)
source.delete_relation(session, subject, rtype, object)
rschema = self.schema.rschema(rtype)
session.update_rel_cache_del(subject, rtype, object, rschema.symmetric)
- if source.should_call_hooks:
- self.hm.call_hooks('after_delete_relation', session,
- eidfrom=subject, rtype=rtype, eidto=object)
+ self.hm.call_hooks('after_delete_relation', session,
+ eidfrom=subject, rtype=rtype, eidto=object)
# pyro handling ###########################################################
@@ -1648,7 +1472,7 @@
# client was not yet connected to the repo
return
if not session.closed:
- self.close(session.id)
+ self.close(session.sessionid)
daemon.removeConnection = removeConnection
return daemon
@@ -1660,35 +1484,10 @@
self.info('repository re-registered as a pyro object %s',
self.pyro_appid)
- # multi-sources planner helpers ###########################################
-
- @cached
- def rel_type_sources(self, rtype):
- warn('[3.18] old multi-source system will go away in the next version',
- DeprecationWarning)
- return tuple([source for source in self.sources
- if source.support_relation(rtype)
- or rtype in source.dont_cross_relations])
-
- @cached
- def can_cross_relation(self, rtype):
- warn('[3.18] old multi-source system will go away in the next version',
- DeprecationWarning)
- return tuple([source for source in self.sources
- if source.support_relation(rtype)
- and rtype in source.cross_relations])
-
- @cached
- def is_multi_sources_relation(self, rtype):
- warn('[3.18] old multi-source system will go away in the next version',
- DeprecationWarning)
- return any(source for source in self.sources
- if not source is self.system_source
- and source.support_relation(rtype))
# these are overridden by set_log_methods below
# only defining here to prevent pylint from complaining
- info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
+ info = warning = error = critical = exception = debug = lambda msg, *a, **kw: None
def pyro_unregister(config):
--- a/server/schemaserial.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/schemaserial.py Mon Feb 17 15:32:50 2014 +0100
@@ -20,7 +20,6 @@
__docformat__ = "restructuredtext en"
import os
-from itertools import chain
import json
from logilab.common.shellutils import ProgressBar
@@ -28,13 +27,13 @@
from yams import (BadSchemaDefinition, schema as schemamod, buildobjs as ybo,
schema2sql as y2sql)
-from cubicweb import CW_SOFTWARE_ROOT, Binary, typed_eid
+from cubicweb import Binary
from cubicweb.schema import (KNOWN_RPROPERTIES, CONSTRAINTS, ETYPE_NAME_MAP,
- VIRTUAL_RTYPES, PURE_VIRTUAL_RTYPES)
+ VIRTUAL_RTYPES)
from cubicweb.server import sqlutils
-def group_mapping(cursor, interactive=True):
+def group_mapping(cnx, interactive=True):
"""create a group mapping from an rql cursor
A group mapping has standard group names as key (managers, owners at least)
@@ -43,7 +42,7 @@
from the user.
"""
res = {}
- for eid, name in cursor.execute('Any G, N WHERE G is CWGroup, G name N',
+ for eid, name in cnx.execute('Any G, N WHERE G is CWGroup, G name N',
build_descr=False):
res[name] = eid
if not interactive:
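
(For reference, a sketch of the mapping built here, with illustrative eids;
actual values depend on the instance:)

    mapping = group_mapping(cnx, interactive=False)
    # e.g. {'managers': 19, 'users': 20, 'guests': 21, 'owners': 22}
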
@@ -75,33 +74,33 @@
break
return res
-def cstrtype_mapping(cursor):
+def cstrtype_mapping(cnx):
"""cached constraint types mapping"""
- map = dict(cursor.execute('Any T, X WHERE X is CWConstraintType, X name T'))
+ map = dict(cnx.execute('Any T, X WHERE X is CWConstraintType, X name T'))
return map
# schema / perms deserialization ##############################################
-def deserialize_schema(schema, session):
+def deserialize_schema(schema, cnx):
"""return a schema according to information stored in an rql database
as CWRType and CWEType entities
"""
- repo = session.repo
+ repo = cnx.repo
dbhelper = repo.system_source.dbhelper
# XXX bw compat (3.6 migration)
- sqlcu = session.cnxset['system']
- sqlcu.execute("SELECT * FROM cw_CWRType WHERE cw_name='symetric'")
- if sqlcu.fetchall():
- sql = dbhelper.sql_rename_col('cw_CWRType', 'cw_symetric', 'cw_symmetric',
- dbhelper.TYPE_MAPPING['Boolean'], True)
- sqlcu.execute(sql)
- sqlcu.execute("UPDATE cw_CWRType SET cw_name='symmetric' WHERE cw_name='symetric'")
- session.commit(False)
+ with cnx.ensure_cnx_set:
+ sqlcu = cnx.system_sql("SELECT * FROM cw_CWRType WHERE cw_name='symetric'")
+ if sqlcu.fetchall():
+ sql = dbhelper.sql_rename_col('cw_CWRType', 'cw_symetric', 'cw_symmetric',
+ dbhelper.TYPE_MAPPING['Boolean'], True)
+ sqlcu.execute(sql)
+ sqlcu.execute("UPDATE cw_CWRType SET cw_name='symmetric' WHERE cw_name='symetric'")
+ cnx.commit(False)
ertidx = {}
copiedeids = set()
- permsidx = deserialize_ertype_permissions(session)
+ permsidx = deserialize_ertype_permissions(cnx)
schema.reading_from_database = True
- for eid, etype, desc in session.execute(
+ for eid, etype, desc in cnx.execute(
'Any X, N, D WHERE X is CWEType, X name N, X description D',
build_descr=False):
# base types are already in the schema, skip them
@@ -115,7 +114,7 @@
needcopy = False
netype = ETYPE_NAME_MAP[etype]
# can't use write rql queries at this point, use raw sql
- sqlexec = session.system_sql
+ sqlexec = cnx.system_sql
if sqlexec('SELECT 1 FROM %(p)sCWEType WHERE %(p)sname=%%(n)s'
% {'p': sqlutils.SQL_PREFIX}, {'n': netype}).fetchone():
# the new type already exists, we should copy (eg make existing
@@ -132,17 +131,12 @@
sqlexec(alter_table_sql)
sqlexec('UPDATE entities SET type=%(n)s WHERE type=%(x)s',
{'x': etype, 'n': netype})
- session.commit(False)
- try:
- sqlexec('UPDATE deleted_entities SET type=%(n)s WHERE type=%(x)s',
- {'x': etype, 'n': netype})
- except Exception:
- pass
+ cnx.commit(False)
tocleanup = [eid]
tocleanup += (eid for eid, cached in repo._type_source_cache.iteritems()
if etype == cached[0])
repo.clear_caches(tocleanup)
- session.commit(False)
+ cnx.commit(False)
if needcopy:
ertidx[eid] = netype
copiedeids.add(eid)
@@ -154,14 +148,14 @@
eschema = schema.add_entity_type(
ybo.EntityType(name=etype, description=desc, eid=eid))
set_perms(eschema, permsidx)
- for etype, stype in session.execute(
+ for etype, stype in cnx.execute(
'Any XN, ETN WHERE X is CWEType, X name XN, X specializes ET, ET name ETN',
build_descr=False):
etype = ETYPE_NAME_MAP.get(etype, etype)
stype = ETYPE_NAME_MAP.get(stype, stype)
schema.eschema(etype)._specialized_type = stype
schema.eschema(stype)._specialized_by.append(etype)
- for eid, rtype, desc, sym, il, ftc in session.execute(
+ for eid, rtype, desc, sym, il, ftc in cnx.execute(
'Any X,N,D,S,I,FTC WHERE X is CWRType, X name N, X description D, '
'X symmetric S, X inlined I, X fulltext_container FTC', build_descr=False):
ertidx[eid] = rtype
@@ -169,7 +163,7 @@
ybo.RelationType(name=rtype, description=desc,
symmetric=bool(sym), inlined=bool(il),
fulltext_container=ftc, eid=eid))
- cstrsidx = deserialize_rdef_constraints(session)
+ cstrsidx = deserialize_rdef_constraints(cnx)
pendingrdefs = []
# closure to factorize common code of attribute/relation rdef addition
def _add_rdef(rdefeid, seid, reid, oeid, **kwargs):
@@ -198,13 +192,13 @@
set_perms(rdefs, permsidx)
# Get the type parameters for additional base types.
try:
- extra_props = dict(session.execute('Any X, XTP WHERE X is CWAttribute, '
+ extra_props = dict(cnx.execute('Any X, XTP WHERE X is CWAttribute, '
'X extra_props XTP'))
except Exception:
- session.critical('Previous CRITICAL notification about extra_props is not '
+ cnx.critical('Previous CRITICAL notification about extra_props is not '
'a problem if you are migrating to cubicweb 3.17')
extra_props = {} # not yet in the schema (introduced by 3.17 migration)
- for values in session.execute(
+ for values in cnx.execute(
'Any X,SE,RT,OE,CARD,ORD,DESC,IDX,FTIDX,I18N,DFLT WHERE X is CWAttribute,'
'X relation_type RT, X cardinality CARD, X ordernum ORD, X indexed IDX,'
'X description DESC, X internationalizable I18N, X defaultval DFLT,'
@@ -222,7 +216,7 @@
cardinality=card, description=desc, order=ord,
indexed=idx, fulltextindexed=ftidx, internationalizable=i18n,
default=default, **typeparams)
- for values in session.execute(
+ for values in cnx.execute(
'Any X,SE,RT,OE,CARD,ORD,DESC,C WHERE X is CWRelation, X relation_type RT,'
'X cardinality CARD, X ordernum ORD, X description DESC, '
'X from_entity SE, X to_entity OE, X composite C', build_descr=False):
@@ -238,7 +232,7 @@
if rdefs is not None:
set_perms(rdefs, permsidx)
unique_togethers = {}
- rset = session.execute(
+ rset = cnx.execute(
'Any X,E,R WHERE '
'X is CWUniqueTogetherConstraint, '
'X constraint_of E, X relations R', build_descr=False)
@@ -257,11 +251,11 @@
for eschema, unique_together in unique_togethers.itervalues():
eschema._unique_together.append(tuple(sorted(unique_together)))
schema.infer_specialization_rules()
- session.commit()
+ cnx.commit()
schema.reading_from_database = False
-def deserialize_ertype_permissions(session):
+def deserialize_ertype_permissions(cnx):
"""return sect action:groups associations for the given
entity or relation schema with its eid, according to schema's
permissions stored in the database as [read|add|delete|update]_permission
@@ -270,21 +264,21 @@
res = {}
for action in ('read', 'add', 'update', 'delete'):
rql = 'Any E,N WHERE G is CWGroup, G name N, E %s_permission G' % action
- for eid, gname in session.execute(rql, build_descr=False):
+ for eid, gname in cnx.execute(rql, build_descr=False):
res.setdefault(eid, {}).setdefault(action, []).append(gname)
rql = ('Any E,X,EXPR,V WHERE X is RQLExpression, X expression EXPR, '
'E %s_permission X, X mainvars V' % action)
- for eid, expreid, expr, mainvars in session.execute(rql, build_descr=False):
+ for eid, expreid, expr, mainvars in cnx.execute(rql, build_descr=False):
# we don't know yet if it's a rql expr for an entity or a relation,
# so append a tuple to differentiate from groups and so we'll be
# able to instantiate it later
res.setdefault(eid, {}).setdefault(action, []).append( (expr, mainvars, expreid) )
return res
-def deserialize_rdef_constraints(session):
+def deserialize_rdef_constraints(cnx):
"""return the list of relation definition's constraints as instances"""
res = {}
- for rdefeid, ceid, ct, val in session.execute(
+ for rdefeid, ceid, ct, val in cnx.execute(
'Any E, X,TN,V WHERE E constrained_by X, X is CWConstraint, '
'X cstrtype T, T name TN, X value V', build_descr=False):
cstr = CONSTRAINTS[ct].deserialize(val)
@@ -311,7 +305,7 @@
# schema / perms serialization ################################################
-def serialize_schema(cursor, schema):
+def serialize_schema(cnx, schema):
"""synchronize schema and permissions in the database according to
current schema
"""
@@ -319,7 +313,7 @@
if not quiet:
_title = '-> storing the schema in the database '
print _title,
- execute = cursor.execute
+ execute = cnx.execute
eschemas = schema.entities()
if not quiet:
pb_size = (len(eschemas + schema.relations())
@@ -328,7 +322,7 @@
pb = ProgressBar(pb_size, title=_title)
else:
pb = None
- groupmap = group_mapping(cursor, interactive=False)
+ groupmap = group_mapping(cnx, interactive=False)
# serialize all entity types, assuring CWEType is serialized first for proper
# is / is_instance_of insertion
eschemas.remove(schema.eschema('CWEType'))
--- a/server/server.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/server.py Mon Feb 17 15:32:50 2014 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -19,13 +19,9 @@
__docformat__ = "restructuredtext en"
-import os
-import sys
import select
-import warnings
from time import localtime, mktime
-from cubicweb.cwconfig import CubicWebConfiguration
from cubicweb.server.utils import TasksManager
from cubicweb.server.repository import Repository
--- a/server/serverconfig.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/serverconfig.py Mon Feb 17 15:32:50 2014 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -24,7 +24,7 @@
from StringIO import StringIO
import logilab.common.configuration as lgconfig
-from logilab.common.decorators import wproperty, cached
+from logilab.common.decorators import cached
from cubicweb.toolsutils import read_config, restrict_perms_to_user
from cubicweb.cwconfig import CONFIGURATIONS, CubicWebConfiguration
@@ -297,13 +297,16 @@
# configuration file (#16102)
@cached
def read_sources_file(self):
+ """return a dictionary of values found in the sources file"""
return read_config(self.sources_file(), raise_if_unreadable=True)
- def sources(self):
- """return a dictionnaries containing sources definitions indexed by
- sources'uri
- """
- return self.read_sources_file()
+ @property
+ def system_source_config(self):
+ return self.read_sources_file()['system']
+
+ @property
+ def default_admin_config(self):
+ return self.read_sources_file()['admin']
def source_enabled(self, source):
if self.sources_mode is not None:
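A minimal usage sketch of the two new properties, assuming a standard sources file with 'system' and 'admin' sections ('myinstance' is a hypothetical appid):

.. sourcecode:: python

    config = ServerConfiguration.config_for('myinstance')
    system = config.system_source_config      # was config.sources()['system']
    print system['db-driver'], system['db-name']
    admin = config.default_admin_config       # was config.sources()['admin']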
--- a/server/serverctl.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/serverctl.py Mon Feb 17 15:32:50 2014 +0100
@@ -136,8 +136,8 @@
from cubicweb.dbapi import in_memory_repo_cnx
from cubicweb.server.utils import manager_userpasswd
try:
- login = config.sources()['admin']['login']
- pwd = config.sources()['admin']['password']
+ login = config.default_admin_config['login']
+ pwd = config.default_admin_config['password']
except KeyError:
login, pwd = manager_userpasswd()
while True:
@@ -221,7 +221,7 @@
def cleanup(self):
"""remove instance's configuration and database"""
from logilab.database import get_db_helper
- source = self.config.sources()['system']
+ source = self.config.system_source_config
dbname = source['db-name']
helper = get_db_helper(source['db-driver'])
if ASK.confirm('Delete database %s ?' % dbname):
@@ -334,7 +334,7 @@
automatic = self.get('automatic')
appid = args.pop()
config = ServerConfiguration.config_for(appid)
- source = config.sources()['system']
+ source = config.system_source_config
dbname = source['db-name']
driver = source['db-driver']
helper = get_db_helper(driver)
@@ -441,7 +441,7 @@
appid = args[0]
config = ServerConfiguration.config_for(appid)
try:
- system = config.sources()['system']
+ system = config.system_source_config
extra_args = system.get('db-extra-arguments')
extra = extra_args and {'extra_args': extra_args} or {}
get_connection(
@@ -544,7 +544,7 @@
from cubicweb.server.sqlutils import sqlexec, sqlgrants
appid, user = args
config = ServerConfiguration.config_for(appid)
- source = config.sources()['system']
+ source = config.system_source_config
set_owner = self.config.set_owner
cnx = system_source_cnx(source, special_privs='GRANT')
cursor = cnx.cursor()
@@ -734,12 +734,12 @@
mih.backup_database(output, askconfirm=False, format=format)
mih.shutdown()
-def _local_restore(appid, backupfile, drop, systemonly=True, format='native'):
+def _local_restore(appid, backupfile, drop, format='native'):
config = ServerConfiguration.config_for(appid)
config.verbosity = 1 # else we won't be asked for confirmation on problems
config.quick_start = True
mih = config.migration_handler(connect=False, verbosity=1)
- mih.restore_database(backupfile, drop, systemonly, askconfirm=False, format=format)
+ mih.restore_database(backupfile, drop, askconfirm=False, format=format)
repo = mih.repo_connect()
# version of the database
dbversions = repo.get_versions()
@@ -848,13 +848,6 @@
'help': 'for some reason the database doesn\'t exist and so '
'should not be dropped.'}
),
- ('restore-all',
- {'short': 'r', 'action' : 'store_true', 'default' : False,
- 'help': 'restore everything, eg not only the system source database '
- 'but also data for all sources supporting backup/restore and custom '
- 'instance data. In that case, <backupfile> is expected to be the '
- 'timestamp of the backup to restore, not a file'}
- ),
('format',
{'short': 'f', 'default': 'native', 'type': 'choice',
'choices': ('native', 'portable'),
@@ -874,7 +867,6 @@
raise
_local_restore(appid, backupfile,
drop=not self.config.no_drop,
- systemonly=not self.config.restore_all,
format=self.config.format)
if self.config.format == 'portable':
try:
@@ -1014,26 +1006,6 @@
cnx.commit()
-class SynchronizeInstanceSchemaCommand(Command):
- """Synchronize persistent schema with cube schema.
-
- Will synchronize common stuff between the cube schema and the
- actual persistent schema, but will not add/remove any entity or relation.
-
- <instance>
- the identifier of the instance to synchronize.
- """
- name = 'schema-sync'
- arguments = '<instance>'
- min_args = max_args = 1
-
- def run(self, args):
- appid = args[0]
- config = ServerConfiguration.config_for(appid)
- mih = config.migration_handler()
- mih.cmd_synchronize_schema()
-
-
class SynchronizeSourceCommand(Command):
"""Force a source synchronization.
@@ -1112,7 +1084,7 @@
StartRepositoryCommand,
DBDumpCommand, DBRestoreCommand, DBCopyCommand,
AddSourceCommand, CheckRepositoryCommand, RebuildFTICommand,
- SynchronizeInstanceSchemaCommand, SynchronizeSourceCommand, SchemaDiffCommand,
+ SynchronizeSourceCommand, SchemaDiffCommand,
):
CWCTL.register(cmdclass)
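With the restore-all option gone, _local_restore always restores the system source only; a hedged sketch of a call under the new signature (appid and path are made up):

.. sourcecode:: python

    _local_restore('myinstance', '/tmp/backup.dump',
                   drop=True, format='native')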
--- a/server/session.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/session.py Mon Feb 17 15:32:50 2014 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -23,12 +23,15 @@
from time import time
from uuid import uuid4
from warnings import warn
+import json
+import functools
+from contextlib import contextmanager
from logilab.common.deprecation import deprecated
from logilab.common.textutils import unormalize
from logilab.common.registry import objectify_predicate
-from cubicweb import UnknownEid, QueryError, schema, server
+from cubicweb import QueryError, schema, server, ProgrammingError
from cubicweb.req import RequestSessionBase
from cubicweb.utils import make_uid
from cubicweb.rqlrewrite import RQLRewriter
@@ -96,59 +99,75 @@
return obj.deny_all_hooks_but(*categories)
-class _hooks_control(object):
+class _hooks_control(object): # XXX repoapi: remove me when the session
+                              # stops being a connection
"""context manager to control activated hooks categories.
- If mode is session.`HOOKS_DENY_ALL`, given hooks categories will
+    If mode is `HOOKS_DENY_ALL`, given hooks categories will
be enabled.
- If mode is session.`HOOKS_ALLOW_ALL`, given hooks categories will
+ If mode is `HOOKS_ALLOW_ALL`, given hooks categories will
be disabled.
.. sourcecode:: python
- with _hooks_control(self.session, self.session.HOOKS_ALLOW_ALL, 'integrity'):
+ with _hooks_control(cnx, HOOKS_ALLOW_ALL, 'integrity'):
# ... do stuff with all but 'integrity' hooks activated
- with _hooks_control(self.session, self.session.HOOKS_DENY_ALL, 'integrity'):
+ with _hooks_control(cnx, HOOKS_DENY_ALL, 'integrity'):
# ... do stuff with none but 'integrity' hooks activated
This is an internal api, you should rather use
- :meth:`~cubicweb.server.session.Session.deny_all_hooks_but` or
- :meth:`~cubicweb.server.session.Session.allow_all_hooks_but` session
- methods.
+ :meth:`~cubicweb.server.session.Connection.deny_all_hooks_but` or
+ :meth:`~cubicweb.server.session.Connection.allow_all_hooks_but`
+    Connection methods.
"""
- def __init__(self, session, mode, *categories):
+ def __init__(self, cnx, mode, *categories):
assert mode in (HOOKS_ALLOW_ALL, HOOKS_DENY_ALL)
- self.session = session
- self.tx = session._tx
+ self.cnx = cnx
self.mode = mode
self.categories = categories
self.oldmode = None
self.changes = ()
def __enter__(self):
- self.oldmode = self.tx.hooks_mode
- self.tx.hooks_mode = self.mode
+ self.oldmode = self.cnx.hooks_mode
+ self.cnx.hooks_mode = self.mode
if self.mode is HOOKS_DENY_ALL:
- self.changes = self.tx.enable_hook_categories(*self.categories)
+ self.changes = self.cnx.enable_hook_categories(*self.categories)
else:
- self.changes = self.tx.disable_hook_categories(*self.categories)
- self.tx.ctx_count += 1
+ self.changes = self.cnx.disable_hook_categories(*self.categories)
+ self.cnx.ctx_count += 1
def __exit__(self, exctype, exc, traceback):
- self.tx.ctx_count -= 1
- if self.tx.ctx_count == 0:
- self.session._clear_thread_storage(self.tx)
- else:
- try:
- if self.categories:
- if self.mode is HOOKS_DENY_ALL:
- self.tx.disable_hook_categories(*self.categories)
- else:
- self.tx.enable_hook_categories(*self.categories)
- finally:
- self.tx.hooks_mode = self.oldmode
+ self.cnx.ctx_count -= 1
+ try:
+ if self.categories:
+ if self.mode is HOOKS_DENY_ALL:
+ self.cnx.disable_hook_categories(*self.categories)
+ else:
+ self.cnx.enable_hook_categories(*self.categories)
+ finally:
+ self.cnx.hooks_mode = self.oldmode
+
+class _session_hooks_control(_hooks_control): # XXX repoapi: remove me when
+ # session stop being connection
+ """hook control context manager for session
+
+ Necessary to handle some unholy transaction scope logic."""
+
+
+ def __init__(self, session, mode, *categories):
+ self.session = session
+ super_init = super(_session_hooks_control, self).__init__
+ super_init(session._cnx, mode, *categories)
+
+ def __exit__(self, exctype, exc, traceback):
+ super_exit = super(_session_hooks_control, self).__exit__
+ ret = super_exit(exctype, exc, traceback)
+ if self.cnx.ctx_count == 0:
+ self.session._close_cnx(self.cnx)
+ return ret
@deprecated('[3.17] use <object>.security_enabled instead')
def security_enabled(obj, *args, **kwargs):
@@ -160,9 +179,8 @@
By default security is disabled on queries executed on the repository
side.
"""
- def __init__(self, session, read=None, write=None):
- self.session = session
- self.tx = session._tx
+ def __init__(self, cnx, read=None, write=None):
+ self.cnx = cnx
self.read = read
self.write = write
self.oldread = None
@@ -172,24 +190,39 @@
if self.read is None:
self.oldread = None
else:
- self.oldread = self.tx.read_security
- self.tx.read_security = self.read
+ self.oldread = self.cnx.read_security
+ self.cnx.read_security = self.read
if self.write is None:
self.oldwrite = None
else:
- self.oldwrite = self.tx.write_security
- self.tx.write_security = self.write
- self.tx.ctx_count += 1
+ self.oldwrite = self.cnx.write_security
+ self.cnx.write_security = self.write
+ self.cnx.ctx_count += 1
def __exit__(self, exctype, exc, traceback):
- self.tx.ctx_count -= 1
- if self.tx.ctx_count == 0:
- self.session._clear_thread_storage(self.tx)
- else:
- if self.oldread is not None:
- self.tx.read_security = self.oldread
- if self.oldwrite is not None:
- self.tx.write_security = self.oldwrite
+ self.cnx.ctx_count -= 1
+ if self.oldread is not None:
+ self.cnx.read_security = self.oldread
+ if self.oldwrite is not None:
+ self.cnx.write_security = self.oldwrite
+
+class _session_security_enabled(_security_enabled):
+ """hook security context manager for session
+
+    Necessary to handle some unholy transaction scope logic."""
+
+
+ def __init__(self, session, read=None, write=None):
+ self.session = session
+ super_init = super(_session_security_enabled, self).__init__
+ super_init(session._cnx, read=read, write=write)
+
+ def __exit__(self, exctype, exc, traceback):
+ super_exit = super(_session_security_enabled, self).__exit__
+ ret = super_exit(exctype, exc, traceback)
+ if self.cnx.ctx_count == 0:
+ self.session._close_cnx(self.cnx)
+ return ret
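A minimal sketch of the split introduced here: on a Connection the plain context manager is enough, while the _session_* variants additionally close the backing Connection when the last nested context exits (cnx is assumed to be an open Connection):

.. sourcecode:: python

    # on a Connection, security handling is a plain context manager
    with cnx.security_enabled(read=False, write=False):
        cnx.execute('DELETE CWGroup G WHERE G name "obsolete"')
    # on a (deprecated) Session, the same call goes through
    # _session_security_enabled and may close the backing Connection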
HOOKS_ALLOW_ALL = object()
HOOKS_DENY_ALL = object()
@@ -199,13 +232,13 @@
pass
class CnxSetTracker(object):
- """Keep track of which transaction use which cnxset.
+    """Keep track of which connection uses which cnxset.
- There should be one of these object per session (including internal sessions).
+ There should be one of these objects per session (including internal sessions).
- Session objects are responsible of creating their CnxSetTracker object.
+ Session objects are responsible for creating their CnxSetTracker object.
- Transactions should use the :meth:`record` and :meth:`forget` to inform the
+    Connections should use the :meth:`record` and :meth:`forget` methods to inform the
tracker of cnxsets they have acquired.
.. automethod:: cubicweb.server.session.CnxSetTracker.record
@@ -231,13 +264,13 @@
def __exit__(self, *args):
return self._condition.__exit__(*args)
- def record(self, txid, cnxset):
- """Inform the tracker that a txid has acquired a cnxset
+ def record(self, cnxid, cnxset):
+ """Inform the tracker that a cnxid has acquired a cnxset
- This method is to be used by Transaction objects.
+ This method is to be used by Connection objects.
This method fails when:
- - The txid already has a recorded cnxset.
+ - The cnxid already has a recorded cnxset.
- The tracker is not active anymore.
Notes about the caller:
@@ -264,19 +297,19 @@
with self._condition:
if not self._active:
raise SessionClosedError('Closed')
- old = self._record.get(txid)
+ old = self._record.get(cnxid)
if old is not None:
- raise ValueError('transaction "%s" already has a cnx_set (%r)'
- % (txid, old))
- self._record[txid] = cnxset
+ raise ValueError('connection "%s" already has a cnx_set (%r)'
+ % (cnxid, old))
+ self._record[cnxid] = cnxset
- def forget(self, txid, cnxset):
- """Inform the tracker that a txid have release a cnxset
+ def forget(self, cnxid, cnxset):
+        """Inform the tracker that a cnxid has released a cnxset
- This methode is to be used by Transaction object.
+        This method is to be used by Connection objects.
This method fails when:
- - The cnxset for the txid does not match the recorded one.
+ - The cnxset for the cnxid does not match the recorded one.
Notes about the caller:
(1) It is responsible for releasing the cnxset.
@@ -296,11 +329,11 @@
cnxset = repo._free_cnxset(cnxset) # (1)
"""
with self._condition:
- old = self._record.get(txid, None)
+ old = self._record.get(cnxid, None)
if old is not cnxset:
raise ValueError('recorded cnxset for "%s" mismatch: %r != %r'
- % (txid, old, cnxset))
- self._record.pop(txid)
+ % (cnxid, old, cnxset))
+ self._record.pop(cnxid)
self._condition.notify_all()
def close(self):
@@ -318,7 +351,7 @@
This method is to be used by Session objects.
- Returns a tuple of transaction ids that remain open.
+ Returns a tuple of connection ids that remain open.
"""
with self._condition:
if self._active:
@@ -330,10 +363,29 @@
timeout -= time() - start
return tuple(self._record)
-class Transaction(object):
- """Repository Transaction
+
+def _with_cnx_set(func):
+    """decorator for Connection methods ensuring they run with a cnxset"""
+ @functools.wraps(func)
+ def wrapper(cnx, *args, **kwargs):
+ with cnx.ensure_cnx_set:
+ return func(cnx, *args, **kwargs)
+ return wrapper
- Holds all transaction related data
+def _open_only(func):
+    """decorator for Connection methods checking that the connection is open"""
+ @functools.wraps(func)
+ def check_open(cnx, *args, **kwargs):
+ if not cnx._open:
+ raise ProgrammingError('Closed Connection: %s'
+ % cnx.connectionid)
+ return func(cnx, *args, **kwargs)
+ return check_open
+
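A hedged sketch of what the two decorators buy: _open_only guards against use outside the with-block, and _with_cnx_set transparently acquires a cnxset for the duration of the call (session is assumed to exist):

.. sourcecode:: python

    cnx = Connection(session)
    try:
        cnx.source_defs()       # ProgrammingError: Closed Connection
    except ProgrammingError:
        pass
    with cnx:                   # __enter__ marks the connection open
        cnx.source_defs()       # fine; decorated methods now work
    # after __exit__ the connection is closed again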
+class Connection(RequestSessionBase):
+ """Repository Connection
+
+ Holds all connection related data
Database connection resources:
@@ -342,11 +394,11 @@
:attr:`cnxset`, the connections set to use to execute queries on sources.
If the transaction is read only, the connection set may be freed between
- actual queries. This allows multiple transactions with a reasonably low
+ actual queries. This allows multiple connections with a reasonably low
connection set pool size. Control mechanism is detailed below.
- .. automethod:: cubicweb.server.session.Transaction.set_cnxset
- .. automethod:: cubicweb.server.session.Transaction.free_cnxset
+ .. automethod:: cubicweb.server.session.Connection.set_cnxset
+ .. automethod:: cubicweb.server.session.Connection.free_cnxset
:attr:`mode`, string telling the connections set handling mode, may be one
of 'read' (connections set may be freed), 'write' (some write was done in
@@ -387,15 +439,40 @@
"""
- def __init__(self, txid, session, rewriter):
- #: transaction unique id
- self.transactionid = txid
+ is_request = False
+
+ def __init__(self, session, cnxid=None, session_handled=False):
+ # using super(Connection, self) confuse some test hack
+ RequestSessionBase.__init__(self, session.vreg)
+        # only the session provides an explicit cnxid
+        if cnxid is not None:
+            assert session_handled # only the session provides an explicit cnxid
+        self._open = None
+        if cnxid is None:
+            cnxid = '%s-%s' % (session.sessionid, uuid4().hex)
+        #: connection unique id
+        self.connectionid = cnxid
+ self.sessionid = session.sessionid
+        #: self._session_handled
+        #: is the life cycle of this Connection automatically controlled by
+        #: the Session? This is the old backward-compatibility mode.
+ self._session_handled = session_handled
#: reentrance handling
self.ctx_count = 0
+        #: count the number of entries in contexts needing a cnxset
+ self._cnxset_count = 0
+        #: Boolean for compat with the older explicit set_cnxset/free_cnx API.
+        #: When a call to set_cnxset is done, no automatic freeing will happen
+        #: until free_cnx is called.
+ self._auto_free_cnx_set = True
#: server.Repository object
self.repo = session.repo
self.vreg = self.repo.vreg
+ self._execute = self.repo.querier.execute
+
+ # other session utility
+ self._session_timestamp = session._timestamp
#: connection handling mode
self.mode = session.default_mode
@@ -403,11 +480,14 @@
self._cnxset = None
#: CnxSetTracker used to report cnxset usage
self._cnxset_tracker = session._cnxset_tracker
- #: is this transaction from a client or internal to the repo
+ #: is this connection from a client or internal to the repo
self.running_dbapi_query = True
+ # internal (root) session
+ self.is_internal_session = session.is_internal_session
#: dict containing arbitrary data cleared at the end of the transaction
- self.data = {}
+ self.transaction_data = {}
+ self._session_data = session.data
#: ordered list of operations to be processed on commit/rollback
self.pending_operations = []
#: (None, 'precommit', 'postcommit', 'uncommitable')
@@ -432,27 +512,79 @@
self.undo_actions = config['undo-enabled']
# RQLRewriter are not thread safe
- self._rewriter = rewriter
+ self._rewriter = RQLRewriter(self)
+
+ # other session utility
+ if session.user.login == '__internal_manager__':
+ self.user = session.user
+ else:
+ self._set_user(session.user)
+
+
+    # life cycle handling ####################################################
+
+ def __enter__(self):
+ assert self._open is None # first opening
+ self._open = True
+ return self
+
+ def __exit__(self, exctype=None, excvalue=None, tb=None):
+ assert self._open # actually already open
+ self._free_cnxset(ignoremode=True)
+ self.clear()
+ self._open = False
+
+
+
+ # shared data handling ###################################################
@property
- def transaction_data(self):
- return self.data
+ def data(self):
+ return self._session_data
+
+ @property
+ def rql_rewriter(self):
+ return self._rewriter
+
+ @_open_only
+ def get_shared_data(self, key, default=None, pop=False, txdata=False):
+ """return value associated to `key` in session data"""
+ if txdata:
+ data = self.transaction_data
+ else:
+ data = self._session_data
+ if pop:
+ return data.pop(key, default)
+ else:
+ return data.get(key, default)
+
+ @_open_only
+ def set_shared_data(self, key, value, txdata=False):
+ """set value associated to `key` in session data"""
+ if txdata:
+ self.transaction_data[key] = value
+ else:
+ self._session_data[key] = value
def clear(self):
"""reset internal data"""
- self.data = {}
+ self.transaction_data = {}
#: ordered list of operations to be processed on commit/rollback
self.pending_operations = []
#: (None, 'precommit', 'postcommit', 'uncommitable')
self.commit_state = None
self.pruned_hooks_cache = {}
+ self.local_perm_cache.clear()
+        self._rewriter = RQLRewriter(self)
# Connection Set Management ###############################################
@property
+ @_open_only
def cnxset(self):
return self._cnxset
@cnxset.setter
+ @_open_only
def cnxset(self, new_cnxset):
with self._cnxset_tracker:
old_cnxset = self._cnxset
@@ -461,30 +593,27 @@
if old_cnxset is not None:
self._cnxset = None
self.ctx_count -= 1
- self._cnxset_tracker.forget(self.transactionid, old_cnxset)
+ self._cnxset_tracker.forget(self.connectionid, old_cnxset)
if new_cnxset is not None:
- self._cnxset_tracker.record(self.transactionid, new_cnxset)
+ self._cnxset_tracker.record(self.connectionid, new_cnxset)
self._cnxset = new_cnxset
self.ctx_count += 1
- def set_cnxset(self):
- """the transaction need a connections set to execute some queries"""
+ @_open_only
+ def _set_cnxset(self):
+        """the connection needs a connections set to execute some queries"""
if self.cnxset is None:
cnxset = self.repo._get_cnxset()
try:
self.cnxset = cnxset
- try:
- cnxset.cnxset_set()
- except:
- self.cnxset = None
- raise
except:
self.repo._free_cnxset(cnxset)
raise
return self.cnxset
- def free_cnxset(self, ignoremode=False):
- """the transaction is no longer using its connections set, at least for some time"""
+ @_open_only
+ def _free_cnxset(self, ignoremode=False):
+ """the connection is no longer using its connections set, at least for some time"""
# cnxset may be none if no operation has been done since last commit
# or rollback
cnxset = self.cnxset
@@ -495,55 +624,229 @@
cnxset.cnxset_freed()
self.repo._free_cnxset(cnxset)
+ @deprecated('[3.19] cnxset are automatically managed now.'
+ ' stop using explicit set and free.')
+ def set_cnxset(self):
+ self._auto_free_cnx_set = False
+ return self._set_cnxset()
+
+ @deprecated('[3.19] cnxset are automatically managed now.'
+ ' stop using explicit set and free.')
+ def free_cnxset(self, ignoremode=False):
+ self._auto_free_cnx_set = True
+ return self._free_cnxset(ignoremode=ignoremode)
+
+
+ @property
+ @contextmanager
+ @_open_only
+ def ensure_cnx_set(self):
+ assert self._cnxset_count >= 0
+ if self._cnxset_count == 0:
+ self._set_cnxset()
+ try:
+ self._cnxset_count += 1
+ yield
+ finally:
+ self._cnxset_count = max(self._cnxset_count - 1, 0)
+ if self._cnxset_count == 0 and self._auto_free_cnx_set:
+ self._free_cnxset()
+
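ensure_cnx_set is refcounted, so nested uses share a single cnxset; a minimal sketch (cnx assumed open):

.. sourcecode:: python

    with cnx.ensure_cnx_set:
        with cnx.ensure_cnx_set:    # no second cnxset is acquired
            pass                    # _cnxset_count == 2 here
    # released on outermost exit, unless the deprecated set_cnxset()
    # disabled automatic freeing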
# Entity cache management #################################################
#
- # The transaction entity cache as held in tx.data is removed at the
- # end of the transaction (commit and rollback)
+    # The connection entity cache held in cnx.transaction_data is removed at
+    # the end of the transaction (commit and rollback)
#
- # XXX transaction level caching may be a pb with multiple repository
+ # XXX connection level caching may be a pb with multiple repository
# instances, but 1. this is probably not the only one :$ and 2. it may be
# an acceptable risk. Anyway we could activate it or not according to a
# configuration option
def set_entity_cache(self, entity):
- """Add `entity` to the transaction entity cache"""
- ecache = self.data.setdefault('ecache', {})
+ """Add `entity` to the connection entity cache"""
+        # XXX not using _open_only because, at creation time, _set_user
+        # calls this function to cache the Connection user.
+ if entity.cw_etype != 'CWUser' and not self._open:
+ raise ProgrammingError('Closed Connection: %s'
+ % self.connectionid)
+ ecache = self.transaction_data.setdefault('ecache', {})
ecache.setdefault(entity.eid, entity)
+ @_open_only
def entity_cache(self, eid):
"""get cache entity for `eid`"""
- return self.data['ecache'][eid]
+ return self.transaction_data['ecache'][eid]
+ @_open_only
def cached_entities(self):
"""return the whole entity cache"""
- return self.data.get('ecache', {}).values()
+ return self.transaction_data.get('ecache', {}).values()
+ @_open_only
def drop_entity_cache(self, eid=None):
"""drop entity from the cache
If eid is None, the whole cache is dropped"""
if eid is None:
- self.data.pop('ecache', None)
+ self.transaction_data.pop('ecache', None)
else:
- del self.data['ecache'][eid]
+ del self.transaction_data['ecache'][eid]
+
+ # relations handling #######################################################
+
+ @_open_only
+ def add_relation(self, fromeid, rtype, toeid):
+ """provide direct access to the repository method to add a relation.
+
+ This is equivalent to the following rql query:
+
+        SET X rtype Y WHERE X eid fromeid, Y eid toeid
+
+ without read security check but also all the burden of rql execution.
+ You may use this in hooks when you know both eids of the relation you
+ want to add.
+ """
+ self.add_relations([(rtype, [(fromeid, toeid)])])
+
+ @_open_only
+ def add_relations(self, relations):
+        '''set many relations using a shortcut similar to the one in add_relation
+
+ relations is a list of 2-uples, the first element of each
+ 2-uple is the rtype, and the second is a list of (fromeid,
+ toeid) tuples
+ '''
+ edited_entities = {}
+ relations_dict = {}
+ with self.security_enabled(False, False):
+ for rtype, eids in relations:
+ if self.vreg.schema[rtype].inlined:
+ for fromeid, toeid in eids:
+ if fromeid not in edited_entities:
+ entity = self.entity_from_eid(fromeid)
+ edited = EditedEntity(entity)
+ edited_entities[fromeid] = edited
+ else:
+ edited = edited_entities[fromeid]
+ edited.edited_attribute(rtype, toeid)
+ else:
+ relations_dict[rtype] = eids
+ self.repo.glob_add_relations(self, relations_dict)
+ for edited in edited_entities.itervalues():
+ self.repo.glob_update_entity(self, edited)
+
+
+ @_open_only
+ def delete_relation(self, fromeid, rtype, toeid):
+ """provide direct access to the repository method to delete a relation.
+
+ This is equivalent to the following rql query:
+
+        DELETE X rtype Y WHERE X eid fromeid, Y eid toeid
+
+ without read security check but also all the burden of rql execution.
+ You may use this in hooks when you know both eids of the relation you
+ want to delete.
+ """
+ with self.security_enabled(False, False):
+ if self.vreg.schema[rtype].inlined:
+ entity = self.entity_from_eid(fromeid)
+ entity.cw_attr_cache[rtype] = None
+ self.repo.glob_update_entity(self, entity, set((rtype,)))
+ else:
+ self.repo.glob_delete_relation(self, fromeid, rtype, toeid)
+
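The expected payload shape for add_relations, with hypothetical relation types and eids; inlined relations are rerouted through entity updates, as the code above shows:

.. sourcecode:: python

    cnx.add_relations([
        ('knows',    [(123, 456), (123, 789)]),  # plain relation
        ('in_group', [(123, 2)]),                # inlined -> entity update
    ])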
+ # relations cache handling #################################################
+
+ @_open_only
+ def update_rel_cache_add(self, subject, rtype, object, symmetric=False):
+ self._update_entity_rel_cache_add(subject, rtype, 'subject', object)
+ if symmetric:
+ self._update_entity_rel_cache_add(object, rtype, 'subject', subject)
+ else:
+ self._update_entity_rel_cache_add(object, rtype, 'object', subject)
+
+ @_open_only
+ def update_rel_cache_del(self, subject, rtype, object, symmetric=False):
+ self._update_entity_rel_cache_del(subject, rtype, 'subject', object)
+ if symmetric:
+ self._update_entity_rel_cache_del(object, rtype, 'object', object)
+ else:
+ self._update_entity_rel_cache_del(object, rtype, 'object', subject)
+
+ @_open_only
+ def _update_entity_rel_cache_add(self, eid, rtype, role, targeteid):
+ try:
+ entity = self.entity_cache(eid)
+ except KeyError:
+ return
+ rcache = entity.cw_relation_cached(rtype, role)
+ if rcache is not None:
+ rset, entities = rcache
+ rset = rset.copy()
+ entities = list(entities)
+ rset.rows.append([targeteid])
+ if not isinstance(rset.description, list): # else description not set
+ rset.description = list(rset.description)
+ rset.description.append([self.entity_metas(targeteid)['type']])
+ targetentity = self.entity_from_eid(targeteid)
+ if targetentity.cw_rset is None:
+ targetentity.cw_rset = rset
+ targetentity.cw_row = rset.rowcount
+ targetentity.cw_col = 0
+ rset.rowcount += 1
+ entities.append(targetentity)
+ entity._cw_related_cache['%s_%s' % (rtype, role)] = (
+ rset, tuple(entities))
+
+ @_open_only
+ def _update_entity_rel_cache_del(self, eid, rtype, role, targeteid):
+ try:
+ entity = self.entity_cache(eid)
+ except KeyError:
+ return
+ rcache = entity.cw_relation_cached(rtype, role)
+ if rcache is not None:
+ rset, entities = rcache
+ for idx, row in enumerate(rset.rows):
+ if row[0] == targeteid:
+ break
+ else:
+                # this may occur if the cache has been filled by a hook
+ # after the database update
+ self.debug('cache inconsistency for %s %s %s %s', eid, rtype,
+ role, targeteid)
+ return
+ rset = rset.copy()
+ entities = list(entities)
+ del rset.rows[idx]
+ if isinstance(rset.description, list): # else description not set
+ del rset.description[idx]
+ del entities[idx]
+ rset.rowcount -= 1
+ entity._cw_related_cache['%s_%s' % (rtype, role)] = (
+ rset, tuple(entities))
# Tracking of entities added or removed in the transaction ##################
+ @_open_only
def deleted_in_transaction(self, eid):
"""return True if the entity of the given eid is being deleted in the
current transaction
"""
- return eid in self.data.get('pendingeids', ())
+ return eid in self.transaction_data.get('pendingeids', ())
+ @_open_only
def added_in_transaction(self, eid):
"""return True if the entity of the given eid is being created in the
current transaction
"""
- return eid in self.data.get('neweids', ())
+ return eid in self.transaction_data.get('neweids', ())
# Operation management ####################################################
+ @_open_only
def add_operation(self, operation, index=None):
"""add an operation to be executed at the end of the transaction"""
if index is None:
@@ -553,6 +856,15 @@
# Hooks control ###########################################################
+ @_open_only
+ def allow_all_hooks_but(self, *categories):
+ return _hooks_control(self, HOOKS_ALLOW_ALL, *categories)
+
+ @_open_only
+ def deny_all_hooks_but(self, *categories):
+ return _hooks_control(self, HOOKS_DENY_ALL, *categories)
+
+ @_open_only
def disable_hook_categories(self, *categories):
"""disable the given hook categories:
@@ -572,6 +884,7 @@
disabledcats |= changes # changes is small hence faster
return tuple(changes)
+ @_open_only
def enable_hook_categories(self, *categories):
"""enable the given hook categories:
@@ -591,6 +904,7 @@
disabledcats -= changes # changes is small hence faster
return tuple(changes)
+ @_open_only
def is_hook_category_activated(self, category):
"""return a boolean telling if the given category is currently activated
or not
@@ -599,6 +913,7 @@
return category in self.enabled_hook_cats
return category not in self.disabled_hook_cats
+ @_open_only
def is_hook_activated(self, hook):
"""return a boolean telling if the given hook class is currently
activated or not
@@ -606,11 +921,18 @@
return self.is_hook_category_activated(hook.category)
# Security management #####################################################
+
+ @_open_only
+ def security_enabled(self, read=None, write=None):
+ return _security_enabled(self, read=read, write=write)
+
@property
+ @_open_only
def read_security(self):
return self._read_security
@read_security.setter
+ @_open_only
def read_security(self, activated):
oldmode = self._read_security
self._read_security = activated
@@ -636,53 +958,222 @@
# undo support ############################################################
+ @_open_only
def ertype_supports_undo(self, ertype):
return self.undo_actions and ertype not in NO_UNDO_TYPES
+ @_open_only
def transaction_uuid(self, set=True):
- uuid = self.data.get('tx_uuid')
+ uuid = self.transaction_data.get('tx_uuid')
if set and uuid is None:
- raise KeyError
+ self.transaction_data['tx_uuid'] = uuid = uuid4().hex
+ self.repo.system_source.start_undoable_transaction(self, uuid)
return uuid
+ @_open_only
def transaction_inc_action_counter(self):
- num = self.data.setdefault('tx_action_count', 0) + 1
- self.data['tx_action_count'] = num
+ num = self.transaction_data.setdefault('tx_action_count', 0) + 1
+ self.transaction_data['tx_action_count'] = num
return num
# db-api like interface ###################################################
+ @_open_only
def source_defs(self):
return self.repo.source_defs()
+ @deprecated('[3.19] use .entity_metas(eid) instead')
+ @_with_cnx_set
+ @_open_only
def describe(self, eid, asdict=False):
"""return a tuple (type, sourceuri, extid) for the entity with id <eid>"""
- metas = self.repo.type_and_source_from_eid(eid, self)
+ etype, extid, source = self.repo.type_and_source_from_eid(eid, self)
+ metas = {'type': etype, 'source': source, 'extid': extid}
if asdict:
- return dict(zip(('type', 'source', 'extid', 'asource'), metas))
- # XXX :-1 for cw compat, use asdict=True for full information
- return metas[:-1]
+            metas['asource'] = metas['source'] # XXX pre 3.19 client compat
+            return metas
+ return etype, source, extid
+
+ @_with_cnx_set
+ @_open_only
+ def entity_metas(self, eid):
+        """return a dict with 'type', 'source' and 'extid' keys for the entity with id <eid>"""
+ etype, extid, source = self.repo.type_and_source_from_eid(eid, self)
+ return {'type': etype, 'source': source, 'extid': extid}
+
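Typical use of the new accessor, replacing the deprecated describe() (eid is assumed to exist):

.. sourcecode:: python

    metas = cnx.entity_metas(eid)
    print metas['type'], metas['source'], metas['extid']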
+ # core method #############################################################
+
+ @_with_cnx_set
+ @_open_only
+ def execute(self, rql, kwargs=None, eid_key=None, build_descr=True):
+ """db-api like method directly linked to the querier execute method.
+
+ See :meth:`cubicweb.dbapi.Cursor.execute` documentation.
+ """
+ self._session_timestamp.touch()
+ if eid_key is not None:
+ warn('[3.8] eid_key is deprecated, you can safely remove this argument',
+ DeprecationWarning, stacklevel=2)
+ rset = self._execute(self, rql, kwargs, build_descr)
+ rset.req = self
+ self._session_timestamp.touch()
+ return rset
+
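A minimal query through the new Connection.execute (the login value is made up):

.. sourcecode:: python

    rset = cnx.execute('Any X WHERE X is CWUser, X login %(l)s',
                       {'l': u'admin'})
    for user in rset.entities():
        print user.login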
+ @_open_only
+ def rollback(self, free_cnxset=True, reset_pool=None):
+ """rollback the current transaction"""
+ if reset_pool is not None:
+ warn('[3.13] use free_cnxset argument instead for reset_pool',
+ DeprecationWarning, stacklevel=2)
+ free_cnxset = reset_pool
+ cnxset = self.cnxset
+ if cnxset is None:
+ self.clear()
+ self._session_timestamp.touch()
+ self.debug('rollback transaction %s done (no db activity)', self.connectionid)
+ return
+ try:
+ # by default, operations are executed with security turned off
+ with self.security_enabled(False, False):
+ while self.pending_operations:
+ try:
+ operation = self.pending_operations.pop(0)
+ operation.handle_event('rollback_event')
+ except BaseException:
+ self.critical('rollback error', exc_info=sys.exc_info())
+ continue
+ cnxset.rollback()
+ self.debug('rollback for transaction %s done', self.connectionid)
+ finally:
+ self._session_timestamp.touch()
+ if free_cnxset:
+ self._free_cnxset(ignoremode=True)
+ self.clear()
- def source_from_eid(self, eid):
- """return the source where the entity with id <eid> is located"""
- return self.repo.source_from_eid(eid, self)
+ @_open_only
+ def commit(self, free_cnxset=True, reset_pool=None):
+ """commit the current session's transaction"""
+ if reset_pool is not None:
+ warn('[3.13] use free_cnxset argument instead for reset_pool',
+ DeprecationWarning, stacklevel=2)
+ free_cnxset = reset_pool
+ if self.cnxset is None:
+ assert not self.pending_operations
+ self.clear()
+ self._session_timestamp.touch()
+ self.debug('commit transaction %s done (no db activity)', self.connectionid)
+ return
+ cstate = self.commit_state
+ if cstate == 'uncommitable':
+ raise QueryError('transaction must be rolled back')
+ if cstate is not None:
+ return
+ # on rollback, an operation should have the following state
+ # information:
+ # - processed by the precommit/commit event or not
+ # - if processed, is it the failed operation
+ debug = server.DEBUG & server.DBG_OPS
+ try:
+ # by default, operations are executed with security turned off
+ with self.security_enabled(False, False):
+ processed = []
+ self.commit_state = 'precommit'
+ if debug:
+ print self.commit_state, '*' * 20
+ try:
+ while self.pending_operations:
+ operation = self.pending_operations.pop(0)
+ operation.processed = 'precommit'
+ processed.append(operation)
+ if debug:
+ print operation
+ operation.handle_event('precommit_event')
+ self.pending_operations[:] = processed
+ self.debug('precommit transaction %s done', self.connectionid)
+ except BaseException:
+ # if error on [pre]commit:
+ #
+ # * set .failed = True on the operation causing the failure
+ # * call revert<event>_event on processed operations
+ # * call rollback_event on *all* operations
+ #
+ # that seems more natural than not calling rollback_event
+ # for processed operations, and allow generic rollback
+ # instead of having to implements rollback, revertprecommit
+                # and revertcommit; that will be enough in most cases.
+ operation.failed = True
+ if debug:
+ print self.commit_state, '*' * 20
+ for operation in reversed(processed):
+ if debug:
+ print operation
+ try:
+ operation.handle_event('revertprecommit_event')
+ except BaseException:
+ self.critical('error while reverting precommit',
+ exc_info=True)
+ # XXX use slice notation since self.pending_operations is a
+ # read-only property.
+ self.pending_operations[:] = processed + self.pending_operations
+ self.rollback(free_cnxset)
+ raise
+ self.cnxset.commit()
+ self.commit_state = 'postcommit'
+ if debug:
+ print self.commit_state, '*' * 20
+ while self.pending_operations:
+ operation = self.pending_operations.pop(0)
+ if debug:
+ print operation
+ operation.processed = 'postcommit'
+ try:
+ operation.handle_event('postcommit_event')
+ except BaseException:
+ self.critical('error while postcommit',
+ exc_info=sys.exc_info())
+ self.debug('postcommit transaction %s done', self.connectionid)
+ return self.transaction_uuid(set=False)
+ finally:
+ self._session_timestamp.touch()
+ if free_cnxset:
+ self._free_cnxset(ignoremode=True)
+ self.clear()
# resource accessors ######################################################
+ @_with_cnx_set
+ @_open_only
+ def call_service(self, regid, **kwargs):
+        json.dumps(kwargs) # This line ensures that people use serialisable
+                           # arguments for call_service. This is very
+                           # important to enforce from the start, to make
+                           # sure the RPC version stays available.
+ self.info('calling service %s', regid)
+ service = self.vreg['services'].select(regid, self, **kwargs)
+ result = service.call(**kwargs)
+        json.dumps(result) # This line ensures that services have serialisable
+                           # output. This is very important to enforce from
+                           # the start, to make sure the RPC version stays
+                           # available.
+ return result
+
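The json.dumps round-trips make non-serialisable arguments fail fast; a sketch with a hypothetical service regid:

.. sourcecode:: python

    result = cnx.call_service('my.service', arg=42)   # ok, JSON-safe
    cnx.call_service('my.service', arg=object())      # TypeError from json.dumps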
+ @_with_cnx_set
+ @_open_only
def system_sql(self, sql, args=None, rollback_on_failure=True):
"""return a sql cursor on the system database"""
if sql.split(None, 1)[0].upper() != 'SELECT':
self.mode = 'write'
- source = self.cnxset.source('system')
+ source = self.repo.system_source
try:
return source.doexec(self, sql, args, rollback=rollback_on_failure)
except (source.OperationalError, source.InterfaceError):
if not rollback_on_failure:
raise
source.warning("trying to reconnect")
- self.cnxset.reconnect(source)
+ self.cnxset.reconnect()
return source.doexec(self, sql, args, rollback=rollback_on_failure)
+ @_open_only
def rtype_eids_rdef(self, rtype, eidfrom, eidto):
# use type_and_source_from_eid instead of type_from_eid for optimization
# (avoid two extra methods call)
@@ -691,31 +1182,49 @@
return self.vreg.schema.rschema(rtype).rdefs[(subjtype, objtype)]
-def tx_attr(attr_name, writable=False):
- """return a property to forward attribute access to transaction.
+def cnx_attr(attr_name, writable=False):
+ """return a property to forward attribute access to connection.
This is to be used by session"""
args = {}
- def attr_from_tx(session):
- return getattr(session._tx, attr_name)
- args['fget'] = attr_from_tx
+ @deprecated('[3.19] use a Connection object instead')
+ def attr_from_cnx(session):
+ return getattr(session._cnx, attr_name)
+ args['fget'] = attr_from_cnx
if writable:
+ @deprecated('[3.19] use a Connection object instead')
def write_attr(session, value):
- return setattr(session._tx, attr_name, value)
+ return setattr(session._cnx, attr_name, value)
args['fset'] = write_attr
return property(**args)
-def tx_meth(meth_name):
- """return a function forwarding calls to transaction.
+def cnx_meth(meth_name):
+ """return a function forwarding calls to connection.
This is to be used by session"""
- def meth_from_tx(session, *args, **kwargs):
- return getattr(session._tx, meth_name)(*args, **kwargs)
- meth_from_tx.__doc__ = getattr(Transaction, meth_name).__doc__
- return meth_from_tx
+ @deprecated('[3.19] use a Connection object instead')
+ def meth_from_cnx(session, *args, **kwargs):
+ result = getattr(session._cnx, meth_name)(*args, **kwargs)
+ if getattr(result, '_cw', None) is not None:
+ result._cw = session
+ return result
+ meth_from_cnx.__doc__ = getattr(Connection, meth_name).__doc__
+ return meth_from_cnx
+
+class Timestamp(object):
+
+ def __init__(self):
+ self.value = time()
+
+ def touch(self):
+ self.value = time()
+
+ def __float__(self):
+ return float(self.value)
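Timestamp is a small shared mutable clock: the session hands the same instance to every Connection so that any activity refreshes the session's last-used time. A minimal sketch:

.. sourcecode:: python

    ts = Timestamp()
    before = float(ts)
    ts.touch()              # called from Connection.execute/commit/rollback
    assert float(ts) >= before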
-class Session(RequestSessionBase):
+class Session(RequestSessionBase): # XXX repoapi: stop being a
+ # RequestSessionBase at some point
"""Repository user session
This ties all together:
@@ -733,23 +1242,23 @@
:attr:`data` is a dictionary containing shared data, used to communicate
extra information between the client and the repository
- :attr:`_txs` is a dictionary of :class:`TransactionData` instance, one
- for each running transaction. The key is the transaction id. By default
- the transaction id is the thread name but it can be otherwise (per dbapi
+    :attr:`_cnxs` is a dictionary of :class:`Connection` instances, one
+ for each running connection. The key is the connection id. By default
+ the connection id is the thread name but it can be otherwise (per dbapi
cursor for instance, or per thread name *from another process*).
- :attr:`__threaddata` is a thread local storage whose `tx` attribute
- refers to the proper instance of :class:`Transaction` according to the
- transaction.
+ :attr:`__threaddata` is a thread local storage whose `cnx` attribute
+ refers to the proper instance of :class:`Connection` according to the
+ connection.
- You should not have to use neither :attr:`_tx` nor :attr:`__threaddata`,
- simply access transaction data transparently through the :attr:`_tx`
+    You should not need to use either :attr:`_cnx` or :attr:`__threaddata`;
+ simply access connection data transparently through the :attr:`_cnx`
property. Also, you usually don't have to access it directly since current
- transaction's data may be accessed/modified through properties / methods:
+ connection's data may be accessed/modified through properties / methods:
- :attr:`transaction_data`, similarly to :attr:`data`, is a dictionary
+ :attr:`connection_data`, similarly to :attr:`data`, is a dictionary
containing some shared data that should be cleared at the end of the
- transaction. Hooks and operations may put arbitrary data in there, and
+ connection. Hooks and operations may put arbitrary data in there, and
this may also be used as a communication channel between the client and
the repository.
@@ -758,7 +1267,7 @@
.. automethod:: cubicweb.server.session.Session.added_in_transaction
.. automethod:: cubicweb.server.session.Session.deleted_in_transaction
- Transaction state information:
+ Connection state information:
:attr:`running_dbapi_query`, boolean flag telling if the executing query
is coming from a dbapi connection or is a query from within the repository
@@ -831,10 +1340,10 @@
def __init__(self, user, repo, cnxprops=None, _id=None):
super(Session, self).__init__(repo.vreg)
- self.id = _id or make_uid(unormalize(user.login).encode('UTF8'))
- self.user = user
+ self.sessionid = _id or make_uid(unormalize(user.login).encode('UTF8'))
+ self.user = user # XXX repoapi: deprecated and store only a login.
self.repo = repo
- self.timestamp = time()
+ self._timestamp = Timestamp()
self.default_mode = 'read'
# short cut to querier .execute method
self._execute = repo.querier.execute
@@ -844,53 +1353,96 @@
# i18n initialization
self.set_language(user.prefered_language())
### internals
- # Transaction of this section
- self._txs = {}
+        # Connections of this session
+        self._cnxs = {} # XXX repoapi: remove this when nobody uses the
+                        # session as a Connection
# Data local to the thread
- self.__threaddata = threading.local()
+ self.__threaddata = threading.local() # XXX repoapi: remove this when
+                                              # nobody uses the session as a Connection
self._cnxset_tracker = CnxSetTracker()
self._closed = False
self._lock = threading.RLock()
def __unicode__(self):
return '<session %s (%s 0x%x)>' % (
- unicode(self.user.login), self.id, id(self))
+ unicode(self.user.login), self.sessionid, id(self))
+ @property
+ def timestamp(self):
+ return float(self._timestamp)
+
+ @property
+ @deprecated('[3.19] session.id is deprecated. use session.sessionid')
+ def id(self):
+ return self.sessionid
- def get_tx(self, txid):
- """return the <txid> transaction attached to this session
+ @property
+ def login(self):
+ return self.user.login
+
+ def new_cnx(self):
+ """Return a new Connection object linked to the session
- Transaction is created if necessary"""
- with self._lock: # no transaction exist with the same id
+ The returned Connection will *not* be managed by the Session.
+ """
+ return Connection(self)
+
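The recommended new-style usage: get an unmanaged Connection from the session and drive its life cycle explicitly (session is assumed to exist):

.. sourcecode:: python

    with session.new_cnx() as cnx:
        cnx.execute('Any X WHERE X is CWGroup')
        cnx.commit()
    # exiting the block frees the cnxset and closes the connection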
+ def _get_cnx(self, cnxid):
+ """return the <cnxid> connection attached to this session
+
+ Connection is created if necessary"""
+ with self._lock: # no connection exist with the same id
try:
if self.closed:
- raise SessionClosedError('try to access connections set on a closed session %s' % self.id)
- tx = self._txs[txid]
+ raise SessionClosedError('try to access connections set on'
+ ' a closed session %s' % self.id)
+ cnx = self._cnxs[cnxid]
+ assert cnx._session_handled
except KeyError:
- rewriter = RQLRewriter(self)
- tx = Transaction(txid, self, rewriter)
- self._txs[txid] = tx
- return tx
+ cnx = Connection(self, cnxid=cnxid, session_handled=True)
+ self._cnxs[cnxid] = cnx
+ cnx.__enter__()
+ return cnx
- def set_tx(self, txid=None):
- """set the default transaction of the current thread to <txid>
+ def _close_cnx(self, cnx):
+ """Close a Connection related to a session"""
+ assert cnx._session_handled
+ cnx.__exit__()
+ self._cnxs.pop(cnx.connectionid, None)
+ try:
+ if self.__threaddata.cnx is cnx:
+ del self.__threaddata.cnx
+ except AttributeError:
+ pass
- Transaction is created if necessary"""
- if txid is None:
- txid = threading.currentThread().getName()
- self.__threaddata.tx = self.get_tx(txid)
+ def set_cnx(self, cnxid=None):
+        # XXX repoapi: remove this when nobody uses the session as a Connection
+ """set the default connection of the current thread to <cnxid>
+
+ Connection is created if necessary"""
+ if cnxid is None:
+ cnxid = threading.currentThread().getName()
+ cnx = self._get_cnx(cnxid)
+        # New-style connections should not be accessed through the session.
+ assert cnx._session_handled
+ self.__threaddata.cnx = cnx
@property
- def _tx(self):
- """default transaction for current session in current thread"""
+ def _cnx(self):
+ """default connection for current session in current thread"""
try:
- return self.__threaddata.tx
+ return self.__threaddata.cnx
except AttributeError:
- self.set_tx()
- return self.__threaddata.tx
+ self.set_cnx()
+ return self.__threaddata.cnx
+ @deprecated('[3.19] use a Connection object instead')
def get_option_value(self, option, foreid=None):
- return self.repo.get_option_value(option, foreid)
+ if foreid is not None:
+ warn('[3.19] foreid argument is deprecated', DeprecationWarning,
+ stacklevel=2)
+ return self.repo.get_option_value(option)
+ @deprecated('[3.19] use a Connection object instead')
def transaction(self, free_cnxset=True):
"""return context manager to enter a transaction for the session: when
exiting the `with` block on exception, call `session.rollback()`, else
@@ -901,184 +1453,55 @@
"""
return transaction(self, free_cnxset)
-
- @deprecated('[3.17] do not use hijack_user. create new Session object')
- def hijack_user(self, user):
- """return a fake request/session using specified user"""
- session = Session(user, self.repo)
- tx = session._tx
- tx.cnxset = self.cnxset
- # share pending_operations, else operation added in the hi-jacked
- # session such as SendMailOp won't ever be processed
- tx.pending_operations = self.pending_operations
- # everything in tx.data should be copied back but the entity
- # type cache we don't want to avoid security pb
- tx.data = self._tx.data.copy()
- tx.data.pop('ecache', None)
- return session
-
- def add_relation(self, fromeid, rtype, toeid):
- """provide direct access to the repository method to add a relation.
-
- This is equivalent to the following rql query:
-
- SET X rtype Y WHERE X eid fromeid, T eid toeid
-
- without read security check but also all the burden of rql execution.
- You may use this in hooks when you know both eids of the relation you
- want to add.
- """
- self.add_relations([(rtype, [(fromeid, toeid)])])
-
- def add_relations(self, relations):
- '''set many relation using a shortcut similar to the one in add_relation
-
- relations is a list of 2-uples, the first element of each
- 2-uple is the rtype, and the second is a list of (fromeid,
- toeid) tuples
- '''
- edited_entities = {}
- relations_dict = {}
- with self.security_enabled(False, False):
- for rtype, eids in relations:
- if self.vreg.schema[rtype].inlined:
- for fromeid, toeid in eids:
- if fromeid not in edited_entities:
- entity = self.entity_from_eid(fromeid)
- edited = EditedEntity(entity)
- edited_entities[fromeid] = edited
- else:
- edited = edited_entities[fromeid]
- edited.edited_attribute(rtype, toeid)
- else:
- relations_dict[rtype] = eids
- self.repo.glob_add_relations(self, relations_dict)
- for edited in edited_entities.itervalues():
- self.repo.glob_update_entity(self, edited)
-
-
- def delete_relation(self, fromeid, rtype, toeid):
- """provide direct access to the repository method to delete a relation.
-
- This is equivalent to the following rql query:
-
- DELETE X rtype Y WHERE X eid fromeid, T eid toeid
-
- without read security check but also all the burden of rql execution.
- You may use this in hooks when you know both eids of the relation you
- want to delete.
- """
- with self.security_enabled(False, False):
- if self.vreg.schema[rtype].inlined:
- entity = self.entity_from_eid(fromeid)
- entity.cw_attr_cache[rtype] = None
- self.repo.glob_update_entity(self, entity, set((rtype,)))
- else:
- self.repo.glob_delete_relation(self, fromeid, rtype, toeid)
+ add_relation = cnx_meth('add_relation')
+ add_relations = cnx_meth('add_relations')
+ delete_relation = cnx_meth('delete_relation')
# relations cache handling #################################################
- def update_rel_cache_add(self, subject, rtype, object, symmetric=False):
- self._update_entity_rel_cache_add(subject, rtype, 'subject', object)
- if symmetric:
- self._update_entity_rel_cache_add(object, rtype, 'subject', subject)
- else:
- self._update_entity_rel_cache_add(object, rtype, 'object', subject)
-
- def update_rel_cache_del(self, subject, rtype, object, symmetric=False):
- self._update_entity_rel_cache_del(subject, rtype, 'subject', object)
- if symmetric:
- self._update_entity_rel_cache_del(object, rtype, 'object', object)
- else:
- self._update_entity_rel_cache_del(object, rtype, 'object', subject)
-
- def _update_entity_rel_cache_add(self, eid, rtype, role, targeteid):
- try:
- entity = self.entity_cache(eid)
- except KeyError:
- return
- rcache = entity.cw_relation_cached(rtype, role)
- if rcache is not None:
- rset, entities = rcache
- rset = rset.copy()
- entities = list(entities)
- rset.rows.append([targeteid])
- if not isinstance(rset.description, list): # else description not set
- rset.description = list(rset.description)
- rset.description.append([self.describe(targeteid)[0]])
- targetentity = self.entity_from_eid(targeteid)
- if targetentity.cw_rset is None:
- targetentity.cw_rset = rset
- targetentity.cw_row = rset.rowcount
- targetentity.cw_col = 0
- rset.rowcount += 1
- entities.append(targetentity)
- entity._cw_related_cache['%s_%s' % (rtype, role)] = (
- rset, tuple(entities))
-
- def _update_entity_rel_cache_del(self, eid, rtype, role, targeteid):
- try:
- entity = self.entity_cache(eid)
- except KeyError:
- return
- rcache = entity.cw_relation_cached(rtype, role)
- if rcache is not None:
- rset, entities = rcache
- for idx, row in enumerate(rset.rows):
- if row[0] == targeteid:
- break
- else:
- # this may occurs if the cache has been filed by a hook
- # after the database update
- self.debug('cache inconsistency for %s %s %s %s', eid, rtype,
- role, targeteid)
- return
- rset = rset.copy()
- entities = list(entities)
- del rset.rows[idx]
- if isinstance(rset.description, list): # else description not set
- del rset.description[idx]
- del entities[idx]
- rset.rowcount -= 1
- entity._cw_related_cache['%s_%s' % (rtype, role)] = (
- rset, tuple(entities))
+ update_rel_cache_add = cnx_meth('update_rel_cache_add')
+ update_rel_cache_del = cnx_meth('update_rel_cache_del')
# resource accessors ######################################################
- system_sql = tx_meth('system_sql')
- deleted_in_transaction = tx_meth('deleted_in_transaction')
- added_in_transaction = tx_meth('added_in_transaction')
- rtype_eids_rdef = tx_meth('rtype_eids_rdef')
+ system_sql = cnx_meth('system_sql')
+ deleted_in_transaction = cnx_meth('deleted_in_transaction')
+ added_in_transaction = cnx_meth('added_in_transaction')
+ rtype_eids_rdef = cnx_meth('rtype_eids_rdef')
# security control #########################################################
-
+ @deprecated('[3.19] use a Connection object instead')
def security_enabled(self, read=None, write=None):
- return _security_enabled(self, read=read, write=write)
+ return _session_security_enabled(self, read=read, write=write)
- read_security = tx_attr('read_security', writable=True)
- write_security = tx_attr('write_security', writable=True)
- running_dbapi_query = tx_attr('running_dbapi_query')
+ read_security = cnx_attr('read_security', writable=True)
+ write_security = cnx_attr('write_security', writable=True)
+ running_dbapi_query = cnx_attr('running_dbapi_query')
# hooks activation control #################################################
# all hooks should be activated during normal execution
+
+ @deprecated('[3.19] use a Connection object instead')
def allow_all_hooks_but(self, *categories):
- return _hooks_control(self, HOOKS_ALLOW_ALL, *categories)
+ return _session_hooks_control(self, HOOKS_ALLOW_ALL, *categories)
+ @deprecated('[3.19] use a Connection object instead')
def deny_all_hooks_but(self, *categories):
- return _hooks_control(self, HOOKS_DENY_ALL, *categories)
-
- hooks_mode = tx_attr('hooks_mode')
+ return _session_hooks_control(self, HOOKS_DENY_ALL, *categories)
- disabled_hook_categories = tx_attr('disabled_hook_cats')
- enabled_hook_categories = tx_attr('enabled_hook_cats')
- disable_hook_categories = tx_meth('disable_hook_categories')
- enable_hook_categories = tx_meth('enable_hook_categories')
- is_hook_category_activated = tx_meth('is_hook_category_activated')
- is_hook_activated = tx_meth('is_hook_activated')
+ hooks_mode = cnx_attr('hooks_mode')
+
+ disabled_hook_categories = cnx_attr('disabled_hook_cats')
+ enabled_hook_categories = cnx_attr('enabled_hook_cats')
+ disable_hook_categories = cnx_meth('disable_hook_categories')
+ enable_hook_categories = cnx_meth('enable_hook_categories')
+ is_hook_category_activated = cnx_meth('is_hook_category_activated')
+ is_hook_activated = cnx_meth('is_hook_activated')
# connection management ###################################################
+ @deprecated('[3.19] use a Connection object instead')
def keep_cnxset_mode(self, mode):
"""set `mode`, e.g. how the session will keep its connections set:
@@ -1099,16 +1522,17 @@
else: # mode == 'write'
self.default_mode = 'read'
- mode = tx_attr('mode', writable=True)
- commit_state = tx_attr('commit_state', writable=True)
+ mode = cnx_attr('mode', writable=True)
+ commit_state = cnx_attr('commit_state', writable=True)
@property
+ @deprecated('[3.19] use a Connection object instead')
def cnxset(self):
"""connections set, set according to transaction mode for each query"""
if self._closed:
self.free_cnxset(True)
raise SessionClosedError('try to access connections set on a closed session %s' % self.id)
- return self._tx.cnxset
+ return self._cnx.cnxset
def set_cnxset(self):
"""the session need a connections set to execute some queries"""
@@ -1116,20 +1540,27 @@
if self._closed:
self.free_cnxset(True)
raise SessionClosedError('try to set connections set on a closed session %s' % self.id)
- return self._tx.set_cnxset()
- free_cnxset = tx_meth('free_cnxset')
+ return self._cnx.set_cnxset()
+ free_cnxset = cnx_meth('free_cnxset')
+ ensure_cnx_set = cnx_attr('ensure_cnx_set')
def _touch(self):
"""update latest session usage timestamp and reset mode to read"""
- self.timestamp = time()
- self.local_perm_cache.clear() # XXX simply move in tx.data, no?
+ self._timestamp.touch()
+
+ local_perm_cache = cnx_attr('local_perm_cache')
+ @local_perm_cache.setter
+ def local_perm_cache(self, value):
+        # the base class assigns an empty dict :-(
+ assert value == {}
+ pass
# shared data handling ###################################################
def get_shared_data(self, key, default=None, pop=False, txdata=False):
"""return value associated to `key` in session data"""
if txdata:
- data = self._tx.data
+ return self._cnx.get_shared_data(key, default, pop, txdata=True)
else:
data = self.data
if pop:
@@ -1140,47 +1571,39 @@
def set_shared_data(self, key, value, txdata=False):
"""set value associated to `key` in session data"""
if txdata:
- self._tx.data[key] = value
+ return self._cnx.set_shared_data(key, value, txdata=True)
else:
self.data[key] = value
# server-side service call #################################################
- def call_service(self, regid, async=False, **kwargs):
- return self.repo._call_service_with_session(self, regid, async,
- **kwargs)
-
+ call_service = cnx_meth('call_service')
# request interface #######################################################
@property
+ @deprecated('[3.19] use a Connection object instead')
def cursor(self):
"""return a rql cursor"""
return self
- set_entity_cache = tx_meth('set_entity_cache')
- entity_cache = tx_meth('entity_cache')
- cache_entities = tx_meth('cached_entities')
- drop_entity_cache = tx_meth('drop_entity_cache')
+ set_entity_cache = cnx_meth('set_entity_cache')
+ entity_cache = cnx_meth('entity_cache')
+ cache_entities = cnx_meth('cached_entities')
+ drop_entity_cache = cnx_meth('drop_entity_cache')
- def from_controller(self):
- """return the id (string) of the controller issuing the request (no
- sense here, always return 'view')
- """
- return 'view'
-
- source_defs = tx_meth('source_defs')
- describe = tx_meth('describe')
- source_from_eid = tx_meth('source_from_eid')
+ source_defs = cnx_meth('source_defs')
+ entity_metas = cnx_meth('entity_metas')
+ describe = cnx_meth('describe') # XXX deprecated in 3.19
- def execute(self, rql, kwargs=None, build_descr=True):
+ @deprecated('[3.19] use a Connection object instead')
+ def execute(self, *args, **kwargs):
"""db-api like method directly linked to the querier execute method.
See :meth:`cubicweb.dbapi.Cursor.execute` documentation.
"""
- self.timestamp = time() # update timestamp
- rset = self._execute(self, rql, kwargs, build_descr)
+ rset = self._cnx.execute(*args, **kwargs)
rset.req = self
return rset
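
The deprecated forwarders above all point at the same replacement: talk to the Connection directly. A minimal caller-side sketch, assuming `cnx` is a repoapi Connection (how it is obtained is outside this hunk):

    # 3.18 style, now deprecated:
    #     rset = session.execute('Any X WHERE X is CWUser')
    #     session.commit()
    # 3.19 style, per the deprecation messages; Connection is used as a
    # context manager elsewhere in this changeset:
    with cnx:
        rset = cnx.execute('Any X WHERE X is CWUser')
        cnx.commit()
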
@@ -1190,150 +1613,39 @@
by _touch
"""
try:
- tx = self.__threaddata.tx
+ cnx = self.__threaddata.cnx
except AttributeError:
pass
else:
if free_cnxset:
- self.free_cnxset()
- if tx.ctx_count == 0:
- self._clear_thread_storage(tx)
+ cnx._free_cnxset()
+ if cnx.ctx_count == 0:
+ self._close_cnx(cnx)
else:
- self._clear_tx_storage(tx)
+ cnx.clear()
else:
- self._clear_tx_storage(tx)
+ cnx.clear()
- def _clear_thread_storage(self, tx):
- self._txs.pop(tx.transactionid, None)
- try:
- del self.__threaddata.tx
- except AttributeError:
- pass
-
- def _clear_tx_storage(self, tx):
- tx.clear()
- tx._rewriter = RQLRewriter(self)
-
+ @deprecated('[3.19] use a Connection object instead')
def commit(self, free_cnxset=True, reset_pool=None):
"""commit the current session's transaction"""
- if reset_pool is not None:
- warn('[3.13] use free_cnxset argument instead for reset_pool',
- DeprecationWarning, stacklevel=2)
- free_cnxset = reset_pool
- if self.cnxset is None:
- assert not self.pending_operations
- self._clear_thread_data()
- self._touch()
- self.debug('commit session %s done (no db activity)', self.id)
- return
- cstate = self.commit_state
+ cstate = self._cnx.commit_state
if cstate == 'uncommitable':
raise QueryError('transaction must be rolled back')
- if cstate is not None:
- return
- # on rollback, an operation should have the following state
- # information:
- # - processed by the precommit/commit event or not
- # - if processed, is it the failed operation
- debug = server.DEBUG & server.DBG_OPS
try:
- # by default, operations are executed with security turned off
- with self.security_enabled(False, False):
- processed = []
- self.commit_state = 'precommit'
- if debug:
- print self.commit_state, '*' * 20
- try:
- while self.pending_operations:
- operation = self.pending_operations.pop(0)
- operation.processed = 'precommit'
- processed.append(operation)
- if debug:
- print operation
- operation.handle_event('precommit_event')
- self.pending_operations[:] = processed
- self.debug('precommit session %s done', self.id)
- except BaseException:
- # save exception context, it may be clutered below by
- # exception in revert_* event
- exc_info = sys.exc_info()
- # if error on [pre]commit:
- #
- # * set .failed = True on the operation causing the failure
- # * call revert<event>_event on processed operations
- # * call rollback_event on *all* operations
- #
- # that seems more natural than not calling rollback_event
- # for processed operations, and allow generic rollback
- # instead of having to implements rollback, revertprecommit
- # and revertcommit, that will be enough in mont case.
- operation.failed = True
- if debug:
- print self.commit_state, '*' * 20
- for operation in reversed(processed):
- if debug:
- print operation
- try:
- operation.handle_event('revertprecommit_event')
- except BaseException:
- self.critical('error while reverting precommit',
- exc_info=True)
- # XXX use slice notation since self.pending_operations is a
- # read-only property.
- self.pending_operations[:] = processed + self.pending_operations
- self.rollback(free_cnxset)
- raise exc_info[0], exc_info[1], exc_info[2]
- self.cnxset.commit()
- self.commit_state = 'postcommit'
- if debug:
- print self.commit_state, '*' * 20
- while self.pending_operations:
- operation = self.pending_operations.pop(0)
- if debug:
- print operation
- operation.processed = 'postcommit'
- try:
- operation.handle_event('postcommit_event')
- except BaseException:
- self.critical('error while postcommit',
- exc_info=sys.exc_info())
- self.debug('postcommit session %s done', self.id)
- return self.transaction_uuid(set=False)
+ return self._cnx.commit(free_cnxset, reset_pool)
finally:
- self._touch()
- if free_cnxset:
- self.free_cnxset(ignoremode=True)
self._clear_thread_data(free_cnxset)
- def rollback(self, free_cnxset=True, reset_pool=None):
+ @deprecated('[3.19] use a Connection object instead')
+ def rollback(self, *args, **kwargs):
"""rollback the current session's transaction"""
- if reset_pool is not None:
- warn('[3.13] use free_cnxset argument instead for reset_pool',
- DeprecationWarning, stacklevel=2)
- free_cnxset = reset_pool
- # don't use self.cnxset, rollback may be called with _closed == True
- cnxset = self._tx.cnxset
- if cnxset is None:
- self._clear_thread_data()
- self._touch()
- self.debug('rollback session %s done (no db activity)', self.id)
- return
+ return self._rollback(*args, **kwargs)
+
+ def _rollback(self, free_cnxset=True, **kwargs):
try:
- # by default, operations are executed with security turned off
- with self.security_enabled(False, False):
- while self.pending_operations:
- try:
- operation = self.pending_operations.pop(0)
- operation.handle_event('rollback_event')
- except BaseException:
- self.critical('rollback error', exc_info=sys.exc_info())
- continue
- cnxset.rollback()
- self.debug('rollback for session %s done', self.id)
+ return self._cnx.rollback(free_cnxset, **kwargs)
finally:
- self._touch()
- if free_cnxset:
- self.free_cnxset(ignoremode=True)
self._clear_thread_data(free_cnxset)
def close(self):
@@ -1342,63 +1654,64 @@
with self._lock:
self._closed = True
tracker.close()
- self.rollback()
- self.debug('waiting for open transaction of session: %s', self)
+ self._rollback()
+        self.debug('waiting for open connections of session: %s', self)
timeout = 10
pendings = tracker.wait(timeout)
if pendings:
- self.error('%i transaction still alive after 10 seconds, will close '
+            self.error('%i connections still alive after 10 seconds, will close '
'session anyway', len(pendings))
- for txid in pendings:
- tx = self._txs.get(txid)
- if tx is not None:
- # drop tx.cnxset
+ for cnxid in pendings:
+ cnx = self._cnxs.get(cnxid)
+ if cnx is not None:
+ # drop cnx.cnxset
with tracker:
try:
- cnxset = tx.cnxset
+ cnxset = cnx.cnxset
if cnxset is None:
continue
- tx.cnxset = None
+ cnx.cnxset = None
except RuntimeError:
msg = 'issue while force free of cnxset in %s'
- self.error(msg, tx)
+ self.error(msg, cnx)
# cnxset.reconnect() do an hard reset of the cnxset
# it force it to be freed
cnxset.reconnect()
self.repo._free_cnxset(cnxset)
del self.__threaddata
- del self._txs
+ del self._cnxs
@property
def closed(self):
- return not hasattr(self, '_txs')
+ return not hasattr(self, '_cnxs')
# transaction data/operations management ##################################
- transaction_data = tx_attr('data')
- pending_operations = tx_attr('pending_operations')
- pruned_hooks_cache = tx_attr('pruned_hooks_cache')
- add_operation = tx_meth('add_operation')
+ transaction_data = cnx_attr('transaction_data')
+ pending_operations = cnx_attr('pending_operations')
+ pruned_hooks_cache = cnx_attr('pruned_hooks_cache')
+ add_operation = cnx_meth('add_operation')
# undo support ############################################################
- ertype_supports_undo = tx_meth('ertype_supports_undo')
- transaction_inc_action_counter = tx_meth('transaction_inc_action_counter')
-
- def transaction_uuid(self, set=True):
- try:
- return self._tx.transaction_uuid(set=set)
- except KeyError:
- self._tx.data['tx_uuid'] = uuid = uuid4().hex
- self.repo.system_source.start_undoable_transaction(self, uuid)
- return uuid
+ ertype_supports_undo = cnx_meth('ertype_supports_undo')
+ transaction_inc_action_counter = cnx_meth('transaction_inc_action_counter')
+ transaction_uuid = cnx_meth('transaction_uuid')
# querier helpers #########################################################
- rql_rewriter = tx_attr('_rewriter')
+ rql_rewriter = cnx_attr('_rewriter')
# deprecated ###############################################################
+ @property
+ @deprecated('[3.19] use a Connection object instead')
+ def anonymous_session(self):
+        # XXX for now, anonymous-user is a web-side option.
+        # It will only be present inside an all-in-one instance.
+        # There is a plan to move it down to the global config.
+ return self.user.login == self.repo.config.get('anonymous-user')
+
@deprecated('[3.13] use getattr(session.rtype_eids_rdef(rtype, eidfrom, eidto), prop)')
def schema_rproperty(self, rtype, eidfrom, eidto, rprop):
return getattr(self.rtype_eids_rdef(rtype, eidfrom, eidto), rprop)
@@ -1438,7 +1751,7 @@
if not safe:
self.disable_hook_categories('integrity')
self.disable_hook_categories('security')
- self._tx.ctx_count += 1
+ self._cnx.ctx_count += 1
def __enter__(self):
return self
@@ -1452,7 +1765,7 @@
if self.repo.shutting_down:
self.free_cnxset(True)
raise ShuttingDown('repository is shutting down')
- return self._tx.cnxset
+ return self._cnx.cnxset
class InternalManager(object):
@@ -1460,10 +1773,12 @@
bootstrapping the repository or creating regular users according to
repository content
"""
- def __init__(self):
+ def __init__(self, lang='en'):
self.eid = -1
self.login = u'__internal_manager__'
self.properties = {}
+ self.groups = set(['managers'])
+ self.lang = lang
def matching_groups(self, groups):
return 1
@@ -1476,7 +1791,7 @@
def property_value(self, key):
if key == 'ui.language':
- return 'en'
+ return self.lang
return None
def prefered_language(self, language=None):
@@ -1501,3 +1816,4 @@
from logging import getLogger
from cubicweb import set_log_methods
set_log_methods(Session, getLogger('cubicweb.session'))
+set_log_methods(Connection, getLogger('cubicweb.session'))
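
Note: the cnx_attr/cnx_meth helpers this class now leans on are defined earlier in session.py and are not part of this excerpt. A minimal sketch of the forwarding pattern they implement, inferred from their usage here (the real helpers also carry deprecation plumbing):

    def cnx_meth(name):
        # forward a method call to the session's underlying Connection
        def meth(session, *args, **kwargs):
            return getattr(session._cnx, name)(*args, **kwargs)
        meth.__name__ = name
        return meth

    def cnx_attr(name, writable=False):
        # expose a Connection attribute as a (possibly writable) property
        def getter(session):
            return getattr(session._cnx, name)
        setter = None
        if writable:
            def setter(session, value):
                setattr(session._cnx, name, value)
        return property(getter, setter)
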
--- a/server/sources/__init__.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/sources/__init__.py Mon Feb 17 15:32:50 2014 +0100
@@ -61,32 +61,9 @@
# return true so it can be used as assertion (and so be killed by python -O)
return True
-class TimedCache(dict):
- def __init__(self, ttl):
- # time to live in seconds
- if ttl <= 0:
- raise ValueError('TimedCache initialized with a ttl of %ss' % ttl.seconds)
- self.ttl = timedelta(seconds=ttl)
-
- def __setitem__(self, key, value):
- dict.__setitem__(self, key, (datetime.utcnow(), value))
-
- def __getitem__(self, key):
- return dict.__getitem__(self, key)[1]
-
- def clear_expired(self):
- now_ = datetime.utcnow()
- ttl = self.ttl
- for key, (timestamp, value) in self.items():
- if now_ - timestamp > ttl:
- del self[key]
-
class AbstractSource(object):
"""an abstract class for sources"""
- # does the source copy data into the system source, or is it a *true* source
- # (i.e. entities are not stored physically here)
- copy_based_source = False
# boolean telling if modification hooks should be called when something is
# modified in this source
@@ -108,10 +85,6 @@
# a reference to the instance'schema (may differs from the source'schema)
schema = None
- # multi-sources planning control
- dont_cross_relations = ()
- cross_relations = ()
-
# force deactivation (configuration error for instance)
disabled = False
@@ -259,29 +232,15 @@
"""open and return a connection to the source"""
raise NotImplementedError(self)
- def check_connection(self, cnx):
- """Check connection validity, return None if the connection is still
- valid else a new connection (called when the connections set using the
- given connection is being attached to a session). Do nothing by default.
- """
- pass
-
def close_source_connections(self):
for cnxset in self.repo.cnxsets:
- cnxset._cursors.pop(self.uri, None)
- cnxset.source_cnxs[self.uri][1].close()
+ cnxset.cu = None
+ cnxset.cnx.close()
def open_source_connections(self):
for cnxset in self.repo.cnxsets:
- cnxset.source_cnxs[self.uri] = (self, self.get_connection())
-
- def cnxset_freed(self, cnx):
- """the connections set holding the given connection is being reseted
- from its current attached session.
-
- do nothing by default
- """
- pass
+ cnxset.cnx = self.get_connection()
+ cnxset.cu = cnxset.cnx.cursor()
# cache handling ###########################################################
@@ -333,22 +292,6 @@
return wsupport
return True
- def may_cross_relation(self, rtype):
- """return True if the relation may be crossed among sources. Rules are:
-
- * if this source support the relation, can't be crossed unless explicitly
- specified in .cross_relations
-
- * if this source doesn't support the relation, can be crossed unless
- explicitly specified in .dont_cross_relations
- """
- # XXX find a way to have relation such as state_of in dont cross
- # relation (eg composite relation without both end type available?
- # card 1 relation? ...)
- if self.support_relation(rtype):
- return rtype in self.cross_relations
- return rtype not in self.dont_cross_relations
-
def before_entity_insertion(self, session, lid, etype, eid, sourceparams):
"""called by the repository when an eid has been attributed for an
entity stored here but the entity has not been inserted in the system
@@ -403,7 +346,7 @@
# user authentication api ##################################################
- def authenticate(self, session, login, **kwargs):
+ def authenticate(self, cnx, login, **kwargs):
"""if the source support CWUser entity type, it should implement
this method which should return CWUser eid for the given login/password
if this account is defined in this source and valid login / password is
@@ -413,7 +356,7 @@
# RQL query api ############################################################
- def syntax_tree_search(self, session, union,
+ def syntax_tree_search(self, cnx, union,
args=None, cachekey=None, varmap=None, debug=0):
"""return result from this source for a rql query (actually from a rql
syntax tree and a solution dictionary mapping each used variable to a
@@ -422,15 +365,6 @@
"""
raise NotImplementedError(self)
- def flying_insert(self, table, session, union, args=None, varmap=None):
- """similar as .syntax_tree_search, but inserts data in the temporary
- table (on-the-fly if possible, eg for the system source whose the given
- cursor come from). If not possible, inserts all data by calling
- .executemany().
- """
- res = self.syntax_tree_search(session, union, args, varmap=varmap)
- session.cnxset.source('system').manual_insert(res, table, session)
-
# write modification api ###################################################
# read-only sources don't have to implement methods below
@@ -487,22 +421,6 @@
"""mark entity as being modified, fulltext reindex if needed"""
raise NotImplementedError(self)
- def delete_info_multi(self, session, entities, uri):
- """delete system information on deletion of a list of entities with the
- same etype and belinging to the same source
- """
- raise NotImplementedError(self)
-
- def modified_entities(self, session, etypes, mtime):
- """return a 2-uple:
- * list of (etype, eid) of entities of the given types which have been
- modified since the given timestamp (actually entities whose full text
- index content has changed)
- * list of (etype, eid) of entities of the given types which have been
- deleted since the given timestamp
- """
- raise NotImplementedError(self)
-
def index_entity(self, session, entity):
"""create an operation to [re]index textual content of the given entity
on commit
@@ -525,90 +443,18 @@
"""execute the query and return its result"""
raise NotImplementedError(self)
- def temp_table_def(self, selection, solution, table, basemap):
- raise NotImplementedError(self)
-
def create_index(self, session, table, column, unique=False):
raise NotImplementedError(self)
def drop_index(self, session, table, column, unique=False):
raise NotImplementedError(self)
- def create_temp_table(self, session, table, schema):
- raise NotImplementedError(self)
-
- def clean_temp_data(self, session, temptables):
- """remove temporary data, usually associated to temporary tables"""
- pass
-
-
- @deprecated('[3.13] use repo.eid2extid(source, eid, session)')
- def eid2extid(self, eid, session=None):
- return self.repo.eid2extid(self, eid, session)
@deprecated('[3.13] use extid2eid(source, value, etype, session, **kwargs)')
def extid2eid(self, value, etype, session=None, **kwargs):
return self.repo.extid2eid(self, value, etype, session, **kwargs)
-class TrFunc(object):
- """lower, upper"""
- def __init__(self, trname, index, attrname=None):
- self._tr = trname.lower()
- self.index = index
- self.attrname = attrname
-
- def apply(self, resdict):
- value = resdict.get(self.attrname)
- if value is not None:
- return getattr(value, self._tr)()
- return None
-
-
-class GlobTrFunc(TrFunc):
- """count, sum, max, min, avg"""
- funcs = {
- 'count': len,
- 'sum': sum,
- 'max': max,
- 'min': min,
- # XXX avg
- }
- def apply(self, result):
- """have to 'groupby' manually. For instance, if we 'count' for index 1:
- >>> self.apply([(1, 2), (3, 4), (1, 5)])
- [(1, 7), (3, 4)]
- """
- keys, values = [], {}
- for row in result:
- key = tuple(v for i, v in enumerate(row) if i != self.index)
- value = row[self.index]
- try:
- values[key].append(value)
- except KeyError:
- keys.append(key)
- values[key] = [value]
- result = []
- trfunc = self.funcs[self._tr]
- for key in keys:
- row = list(key)
- row.insert(self.index, trfunc(values[key]))
- result.append(row)
- return result
-
-
-class ConnectionWrapper(object):
- def __init__(self, cnx=None):
- self.cnx = cnx
- def commit(self):
- pass
- def rollback(self):
- pass
- def cursor(self):
- return None # no actual cursor support
- def close(self):
- if hasattr(self.cnx, 'close'):
- self.cnx.close()
from cubicweb.server import SOURCE_TYPES
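
With the multi-source plumbing removed, a connections set wraps exactly one backend connection and one shared cursor, which is what open_source_connections/close_source_connections above now assume. A hypothetical minimal stand-in for that shape (the real class lives elsewhere in the server package):

    class ConnectionsSetShape(object):
        """illustration only: the simplified cnxset contract"""
        def __init__(self, source):
            self.cnx = source.get_connection()  # single backend connection
            self.cu = self.cnx.cursor()         # shared cursor
        def commit(self):
            self.cnx.commit()
        def rollback(self):
            self.cnx.rollback()
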
--- a/server/sources/datafeed.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/sources/datafeed.py Mon Feb 17 15:32:50 2014 +0100
@@ -35,7 +35,6 @@
class DataFeedSource(AbstractSource):
- copy_based_source = True
use_cwuri_as_url = True
options = (
--- a/server/sources/extlite.py Mon Feb 17 11:13:27 2014 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,302 +0,0 @@
-# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
-# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
-#
-# This file is part of CubicWeb.
-#
-# CubicWeb is free software: you can redistribute it and/or modify it under the
-# terms of the GNU Lesser General Public License as published by the Free
-# Software Foundation, either version 2.1 of the License, or (at your option)
-# any later version.
-#
-# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
-# details.
-#
-# You should have received a copy of the GNU Lesser General Public License along
-# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""provide an abstract class for external sources using a sqlite database helper
-"""
-
-__docformat__ = "restructuredtext en"
-
-
-from os.path import join, exists
-
-from cubicweb import server
-from cubicweb.server.sqlutils import SQL_PREFIX, SQLAdapterMixIn, sqlexec
-from cubicweb.server.sources import native, rql2sql
-from cubicweb.server.sources import AbstractSource, dbg_st_search, dbg_results
-
-class ConnectionWrapper(object):
- def __init__(self, source=None):
- self.source = source
- self._cnx = None
-
- def cursor(self):
- if self._cnx is None:
- self._cnx = self.source._sqlcnx
- if server.DEBUG & server.DBG_SQL:
- print 'sql cnx OPEN', self._cnx
- return self._cnx.cursor()
-
- def commit(self):
- if self._cnx is not None:
- if server.DEBUG & (server.DBG_SQL | server.DBG_RQL):
- print 'sql cnx COMMIT', self._cnx
- self._cnx.commit()
-
- def rollback(self):
- if self._cnx is not None:
- if server.DEBUG & (server.DBG_SQL | server.DBG_RQL):
- print 'sql cnx ROLLBACK', self._cnx
- self._cnx.rollback()
-
- def close(self):
- if self._cnx is not None:
- if server.DEBUG & server.DBG_SQL:
- print 'sql cnx CLOSE', self._cnx
- self._cnx.close()
- self._cnx = None
-
-
-class SQLiteAbstractSource(AbstractSource):
- """an abstract class for external sources using a sqlite database helper
- """
- sqlgen_class = rql2sql.SQLGenerator
- @classmethod
- def set_nonsystem_types(cls):
- # those entities are only in this source, we don't want them in the
- # system source
- for etype in cls.support_entities:
- native.NONSYSTEM_ETYPES.add(etype)
- for rtype in cls.support_relations:
- native.NONSYSTEM_RELATIONS.add(rtype)
-
- options = (
- ('helper-db-path',
- {'type' : 'string',
- 'default': None,
- 'help': 'path to the sqlite database file used to do queries on the \
-repository.',
- 'level': 2,
- }),
- )
-
- def __init__(self, repo, appschema, source_config, *args, **kwargs):
- # the helper db is used to easy querying and will store everything but
- # actual file content
- dbpath = source_config.get('helper-db-path')
- if dbpath is None:
- dbpath = join(repo.config.appdatahome,
- '%(uri)s.sqlite' % source_config)
- self.dbpath = dbpath
- self.sqladapter = SQLAdapterMixIn({'db-driver': 'sqlite',
- 'db-name': dbpath})
- # those attributes have to be initialized before ancestor's __init__
- # which will call set_schema
- self._need_sql_create = not exists(dbpath)
- self._need_full_import = self._need_sql_create
- AbstractSource.__init__(self, repo, appschema, source_config,
- *args, **kwargs)
-
- def backup(self, backupfile, confirm):
- """method called to create a backup of the source's data"""
- self.close_source_connections()
- try:
- self.sqladapter.backup_to_file(backupfile, confirm)
- finally:
- self.open_source_connections()
-
- def restore(self, backupfile, confirm, drop):
- """method called to restore a backup of source's data"""
- self.close_source_connections()
- try:
- self.sqladapter.restore_from_file(backupfile, confirm, drop)
- finally:
- self.open_source_connections()
-
- @property
- def _sqlcnx(self):
- # XXX: sqlite connections can only be used in the same thread, so
- # create a new one each time necessary. If it appears to be time
- # consuming, find another way
- return self.sqladapter.get_connection()
-
- def _is_schema_complete(self):
- for etype in self.support_entities:
- if not etype in self.schema:
- self.warning('not ready to generate %s database, %s support missing from schema',
- self.uri, etype)
- return False
- for rtype in self.support_relations:
- if not rtype in self.schema:
- self.warning('not ready to generate %s database, %s support missing from schema',
- self.uri, rtype)
- return False
- return True
-
- def _create_database(self):
- from yams.schema2sql import eschema2sql, rschema2sql
- from cubicweb.toolsutils import restrict_perms_to_user
- self.warning('initializing sqlite database for %s source' % self.uri)
- cnx = self._sqlcnx
- cu = cnx.cursor()
- schema = self.schema
- for etype in self.support_entities:
- eschema = schema.eschema(etype)
- createsqls = eschema2sql(self.sqladapter.dbhelper, eschema,
- skip_relations=('data',), prefix=SQL_PREFIX)
- sqlexec(createsqls, cu, withpb=False)
- for rtype in self.support_relations:
- rschema = schema.rschema(rtype)
- if not rschema.inlined:
- sqlexec(rschema2sql(rschema), cu, withpb=False)
- cnx.commit()
- cnx.close()
- self._need_sql_create = False
- if self.repo.config['uid']:
- from logilab.common.shellutils import chown
- # database file must be owned by the uid of the server process
- self.warning('set %s as owner of the database file',
- self.repo.config['uid'])
- chown(self.dbpath, self.repo.config['uid'])
- restrict_perms_to_user(self.dbpath, self.info)
-
- def set_schema(self, schema):
- super(SQLiteAbstractSource, self).set_schema(schema)
- if self._need_sql_create and self._is_schema_complete() and self.dbpath:
- self._create_database()
- self.rqlsqlgen = self.sqlgen_class(schema, self.sqladapter.dbhelper)
-
- def get_connection(self):
- return ConnectionWrapper(self)
-
- def check_connection(self, cnx):
- """check connection validity, return None if the connection is still valid
- else a new connection (called when the connections set holding the given connection is
- being attached to a session)
-
- always return the connection to reset eventually cached cursor
- """
- return cnx
-
- def cnxset_freed(self, cnx):
- """the connections set holding the given connection is being freed from its current
- attached session: release the connection lock if the connection wrapper
- has a connection set
- """
- # reset _cnx to ensure next thread using cnx will get a new
- # connection
- cnx.close()
-
- def syntax_tree_search(self, session, union, args=None, cachekey=None,
- varmap=None):
- """return result from this source for a rql query (actually from a rql
- syntax tree and a solution dictionary mapping each used variable to a
- possible type). If cachekey is given, the query necessary to fetch the
- results (but not the results themselves) may be cached using this key.
- """
- if self._need_sql_create:
- return []
- assert dbg_st_search(self.uri, union, varmap, args, cachekey)
- sql, qargs, cbs = self.rqlsqlgen.generate(union, args)
- args = self.sqladapter.merge_args(args, qargs)
- cursor = self.doexec(session, sql, args)
- results = self.sqladapter.process_result(cursor, cbs)
- assert dbg_results(results)
- return results
-
- def local_add_entity(self, session, entity):
- """insert the entity in the local database.
-
- This is not provided as add_entity implementation since usually source
- don't want to simply do this, so let raise NotImplementedError and the
- source implementor may use this method if necessary
- """
- attrs = self.sqladapter.preprocess_entity(entity)
- sql = self.sqladapter.sqlgen.insert(SQL_PREFIX + str(entity.e_schema), attrs)
- self.doexec(session, sql, attrs)
-
- def add_entity(self, session, entity):
- """add a new entity to the source"""
- raise NotImplementedError()
-
- def local_update_entity(self, session, entity, attrs=None):
- """update an entity in the source
-
- This is not provided as update_entity implementation since usually
- source don't want to simply do this, so let raise NotImplementedError
- and the source implementor may use this method if necessary
- """
- if attrs is None:
- attrs = self.sqladapter.preprocess_entity(entity)
- sql = self.sqladapter.sqlgen.update(SQL_PREFIX + str(entity.e_schema),
- attrs, [SQL_PREFIX + 'eid'])
- self.doexec(session, sql, attrs)
-
- def update_entity(self, session, entity):
- """update an entity in the source"""
- raise NotImplementedError()
-
- def delete_entity(self, session, entity):
- """delete an entity from the source
-
- this is not deleting a file in the svn but deleting entities from the
- source. Main usage is to delete repository content when a Repository
- entity is deleted.
- """
- attrs = {'cw_eid': entity.eid}
- sql = self.sqladapter.sqlgen.delete(SQL_PREFIX + entity.cw_etype, attrs)
- self.doexec(session, sql, attrs)
-
- def local_add_relation(self, session, subject, rtype, object):
- """add a relation to the source
-
- This is not provided as add_relation implementation since usually
- source don't want to simply do this, so let raise NotImplementedError
- and the source implementor may use this method if necessary
- """
- attrs = {'eid_from': subject, 'eid_to': object}
- sql = self.sqladapter.sqlgen.insert('%s_relation' % rtype, attrs)
- self.doexec(session, sql, attrs)
-
- def add_relation(self, session, subject, rtype, object):
- """add a relation to the source"""
- raise NotImplementedError()
-
- def delete_relation(self, session, subject, rtype, object):
- """delete a relation from the source"""
- rschema = self.schema.rschema(rtype)
- if rschema.inlined:
- if subject in session.transaction_data.get('pendingeids', ()):
- return
- table = SQL_PREFIX + session.describe(subject)[0]
- column = SQL_PREFIX + rtype
- sql = 'UPDATE %s SET %s=NULL WHERE %seid=%%(eid)s' % (table, column, SQL_PREFIX)
- attrs = {'eid' : subject}
- else:
- attrs = {'eid_from': subject, 'eid_to': object}
- sql = self.sqladapter.sqlgen.delete('%s_relation' % rtype, attrs)
- self.doexec(session, sql, attrs)
-
- def doexec(self, session, query, args=None):
- """Execute a query.
- it's a function just so that it shows up in profiling
- """
- if server.DEBUG:
- print 'exec', query, args
- cursor = session.cnxset[self.uri]
- try:
- # str(query) to avoid error if it's an unicode string
- cursor.execute(str(query), args)
- except Exception as ex:
- self.critical("sql: %r\n args: %s\ndbms message: %r",
- query, args, ex.args[0])
- try:
- session.cnxset.connection(self.uri).rollback()
- self.critical('transaction has been rolled back')
- except Exception:
- pass
- raise
- return cursor
--- a/server/sources/ldapfeed.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/sources/ldapfeed.py Mon Feb 17 15:32:50 2014 +0100
@@ -17,24 +17,39 @@
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""cubicweb ldap feed source"""
+from __future__ import division # XXX why?
+
+from datetime import datetime
+
import ldap
+from ldap.ldapobject import ReconnectLDAPObject
from ldap.filter import filter_format
+from ldapurl import LDAPUrl
from logilab.common.configuration import merge_options
+from cubicweb import ValidationError, AuthenticationError, Binary
+from cubicweb.server import utils
from cubicweb.server.sources import datafeed
-from cubicweb.server import ldaputils, utils
-from cubicweb import Binary
_ = unicode
# search scopes
-ldapscope = {'BASE': ldap.SCOPE_BASE,
- 'ONELEVEL': ldap.SCOPE_ONELEVEL,
- 'SUBTREE': ldap.SCOPE_SUBTREE}
+BASE = ldap.SCOPE_BASE
+ONELEVEL = ldap.SCOPE_ONELEVEL
+SUBTREE = ldap.SCOPE_SUBTREE
+LDAP_SCOPES = {'BASE': ldap.SCOPE_BASE,
+ 'ONELEVEL': ldap.SCOPE_ONELEVEL,
+ 'SUBTREE': ldap.SCOPE_SUBTREE}
-class LDAPFeedSource(ldaputils.LDAPSourceMixIn,
- datafeed.DataFeedSource):
+# map ldap protocol to their standard port
+PROTO_PORT = {'ldap': 389,
+ 'ldaps': 636,
+ 'ldapi': None,
+ }
+
+
+class LDAPFeedSource(datafeed.DataFeedSource):
"""LDAP feed source: unlike ldapuser source, this source is copy based and
will import ldap content (beside passwords for authentication) into the
system source.
@@ -42,7 +57,79 @@
support_entities = {'CWUser': False}
use_cwuri_as_url = False
- options_group = (
+ options = (
+ ('auth-mode',
+ {'type' : 'choice',
+ 'default': 'simple',
+ 'choices': ('simple', 'cram_md5', 'digest_md5', 'gssapi'),
+ 'help': 'authentication mode used to authenticate user to the ldap.',
+ 'group': 'ldap-source', 'level': 3,
+ }),
+ ('auth-realm',
+ {'type' : 'string',
+ 'default': None,
+ 'help': 'realm to use when using gssapi/kerberos authentication.',
+ 'group': 'ldap-source', 'level': 3,
+ }),
+
+ ('data-cnx-dn',
+ {'type' : 'string',
+ 'default': '',
+ 'help': 'user dn to use to open data connection to the ldap (eg used \
+to respond to rql queries). Leave empty for anonymous bind',
+ 'group': 'ldap-source', 'level': 1,
+ }),
+ ('data-cnx-password',
+ {'type' : 'string',
+ 'default': '',
+ 'help': 'password to use to open data connection to the ldap (eg used to respond to rql queries). Leave empty for anonymous bind.',
+ 'group': 'ldap-source', 'level': 1,
+ }),
+
+ ('user-base-dn',
+ {'type' : 'string',
+ 'default': '',
+      'help': 'base DN to look up users; disables the user importation mechanism if unset',
+ 'group': 'ldap-source', 'level': 1,
+ }),
+ ('user-scope',
+ {'type' : 'choice',
+ 'default': 'ONELEVEL',
+ 'choices': ('BASE', 'ONELEVEL', 'SUBTREE'),
+ 'help': 'user search scope (valid values: "BASE", "ONELEVEL", "SUBTREE")',
+ 'group': 'ldap-source', 'level': 1,
+ }),
+ ('user-classes',
+ {'type' : 'csv',
+ 'default': ('top', 'posixAccount'),
+ 'help': 'classes of user (with Active Directory, you want to say "user" here)',
+ 'group': 'ldap-source', 'level': 1,
+ }),
+ ('user-filter',
+ {'type': 'string',
+ 'default': '',
+ 'help': 'additional filters to be set in the ldap query to find valid users',
+ 'group': 'ldap-source', 'level': 2,
+ }),
+ ('user-login-attr',
+ {'type' : 'string',
+ 'default': 'uid',
+ 'help': 'attribute used as login on authentication (with Active Directory, you want to use "sAMAccountName" here)',
+ 'group': 'ldap-source', 'level': 1,
+ }),
+ ('user-default-group',
+ {'type' : 'csv',
+ 'default': ('users',),
+ 'help': 'name of a group in which ldap users will be by default. \
+You can set multiple groups by separating them by a comma.',
+ 'group': 'ldap-source', 'level': 1,
+ }),
+ ('user-attrs-map',
+ {'type' : 'named',
+ 'default': {'uid': 'login', 'gecos': 'email', 'userPassword': 'upassword'},
+ 'help': 'map from ldap user attributes to cubicweb attributes (with Active Directory, you want to use sAMAccountName:login,mail:email,givenName:firstname,sn:surname)',
+ 'group': 'ldap-source', 'level': 1,
+ }),
('group-base-dn',
{'type' : 'string',
'default': '',
@@ -76,18 +163,33 @@
}),
)
- options = merge_options(datafeed.DataFeedSource.options
- + ldaputils.LDAPSourceMixIn.options
- + options_group,
+ options = merge_options(datafeed.DataFeedSource.options + options,
optgroup='ldap-source',)
+ _conn = None
+
def update_config(self, source_entity, typedconfig):
"""update configuration from source entity. `typedconfig` is config
properly typed with defaults set
"""
super(LDAPFeedSource, self).update_config(source_entity, typedconfig)
+ self.authmode = typedconfig['auth-mode']
+ self._authenticate = getattr(self, '_auth_%s' % self.authmode)
+ self.cnx_dn = typedconfig['data-cnx-dn']
+ self.cnx_pwd = typedconfig['data-cnx-password']
+ self.user_base_dn = str(typedconfig['user-base-dn'])
+ self.user_base_scope = globals()[typedconfig['user-scope']]
+ self.user_login_attr = typedconfig['user-login-attr']
+ self.user_default_groups = typedconfig['user-default-group']
+ self.user_attrs = {'dn': 'eid', 'modifyTimestamp': 'modification_date'}
+ self.user_attrs.update(typedconfig['user-attrs-map'])
+ self.user_rev_attrs = dict((v, k) for k, v in self.user_attrs.iteritems())
+ self.base_filters = [filter_format('(%s=%s)', ('objectClass', o))
+ for o in typedconfig['user-classes']]
+ if typedconfig['user-filter']:
+ self.base_filters.append(typedconfig['user-filter'])
self.group_base_dn = str(typedconfig['group-base-dn'])
- self.group_base_scope = ldapscope[typedconfig['group-scope']]
+ self.group_base_scope = LDAP_SCOPES[typedconfig['group-scope']]
self.group_attrs = typedconfig['group-attrs-map']
self.group_attrs = {'dn': 'eid', 'modifyTimestamp': 'modification_date'}
self.group_attrs.update(typedconfig['group-attrs-map'])
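
The help strings above already spell out the Active Directory variants; for illustration, a hypothetical typedconfig fragment for an AD deployment would look like this (values lifted from the help texts, not from a tested setup):

    typedconfig = {
        'auth-mode': 'simple',
        'user-base-dn': 'ou=people,dc=example,dc=org',  # made-up dn
        'user-scope': 'ONELEVEL',
        'user-classes': ('user',),
        'user-login-attr': 'sAMAccountName',
        'user-attrs-map': {'sAMAccountName': 'login',
                           'mail': 'email',
                           'givenName': 'firstname',
                           'sn': 'surname'},
    }
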
@@ -96,11 +198,191 @@
for o in typedconfig['group-classes']]
if typedconfig['group-filter']:
self.group_base_filters.append(typedconfig['group-filter'])
+ self._conn = None
+
+ def _entity_update(self, source_entity):
+ super(LDAPFeedSource, self)._entity_update(source_entity)
+ if self.urls:
+ if len(self.urls) > 1:
+ raise ValidationError(source_entity.eid, {'url': _('can only have one url')})
+ try:
+ protocol, hostport = self.urls[0].split('://')
+ except ValueError:
+ raise ValidationError(source_entity.eid, {'url': _('badly formatted url')})
+ if protocol not in PROTO_PORT:
+ raise ValidationError(source_entity.eid, {'url': _('unsupported protocol')})
+
+ def connection_info(self):
+ assert len(self.urls) == 1, self.urls
+ protocol, hostport = self.urls[0].split('://')
+ if protocol != 'ldapi' and not ':' in hostport:
+ hostport = '%s:%s' % (hostport, PROTO_PORT[protocol])
+ return protocol, hostport
+
+ def authenticate(self, cnx, login, password=None, **kwargs):
+ """return CWUser eid for the given login/password if this account is
+ defined in this source, else raise `AuthenticationError`
+
+        two queries are needed since passwords are stored encrypted, so we have
+ to fetch the salt first
+ """
+ self.info('ldap authenticate %s', login)
+ if not password:
+ # On Windows + ADAM this would have succeeded (!!!)
+ # You get Authenticated as: 'NT AUTHORITY\ANONYMOUS LOGON'.
+ # we really really don't want that
+ raise AuthenticationError()
+ searchfilter = [filter_format('(%s=%s)', (self.user_login_attr, login))]
+ searchfilter.extend(self.base_filters)
+ searchstr = '(&%s)' % ''.join(searchfilter)
+ # first search the user
+ try:
+ user = self._search(cnx, self.user_base_dn,
+ self.user_base_scope, searchstr)[0]
+ except (IndexError, ldap.SERVER_DOWN):
+ # no such user
+ raise AuthenticationError()
+ # check password by establishing a (unused) connection
+ try:
+ self._connect(user, password)
+ except ldap.LDAPError as ex:
+ # Something went wrong, most likely bad credentials
+ self.info('while trying to authenticate %s: %s', user, ex)
+ raise AuthenticationError()
+ except Exception:
+ self.error('while trying to authenticate %s', user, exc_info=True)
+ raise AuthenticationError()
+ eid = self.repo.extid2eid(self, user['dn'], 'CWUser', session=cnx, insert=False)
+ if eid < 0:
+ # user has been moved away from this source
+ raise AuthenticationError()
+ return eid
+
+ def _connect(self, user=None, userpwd=None):
+ protocol, hostport = self.connection_info()
+ self.info('connecting %s://%s as %s', protocol, hostport,
+ user and user['dn'] or 'anonymous')
+ # don't require server certificate when using ldaps (will
+ # enable self signed certs)
+ ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
+ url = LDAPUrl(urlscheme=protocol, hostport=hostport)
+ conn = ReconnectLDAPObject(url.initializeUrl())
+ # Set the protocol version - version 3 is preferred
+ try:
+ conn.set_option(ldap.OPT_PROTOCOL_VERSION, ldap.VERSION3)
+ except ldap.LDAPError: # Invalid protocol version, fall back safely
+ conn.set_option(ldap.OPT_PROTOCOL_VERSION, ldap.VERSION2)
+ # Deny auto-chasing of referrals to be safe, we handle them instead
+ # Required for AD
+ try:
+ conn.set_option(ldap.OPT_REFERRALS, 0)
+ except ldap.LDAPError: # Cannot set referrals, so do nothing
+ pass
+ #conn.set_option(ldap.OPT_NETWORK_TIMEOUT, conn_timeout)
+ #conn.timeout = op_timeout
+ # Now bind with the credentials given. Let exceptions propagate out.
+ if user is None:
+ # XXX always use simple bind for data connection
+ if not self.cnx_dn:
+ conn.simple_bind_s(self.cnx_dn, self.cnx_pwd)
+ else:
+ self._authenticate(conn, {'dn': self.cnx_dn}, self.cnx_pwd)
+ else:
+ # user specified, we want to check user/password, no need to return
+ # the connection which will be thrown out
+ self._authenticate(conn, user, userpwd)
+ return conn
+
+ def _auth_simple(self, conn, user, userpwd):
+ conn.simple_bind_s(user['dn'], userpwd)
+
+ def _auth_cram_md5(self, conn, user, userpwd):
+ from ldap import sasl
+ auth_token = sasl.cram_md5(user['dn'], userpwd)
+ conn.sasl_interactive_bind_s('', auth_token)
+
+ def _auth_digest_md5(self, conn, user, userpwd):
+ from ldap import sasl
+ auth_token = sasl.digest_md5(user['dn'], userpwd)
+ conn.sasl_interactive_bind_s('', auth_token)
+
+ def _auth_gssapi(self, conn, user, userpwd):
+        # XXX not proper sasl/gssapi
+ import kerberos
+ if not kerberos.checkPassword(user[self.user_login_attr], userpwd):
+            raise Exception('BAD login / password')
+ #from ldap import sasl
+ #conn.sasl_interactive_bind_s('', sasl.gssapi())
+
+ def _search(self, cnx, base, scope,
+ searchstr='(objectClass=*)', attrs=()):
+ """make an ldap query"""
+ self.debug('ldap search %s %s %s %s %s', self.uri, base, scope,
+ searchstr, list(attrs))
+ if self._conn is None:
+ self._conn = self._connect()
+ ldapcnx = self._conn
+ try:
+ res = ldapcnx.search_s(base, scope, searchstr, attrs)
+ except ldap.PARTIAL_RESULTS:
+ res = ldapcnx.result(all=0)[1]
+ except ldap.NO_SUCH_OBJECT:
+ self.info('ldap NO SUCH OBJECT %s %s %s', base, scope, searchstr)
+ self._process_no_such_object(cnx, base)
+ return []
+ # except ldap.REFERRAL as e:
+ # ldapcnx = self.handle_referral(e)
+ # try:
+ # res = ldapcnx.search_s(base, scope, searchstr, attrs)
+ # except ldap.PARTIAL_RESULTS:
+ # res_type, res = ldapcnx.result(all=0)
+ result = []
+ for rec_dn, rec_dict in res:
+            # When used against Active Directory, "rec_dict" may not
+            # be a dictionary in some cases (instead, it can be a list)
+ #
+ # An example of a useless "res" entry that can be ignored
+ # from AD is
+ # (None, ['ldap://ForestDnsZones.PORTAL.LOCAL/DC=ForestDnsZones,DC=PORTAL,DC=LOCAL'])
+ # This appears to be some sort of internal referral, but
+ # we can't handle it, so we need to skip over it.
+ try:
+ items = rec_dict.iteritems()
+ except AttributeError:
+ continue
+ else:
+ itemdict = self._process_ldap_item(rec_dn, items)
+ result.append(itemdict)
+ self.debug('ldap built results %s', len(result))
+ return result
def _process_ldap_item(self, dn, iterator):
- itemdict = super(LDAPFeedSource, self)._process_ldap_item(dn, iterator)
+ """Turn an ldap received item into a proper dict."""
+ itemdict = {'dn': dn}
+ for key, value in iterator:
+            if self.user_attrs.get(key) == 'upassword': # XXX better password detection
+ value = value[0].encode('utf-8')
+ # we only support ldap_salted_sha1 for ldap sources, see: server/utils.py
+ if not value.startswith('{SSHA}'):
+ value = utils.crypt_password(value)
+ itemdict[key] = Binary(value)
+ elif self.user_attrs.get(key) == 'modification_date':
+ itemdict[key] = datetime.strptime(value[0], '%Y%m%d%H%M%SZ')
+ else:
+ value = [unicode(val, 'utf-8', 'replace') for val in value]
+ if len(value) == 1:
+ itemdict[key] = value = value[0]
+ else:
+ itemdict[key] = value
# we expect memberUid to be a list of user ids, make sure of it
member = self.group_rev_attrs['member']
if isinstance(itemdict.get(member), basestring):
itemdict[member] = [itemdict[member]]
return itemdict
+
+ def _process_no_such_object(self, cnx, dn):
+ """Some search return NO_SUCH_OBJECT error, handle this (usually because
+ an object whose dn is no more existent in ldap as been encountered).
+
+ Do nothing by default, let sub-classes handle that.
+ """
--- a/server/sources/native.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/sources/native.py Mon Feb 17 15:32:50 2014 +0100
@@ -35,7 +35,7 @@
from datetime import datetime
from base64 import b64decode, b64encode
from contextlib import contextmanager
-from os.path import abspath, basename
+from os.path import basename
import re
import itertools
import zipfile
@@ -52,7 +52,7 @@
from yams.schema import role_name
from cubicweb import (UnknownEid, AuthenticationError, ValidationError, Binary,
- UniqueTogetherError, QueryError, UndoTransactionException)
+ UniqueTogetherError, UndoTransactionException)
from cubicweb import transaction as tx, server, neg_role
from cubicweb.utils import QueryCache
from cubicweb.schema import VIRTUAL_RTYPES
@@ -95,37 +95,6 @@
return self.cu.fetchone()
-def make_schema(selected, solution, table, typemap):
- """return a sql schema to store RQL query result"""
- sql = []
- varmap = {}
- for i, term in enumerate(selected):
- name = 'C%s' % i
- key = term.as_string()
- varmap[key] = '%s.%s' % (table, name)
- ttype = term.get_type(solution)
- try:
- sql.append('%s %s' % (name, typemap[ttype]))
- except KeyError:
- # assert not schema(ttype).final
- sql.append('%s %s' % (name, typemap['Int']))
- return ','.join(sql), varmap
-
-
-def _modified_sql(table, etypes):
- # XXX protect against sql injection
- if len(etypes) > 1:
- restr = 'type IN (%s)' % ','.join("'%s'" % etype for etype in etypes)
- else:
- restr = "type='%s'" % etypes[0]
- if table == 'entities':
- attr = 'mtime'
- else:
- attr = 'dtime'
- return 'SELECT type, eid FROM %s WHERE %s AND %s > %%(time)s' % (
- table, restr, attr)
-
-
def sql_or_clauses(sql, clauses):
select, restr = sql.split(' WHERE ', 1)
restrclauses = restr.split(' AND ')
@@ -138,6 +107,7 @@
restr = '(%s)' % ' OR '.join(clauses)
return '%s WHERE %s' % (select, restr)
+
def rdef_table_column(rdef):
"""return table and column used to store the given relation definition in
the database
@@ -145,6 +115,7 @@
return (SQL_PREFIX + str(rdef.subject),
SQL_PREFIX + str(rdef.rtype))
+
def rdef_physical_info(dbhelper, rdef):
"""return backend type and a boolean flag if NULL values should be allowed
for a given relation definition
@@ -292,39 +263,17 @@
self.do_fti = not repo.config['delay-full-text-indexation']
# sql queries cache
self._cache = QueryCache(repo.config['rql-cache-size'])
- self._temp_table_data = {}
# we need a lock to protect eid attribution function (XXX, really?
# explain)
self._eid_cnx_lock = Lock()
self._eid_creation_cnx = None
# (etype, attr) / storage mapping
self._storages = {}
- # entity types that may be used by other multi-sources instances
- self.multisources_etypes = set(repo.config['multi-sources-etypes'])
- # XXX no_sqlite_wrap trick since we've a sqlite locking pb when
- # running unittest_multisources with the wrapping below
- if self.dbdriver == 'sqlite' and \
- not getattr(repo.config, 'no_sqlite_wrap', False):
- from cubicweb.server.sources.extlite import ConnectionWrapper
- self.dbhelper.dbname = abspath(self.dbhelper.dbname)
- self.get_connection = lambda: ConnectionWrapper(self)
- self.check_connection = lambda cnx: cnx
- def cnxset_freed(cnx):
- cnx.close()
- self.cnxset_freed = cnxset_freed
if self.dbdriver == 'sqlite':
self._create_eid = None
self.create_eid = self._create_eid_sqlite
self.binary_to_str = self.dbhelper.dbapi_module.binary_to_str
-
- @property
- def _sqlcnx(self):
- # XXX: sqlite connections can only be used in the same thread, so
- # create a new one each time necessary. If it appears to be time
- # consuming, find another way
- return SQLAdapterMixIn.get_connection(self)
-
def check_config(self, source_entity):
"""check configuration of source entity"""
if source_entity.host_config:
@@ -356,10 +305,9 @@
if self.do_fti:
if cnxset is None:
_cnxset = self.repo._get_cnxset()
- _cnxset.cnxset_set()
else:
_cnxset = cnxset
- if not self.dbhelper.has_fti_table(_cnxset['system']):
+ if not self.dbhelper.has_fti_table(_cnxset.cu):
if not self.repo.config.creating:
self.critical('no text index table')
self.do_fti = False
@@ -491,16 +439,13 @@
# can't claim not supporting a relation
return True #not rtype == 'content_for'
- def may_cross_relation(self, rtype):
- return True
-
- def authenticate(self, session, login, **kwargs):
+ def authenticate(self, cnx, login, **kwargs):
"""return CWUser eid for the given login and other authentication
information found in kwargs, else raise `AuthenticationError`
"""
for authentifier in self.authentifiers:
try:
- return authentifier.authenticate(session, login, **kwargs)
+ return authentifier.authenticate(cnx, login, **kwargs)
except AuthenticationError:
continue
raise AuthenticationError()
@@ -539,13 +484,13 @@
raise
# FIXME: better detection of deconnection pb
self.warning("trying to reconnect")
- session.cnxset.reconnect(self)
+ session.cnxset.reconnect()
cursor = self.doexec(session, sql, args)
except self.DbapiError as exc:
# We get this one with pyodbc and SQL Server when connection was reset
if exc.args[0] == '08S01' and session.mode != 'write':
self.warning("trying to reconnect")
- session.cnxset.reconnect(self)
+ session.cnxset.reconnect()
cursor = self.doexec(session, sql, args)
else:
raise
@@ -553,52 +498,6 @@
assert dbg_results(results)
return results
- def flying_insert(self, table, session, union, args=None, varmap=None):
- """similar as .syntax_tree_search, but inserts data in the
- temporary table (on-the-fly if possible, eg for the system
- source whose the given cursor come from). If not possible,
- inserts all data by calling .executemany().
- """
- assert dbg_st_search(
- self.uri, union, varmap, args,
- prefix='ON THE FLY temp data insertion into %s from' % table)
- # generate sql queries if we are able to do so
- sql, qargs, cbs = self._rql_sqlgen.generate(union, args, varmap)
- query = 'INSERT INTO %s %s' % (table, sql.encode(self._dbencoding))
- self.doexec(session, query, self.merge_args(args, qargs))
-
- def manual_insert(self, results, table, session):
- """insert given result into a temporary table on the system source"""
- if server.DEBUG & server.DBG_RQL:
- print ' manual insertion of', len(results), 'results into', table
- if not results:
- return
- query_args = ['%%(%s)s' % i for i in xrange(len(results[0]))]
- query = 'INSERT INTO %s VALUES(%s)' % (table, ','.join(query_args))
- kwargs_list = []
- for row in results:
- kwargs = {}
- row = tuple(row)
- for index, cell in enumerate(row):
- if isinstance(cell, Binary):
- cell = self._binary(cell.getvalue())
- kwargs[str(index)] = cell
- kwargs_list.append(kwargs)
- self.doexecmany(session, query, kwargs_list)
-
- def clean_temp_data(self, session, temptables):
- """remove temporary data, usually associated to temporary tables"""
- if temptables:
- for table in temptables:
- try:
- self.doexec(session,'DROP TABLE %s' % table)
- except Exception:
- pass
- try:
- del self._temp_table_data[table]
- except KeyError:
- continue
-
@contextmanager
def _storage_handler(self, entity, event):
# 1/ memorize values as they are before the storage is called.
@@ -693,7 +592,7 @@
else: # used by data import
etypes = {}
for subject, object in subj_obj_list:
- etype = session.describe(subject)[0]
+ etype = session.entity_metas(subject)['type']
if etype in etypes:
etypes[etype].append((subject, object))
else:
@@ -718,7 +617,7 @@
def _delete_relation(self, session, subject, rtype, object, inlined=False):
"""delete a relation from the source"""
if inlined:
- table = SQL_PREFIX + session.describe(subject)[0]
+ table = SQL_PREFIX + session.entity_metas(subject)['type']
column = SQL_PREFIX + rtype
sql = 'UPDATE %s SET %s=NULL WHERE %seid=%%(eid)s' % (table, column,
SQL_PREFIX)
@@ -732,10 +631,10 @@
"""Execute a query.
it's a function just so that it shows up in profiling
"""
- cursor = session.cnxset[self.uri]
+ cursor = session.cnxset.cu
if server.DEBUG & server.DBG_SQL:
- cnx = session.cnxset.connection(self.uri)
- # getattr to get the actual connection if cnx is a ConnectionWrapper
+ cnx = session.cnxset.cnx
+ # getattr to get the actual connection if cnx is a CnxLoggingWrapper
# instance
print 'exec', query, args, getattr(cnx, '_cnx', cnx)
try:
@@ -749,7 +648,7 @@
query, args, ex.args[0])
if rollback:
try:
- session.cnxset.connection(self.uri).rollback()
+ session.cnxset.rollback()
if self.repo.config.mode != 'test':
self.critical('transaction has been rolled back')
except Exception as ex:
@@ -777,7 +676,7 @@
"""
if server.DEBUG & server.DBG_SQL:
print 'execmany', query, 'with', len(args), 'arguments'
- cursor = session.cnxset[self.uri]
+ cursor = session.cnxset.cu
try:
# str(query) to avoid error if it's an unicode string
cursor.executemany(str(query), args)
@@ -788,7 +687,7 @@
self.critical("sql many: %r\n args: %s\ndbms message: %r",
query, args, ex.args[0])
try:
- session.cnxset.connection(self.uri).rollback()
+ session.cnxset.rollback()
if self.repo.config.mode != 'test':
self.critical('transaction has been rolled back')
except Exception:
@@ -806,7 +705,7 @@
self.error("backend can't alter %s.%s to %s%s", table, column, coltype,
not allownull and 'NOT NULL' or '')
return
- self.dbhelper.change_col_type(LogCursor(session.cnxset[self.uri]),
+ self.dbhelper.change_col_type(LogCursor(session.cnxset.cu),
table, column, coltype, allownull)
self.info('altered %s.%s: now %s%s', table, column, coltype,
not allownull and 'NOT NULL' or '')
@@ -821,7 +720,7 @@
return
table, column = rdef_table_column(rdef)
coltype, allownull = rdef_physical_info(self.dbhelper, rdef)
- self.dbhelper.set_null_allowed(LogCursor(session.cnxset[self.uri]),
+ self.dbhelper.set_null_allowed(LogCursor(session.cnxset.cu),
table, column, coltype, allownull)
def update_rdef_indexed(self, session, rdef):
@@ -839,11 +738,11 @@
self.drop_index(session, table, column, unique=True)
def create_index(self, session, table, column, unique=False):
- cursor = LogCursor(session.cnxset[self.uri])
+ cursor = LogCursor(session.cnxset.cu)
self.dbhelper.create_index(cursor, table, column, unique)
def drop_index(self, session, table, column, unique=False):
- cursor = LogCursor(session.cnxset[self.uri])
+ cursor = LogCursor(session.cnxset.cu)
self.dbhelper.drop_index(cursor, table, column, unique)
# system source interface #################################################
@@ -856,7 +755,7 @@
except (self.OperationalError, self.InterfaceError):
if session.mode == 'read' and _retry:
self.warning("trying to reconnect (eid_type_source())")
- session.cnxset.reconnect(self)
+ session.cnxset.reconnect()
return self._eid_type_source(session, eid, sql, _retry=False)
except Exception:
assert session.cnxset, 'session has no connections set'
@@ -865,7 +764,7 @@
def eid_type_source(self, session, eid): # pylint: disable=E0202
"""return a tuple (type, source, extid) for the entity with id <eid>"""
- sql = 'SELECT type, source, extid, asource FROM entities WHERE eid=%s' % eid
+ sql = 'SELECT type, extid, asource FROM entities WHERE eid=%s' % eid
res = self._eid_type_source(session, eid, sql)
if res[-2] is not None:
if not isinstance(res, list):
@@ -875,7 +774,7 @@
def eid_type_source_pre_131(self, session, eid):
"""return a tuple (type, source, extid) for the entity with id <eid>"""
- sql = 'SELECT type, source, extid FROM entities WHERE eid=%s' % eid
+ sql = 'SELECT type, extid FROM entities WHERE eid=%s' % eid
res = self._eid_type_source(session, eid, sql)
if not isinstance(res, list):
res = list(res)
@@ -884,13 +783,12 @@
res.append(res[1])
return res
- def extid2eid(self, session, source_uri, extid):
+ def extid2eid(self, session, extid):
"""get eid from an external id. Return None if no record found."""
assert isinstance(extid, str)
cursor = self.doexec(session,
- 'SELECT eid FROM entities '
- 'WHERE extid=%(x)s AND source=%(s)s',
- {'x': b64encode(extid), 's': source_uri})
+ 'SELECT eid FROM entities WHERE extid=%(x)s',
+ {'x': b64encode(extid)})
# XXX testing rowcount cause strange bug with sqlite, results are there
# but rowcount is 0
#if cursor.rowcount > 0:
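
With the source column gone from the entities table, the base64-encoded external id alone identifies an entity. For illustration, with a made-up extid:

    from base64 import b64encode
    extid = 'uid=jdoe,ou=people,dc=example,dc=org'  # e.g. an ldap dn
    args = {'x': b64encode(extid)}
    # executed as: SELECT eid FROM entities WHERE extid=%(x)s
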
@@ -902,26 +800,12 @@
pass
return None
- def make_temp_table_name(self, table):
- return self.dbhelper.temporary_table_name(table)
-
- def temp_table_def(self, selected, sol, table):
- return make_schema(selected, sol, table, self.dbhelper.TYPE_MAPPING)
-
- def create_temp_table(self, session, table, schema):
- # we don't want on commit drop, this may cause problem when
- # running with an ldap source, and table will be deleted manually any way
- # on commit
- sql = self.dbhelper.sql_temporary_table(table, schema, False)
- self.doexec(session, sql)
-
def _create_eid_sqlite(self, session):
with self._eid_cnx_lock:
for sql in self.dbhelper.sqls_increment_sequence('entities_id_seq'):
cursor = self.doexec(session, sql)
return cursor.fetchone()[0]
-
def create_eid(self, session): # pylint: disable=E0202
# lock needed to prevent 'Connection is busy with results for another
# command (0)' errors with SQLServer
@@ -972,34 +856,34 @@
def add_info(self, session, entity, source, extid, complete):
"""add type and source info for an eid into the system table"""
- # begin by inserting eid/type/source/extid into the entities table
- if extid is not None:
- assert isinstance(extid, str)
- extid = b64encode(extid)
- uri = 'system' if source.copy_based_source else source.uri
- attrs = {'type': entity.cw_etype, 'eid': entity.eid, 'extid': extid,
- 'source': uri, 'asource': source.uri, 'mtime': datetime.utcnow()}
- self._handle_insert_entity_sql(session, self.sqlgen.insert('entities', attrs), attrs)
- # insert core relations: is, is_instance_of and cw_source
- try:
- self._handle_is_relation_sql(session, 'INSERT INTO is_relation(eid_from,eid_to) VALUES (%s,%s)',
- (entity.eid, eschema_eid(session, entity.e_schema)))
- except IndexError:
- # during schema serialization, skip
- pass
- else:
- for eschema in entity.e_schema.ancestors() + [entity.e_schema]:
- self._handle_is_relation_sql(session,
- 'INSERT INTO is_instance_of_relation(eid_from,eid_to) VALUES (%s,%s)',
- (entity.eid, eschema_eid(session, eschema)))
- if 'CWSource' in self.schema and source.eid is not None: # else, cw < 3.10
- self._handle_is_relation_sql(session, 'INSERT INTO cw_source_relation(eid_from,eid_to) VALUES (%s,%s)',
- (entity.eid, source.eid))
- # now we can update the full text index
- if self.do_fti and self.need_fti_indexation(entity.cw_etype):
- if complete:
- entity.complete(entity.e_schema.indexable_attributes())
- self.index_entity(session, entity=entity)
+ with session.ensure_cnx_set:
+ # begin by inserting eid/type/source/extid into the entities table
+ if extid is not None:
+ assert isinstance(extid, str)
+ extid = b64encode(extid)
+ attrs = {'type': entity.cw_etype, 'eid': entity.eid, 'extid': extid,
+ 'asource': source.uri}
+ self._handle_insert_entity_sql(session, self.sqlgen.insert('entities', attrs), attrs)
+ # insert core relations: is, is_instance_of and cw_source
+ try:
+ self._handle_is_relation_sql(session, 'INSERT INTO is_relation(eid_from,eid_to) VALUES (%s,%s)',
+ (entity.eid, eschema_eid(session, entity.e_schema)))
+ except IndexError:
+ # during schema serialization, skip
+ pass
+ else:
+ for eschema in entity.e_schema.ancestors() + [entity.e_schema]:
+ self._handle_is_relation_sql(session,
+ 'INSERT INTO is_instance_of_relation(eid_from,eid_to) VALUES (%s,%s)',
+ (entity.eid, eschema_eid(session, eschema)))
+ if 'CWSource' in self.schema and source.eid is not None: # else, cw < 3.10
+ self._handle_is_relation_sql(session, 'INSERT INTO cw_source_relation(eid_from,eid_to) VALUES (%s,%s)',
+ (entity.eid, source.eid))
+ # now we can update the full text index
+ if self.do_fti and self.need_fti_indexation(entity.cw_etype):
+ if complete:
+ entity.complete(entity.e_schema.indexable_attributes())
+ self.index_entity(session, entity=entity)
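
`session.ensure_cnx_set` is used here as a context manager: it guarantees the session holds a connections set for the duration of the block, acquiring one when the session has none and freeing it afterwards. A rough sketch of that contract, with a hypothetical `pool` object standing in for the repository's pool:

    from contextlib import contextmanager

    @contextmanager
    def ensure_cnx_set(session, pool):
        acquired = session.cnxset is None
        if acquired:
            session.cnxset = pool.acquire()   # hypothetical pool API
        try:
            yield session.cnxset
        finally:
            if acquired:
                pool.release(session.cnxset)
                session.cnxset = None
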
def update_info(self, session, entity, need_fti_update):
"""mark entity as being modified, fulltext reindex if needed"""
@@ -1007,59 +891,22 @@
# reindex the entity only if this query is updating at least
# one indexable attribute
self.index_entity(session, entity=entity)
- # update entities.mtime.
- # XXX Only if entity.cw_etype in self.multisources_etypes?
- attrs = {'eid': entity.eid, 'mtime': datetime.utcnow()}
- self.doexec(session, self.sqlgen.update('entities', attrs, ['eid']), attrs)
- def delete_info_multi(self, session, entities, uri):
+ def delete_info_multi(self, session, entities):
"""delete system information on deletion of a list of entities with the
        same etype and belonging to the same source
* update the fti
* remove record from the `entities` table
- * transfer it to the `deleted_entities`
"""
self.fti_unindex_entities(session, entities)
attrs = {'eid': '(%s)' % ','.join([str(_e.eid) for _e in entities])}
self.doexec(session, self.sqlgen.delete_many('entities', attrs), attrs)
- if entities[0].__regid__ not in self.multisources_etypes:
- return
- attrs = {'type': entities[0].__regid__,
- 'source': uri, 'dtime': datetime.utcnow()}
- for entity in entities:
- extid = entity.cw_metainformation()['extid']
- if extid is not None:
- assert isinstance(extid, str), type(extid)
- extid = b64encode(extid)
- attrs.update({'eid': entity.eid, 'extid': extid})
- self.doexec(session, self.sqlgen.insert('deleted_entities', attrs), attrs)
-
- def modified_entities(self, session, etypes, mtime):
- """return a 2-uple:
- * list of (etype, eid) of entities of the given types which have been
- modified since the given timestamp (actually entities whose full text
- index content has changed)
- * list of (etype, eid) of entities of the given types which have been
- deleted since the given timestamp
- """
- for etype in etypes:
- if not etype in self.multisources_etypes:
- self.error('%s not listed as a multi-sources entity types. '
- 'Modify your configuration' % etype)
- self.multisources_etypes.add(etype)
- modsql = _modified_sql('entities', etypes)
- cursor = self.doexec(session, modsql, {'time': mtime})
- modentities = cursor.fetchall()
- delsql = _modified_sql('deleted_entities', etypes)
- cursor = self.doexec(session, delsql, {'time': mtime})
- delentities = cursor.fetchall()
- return modentities, delentities
# undo support #############################################################
def undoable_transactions(self, session, ueid=None, **actionfilters):
- """See :class:`cubicweb.dbapi.Connection.undoable_transactions`"""
+ """See :class:`cubicweb.repoapi.ClientConnection.undoable_transactions`"""
# force filtering to session's user if not a manager
if not session.user.is_in_group('managers'):
ueid = session.user.eid
@@ -1131,11 +978,11 @@
return [tx.Transaction(*args) for args in cu.fetchall()]
def tx_info(self, session, txuuid):
- """See :class:`cubicweb.dbapi.Connection.transaction_info`"""
+ """See :class:`cubicweb.repoapi.ClientConnection.transaction_info`"""
return tx.Transaction(txuuid, *self._tx_info(session, txuuid))
def tx_actions(self, session, txuuid, public):
- """See :class:`cubicweb.dbapi.Connection.transaction_actions`"""
+ """See :class:`cubicweb.repoapi.ClientConnection.transaction_actions`"""
self._tx_info(session, txuuid)
restr = {'tx_uuid': txuuid}
if public:
@@ -1155,7 +1002,7 @@
return sorted(actions, key=lambda x: x.order)
def undo_transaction(self, session, txuuid):
- """See :class:`cubicweb.dbapi.Connection.undo_transaction`
+ """See :class:`cubicweb.repoapi.ClientConnection.undo_transaction`
important note: while undoing of a transaction, only hooks in the
'integrity', 'activeintegrity' and 'undo' categories are called.
@@ -1222,17 +1069,19 @@
        raise `NoSuchTransaction` if there is no such transaction or if the
session's user isn't allowed to see it.
"""
- restr = {'tx_uuid': txuuid}
- sql = self.sqlgen.select('transactions', restr, ('tx_time', 'tx_user'))
- cu = self.doexec(session, sql, restr)
- try:
- time, ueid = cu.fetchone()
- except TypeError:
- raise tx.NoSuchTransaction(txuuid)
- if not (session.user.is_in_group('managers')
- or session.user.eid == ueid):
- raise tx.NoSuchTransaction(txuuid)
- return time, ueid
+ with session.ensure_cnx_set:
+ restr = {'tx_uuid': txuuid}
+ sql = self.sqlgen.select('transactions', restr,
+ ('tx_time', 'tx_user'))
+ cu = self.doexec(session, sql, restr)
+ try:
+ time, ueid = cu.fetchone()
+ except TypeError:
+ raise tx.NoSuchTransaction(txuuid)
+ if not (session.user.is_in_group('managers')
+ or session.user.eid == ueid):
+ raise tx.NoSuchTransaction(txuuid)
+ return time, ueid
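
Note that `_tx_info` deliberately raises the same `NoSuchTransaction` for a missing row (fetchone() returns None, so tuple unpacking raises TypeError) and for an unauthorized requester; using one error for both cases avoids revealing whether a given txuuid exists. The shape of the pattern, with illustrative names:

    def fetch_owned(cu, key, user):
        row = cu.fetchone()
        try:
            time, owner = row        # TypeError when row is None
        except TypeError:
            raise KeyError(key)      # not found
        if not (user.is_manager or user.eid == owner):   # illustrative attributes
            raise KeyError(key)      # unauthorized: same error, no existence leak
        return time, owner
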
def _reedit_entity(self, entity, changes, err):
session = entity._cw
@@ -1300,10 +1149,6 @@
self.doexec(session, sql, action.changes)
# restore record in entities (will update fti if needed)
self.add_info(session, entity, self, None, True)
- # remove record from deleted_entities if entity's type is multi-sources
- if entity.cw_etype in self.multisources_etypes:
- self.doexec(session,
- 'DELETE FROM deleted_entities WHERE eid=%s' % eid)
self.repo.hm.call_hooks('after_add_entity', session, entity=entity)
return errors
@@ -1367,7 +1212,7 @@
sql = self.sqlgen.delete(SQL_PREFIX + entity.cw_etype, attrs)
self.doexec(session, sql, attrs)
# remove record from entities (will update fti if needed)
- self.delete_info_multi(session, [entity], self.uri)
+ self.delete_info_multi(session, [entity])
self.repo.hm.call_hooks('after_delete_entity', session, entity=entity)
return ()
@@ -1442,7 +1287,7 @@
def fti_unindex_entities(self, session, entities):
"""remove text content for entities from the full text index
"""
- cursor = session.cnxset['system']
+ cursor = session.cnxset.cu
cursor_unindex_object = self.dbhelper.cursor_unindex_object
try:
for entity in entities:
@@ -1455,7 +1300,7 @@
"""add text content of created/modified entities to the full text index
"""
cursor_index_object = self.dbhelper.cursor_index_object
- cursor = session.cnxset['system']
+ cursor = session.cnxset.cu
try:
# use cursor_index_object, not cursor_reindex_object since
# unindexing done in the FTIndexEntityOp
@@ -1503,26 +1348,12 @@
CREATE TABLE entities (
eid INTEGER PRIMARY KEY NOT NULL,
type VARCHAR(64) NOT NULL,
- source VARCHAR(128) NOT NULL,
asource VARCHAR(128) NOT NULL,
- mtime %s NOT NULL,
extid VARCHAR(256)
);;
CREATE INDEX entities_type_idx ON entities(type);;
-CREATE INDEX entities_mtime_idx ON entities(mtime);;
CREATE INDEX entities_extid_idx ON entities(extid);;
-CREATE TABLE deleted_entities (
- eid INTEGER PRIMARY KEY NOT NULL,
- type VARCHAR(64) NOT NULL,
- source VARCHAR(128) NOT NULL,
- dtime %s NOT NULL,
- extid VARCHAR(256)
-);;
-CREATE INDEX deleted_entities_type_idx ON deleted_entities(type);;
-CREATE INDEX deleted_entities_dtime_idx ON deleted_entities(dtime);;
-CREATE INDEX deleted_entities_extid_idx ON deleted_entities(extid);;
-
CREATE TABLE transactions (
tx_uuid CHAR(32) PRIMARY KEY NOT NULL,
tx_user INTEGER NOT NULL,
@@ -1561,7 +1392,7 @@
CREATE INDEX tx_relation_actions_eid_to_idx ON tx_relation_actions(eid_to);;
CREATE INDEX tx_relation_actions_tx_uuid_idx ON tx_relation_actions(tx_uuid);;
""" % (helper.sql_create_sequence('entities_id_seq').replace(';', ';;'),
- typemap['Datetime'], typemap['Datetime'], typemap['Datetime'],
+ typemap['Datetime'],
typemap['Boolean'], typemap['Bytes'], typemap['Boolean'])
if helper.backend_name == 'sqlite':
            # sqlite supports the ON DELETE CASCADE syntax but does nothing
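
As the comment says, sqlite accepts the ON DELETE CASCADE syntax but does not act on it unless foreign-key enforcement is enabled per connection (it is off by default), hence the special-casing here. With the stdlib driver:

    import sqlite3

    cnx = sqlite3.connect(':memory:')
    cnx.execute('PRAGMA foreign_keys = ON')   # FK actions are ignored otherwise
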
@@ -1581,7 +1412,6 @@
return """
%s
DROP TABLE entities;
-DROP TABLE deleted_entities;
DROP TABLE tx_entity_actions;
DROP TABLE tx_relation_actions;
DROP TABLE transactions;
@@ -1590,7 +1420,7 @@
def grant_schema(user, set_owner=True):
result = ''
- for table in ('entities', 'deleted_entities', 'entities_id_seq',
+ for table in ('entities', 'entities_id_seq',
'transactions', 'tx_entity_actions', 'tx_relation_actions'):
if set_owner:
result = 'ALTER TABLE %s OWNER TO %s;\n' % (table, user)
@@ -1620,7 +1450,7 @@
self._passwd_rqlst = self.source.compile_rql(self.passwd_rql, self._sols)
self._auth_rqlst = self.source.compile_rql(self.auth_rql, self._sols)
- def authenticate(self, session, login, password=None, **kwargs):
+ def authenticate(self, cnx, login, password=None, **kwargs):
"""return CWUser eid for the given login/password if this account is
defined in this source, else raise `AuthenticationError`
@@ -1629,7 +1459,7 @@
"""
args = {'login': login, 'pwd' : None}
if password is not None:
- rset = self.source.syntax_tree_search(session, self._passwd_rqlst, args)
+ rset = self.source.syntax_tree_search(cnx, self._passwd_rqlst, args)
try:
pwd = rset[0][0]
except IndexError:
@@ -1640,7 +1470,7 @@
# passwords are stored using the Bytes type, so we get a StringIO
args['pwd'] = Binary(crypt_password(password, pwd.getvalue()))
# get eid from login and (crypted) password
- rset = self.source.syntax_tree_search(session, self._auth_rqlst, args)
+ rset = self.source.syntax_tree_search(cnx, self._auth_rqlst, args)
try:
user = rset[0][0]
# If the stored hash uses a deprecated scheme (e.g. DES or MD5 used
@@ -1650,32 +1480,33 @@
if not verify: # should not happen, but...
raise AuthenticationError('bad password')
if newhash:
- session.system_sql("UPDATE %s SET %s=%%(newhash)s WHERE %s=%%(login)s" % (
+ cnx.system_sql("UPDATE %s SET %s=%%(newhash)s WHERE %s=%%(login)s" % (
SQL_PREFIX + 'CWUser',
SQL_PREFIX + 'upassword',
SQL_PREFIX + 'login'),
{'newhash': self.source._binary(newhash),
'login': login})
- session.commit(free_cnxset=False)
+ cnx.commit(free_cnxset=False)
return user
except IndexError:
raise AuthenticationError('bad password')
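
The elided `verify, newhash = ...` call above implements rehash-on-login: verify the password against the stored hash and, when the hash uses a deprecated scheme, compute a fresh one while the clear-text password is available, then persist it. passlib exposes the same pattern directly; shown here only as an analogue of what cubicweb's own helpers do:

    from passlib.context import CryptContext

    ctx = CryptContext(schemes=['sha256_crypt', 'md5_crypt'],
                       deprecated=['md5_crypt'])

    def check_password(stored_hash, password, save_hash):
        ok, newhash = ctx.verify_and_update(password, stored_hash)
        if not ok:
            raise ValueError('bad password')
        if newhash is not None:   # stored hash used a deprecated scheme
            save_hash(newhash)    # persist the upgraded hash
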
class EmailPasswordAuthentifier(BaseAuthentifier):
- def authenticate(self, session, login, **authinfo):
+ def authenticate(self, cnx, login, **authinfo):
        # email_auth flag prevents infinite recursion (the call to
# repo.check_auth_info at the end of this method may lead us here again)
if not '@' in login or authinfo.pop('email_auth', None):
raise AuthenticationError('not an email')
- rset = session.execute('Any L WHERE U login L, U primary_email M, '
+ rset = cnx.execute('Any L WHERE U login L, U primary_email M, '
'M address %(login)s', {'login': login},
build_descr=False)
if rset.rowcount != 1:
raise AuthenticationError('unexisting email')
login = rset.rows[0][0]
authinfo['email_auth'] = True
- return self.source.repo.check_auth_info(session, login, authinfo)
+ return self.source.repo.check_auth_info(cnx, login, authinfo)
+
class DatabaseIndependentBackupRestore(object):
"""Helper class to perform db backend agnostic backup and restore
@@ -1721,7 +1552,7 @@
self.cnx = self.get_connection()
try:
self.cursor = self.cnx.cursor()
- self.cursor.arraysize=100
+ self.cursor.arraysize = 100
self.logger.info('writing metadata')
self.write_metadata(archive)
for seq in self.get_sequences():
@@ -1737,7 +1568,6 @@
def get_tables(self):
non_entity_tables = ['entities',
- 'deleted_entities',
'transactions',
'tx_entity_actions',
'tx_relation_actions',
@@ -1765,8 +1595,8 @@
archive.writestr('tables.txt', '\n'.join(self.get_tables()))
archive.writestr('sequences.txt', '\n'.join(self.get_sequences()))
versions = self._get_versions()
- versions_str = '\n'.join('%s %s' % (k,v)
- for k,v in versions)
+ versions_str = '\n'.join('%s %s' % (k, v)
+ for k, v in versions)
archive.writestr('versions.txt', versions_str)
def write_sequence(self, archive, seq):
--- a/server/sources/pyrorql.py Mon Feb 17 11:13:27 2014 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,63 +0,0 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
-# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
-#
-# This file is part of CubicWeb.
-#
-# CubicWeb is free software: you can redistribute it and/or modify it under the
-# terms of the GNU Lesser General Public License as published by the Free
-# Software Foundation, either version 2.1 of the License, or (at your option)
-# any later version.
-#
-# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
-# details.
-#
-# You should have received a copy of the GNU Lesser General Public License along
-# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""Source to query another RQL repository using pyro"""
-
-__docformat__ = "restructuredtext en"
-_ = unicode
-
-# module is lazily imported
-import warnings
-warnings.warn('Imminent drop of pyrorql source. Switch to datafeed now!',
- DeprecationWarning)
-
-import threading
-from Pyro.errors import PyroError, ConnectionClosedError
-
-from cubicweb import ConnectionError
-from cubicweb.server.sources import ConnectionWrapper
-
-from cubicweb.server.sources.remoterql import RemoteSource
-
-class PyroRQLSource(RemoteSource):
- """External repository source, using Pyro connection"""
-
- def get_connection(self):
- try:
- return self._get_connection()
- except (ConnectionError, PyroError) as ex:
- self.critical("can't get connection to source %s: %s", self.uri, ex)
- return ConnectionWrapper()
-
- def check_connection(self, cnx):
- """check connection validity, return None if the connection is still valid
- else a new connection
- """
- # we have to transfer manually thread ownership. This can be done safely
- # since the connections set holding the connection is affected to one
- # session/thread and can't be called simultaneously
- try:
- cnx._repo._transferThread(threading.currentThread())
- except AttributeError:
- # inmemory connection
- pass
- try:
- return super(PyroRQLSource, self).check_connection(cnx)
- except ConnectionClosedError:
- # try to reconnect
- return self.get_connection()
-
--- a/server/sources/remoterql.py Mon Feb 17 11:13:27 2014 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,670 +0,0 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
-# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
-#
-# This file is part of CubicWeb.
-#
-# CubicWeb is free software: you can redistribute it and/or modify it under the
-# terms of the GNU Lesser General Public License as published by the Free
-# Software Foundation, either version 2.1 of the License, or (at your option)
-# any later version.
-#
-# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
-# details.
-#
-# You should have received a copy of the GNU Lesser General Public License along
-# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""Source to query another RQL remote repository"""
-
-__docformat__ = "restructuredtext en"
-_ = unicode
-
-from os.path import join
-from base64 import b64decode
-
-from logilab.common.configuration import REQUIRED
-
-from yams.schema import role_name
-
-from rql.nodes import Constant
-from rql.utils import rqlvar_maker
-
-from cubicweb import dbapi, server
-from cubicweb import ValidationError, BadConnectionId, UnknownEid
-from cubicweb.schema import VIRTUAL_RTYPES
-from cubicweb.server.sources import (AbstractSource, ConnectionWrapper,
- TimedCache, dbg_st_search, dbg_results)
-from cubicweb.server.msplanner import neged_relation
-
-def uidtype(union, col, etype, args):
- select, col = union.locate_subquery(col, etype, args)
- return getattr(select.selection[col], 'uidtype', None)
-
-
-class ReplaceByInOperator(Exception):
- def __init__(self, eids):
- self.eids = eids
-
-class RemoteSource(AbstractSource):
- """Generic external repository source"""
-
- # boolean telling if modification hooks should be called when something is
- # modified in this source
- should_call_hooks = False
- # boolean telling if the repository should connect to this source during
- # migration
- connect_for_migration = False
-
- options = (
-
- ('cubicweb-user',
- {'type' : 'string',
- 'default': REQUIRED,
- 'help': 'user to use for connection on the distant repository',
- 'group': 'remote-source', 'level': 0,
- }),
- ('cubicweb-password',
- {'type' : 'password',
- 'default': '',
- 'help': 'user to use for connection on the distant repository',
- 'group': 'remote-source', 'level': 0,
- }),
- ('base-url',
- {'type' : 'string',
- 'default': '',
- 'help': 'url of the web site for the distant repository, if you want '
- 'to generate external link to entities from this repository',
- 'group': 'remote-source', 'level': 1,
- }),
- ('skip-external-entities',
- {'type' : 'yn',
- 'default': False,
- 'help': 'should entities not local to the source be considered or not',
- 'group': 'remote-source', 'level': 0,
- }),
- ('synchronization-interval',
- {'type' : 'time',
- 'default': '5min',
- 'help': 'interval between synchronization with the external \
-repository (default to 5 minutes).',
- 'group': 'remote-source', 'level': 2,
- }))
-
- PUBLIC_KEYS = AbstractSource.PUBLIC_KEYS + ('base-url',)
-
- _conn = None
-
- def __init__(self, repo, source_config, eid=None):
- super(RemoteSource, self).__init__(repo, source_config, eid)
- self._query_cache = TimedCache(1800)
-
- def update_config(self, source_entity, processed_config):
- """update configuration from source entity"""
- super(RemoteSource, self).update_config(source_entity, processed_config)
- baseurl = processed_config.get('base-url')
- if baseurl and not baseurl.endswith('/'):
- processed_config['base-url'] += '/'
- self.config = processed_config
- self._skip_externals = processed_config['skip-external-entities']
- if source_entity is not None:
- self.latest_retrieval = source_entity.latest_retrieval
-
- def _entity_update(self, source_entity):
- super(RemoteSource, self)._entity_update(source_entity)
- if self.urls and len(self.urls) > 1:
- raise ValidationError(source_entity.eid, {'url': _('can only have one url')})
-
- def get_connection(self):
- try:
- return self._get_connection()
- except ConnectionError as ex:
- self.critical("can't get connection to source %s: %s", self.uri, ex)
- return ConnectionWrapper()
-
- def _get_connection(self):
- """open and return a connection to the source"""
- self.info('connecting to source %s as user %s',
- self.urls[0], self.config['cubicweb-user'])
- # XXX check protocol according to source type (zmq / pyro)
- return dbapi.connect(self.urls[0], login=self.config['cubicweb-user'],
- password=self.config['cubicweb-password'])
-
- def reset_caches(self):
- """method called during test to reset potential source caches"""
- self._query_cache = TimedCache(1800)
-
- def init(self, activated, source_entity):
- """method called by the repository once ready to handle request"""
- super(RemoteSource, self).init(activated, source_entity)
- self.load_mapping(source_entity._cw)
- if activated:
- interval = self.config['synchronization-interval']
- self.repo.looping_task(interval, self.synchronize)
- self.repo.looping_task(self._query_cache.ttl.seconds/10,
- self._query_cache.clear_expired)
- self.latest_retrieval = source_entity.latest_retrieval
-
- def load_mapping(self, session=None):
- self.support_entities = {}
- self.support_relations = {}
- self.dont_cross_relations = set(('owned_by', 'created_by'))
- self.cross_relations = set()
- assert self.eid is not None
- self._schemacfg_idx = {}
- self._load_mapping(session)
-
- etype_options = set(('write',))
- rtype_options = set(('maycross', 'dontcross', 'write',))
-
- def _check_options(self, schemacfg, allowedoptions):
- if schemacfg.options:
- options = set(w.strip() for w in schemacfg.options.split(':'))
- else:
- options = set()
- if options - allowedoptions:
- options = ', '.join(sorted(options - allowedoptions))
- msg = _('unknown option(s): %s' % options)
- raise ValidationError(schemacfg.eid, {role_name('options', 'subject'): msg})
- return options
-
- def add_schema_config(self, schemacfg, checkonly=False):
- """added CWSourceSchemaConfig, modify mapping accordingly"""
- try:
- ertype = schemacfg.schema.name
- except AttributeError:
- msg = schemacfg._cw._("attribute/relation can't be mapped, only "
- "entity and relation types")
- raise ValidationError(schemacfg.eid, {role_name('cw_for_schema', 'subject'): msg})
- if schemacfg.schema.__regid__ == 'CWEType':
- options = self._check_options(schemacfg, self.etype_options)
- if not checkonly:
- self.support_entities[ertype] = 'write' in options
- else: # CWRType
- if ertype in ('is', 'is_instance_of', 'cw_source') or ertype in VIRTUAL_RTYPES:
- msg = schemacfg._cw._('%s relation should not be in mapped') % ertype
- raise ValidationError(schemacfg.eid, {role_name('cw_for_schema', 'subject'): msg})
- options = self._check_options(schemacfg, self.rtype_options)
- if 'dontcross' in options:
- if 'maycross' in options:
- msg = schemacfg._("can't mix dontcross and maycross options")
- raise ValidationError(schemacfg.eid, {role_name('options', 'subject'): msg})
- if 'write' in options:
- msg = schemacfg._("can't mix dontcross and write options")
- raise ValidationError(schemacfg.eid, {role_name('options', 'subject'): msg})
- if not checkonly:
- self.dont_cross_relations.add(ertype)
- elif not checkonly:
- self.support_relations[ertype] = 'write' in options
- if 'maycross' in options:
- self.cross_relations.add(ertype)
- if not checkonly:
- # add to an index to ease deletion handling
- self._schemacfg_idx[schemacfg.eid] = ertype
-
- def del_schema_config(self, schemacfg, checkonly=False):
- """deleted CWSourceSchemaConfig, modify mapping accordingly"""
- if checkonly:
- return
- try:
- ertype = self._schemacfg_idx[schemacfg.eid]
- if ertype[0].isupper():
- del self.support_entities[ertype]
- else:
- if ertype in self.support_relations:
- del self.support_relations[ertype]
- if ertype in self.cross_relations:
- self.cross_relations.remove(ertype)
- else:
- self.dont_cross_relations.remove(ertype)
- except Exception:
- self.error('while updating mapping consequently to removal of %s',
- schemacfg)
-
- def local_eid(self, cnx, extid, session):
- etype, dexturi, dextid = cnx.describe(extid)
- if dexturi == 'system' or not (
- dexturi in self.repo.sources_by_uri or self._skip_externals):
- assert etype in self.support_entities, etype
- eid = self.repo.extid2eid(self, str(extid), etype, session)
- if eid > 0:
- return eid, True
- elif dexturi in self.repo.sources_by_uri:
- source = self.repo.sources_by_uri[dexturi]
- cnx = session.cnxset.connection(source.uri)
- eid = source.local_eid(cnx, dextid, session)[0]
- return eid, False
- return None, None
-
- def synchronize(self, mtime=None):
- """synchronize content known by this repository with content in the
- external repository
- """
- self.info('synchronizing remote source %s', self.uri)
- cnx = self.get_connection()
- try:
- extrepo = cnx._repo
- except AttributeError:
- # fake connection wrapper returned when we can't connect to the
- # external source (hence we've no chance to synchronize...)
- return
- etypes = list(self.support_entities)
- if mtime is None:
- mtime = self.latest_retrieval
- updatetime, modified, deleted = extrepo.entities_modified_since(etypes, mtime)
- self._query_cache.clear()
- repo = self.repo
- session = repo.internal_session()
- source = repo.system_source
- try:
- for etype, extid in modified:
- try:
- eid = self.local_eid(cnx, extid, session)[0]
- if eid is not None:
- rset = session.eid_rset(eid, etype)
- entity = rset.get_entity(0, 0)
- entity.complete(entity.e_schema.indexable_attributes())
- source.index_entity(session, entity)
- except Exception:
- self.exception('while updating %s with external id %s of source %s',
- etype, extid, self.uri)
- continue
- for etype, extid in deleted:
- try:
- eid = self.repo.extid2eid(self, str(extid), etype, session,
- insert=False)
- # entity has been deleted from external repository but is not known here
- if eid is not None:
- entity = session.entity_from_eid(eid, etype)
- repo.delete_info(session, entity, self.uri,
- scleanup=self.eid)
- except Exception:
- if self.repo.config.mode == 'test':
- raise
- self.exception('while updating %s with external id %s of source %s',
- etype, extid, self.uri)
- continue
- self.latest_retrieval = updatetime
- session.execute('SET X latest_retrieval %(date)s WHERE X eid %(x)s',
- {'x': self.eid, 'date': self.latest_retrieval})
- session.commit()
- finally:
- session.close()
-
- def get_connection(self):
- raise NotImplementedError()
-
- def check_connection(self, cnx):
- """check connection validity, return None if the connection is still valid
- else a new connection
- """
- if not isinstance(cnx, ConnectionWrapper):
- try:
- cnx.check()
- return # ok
- except BadConnectionId:
- pass
- # try to reconnect
- return self.get_connection()
-
- def syntax_tree_search(self, session, union, args=None, cachekey=None,
- varmap=None):
- assert dbg_st_search(self.uri, union, varmap, args, cachekey)
- rqlkey = union.as_string(kwargs=args)
- try:
- results = self._query_cache[rqlkey]
- except KeyError:
- results = self._syntax_tree_search(session, union, args)
- self._query_cache[rqlkey] = results
- assert dbg_results(results)
- return results
-
- def _syntax_tree_search(self, session, union, args):
- """return result from this source for a rql query (actually from a rql
- syntax tree and a solution dictionary mapping each used variable to a
- possible type). If cachekey is given, the query necessary to fetch the
- results (but not the results themselves) may be cached using this key.
- """
- if not args is None:
- args = args.copy()
- # get cached cursor anyway
- cu = session.cnxset[self.uri]
- if cu is None:
- # this is a ConnectionWrapper instance
- msg = session._("can't connect to source %s, some data may be missing")
- session.set_shared_data('sources_error', msg % self.uri, txdata=True)
- return []
- translator = RQL2RQL(self)
- try:
- rql = translator.generate(session, union, args)
- except UnknownEid as ex:
- if server.DEBUG:
- print ' unknown eid', ex, 'no results'
- return []
- if server.DEBUG & server.DBG_RQL:
- print ' translated rql', rql
- try:
- rset = cu.execute(rql, args)
- except Exception as ex:
- self.exception(str(ex))
- msg = session._("error while querying source %s, some data may be missing")
- session.set_shared_data('sources_error', msg % self.uri, txdata=True)
- return []
- descr = rset.description
- if rset:
- needtranslation = []
- rows = rset.rows
- for i, etype in enumerate(descr[0]):
- if (etype is None or not self.schema.eschema(etype).final
- or uidtype(union, i, etype, args)):
- needtranslation.append(i)
- if needtranslation:
- cnx = session.cnxset.connection(self.uri)
- for rowindex in xrange(rset.rowcount - 1, -1, -1):
- row = rows[rowindex]
- localrow = False
- for colindex in needtranslation:
- if row[colindex] is not None: # optional variable
- eid, local = self.local_eid(cnx, row[colindex], session)
- if local:
- localrow = True
- if eid is not None:
- row[colindex] = eid
- else:
- # skip this row
- del rows[rowindex]
- del descr[rowindex]
- break
- else:
- # skip row if it only contains eids of entities which
- # are actually from a source we also know locally,
- # except if some args specified (XXX should actually
- # check if there are some args local to the source)
- if not (translator.has_local_eid or localrow):
- del rows[rowindex]
- del descr[rowindex]
- results = rows
- else:
- results = []
- return results
-
- def _entity_relations_and_kwargs(self, session, entity):
- relations = []
- kwargs = {'x': self.repo.eid2extid(self, entity.eid, session)}
- for key, val in entity.cw_attr_cache.iteritems():
- relations.append('X %s %%(%s)s' % (key, key))
- kwargs[key] = val
- return relations, kwargs
-
- def add_entity(self, session, entity):
- """add a new entity to the source"""
- raise NotImplementedError()
-
- def update_entity(self, session, entity):
- """update an entity in the source"""
- relations, kwargs = self._entity_relations_and_kwargs(session, entity)
- cu = session.cnxset[self.uri]
- cu.execute('SET %s WHERE X eid %%(x)s' % ','.join(relations), kwargs)
- self._query_cache.clear()
- entity.cw_clear_all_caches()
-
- def delete_entity(self, session, entity):
- """delete an entity from the source"""
- if session.deleted_in_transaction(self.eid):
- # source is being deleted, don't propagate
- self._query_cache.clear()
- return
- cu = session.cnxset[self.uri]
- cu.execute('DELETE %s X WHERE X eid %%(x)s' % entity.cw_etype,
- {'x': self.repo.eid2extid(self, entity.eid, session)})
- self._query_cache.clear()
-
- def add_relation(self, session, subject, rtype, object):
- """add a relation to the source"""
- cu = session.cnxset[self.uri]
- cu.execute('SET X %s Y WHERE X eid %%(x)s, Y eid %%(y)s' % rtype,
- {'x': self.repo.eid2extid(self, subject, session),
- 'y': self.repo.eid2extid(self, object, session)})
- self._query_cache.clear()
- session.entity_from_eid(subject).cw_clear_all_caches()
- session.entity_from_eid(object).cw_clear_all_caches()
-
- def delete_relation(self, session, subject, rtype, object):
- """delete a relation from the source"""
- if session.deleted_in_transaction(self.eid):
- # source is being deleted, don't propagate
- self._query_cache.clear()
- return
- cu = session.cnxset[self.uri]
- cu.execute('DELETE X %s Y WHERE X eid %%(x)s, Y eid %%(y)s' % rtype,
- {'x': self.repo.eid2extid(self, subject, session),
- 'y': self.repo.eid2extid(self, object, session)})
- self._query_cache.clear()
- session.entity_from_eid(subject).cw_clear_all_caches()
- session.entity_from_eid(object).cw_clear_all_caches()
-
-
-class RQL2RQL(object):
- """translate a local rql query to be executed on a distant repository"""
- def __init__(self, source):
- self.source = source
- self.repo = source.repo
- self.current_operator = None
-
- def _accept_children(self, node):
- res = []
- for child in node.children:
- rql = child.accept(self)
- if rql is not None:
- res.append(rql)
- return res
-
- def generate(self, session, rqlst, args):
- self._session = session
- self.kwargs = args
- self.need_translation = False
- self.has_local_eid = False
- return self.visit_union(rqlst)
-
- def visit_union(self, node):
- s = self._accept_children(node)
- if len(s) > 1:
- return ' UNION '.join('(%s)' % q for q in s)
- return s[0]
-
- def visit_select(self, node):
- """return the tree as an encoded rql string"""
- self._varmaker = rqlvar_maker(defined=node.defined_vars.copy())
- self._const_var = {}
- if node.distinct:
- base = 'DISTINCT Any'
- else:
- base = 'Any'
- s = ['%s %s' % (base, ','.join(v.accept(self) for v in node.selection))]
- if node.groupby:
- s.append('GROUPBY %s' % ', '.join(group.accept(self)
- for group in node.groupby))
- if node.orderby:
- s.append('ORDERBY %s' % ', '.join(self.visit_sortterm(term)
- for term in node.orderby))
- if node.limit is not None:
- s.append('LIMIT %s' % node.limit)
- if node.offset:
- s.append('OFFSET %s' % node.offset)
- restrictions = []
- if node.where is not None:
- nr = node.where.accept(self)
- if nr is not None:
- restrictions.append(nr)
- if restrictions:
- s.append('WHERE %s' % ','.join(restrictions))
-
- if node.having:
- s.append('HAVING %s' % ', '.join(term.accept(self)
- for term in node.having))
- subqueries = []
- for subquery in node.with_:
- subqueries.append('%s BEING (%s)' % (','.join(ca.name for ca in subquery.aliases),
- self.visit_union(subquery.query)))
- if subqueries:
- s.append('WITH %s' % (','.join(subqueries)))
- return ' '.join(s)
-
- def visit_and(self, node):
- res = self._accept_children(node)
- if res:
- return ', '.join(res)
- return
-
- def visit_or(self, node):
- res = self._accept_children(node)
- if len(res) > 1:
- return ' OR '.join('(%s)' % rql for rql in res)
- elif res:
- return res[0]
- return
-
- def visit_not(self, node):
- rql = node.children[0].accept(self)
- if rql:
- return 'NOT (%s)' % rql
- return
-
- def visit_exists(self, node):
- rql = node.children[0].accept(self)
- if rql:
- return 'EXISTS(%s)' % rql
- return
-
- def visit_relation(self, node):
- try:
- if isinstance(node.children[0], Constant):
- # simplified rqlst, reintroduce eid relation
- try:
- restr, lhs = self.process_eid_const(node.children[0])
- except UnknownEid:
- # can safely skip not relation with an unsupported eid
- if neged_relation(node):
- return
- raise
- else:
- lhs = node.children[0].accept(self)
- restr = None
- except UnknownEid:
- # can safely skip not relation with an unsupported eid
- if neged_relation(node):
- return
- # XXX what about optional relation or outer NOT EXISTS()
- raise
- if node.optional in ('left', 'both'):
- lhs += '?'
- if node.r_type == 'eid' or not self.source.schema.rschema(node.r_type).final:
- self.need_translation = True
- self.current_operator = node.operator()
- if isinstance(node.children[0], Constant):
- self.current_etypes = (node.children[0].uidtype,)
- else:
- self.current_etypes = node.children[0].variable.stinfo['possibletypes']
- try:
- rhs = node.children[1].accept(self)
- except UnknownEid:
- # can safely skip not relation with an unsupported eid
- if neged_relation(node):
- return
- # XXX what about optional relation or outer NOT EXISTS()
- raise
- except ReplaceByInOperator as ex:
- rhs = 'IN (%s)' % ','.join(eid for eid in ex.eids)
- self.need_translation = False
- self.current_operator = None
- if node.optional in ('right', 'both'):
- rhs += '?'
- if restr is not None:
- return '%s %s %s, %s' % (lhs, node.r_type, rhs, restr)
- return '%s %s %s' % (lhs, node.r_type, rhs)
-
- def visit_comparison(self, node):
- if node.operator in ('=', 'IS'):
- return node.children[0].accept(self)
- return '%s %s' % (node.operator.encode(),
- node.children[0].accept(self))
-
- def visit_mathexpression(self, node):
- return '(%s %s %s)' % (node.children[0].accept(self),
- node.operator.encode(),
- node.children[1].accept(self))
-
- def visit_function(self, node):
- #if node.name == 'IN':
- res = []
- for child in node.children:
- try:
- rql = child.accept(self)
- except UnknownEid as ex:
- continue
- res.append(rql)
- if not res:
- raise ex
- return '%s(%s)' % (node.name, ', '.join(res))
-
- def visit_constant(self, node):
- if self.need_translation or node.uidtype:
- if node.type == 'Int':
- self.has_local_eid = True
- return str(self.eid2extid(node.value))
- if node.type == 'Substitute':
- key = node.value
- # ensure we have not yet translated the value...
- if not key in self._const_var:
- self.kwargs[key] = self.eid2extid(self.kwargs[key])
- self._const_var[key] = None
- self.has_local_eid = True
- return node.as_string()
-
- def visit_variableref(self, node):
- """get the sql name for a variable reference"""
- return node.name
-
- def visit_sortterm(self, node):
- if node.asc:
- return node.term.accept(self)
- return '%s DESC' % node.term.accept(self)
-
- def process_eid_const(self, const):
- value = const.eval(self.kwargs)
- try:
- return None, self._const_var[value]
- except Exception:
- var = self._varmaker.next()
- self.need_translation = True
- restr = '%s eid %s' % (var, self.visit_constant(const))
- self.need_translation = False
- self._const_var[value] = var
- return restr, var
-
- def eid2extid(self, eid):
- try:
- return self.repo.eid2extid(self.source, eid, self._session)
- except UnknownEid:
- operator = self.current_operator
- if operator is not None and operator != '=':
- # deal with query like "X eid > 12"
- #
- # The problem is that eid order in the external source may
- # differ from the local source
- #
- # So search for all eids from this source matching the condition
- # locally and then to replace the "> 12" branch by "IN (eids)"
- #
- # XXX we may have to insert a huge number of eids...)
- sql = "SELECT extid FROM entities WHERE source='%s' AND type IN (%s) AND eid%s%s"
- etypes = ','.join("'%s'" % etype for etype in self.current_etypes)
- cu = self._session.system_sql(sql % (self.source.uri, etypes,
- operator, eid))
- # XXX buggy cu.rowcount which may be zero while there are some
- # results
- rows = cu.fetchall()
- if rows:
- raise ReplaceByInOperator((b64decode(r[0]) for r in rows))
- raise
-
--- a/server/sources/rql2sql.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/sources/rql2sql.py Mon Feb 17 15:32:50 2014 +0100
@@ -58,8 +58,8 @@
from rql import BadRQLQuery, CoercionError
from rql.utils import common_parent
from rql.stmts import Union, Select
-from rql.nodes import (SortTerm, VariableRef, Constant, Function, Variable, Or,
- Not, Comparison, ColumnAlias, Relation, SubQuery, Exists)
+from rql.nodes import (VariableRef, Constant, Function, Variable, Or,
+ Not, Comparison, ColumnAlias, Relation, SubQuery)
from cubicweb import QueryError
from cubicweb.rqlrewrite import cleanup_solutions
--- a/server/sources/storages.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/sources/storages.py Mon Feb 17 15:32:50 2014 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -236,7 +236,7 @@
"""return the current fs_path of the attribute, or None is the attr is
not stored yet.
"""
- sysource = entity._cw.cnxset.source('system')
+ sysource = entity._cw.repo.system_source
cu = sysource.doexec(entity._cw,
'SELECT cw_%s FROM cw_%s WHERE cw_eid=%s' % (
attr, entity.cw_etype, entity.eid))
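
The raw SQL above relies on cubicweb's fixed naming scheme: entity type `T` lives in table `cw_T` and attribute `a` in column `cw_a` (see `SQL_PREFIX = 'cw_'` in server/sqlutils.py). For illustration, fetching attribute `data` of a `File` entity with eid 1234 amounts to:

    SQL_PREFIX = 'cw_'
    attr, etype, eid = 'data', 'File', 1234
    sql = 'SELECT %s%s FROM %s%s WHERE cw_eid=%s' % (
        SQL_PREFIX, attr, SQL_PREFIX, etype, eid)
    # -> SELECT cw_data FROM cw_File WHERE cw_eid=1234
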
--- a/server/sources/zmqrql.py Mon Feb 17 11:13:27 2014 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,26 +0,0 @@
-# copyright 2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
-# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
-#
-# This file is part of CubicWeb.
-#
-# CubicWeb is free software: you can redistribute it and/or modify it under the
-# terms of the GNU Lesser General Public License as published by the Free
-# Software Foundation, either version 2.1 of the License, or (at your option)
-# any later version.
-#
-# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
-# details.
-#
-# You should have received a copy of the GNU Lesser General Public License along
-# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""Source to query another RQL repository using pyro"""
-
-__docformat__ = "restructuredtext en"
-_ = unicode
-
-from cubicweb.server.sources.remoterql import RemoteSource
-
-class ZMQRQLSource(RemoteSource):
- """External repository source, using ZMQ sockets"""
--- a/server/sqlutils.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/sqlutils.py Mon Feb 17 15:32:50 2014 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -19,15 +19,18 @@
__docformat__ = "restructuredtext en"
+import sys
import os
import re
import subprocess
-from datetime import datetime, date
+from os.path import abspath
from itertools import ifilter
+from logging import getLogger
from logilab import database as db, common as lgc
from logilab.common.shellutils import ProgressBar
-from logilab.common.date import todate, todatetime, utcdatetime, utctime
+from logilab.common.deprecation import deprecated
+from logilab.common.logging_ext import set_log_methods
from logilab.database.sqlgen import SQLGenerator
from cubicweb import Binary, ConfigurationError
@@ -35,7 +38,6 @@
from cubicweb.schema import PURE_VIRTUAL_RTYPES
from cubicweb.server import SQL_CONNECT_HOOKS
from cubicweb.server.utils import crypt_password
-from rql.utils import RQL_FUNCTIONS_REGISTRY
lgc.USE_MX_DATETIME = False
SQL_PREFIX = 'cw_'
@@ -177,10 +179,125 @@
for name in ifilter(_SQL_DROP_ALL_USER_TABLES_FILTER_FUNCTION, dbhelper.list_tables(sqlcursor))]
return '\n'.join(cmds)
+
+class ConnectionWrapper(object):
+ """handle connection to the system source, at some point associated to a
+ :class:`Session`
+ """
+
+ # since 3.19, we only have to manage the system source connection
+ def __init__(self, system_source):
+        # connection (and cursor) to the system source
+ self._source = system_source
+ self.cnx = system_source.get_connection()
+ self.cu = self.cnx.cursor()
+
+ def commit(self):
+ """commit the current transaction for this user"""
+        # let exceptions propagate
+ self.cnx.commit()
+
+ def rollback(self):
+ """rollback the current transaction for this user"""
+        # catch exceptions so a broken connection can be replaced
+ try:
+ self.cnx.rollback()
+ except Exception:
+ self._source.critical('rollback error', exc_info=sys.exc_info())
+            # an error on rollback most probably means the connection is in a
+            # bad state; replace it with a new one
+ self.reconnect()
+
+ def close(self, i_know_what_i_do=False):
+ """close all connections in the set"""
+ if i_know_what_i_do is not True: # unexpected closing safety belt
+ raise RuntimeError('connections set shouldn\'t be closed')
+ try:
+ self.cu.close()
+ self.cu = None
+ except Exception:
+ pass
+ try:
+ self.cnx.close()
+ self.cnx = None
+ except Exception:
+ pass
+
+ # internals ###############################################################
+
+ def cnxset_freed(self):
+ """connections set is being freed from a session"""
+        pass # do nothing by default
+
+ def reconnect(self):
+ """reopen a connection for this source or all sources if none specified
+ """
+ try:
+ # properly close existing connection if any
+ self.cnx.close()
+ except Exception:
+ pass
+ self._source.info('trying to reconnect')
+ self.cnx = self._source.get_connection()
+ self.cu = self.cnx.cursor()
+
+ @deprecated('[3.19] use .cu instead')
+ def __getitem__(self, uri):
+ assert uri == 'system'
+ return self.cu
+
+ @deprecated('[3.19] use repo.system_source instead')
+ def source(self, uid):
+ assert uid == 'system'
+ return self._source
+
+ @deprecated('[3.19] use .cnx instead')
+ def connection(self, uid):
+ assert uid == 'system'
+ return self.cnx
+
+
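
These `@deprecated` shims keep the old multi-source style access (`cnxset['system']`, `cnxset.source('system')`, `cnxset.connection('system')`) working while steering callers to `.cu`, `.cnx` and `repo.system_source`. `logilab.common.deprecation.deprecated` wraps the function so each call emits a DeprecationWarning before delegating, e.g.:

    from logilab.common.deprecation import deprecated

    class Api(object):
        def new_way(self):
            return 42

        @deprecated('[3.19] use new_way() instead')
        def old_way(self):
            return self.new_way()

    Api().old_way()   # still works, but warns '[3.19] use new_way() instead'
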
+class SqliteConnectionWrapper(ConnectionWrapper):
+ """Sqlite specific connection wrapper: close the connection each time it's
+ freed (and reopen it later when needed)
+ """
+ def __init__(self, system_source):
+ # don't call parent's __init__, we don't want to initiate the connection
+ self._source = system_source
+
+ _cnx = None
+
+ def cnxset_freed(self):
+ self.cu.close()
+ self.cnx.close()
+ self.cnx = self.cu = None
+
+ @property
+ def cnx(self):
+ if self._cnx is None:
+ self._cnx = self._source.get_connection()
+ self._cu = self._cnx.cursor()
+ return self._cnx
+ @cnx.setter
+ def cnx(self, value):
+ self._cnx = value
+
+ @property
+ def cu(self):
+ if self._cnx is None:
+ self._cnx = self._source.get_connection()
+ self._cu = self._cnx.cursor()
+ return self._cu
+ @cu.setter
+ def cu(self, value):
+ self._cu = value
+
+
class SQLAdapterMixIn(object):
"""Mixin for SQL data sources, getting a connection from a configuration
dictionary and handling connection locking
"""
+ cnx_wrap = ConnectionWrapper
def __init__(self, source_config):
try:
@@ -208,6 +325,15 @@
self._binary = self.dbhelper.binary_value
self._process_value = dbapi_module.process_value
self._dbencoding = dbencoding
+ if self.dbdriver == 'sqlite':
+ self.cnx_wrap = SqliteConnectionWrapper
+ self.dbhelper.dbname = abspath(self.dbhelper.dbname)
+
+ def wrapped_connection(self):
+ """open and return a connection to the database, wrapped into a class
+        handling reconnection and cursor management
+ """
+ return self.cnx_wrap(self)
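
A usage sketch of the wrapper (with a hypothetical `source` implementing SQLAdapterMixIn): regular backends get a plain ConnectionWrapper, while sqlite gets the lazy variant whose `cnxset_freed()` really closes the connection and whose properties reopen it on next access:

    cnxset = source.wrapped_connection()
    cnxset.cu.execute('SELECT eid FROM entities LIMIT 1')
    print cnxset.cu.fetchone()
    cnxset.commit()
    cnxset.cnxset_freed()   # no-op, except for sqlite which closes here
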
def get_connection(self):
"""open and return a connection to the database"""
@@ -319,10 +445,11 @@
# only defining here to prevent pylint from complaining
info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
-from logging import getLogger
-from cubicweb import set_log_methods
set_log_methods(SQLAdapterMixIn, getLogger('cubicweb.sqladapter'))
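
`set_log_methods` injects `debug/info/warning/error/critical/exception` methods bound to the given logger onto the class; the lambda placeholders above exist only so pylint knows the names. A sketch of what the logilab helper does:

    import logging

    def set_log_methods(cls, logger):
        # bind the usual logging methods on the class
        for meth in ('debug', 'info', 'warning', 'error',
                     'critical', 'exception'):
            setattr(cls, meth, staticmethod(getattr(logger, meth)))
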
+
+# connection initialization functions ##########################################
+
def init_sqlite_connexion(cnx):
class group_concat(object):
--- a/server/ssplanner.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/ssplanner.py Mon Feb 17 15:32:50 2014 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -68,13 +68,13 @@
"""return a dict mapping rqlst variable object to their eid if specified in
the syntax tree
"""
- session = plan.session
+ cnx = plan.cnx
if rqlst.where is None:
return {}
eidconsts = {}
- neweids = session.transaction_data.get('neweids', ())
- checkread = session.read_security
- eschema = session.vreg.schema.eschema
+ neweids = cnx.transaction_data.get('neweids', ())
+ checkread = cnx.read_security
+ eschema = cnx.vreg.schema.eschema
for rel in rqlst.where.get_nodes(Relation):
# only care for 'eid' relations ...
if (rel.r_type == 'eid'
@@ -89,9 +89,9 @@
# the generated select substep if not emited (eg nothing
# to be selected)
if checkread and eid not in neweids:
- with session.security_enabled(read=False):
- eschema(session.describe(eid)[0]).check_perm(
- session, 'read', eid=eid)
+ with cnx.security_enabled(read=False):
+ eschema(cnx.entity_metas(eid)['type']).check_perm(
+ cnx, 'read', eid=eid)
eidconsts[lhs.variable] = eid
return eidconsts
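
`security_enabled(read=False)` temporarily disables implicit read-security filtering so that the explicit `check_perm` call is the only check applied (otherwise the metadata lookup itself could be filtered out). As a context manager it follows the usual save/override/restore shape; a rough sketch built on the `read_security` attribute used above:

    from contextlib import contextmanager

    @contextmanager
    def security_enabled(cnx, read=None):
        oldread = cnx.read_security
        if read is not None:
            cnx.read_security = read
        try:
            yield
        finally:
            cnx.read_security = oldread
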
@@ -145,17 +145,17 @@
the rqlst should not be tagged at this point.
"""
plan.preprocess(rqlst)
- return (OneFetchStep(plan, rqlst, plan.session.repo.sources),)
+ return (OneFetchStep(plan, rqlst),)
def build_insert_plan(self, plan, rqlst):
"""get an execution plan from an INSERT RQL query"""
# each variable in main variables is a new entity to insert
to_build = {}
- session = plan.session
- etype_class = session.vreg['etypes'].etype_class
+ cnx = plan.cnx
+ etype_class = cnx.vreg['etypes'].etype_class
for etype, var in rqlst.main_variables:
# need to do this since entity class is shared w. web client code !
- to_build[var.name] = EditedEntity(etype_class(etype)(session))
+ to_build[var.name] = EditedEntity(etype_class(etype)(cnx))
plan.add_entity_def(to_build[var.name])
# add constant values to entity def, mark variables to be selected
to_select = _extract_const_attributes(plan, rqlst, to_build)
@@ -311,24 +311,6 @@
maprepr[var] = '%s.%s' % (tablesinorder[table], col)
return maprepr
-def offset_result(offset, result):
- offset -= len(result)
- if offset < 0:
- result = result[offset:]
- offset = None
- elif offset == 0:
- offset = None
- result = ()
- return offset, result
-
-
-class LimitOffsetMixIn(object):
- limit = offset = None
- def set_limit_offset(self, limit, offset):
- self.limit = limit
- self.offset = offset or None
-
-
class Step(object):
"""base abstract class for execution step"""
def __init__(self, plan):
@@ -357,29 +339,21 @@
[step.test_repr() for step in self.children],)
-class OneFetchStep(LimitOffsetMixIn, Step):
+class OneFetchStep(Step):
"""step consisting in fetching data from sources and directly returning
results
"""
- def __init__(self, plan, union, sources, inputmap=None):
+ def __init__(self, plan, union, inputmap=None):
Step.__init__(self, plan)
self.union = union
- self.sources = sources
self.inputmap = inputmap
- self.set_limit_offset(union.children[-1].limit, union.children[-1].offset)
-
- def set_limit_offset(self, limit, offset):
- LimitOffsetMixIn.set_limit_offset(self, limit, offset)
- for select in self.union.children:
- select.limit = limit
- select.offset = offset
def execute(self):
"""call .syntax_tree_search with the given syntax tree on each
source for each solution
"""
self.execute_children()
- session = self.plan.session
+ cnx = self.plan.cnx
args = self.plan.args
inputmap = self.inputmap
union = self.union
@@ -395,31 +369,9 @@
cachekey = tuple(cachekey)
else:
cachekey = union.as_string()
- result = []
- # limit / offset processing
- limit = self.limit
- offset = self.offset
- if offset is not None:
- if len(self.sources) > 1:
- # we'll have to deal with limit/offset by ourself
- if union.children[-1].limit:
- union.children[-1].limit = limit + offset
- union.children[-1].offset = None
- else:
- offset, limit = None, None
- for source in self.sources:
- if offset is None and limit is not None:
- # modifying the sample rqlst is enough since sql generation
- # will pick it here as well
- union.children[-1].limit = limit - len(result)
- result_ = source.syntax_tree_search(session, union, args, cachekey,
- inputmap)
- if offset is not None:
- offset, result_ = offset_result(offset, result_)
- result += result_
- if limit is not None:
- if len(result) >= limit:
- return result[:limit]
+ # get results for query
+ source = cnx.repo.system_source
+ result = source.syntax_tree_search(cnx, union, args, cachekey, inputmap)
#print 'ONEFETCH RESULT %s' % (result)
return result
@@ -432,8 +384,7 @@
return (self.__class__.__name__,
sorted((r.as_string(kwargs=self.plan.args), r.solutions)
for r in self.union.children),
- self.limit, self.offset,
- sorted(self.sources), inputmap)
+ inputmap)
# UPDATE/INSERT/DELETE steps ##################################################
@@ -515,8 +466,8 @@
results = self.execute_child()
if results:
todelete = frozenset(int(eid) for eid, in results)
- session = self.plan.session
- session.repo.glob_delete_entities(session, todelete)
+ cnx = self.plan.cnx
+ cnx.repo.glob_delete_entities(cnx, todelete)
return results
class DeleteRelationsStep(Step):
@@ -528,10 +479,10 @@
def execute(self):
"""execute this step"""
- session = self.plan.session
- delete = session.repo.glob_delete_relation
+ cnx = self.plan.cnx
+ delete = cnx.repo.glob_delete_relation
for subj, obj in self.execute_child():
- delete(session, subj, self.rtype, obj)
+ delete(cnx, subj, self.rtype, obj)
class UpdateStep(Step):
@@ -545,8 +496,8 @@
def execute(self):
"""execute this step"""
- session = self.plan.session
- repo = session.repo
+ cnx = self.plan.cnx
+ repo = cnx.repo
edefs = {}
relations = {}
# insert relations
@@ -564,7 +515,7 @@
try:
edited = edefs[eid]
except KeyError:
- edef = session.entity_from_eid(eid)
+ edef = cnx.entity_from_eid(eid)
edefs[eid] = edited = EditedEntity(edef)
edited.edited_attribute(str(rschema), rhsval)
else:
@@ -575,9 +526,9 @@
relations[str_rschema] = [(lhsval, rhsval)]
result[i] = newrow
# update entities
- repo.glob_add_relations(session, relations)
+ repo.glob_add_relations(cnx, relations)
for eid, edited in edefs.iteritems():
- repo.glob_update_entity(session, edited)
+ repo.glob_update_entity(cnx, edited)
return result
def _handle_relterm(info, row, newrow):
--- a/server/test/data-schemaserial/bootstrap_cubes Mon Feb 17 11:13:27 2014 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
-card,comment,folder,tag,basket,email,file,localperms
--- a/server/test/data-schemaserial/schema.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/test/data-schemaserial/schema.py Mon Feb 17 15:32:50 2014 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -16,241 +16,17 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-from yams.buildobjs import (EntityType, RelationType, RelationDefinition,
- SubjectRelation, RichString, String, Int, Float,
- Boolean, Datetime, TZDatetime, Bytes)
-from yams.constraints import SizeConstraint
-from cubicweb.schema import (WorkflowableEntityType,
- RQLConstraint, RQLUniqueConstraint,
- ERQLExpression, RRQLExpression)
-
-from yams.buildobjs import make_type
+from yams.buildobjs import EntityType, SubjectRelation, String, make_type
BabarTestType = make_type('BabarTestType')
-
-class Affaire(WorkflowableEntityType):
- __permissions__ = {
- 'read': ('managers',
- ERQLExpression('X owned_by U'), ERQLExpression('X concerne S?, S owned_by U')),
- 'add': ('managers', ERQLExpression('X concerne S, S owned_by U')),
- 'update': ('managers', 'owners', ERQLExpression('X in_state S, S name in ("pitetre", "en cours")')),
- 'delete': ('managers', 'owners', ERQLExpression('X concerne S, S owned_by U')),
- }
-
- ref = String(fulltextindexed=True, indexed=True,
- constraints=[SizeConstraint(16)])
- sujet = String(fulltextindexed=True,
- constraints=[SizeConstraint(256)])
- descr = RichString(fulltextindexed=True,
- description=_('more detailed description'))
-
- duration = Int()
- invoiced = Float()
- opt_attr = Bytes()
-
- depends_on = SubjectRelation('Affaire')
- require_permission = SubjectRelation('CWPermission')
- concerne = SubjectRelation(('Societe', 'Note'))
- todo_by = SubjectRelation('Personne', cardinality='?*')
- documented_by = SubjectRelation('Card')
-
-
-class Societe(EntityType):
- __unique_together__ = [('nom', 'type', 'cp')]
- __permissions__ = {
- 'read': ('managers', 'users', 'guests'),
- 'update': ('managers', 'owners', ERQLExpression('U login L, X nom L')),
- 'delete': ('managers', 'owners', ERQLExpression('U login L, X nom L')),
- 'add': ('managers', 'users',)
- }
-
- nom = String(maxsize=64, fulltextindexed=True)
- web = String(maxsize=128)
- type = String(maxsize=128) # attribute in common with Note
- tel = Int()
- fax = Int()
- rncs = String(maxsize=128)
- ad1 = String(maxsize=128)
- ad2 = String(maxsize=128)
- ad3 = String(maxsize=128)
- cp = String(maxsize=12)
- ville= String(maxsize=32)
-
-
-class Division(Societe):
- __specializes_schema__ = True
-
-class SubDivision(Division):
- __specializes_schema__ = True
-
-class travaille_subdivision(RelationDefinition):
- subject = 'Personne'
- object = 'SubDivision'
-
-from cubicweb.schemas.base import CWUser
-CWUser.get_relations('login').next().fulltextindexed = True
-
-class Note(WorkflowableEntityType):
- date = String(maxsize=10)
- type = String(maxsize=6)
- para = String(maxsize=512,
- __permissions__ = {
- 'read': ('managers', 'users', 'guests'),
- 'update': ('managers', ERQLExpression('X in_state S, S name "todo"')),
- })
-
- migrated_from = SubjectRelation('Note')
- attachment = SubjectRelation('File')
- inline1 = SubjectRelation('Affaire', inlined=True, cardinality='?*',
- constraints=[RQLUniqueConstraint('S type T, S inline1 A1, A1 todo_by C, '
- 'Y type T, Y inline1 A2, A2 todo_by C',
- 'S,Y')])
- todo_by = SubjectRelation('CWUser')
+class Affaire(EntityType):
+ nom = String(unique=True, maxsize=64)
class Personne(EntityType):
__unique_together__ = [('nom', 'prenom', 'inline2')]
nom = String(fulltextindexed=True, required=True, maxsize=64)
prenom = String(fulltextindexed=True, maxsize=64)
- sexe = String(maxsize=1, default='M', fulltextindexed=True)
- promo = String(vocabulary=('bon','pasbon'))
- titre = String(fulltextindexed=True, maxsize=128)
- adel = String(maxsize=128)
- ass = String(maxsize=128)
- web = String(maxsize=128)
- tel = Int()
- fax = Int()
- datenaiss = Datetime()
- tzdatenaiss = TZDatetime()
- test = Boolean(__permissions__={
- 'read': ('managers', 'users', 'guests'),
- 'update': ('managers',),
- })
- description = String()
- firstname = String(fulltextindexed=True, maxsize=64)
-
- concerne = SubjectRelation('Affaire')
- connait = SubjectRelation('Personne')
inline2 = SubjectRelation('Affaire', inlined=True, cardinality='?*')
custom_field_of_jungle = BabarTestType(jungle_speed=42)
-
-class Old(EntityType):
- name = String()
-
-
-class connait(RelationType):
- symmetric = True
-
-class concerne(RelationType):
- __permissions__ = {
- 'read': ('managers', 'users', 'guests'),
- 'add': ('managers', RRQLExpression('U has_update_permission S')),
- 'delete': ('managers', RRQLExpression('O owned_by U')),
- }
-
-class travaille(RelationDefinition):
- __permissions__ = {
- 'read': ('managers', 'users', 'guests'),
- 'add': ('managers', RRQLExpression('U has_update_permission S')),
- 'delete': ('managers', RRQLExpression('O owned_by U')),
- }
- subject = 'Personne'
- object = 'Societe'
-
-class comments(RelationDefinition):
- subject = 'Comment'
- object = 'Personne'
-
-class fiche(RelationDefinition):
- inlined = True
- subject = 'Personne'
- object = 'Card'
- cardinality = '??'
-
-class multisource_inlined_rel(RelationDefinition):
- inlined = True
- cardinality = '?*'
- subject = ('Card', 'Note')
- object = ('Affaire', 'Note')
-
-class multisource_rel(RelationDefinition):
- subject = ('Card', 'Note')
- object = 'Note'
-
-class multisource_crossed_rel(RelationDefinition):
- subject = ('Card', 'Note')
- object = 'Note'
-
-
-class see_also_1(RelationDefinition):
- name = 'see_also'
- subject = object = 'Folder'
-
-class see_also_2(RelationDefinition):
- name = 'see_also'
- subject = ('Bookmark', 'Note')
- object = ('Bookmark', 'Note')
-
-class evaluee(RelationDefinition):
- subject = ('Personne', 'CWUser', 'Societe')
- object = ('Note')
-
-class ecrit_par(RelationType):
- inlined = True
-
-class ecrit_par_1(RelationDefinition):
- name = 'ecrit_par'
- subject = 'Note'
- object ='Personne'
- constraints = [RQLConstraint('E concerns P, S version_of P')]
- cardinality = '?*'
-
-class ecrit_par_2(RelationDefinition):
- name = 'ecrit_par'
- subject = 'Note'
- object ='CWUser'
- cardinality='?*'
-
-
-class copain(RelationDefinition):
- subject = object = 'CWUser'
-
-class tags(RelationDefinition):
- subject = 'Tag'
- object = ('CWUser', 'CWGroup', 'State', 'Note', 'Card', 'Affaire')
-
-class filed_under(RelationDefinition):
- subject = ('Note', 'Affaire')
- object = 'Folder'
-
-class require_permission(RelationDefinition):
- subject = ('Card', 'Note', 'Personne')
- object = 'CWPermission'
-
-class require_state(RelationDefinition):
- subject = 'CWPermission'
- object = 'State'
-
-class personne_composite(RelationDefinition):
- subject='Personne'
- object='Personne'
- composite='subject'
-
-class personne_inlined(RelationDefinition):
- subject='Personne'
- object='Personne'
- cardinality='?*'
- inlined=True
-
-
-class login_user(RelationDefinition):
- subject = 'Personne'
- object = 'CWUser'
- cardinality = '??'
-
-class ambiguous_inlined(RelationDefinition):
- subject = ('Affaire', 'Note')
- object = 'CWUser'
- inlined = True
- cardinality = '?*'
--- a/server/test/data/schema.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/test/data/schema.py Mon Feb 17 15:32:50 2014 +0100
@@ -174,14 +174,6 @@
subject = ('Card', 'Note')
object = ('Affaire', 'Note')
-class multisource_rel(RelationDefinition):
- subject = ('Card', 'Note')
- object = 'Note'
-
-class multisource_crossed_rel(RelationDefinition):
- subject = ('Card', 'Note')
- object = 'Note'
-
class see_also_1(RelationDefinition):
name = 'see_also'
--- a/server/test/data/sources_postgres Mon Feb 17 11:13:27 2014 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,14 +0,0 @@
-[system]
-
-db-driver = postgres
-db-host = localhost
-db-port = 5433
-adapter = native
-db-name = cw_fti_test
-db-encoding = UTF-8
-db-user = syt
-db-password = syt
-
-[admin]
-login = admin
-password = gingkow
--- a/server/test/unittest_datafeed.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/test/unittest_datafeed.py Mon Feb 17 15:32:50 2014 +0100
@@ -1,4 +1,4 @@
-# copyright 2011-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2011-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -31,7 +31,6 @@
def test(self):
self.assertIn('myfeed', self.repo.sources_by_uri)
dfsource = self.repo.sources_by_uri['myfeed']
- self.assertNotIn(dfsource, self.repo.sources)
self.assertEqual(dfsource.latest_retrieval, None)
self.assertEqual(dfsource.synchro_interval, timedelta(seconds=60))
self.assertFalse(dfsource.fresh())
@@ -71,8 +70,8 @@
self.assertEqual(entity.absolute_url(), 'http://www.cubicweb.org/')
# test repo cache keys
self.assertEqual(self.repo._type_source_cache[entity.eid],
- ('Card', 'system', 'http://www.cubicweb.org/', 'myfeed'))
- self.assertEqual(self.repo._extid_cache[('http://www.cubicweb.org/', 'system')],
+ ('Card', 'http://www.cubicweb.org/', 'myfeed'))
+ self.assertEqual(self.repo._extid_cache['http://www.cubicweb.org/'],
entity.eid)
# test repull
session.set_cnxset()
@@ -87,8 +86,8 @@
self.assertEqual(stats['created'], set())
self.assertEqual(stats['updated'], set((entity.eid,)))
self.assertEqual(self.repo._type_source_cache[entity.eid],
- ('Card', 'system', 'http://www.cubicweb.org/', 'myfeed'))
- self.assertEqual(self.repo._extid_cache[('http://www.cubicweb.org/', 'system')],
+ ('Card', 'http://www.cubicweb.org/', 'myfeed'))
+ self.assertEqual(self.repo._extid_cache['http://www.cubicweb.org/'],
entity.eid)
self.assertEqual(dfsource.source_cwuris(self.session),
@@ -110,8 +109,8 @@
'extid': 'http://www.cubicweb.org/'}
)
self.assertEqual(self.repo._type_source_cache[entity.eid],
- ('Card', 'system', 'http://www.cubicweb.org/', 'myrenamedfeed'))
- self.assertEqual(self.repo._extid_cache[('http://www.cubicweb.org/', 'system')],
+ ('Card', 'http://www.cubicweb.org/', 'myrenamedfeed'))
+ self.assertEqual(self.repo._extid_cache['http://www.cubicweb.org/'],
entity.eid)
# test_delete_source
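The unittest_datafeed.py updates track the new shape of the repository's entity caches now that entities no longer record which source they live in: _type_source_cache values shrink from (type, source, extid, source-uri) 4-tuples to (type, extid, source-uri) 3-tuples, and _extid_cache is keyed by the extid alone rather than an (extid, source) pair. A before/after sketch, with attribute names taken from the tests and repo standing for a live Repository:

url = 'http://www.cubicweb.org/'
# before: eid = repo._extid_cache[(url, 'system')]
#         etype, source, extid, source_uri = repo._type_source_cache[eid]
# after: the system source is implicit, so the extid itself is the key
eid = repo._extid_cache[url]
etype, extid, source_uri = repo._type_source_cache[eid]
assert (etype, extid, source_uri) == ('Card', url, 'myfeed')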
--- a/server/test/unittest_migractions.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/test/unittest_migractions.py Mon Feb 17 15:32:50 2014 +0100
@@ -45,19 +45,18 @@
tags = CubicWebTC.tags | Tags(('server', 'migration', 'migractions'))
- @classmethod
- def _init_repo(cls):
- super(MigrationCommandsTC, cls)._init_repo()
+ def _init_repo(self):
+ super(MigrationCommandsTC, self)._init_repo()
        # we have to read the schema from the database to get eids for schema entities
- cls.repo.set_schema(cls.repo.deserialize_schema(), resetvreg=False)
+ self.repo.set_schema(self.repo.deserialize_schema(), resetvreg=False)
        # hack to read the schema from data/migratedapp
- config = cls.config
+ config = self.config
config.appid = join('data', 'migratedapp')
- config._apphome = cls.datapath('migratedapp')
+ config._apphome = self.datapath('migratedapp')
global migrschema
migrschema = config.load_schema()
config.appid = 'data'
- config._apphome = cls.datadir
+ config._apphome = self.datadir
assert 'Folder' in migrschema
def setUp(self):
@@ -628,12 +627,12 @@
#
        # also we need more tests about introducing/removing base classes or
        # the specialization relationship...
- self.session.data['rebuild-infered'] = True
+ self.session.set_shared_data('rebuild-infered', True)
try:
self.session.execute('DELETE X specializes Y WHERE Y name "Para"')
self.session.commit(free_cnxset=False)
finally:
- self.session.data['rebuild-infered'] = False
+ self.session.set_shared_data('rebuild-infered', False)
self.assertEqual(sorted(et.type for et in self.schema['Para'].specialized_by()),
[])
self.assertEqual(self.schema['Note'].specializes(), None)
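Two independent changes in unittest_migractions.py: _init_repo becomes a regular instance method, and the transient 'rebuild-infered' flag goes through the session's shared-data API instead of mutating session.data directly. A sketch of the latter, assuming a cubicweb session exposing set_shared_data:

# toggle the flag through the session API; the try/finally mirrors the test
session.set_shared_data('rebuild-infered', True)
try:
    session.execute('DELETE X specializes Y WHERE Y name "Para"')
    session.commit(free_cnxset=False)
finally:
    session.set_shared_data('rebuild-infered', False)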
--- a/server/test/unittest_msplanner.py Mon Feb 17 11:13:27 2014 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,2809 +0,0 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
-# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
-#
-# This file is part of CubicWeb.
-#
-# CubicWeb is free software: you can redistribute it and/or modify it under the
-# terms of the GNU Lesser General Public License as published by the Free
-# Software Foundation, either version 2.1 of the License, or (at your option)
-# any later version.
-#
-# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
-# details.
-#
-# You should have received a copy of the GNU Lesser General Public License along
-# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-"""unit tests for module cubicweb.server.msplanner"""
-
-from logilab.common.decorators import clear_cache
-from yams.buildobjs import RelationDefinition
-from rql import BadRQLQuery
-
-from cubicweb.devtools import get_test_db_handler, TestServerConfiguration
-from cubicweb.devtools.repotest import BasePlannerTC, test_plan
-
-class _SetGenerator(object):
- """singleton to easily create set using "s[0]" or "s[0,1,2]" for instance
- """
- def __getitem__(self, key):
- try:
- it = iter(key)
- except TypeError:
- it = (key,)
- return set(it)
-s = _SetGenerator()
-
-from cubicweb.schema import ERQLExpression
-from cubicweb.server.sources import AbstractSource
-from cubicweb.server.msplanner import MSPlanner, PartPlanInformation
-
-class FakeUserROSource(AbstractSource):
- support_entities = {'CWUser': False}
- support_relations = {}
- def syntax_tree_search(self, *args, **kwargs):
- return []
-
-
-class FakeCardSource(AbstractSource):
- support_entities = {'Card': True, 'Note': True, 'State': True}
- support_relations = {'in_state': True, 'multisource_rel': True, 'multisource_inlined_rel': True,
- 'multisource_crossed_rel': True,}
- dont_cross_relations = set(('fiche', 'state_of'))
- cross_relations = set(('multisource_crossed_rel',))
-
- def syntax_tree_search(self, *args, **kwargs):
- return []
-
-
-class FakeDataFeedSource(FakeCardSource):
- copy_based_source = True
-
-X_ALL_SOLS = sorted([{'X': 'Affaire'}, {'X': 'BaseTransition'}, {'X': 'Basket'},
- {'X': 'Bookmark'}, {'X': 'CWAttribute'}, {'X': 'CWCache'},
- {'X': 'CWConstraint'}, {'X': 'CWConstraintType'}, {'X': 'CWDataImport'}, {'X': 'CWEType'},
- {'X': 'CWGroup'}, {'X': 'CWPermission'}, {'X': 'CWProperty'},
- {'X': 'CWRType'}, {'X': 'CWRelation'},
- {'X': 'CWSource'}, {'X': 'CWSourceHostConfig'}, {'X': 'CWSourceSchemaConfig'},
- {'X': 'CWUser'}, {'X': 'CWUniqueTogetherConstraint'},
- {'X': 'Card'}, {'X': 'Comment'}, {'X': 'Division'},
- {'X': 'Email'}, {'X': 'EmailAddress'}, {'X': 'EmailPart'},
- {'X': 'EmailThread'}, {'X': 'ExternalUri'}, {'X': 'File'},
- {'X': 'Folder'}, {'X': 'Note'}, {'X': 'Old'},
- {'X': 'Personne'}, {'X': 'RQLExpression'}, {'X': 'Societe'},
- {'X': 'State'}, {'X': 'SubDivision'}, {'X': 'SubWorkflowExitPoint'},
- {'X': 'Tag'}, {'X': 'TrInfo'}, {'X': 'Transition'},
- {'X': 'Workflow'}, {'X': 'WorkflowTransition'}])
-
-
-# keep cnx so it's not garbage collected, which would close the associated session
-def setUpModule(*args):
- global repo, cnx
- handler = get_test_db_handler(TestServerConfiguration(apphome=BaseMSPlannerTC.datadir))
- handler.build_db_cache()
- repo, cnx = handler.get_repo_and_cnx()
-
-def tearDownModule(*args):
- global repo, cnx
- del repo, cnx
-
-
-class BaseMSPlannerTC(BasePlannerTC):
- """test planner related feature on a 3-sources repository:
-
- * system source supporting everything
- * ldap source supporting CWUser
- * rql source supporting Card
- """
-
- def setUp(self):
- self.__class__.repo = repo
- #_QuerierTC.setUp(self)
- self.setup()
- # hijack Affaire security
- affreadperms = list(self.schema['Affaire'].permissions['read'])
- self.prevrqlexpr_affaire = affreadperms[-1]
- # add access to type attribute so S can't be invariant
- affreadperms[-1] = ERQLExpression('X concerne S?, S owned_by U, S type "X"')
- self.schema['Affaire'].set_action_permissions('read', affreadperms)
- # hijack CWUser security
- userreadperms = list(self.schema['CWUser'].permissions['read'])
- self.prevrqlexpr_user = userreadperms[-1]
- userreadperms[-1] = ERQLExpression('X owned_by U')
- self.schema['CWUser'].set_action_permissions('read', userreadperms)
- self.add_source(FakeUserROSource, 'ldap')
- self.add_source(FakeCardSource, 'cards')
- self.add_source(FakeDataFeedSource, 'datafeed')
-
- def tearDown(self):
- # restore hijacked security
- self.restore_orig_affaire_security()
- self.restore_orig_cwuser_security()
- super(BaseMSPlannerTC, self).tearDown()
-
- def restore_orig_affaire_security(self):
- affreadperms = list(self.schema['Affaire'].permissions['read'])
- affreadperms[-1] = self.prevrqlexpr_affaire
- self.schema['Affaire'].set_action_permissions('read', affreadperms)
-
- def restore_orig_cwuser_security(self):
- if hasattr(self, '_orig_cwuser_security_restored'):
- return
- self._orig_cwuser_security_restored = True
- userreadperms = list(self.schema['CWUser'].permissions['read'])
- userreadperms[-1] = self.prevrqlexpr_user
- self.schema['CWUser'].set_action_permissions('read', userreadperms)
-
-
-class PartPlanInformationTC(BaseMSPlannerTC):
-
- def _test(self, rql, *args):
- if len(args) == 3:
- kwargs, sourcesterms, needsplit = args
- else:
- sourcesterms, needsplit = args
- kwargs = None
- plan = self._prepare_plan(rql, kwargs)
- union = plan.rqlst
- plan.preprocess(union)
- ppi = PartPlanInformation(plan, union.children[0])
- for sourcevars in ppi._sourcesterms.itervalues():
- for var in list(sourcevars):
- solindices = sourcevars.pop(var)
- sourcevars[var._ms_table_key()] = solindices
- self.assertEqual(ppi._sourcesterms, sourcesterms)
- self.assertEqual(ppi.needsplit, needsplit)
-
-
- def test_simple_system_only(self):
- """retrieve entities only supported by the system source"""
- self._test('CWGroup X',
- {self.system: {'X': s[0]}}, False)
-
- def test_simple_system_ldap(self):
- """retrieve CWUser X from both sources and return concatenation of results
- """
- self._test('CWUser X',
- {self.system: {'X': s[0]}, self.ldap: {'X': s[0]}}, False)
-
- def test_simple_system_rql(self):
- """retrieve Card X from both sources and return concatenation of results
- """
- self._test('Any X, XT WHERE X is Card, X title XT',
- {self.system: {'X': s[0]}, self.cards: {'X': s[0]}}, False)
-
- def test_simple_eid_specified(self):
- """retrieve CWUser X from system source (eid is specified, can locate the entity)
- """
- ueid = self.session.user.eid
- self._test('Any X,L WHERE X eid %(x)s, X login L', {'x': ueid},
- {self.system: {'X': s[0]}}, False)
-
- def test_simple_eid_invariant(self):
- """retrieve CWUser X from system source (eid is specified, can locate the entity)
- """
- ueid = self.session.user.eid
- self._test('Any X WHERE X eid %(x)s', {'x': ueid},
- {self.system: {'x': s[0]}}, False)
-
- def test_simple_invariant(self):
- """retrieve CWUser X from system source only (X is invariant and in_group not supported by ldap source)
- """
- self._test('Any X WHERE X is CWUser, X in_group G, G name "users"',
- {self.system: {'X': s[0], 'G': s[0], 'in_group': s[0]}}, False)
-
- def test_security_has_text(self):
- """retrieve CWUser X from system source only (has_text not supported by ldap source)
- """
-        # specify CWUser instead of Any since, as written, this test doesn't deal
-        # well with ambiguous queries (e.g. it only considers the first solution)
- self._test('CWUser X WHERE X has_text "bla"',
- {self.system: {'X': s[0]}}, False)
-
- def test_complex_base(self):
- """
- 1. retrieve Any X, L WHERE X is CWUser, X login L from system and ldap sources, store
- concatenation of results into a temporary table
- 2. return the result of Any X, L WHERE X is TMP, X login L, X in_group G,
- G name 'users' on the system source
- """
- self._test('Any X,L WHERE X is CWUser, X in_group G, X login L, G name "users"',
- {self.system: {'X': s[0], 'G': s[0], 'in_group': s[0]},
- self.ldap : {'X': s[0]}}, True)
-
- def test_complex_invariant_ordered(self):
- """
- 1. retrieve Any X,AA WHERE X modification_date AA from system and ldap sources, store
- concatenation of results into a temporary table
- 2. return the result of Any X,AA ORDERBY AA WHERE %s owned_by X, X modification_date AA
- on the system source
- """
- ueid = self.session.user.eid
- self._test('Any X,AA ORDERBY AA WHERE E eid %(x)s, E owned_by X, X modification_date AA', {'x': ueid},
- {self.system: {'x': s[0], 'X': s[0], 'owned_by': s[0]},
- self.ldap : {'X': s[0]}}, True)
-
- def test_complex_invariant(self):
- """
- 1. retrieve Any X,L,AA WHERE X login L, X modification_date AA from system and ldap sources, store
- concatenation of results into a temporary table
- 2. return the result of Any X,L,AA WHERE %s owned_by X, X login L, X modification_date AA
- on the system source
- """
- ueid = self.session.user.eid
- self._test('Any X,L,AA WHERE E eid %(x)s, E owned_by X, X login L, X modification_date AA', {'x': ueid},
- {self.system: {'x': s[0], 'X': s[0], 'owned_by': s[0]},
- self.ldap : {'X': s[0]}}, True)
-
- def test_complex_ambigous(self):
- """retrieve CWUser X from system and ldap sources, Person X from system source only
- """
- self._test('Any X,F WHERE X firstname F',
- {self.system: {'X': s[0, 1]},
- self.ldap: {'X': s[0]}}, True)
-
- def test_complex_multiple(self):
- """
- 1. retrieve Any X,A,Y,B WHERE X login A, Y login B from system and ldap sources, store
- cartesian product of results into a temporary table
- 2. return the result of Any X,Y WHERE X login 'syt', Y login 'adim'
- on the system source
- """
- ueid = self.session.user.eid
- self._test('Any X,Y WHERE X login "syt", Y login "adim"', {'x': ueid},
- {self.system: {'Y': s[0], 'X': s[0]},
- self.ldap: {'Y': s[0], 'X': s[0]}}, True)
-
- def test_complex_aggregat(self):
- solindexes = set(range(len([e for e in self.schema.entities() if not e.final])))
- self._test('Any MAX(X)',
- {self.system: {'X': solindexes}}, False)
-
- def test_complex_optional(self):
- ueid = self.session.user.eid
- self._test('Any U WHERE WF wf_info_for X, X eid %(x)s, WF owned_by U?, WF from_state FS', {'x': ueid},
- {self.system: {'WF': s[0], 'FS': s[0], 'U': s[0],
- 'from_state': s[0], 'owned_by': s[0], 'wf_info_for': s[0],
- 'x': s[0]}},
- False)
-
- def test_exists4(self):
- """
- State S could come from both rql source and system source,
- but since X cannot come from the rql source, the solution
- {self.cards : 'S'} must be removed
- """
- self._test('Any G,L WHERE X in_group G, X login L, G name "managers", '
- 'EXISTS(X copain T, T login L, T login in ("comme", "cochon")) OR '
- 'EXISTS(X in_state S, S name "pascontent", NOT X copain T2, T2 login "billy")',
- {self.system: {'X': s[0], 'S': s[0], 'T2': s[0], 'T': s[0], 'G': s[0], 'copain': s[0], 'in_group': s[0]},
- self.ldap: {'X': s[0], 'T2': s[0], 'T': s[0]}},
- True)
-
- def test_relation_need_split(self):
- self._test('Any X, S WHERE X in_state S',
- {self.system: {'X': s[0, 1, 2], 'S': s[0, 1, 2]},
- self.cards: {'X': s[2], 'S': s[2]}},
- True)
-
- def test_not_relation_need_split(self):
- self._test('Any SN WHERE NOT X in_state S, S name SN',
- {self.cards: {'X': s[2], 'S': s[0, 1, 2]},
- self.system: {'X': s[0, 1, 2], 'S': s[0, 1, 2]}},
- True)
-
- def test_not_relation_no_split_external(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- # similar to the above test but with an eid coming from the external source.
- # the same plan may be used, since we won't find any record in the system source
-        # linking 999999 to a state
- self._test('Any SN WHERE NOT X in_state S, X eid %(x)s, S name SN',
- {'x': 999999},
- {self.cards: {'x': s[0], 'S': s[0]},
- self.system: {'x': s[0], 'S': s[0]}},
- False)
-
- def test_relation_restriction_ambigous_need_split(self):
- self._test('Any X,T WHERE X in_state S, S name "pending", T tags X',
- {self.system: {'X': s[0, 1, 2], 'S': s[0, 1, 2], 'T': s[0, 1, 2], 'tags': s[0, 1, 2]},
- self.cards: {'X': s[2], 'S': s[2]}},
- True)
-
- def test_simplified_var(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- # need access to source since X table has to be accessed because of the outer join
- self._test('Any U WHERE U in_group G, (G name IN ("managers", "logilab") OR (X require_permission P?, P name "bla", P require_group G)), X eid %(x)s, U eid %(u)s',
- {'x': 999999, 'u': self.session.user.eid},
- {self.system: {'P': s[0], 'G': s[0],
-                                  'require_permission': s[0], 'in_group': s[0], 'require_group': s[0],
- 'u': s[0]},
- self.cards: {'X': s[0]}},
- True)
-
- def test_delete_relation1(self):
- ueid = self.session.user.eid
- self._test('Any X, Y WHERE X created_by Y, X eid %(x)s, NOT Y eid %(y)s',
- {'x': ueid, 'y': ueid},
- {self.system: {'Y': s[0], 'created_by': s[0], 'x': s[0]}},
- False)
-
- def test_crossed_relation_eid_1_needattr(self):
- repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
- ueid = self.session.user.eid
- self._test('Any Y,T WHERE X eid %(x)s, X multisource_crossed_rel Y, Y type T',
- {'x': 999999,},
- {self.cards: {'Y': s[0]}, self.system: {'Y': s[0], 'x': s[0]}},
- True)
-
- def test_crossed_relation_eid_1_invariant(self):
- repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
- self._test('Any Y WHERE X eid %(x)s, X multisource_crossed_rel Y',
- {'x': 999999},
- {self.system: {'Y': s[0], 'x': s[0]}},
- False)
-
- def test_crossed_relation_eid_2_invariant(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self._test('Any Y WHERE X eid %(x)s, X multisource_crossed_rel Y',
- {'x': 999999,},
- {self.cards: {'Y': s[0], 'multisource_crossed_rel': s[0], 'x': s[0]},
- self.system: {'Y': s[0], 'multisource_crossed_rel': s[0], 'x': s[0]}},
- False)
-
- def test_version_crossed_depends_on_1(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self._test('Any X,AD,AE WHERE E eid %(x)s, E multisource_crossed_rel X, X in_state AD, AD name AE',
- {'x': 999999},
- {self.cards: {'X': s[0], 'AD': s[0], 'multisource_crossed_rel': s[0], 'x': s[0]},
- self.system: {'X': s[0], 'AD': s[0], 'multisource_crossed_rel': s[0], 'x': s[0]}},
- True)
-
- def test_version_crossed_depends_on_2(self):
- repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
- self._test('Any X,AD,AE WHERE E eid %(x)s, E multisource_crossed_rel X, X in_state AD, AD name AE',
- {'x': 999999},
- {self.cards: {'X': s[0], 'AD': s[0]},
- self.system: {'X': s[0], 'AD': s[0], 'x': s[0]}},
- True)
-
- def test_simplified_var_3(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- repo._type_source_cache[999998] = ('State', 'cards', 999998, 'cards')
- self._test('Any S,T WHERE S eid %(s)s, N eid %(n)s, N type T, N is Note, S is State',
- {'n': 999999, 's': 999998},
- {self.cards: {'s': s[0], 'N': s[0]}}, False)
-
-
-
-class MSPlannerTC(BaseMSPlannerTC):
-
- def setUp(self):
- BaseMSPlannerTC.setUp(self)
- self.planner = MSPlanner(self.o.schema, self.repo.vreg.rqlhelper)
- for cached in ('rel_type_sources', 'can_cross_relation', 'is_multi_sources_relation'):
- clear_cache(self.repo, cached)
-
- _test = test_plan
-
- def test_simple_system_only(self):
- """retrieve entities only supported by the system source
- """
- self._test('CWGroup X',
- [('OneFetchStep', [('Any X WHERE X is CWGroup', [{'X': 'CWGroup'}])],
- None, None, [self.system], {}, [])])
-
- def test_simple_system_only_limit(self):
- """retrieve entities only supported by the system source
- """
- self._test('CWGroup X LIMIT 10',
- [('OneFetchStep', [('Any X LIMIT 10 WHERE X is CWGroup', [{'X': 'CWGroup'}])],
- 10, None, [self.system], {}, [])])
-
- def test_simple_system_only_limit_offset(self):
- """retrieve entities only supported by the system source
- """
- self._test('CWGroup X LIMIT 10 OFFSET 10',
- [('OneFetchStep', [('Any X LIMIT 10 OFFSET 10 WHERE X is CWGroup', [{'X': 'CWGroup'}])],
- 10, 10, [self.system], {}, [])])
-
- def test_simple_system_ldap(self):
- """retrieve CWUser X from both sources and return concatenation of results
- """
- self._test('CWUser X',
- [('OneFetchStep', [('Any X WHERE X is CWUser', [{'X': 'CWUser'}])],
- None, None, [self.ldap, self.system], {}, [])])
-
- def test_simple_system_ldap_limit(self):
- """retrieve CWUser X from both sources and return concatenation of results
- """
- self._test('CWUser X LIMIT 10',
- [('OneFetchStep', [('Any X LIMIT 10 WHERE X is CWUser', [{'X': 'CWUser'}])],
- 10, None, [self.ldap, self.system], {}, [])])
-
- def test_simple_system_ldap_limit_offset(self):
- """retrieve CWUser X from both sources and return concatenation of results
- """
- self._test('CWUser X LIMIT 10 OFFSET 10',
- [('OneFetchStep', [('Any X LIMIT 10 OFFSET 10 WHERE X is CWUser', [{'X': 'CWUser'}])],
- 10, 10, [self.ldap, self.system], {}, [])])
-
- def test_simple_system_ldap_ordered_limit_offset(self):
- """retrieve CWUser X from both sources and return concatenation of results
- """
- self._test('CWUser X ORDERBY X LIMIT 10 OFFSET 10',
- [('AggrStep', 'SELECT table0.C0 FROM table0\nORDER BY table0.C0\nLIMIT 10\nOFFSET 10', None, [
- ('FetchStep', [('Any X WHERE X is CWUser', [{'X': 'CWUser'}])],
- [self.ldap, self.system], {}, {'X': 'table0.C0'}, []),
- ]),
- ])
- def test_simple_system_ldap_aggregat(self):
- """retrieve CWUser X from both sources and return concatenation of results
- """
- # COUNT(X) is kept in sub-step and transformed into SUM(X) in the AggrStep
- self._test('Any COUNT(X) WHERE X is CWUser',
- [('AggrStep', 'SELECT SUM(table0.C0) FROM table0', None, [
- ('FetchStep', [('Any COUNT(X) WHERE X is CWUser', [{'X': 'CWUser'}])],
- [self.ldap, self.system], {}, {'COUNT(X)': 'table0.C0'}, []),
- ]),
- ])
-
- def test_simple_system_rql(self):
- """retrieve Card X from both sources and return concatenation of results
- """
- self._test('Any X, XT WHERE X is Card, X title XT',
- [('OneFetchStep', [('Any X,XT WHERE X is Card, X title XT', [{'X': 'Card', 'XT': 'String'}])],
- None, None, [self.cards, self.system], {}, [])])
-
- def test_simple_eid_specified(self):
- """retrieve CWUser X from system source (eid is specified, can locate the entity)
- """
- ueid = self.session.user.eid
- self._test('Any X,L WHERE X eid %(x)s, X login L',
- [('OneFetchStep', [('Any X,L WHERE X eid %s, X login L'%ueid, [{'X': 'CWUser', 'L': 'String'}])],
- None, None, [self.system], {}, [])],
- {'x': ueid})
-
- def test_simple_eid_invariant(self):
- """retrieve CWUser X from system source (eid is specified, can locate the entity)
- """
- ueid = self.session.user.eid
- self._test('Any X WHERE X eid %(x)s',
- [('OneFetchStep', [('Any %s'%ueid, [{}])],
- None, None, [self.system], {}, [])],
- {'x': ueid})
-
- def test_simple_invariant(self):
- """retrieve CWUser X from system source only (X is invariant and in_group not supported by ldap source)
- """
- self._test('Any X WHERE X is CWUser, X in_group G, G name "users"',
- [('OneFetchStep', [('Any X WHERE X is CWUser, X in_group G, G name "users"',
- [{'X': 'CWUser', 'G': 'CWGroup'}])],
- None, None, [self.system], {}, [])])
-
- def test_complex_base(self):
- """
- 1. retrieve Any X, L WHERE X is CWUser, X login L from system and ldap sources, store
- concatenation of results into a temporary table
-        2. return the result of Any X, L WHERE X is TMP, X login L, X in_group G,
- G name 'users' on the system source
- """
- self._test('Any X,L WHERE X is CWUser, X in_group G, X login L, G name "users"',
- [('FetchStep', [('Any X,L WHERE X login L, X is CWUser', [{'X': 'CWUser', 'L': 'String'}])],
- [self.ldap, self.system], None,
- {'X': 'table0.C0', 'X.login': 'table0.C1', 'L': 'table0.C1'}, []),
- ('OneFetchStep', [('Any X,L WHERE X in_group G, X login L, G name "users", G is CWGroup, X is CWUser',
- [{'X': 'CWUser', 'L': 'String', 'G': 'CWGroup'}])],
- None, None, [self.system],
- {'X': 'table0.C0', 'X.login': 'table0.C1', 'L': 'table0.C1'}, [])
- ])
-
- def test_complex_base_limit_offset(self):
- """
- 1. retrieve Any X, L WHERE X is CWUser, X login L from system and ldap sources, store
- concatenation of results into a temporary table
-        2. return the result of Any X, L WHERE X is TMP, X login L, X in_group G,
- G name 'users' on the system source
- """
- self._test('Any X,L LIMIT 10 OFFSET 10 WHERE X is CWUser, X in_group G, X login L, G name "users"',
- [('FetchStep', [('Any X,L WHERE X login L, X is CWUser', [{'X': 'CWUser', 'L': 'String'}])],
- [self.ldap, self.system], None,
- {'X': 'table0.C0', 'X.login': 'table0.C1', 'L': 'table0.C1'}, []),
- ('OneFetchStep', [('Any X,L LIMIT 10 OFFSET 10 WHERE X in_group G, X login L, G name "users", G is CWGroup, X is CWUser',
- [{'X': 'CWUser', 'L': 'String', 'G': 'CWGroup'}])],
- 10, 10,
- [self.system], {'X': 'table0.C0', 'X.login': 'table0.C1', 'L': 'table0.C1'}, [])
- ])
-
- def test_complex_ordered(self):
- self._test('Any L ORDERBY L WHERE X login L',
- [('AggrStep', 'SELECT table0.C0 FROM table0\nORDER BY table0.C0', None,
- [('FetchStep', [('Any L WHERE X login L, X is CWUser',
- [{'X': 'CWUser', 'L': 'String'}])],
- [self.ldap, self.system], {}, {'X.login': 'table0.C0', 'L': 'table0.C0'}, []),
- ])
- ])
-
- def test_complex_ordered_limit_offset(self):
- self._test('Any L ORDERBY L LIMIT 10 OFFSET 10 WHERE X login L',
- [('AggrStep', 'SELECT table0.C0 FROM table0\nORDER BY table0.C0\nLIMIT 10\nOFFSET 10', None,
- [('FetchStep', [('Any L WHERE X login L, X is CWUser',
- [{'X': 'CWUser', 'L': 'String'}])],
- [self.ldap, self.system], {}, {'X.login': 'table0.C0', 'L': 'table0.C0'}, []),
- ])
- ])
-
- def test_complex_invariant_ordered(self):
- """
- 1. retrieve Any X,AA WHERE X modification_date AA from system and ldap sources, store
- concatenation of results into a temporary table
- 2. return the result of Any X,AA ORDERBY AA WHERE %s owned_by X, X modification_date AA
- on the system source
-
-        note: this is the plan expected by the XXX comment, not the actual result (which is correct anyway)
- """
- ueid = self.session.user.eid
- self._test('Any X,AA ORDERBY AA WHERE E eid %(x)s, E owned_by X, X modification_date AA',
- [('FetchStep',
- [('Any X,AA WHERE X modification_date AA, X is CWUser',
- [{'AA': 'Datetime', 'X': 'CWUser'}])],
- [self.ldap, self.system], None,
- {'AA': 'table0.C1', 'X': 'table0.C0', 'X.modification_date': 'table0.C1'}, []),
- ('OneFetchStep',
- [('Any X,AA ORDERBY AA WHERE %s owned_by X, X modification_date AA, X is CWUser' % ueid,
- [{'AA': 'Datetime', 'X': 'CWUser'}])],
- None, None, [self.system],
- {'AA': 'table0.C1', 'X': 'table0.C0', 'X.modification_date': 'table0.C1'}, []),
- ],
- {'x': ueid})
-
- def test_complex_invariant(self):
- """
- 1. retrieve Any X,L,AA WHERE X login L, X modification_date AA from system and ldap sources, store
- concatenation of results into a temporary table
- 2. return the result of Any X,L,AA WHERE %s owned_by X, X login L, X modification_date AA
- on the system source
- """
- ueid = self.session.user.eid
- self._test('Any X,L,AA WHERE E eid %(x)s, E owned_by X, X login L, X modification_date AA',
- [('FetchStep', [('Any X,L,AA WHERE X login L, X modification_date AA, X is CWUser',
- [{'AA': 'Datetime', 'X': 'CWUser', 'L': 'String'}])],
- [self.ldap, self.system], None,
- {'AA': 'table0.C2', 'X': 'table0.C0', 'X.login': 'table0.C1', 'X.modification_date': 'table0.C2', 'L': 'table0.C1'}, []),
- ('OneFetchStep', [('Any X,L,AA WHERE %s owned_by X, X login L, X modification_date AA, X is CWUser'%ueid,
- [{'AA': 'Datetime', 'X': 'CWUser', 'L': 'String'}])],
- None, None, [self.system],
- {'AA': 'table0.C2', 'X': 'table0.C0', 'X.login': 'table0.C1', 'X.modification_date': 'table0.C2', 'L': 'table0.C1'}, [])],
- {'x': ueid})
-
- def test_complex_ambigous(self):
- """retrieve CWUser X from system and ldap sources, Person X from system source only
- """
- self._test('Any X,F WHERE X firstname F',
- [('UnionStep', None, None, [
- ('OneFetchStep', [('Any X,F WHERE X firstname F, X is CWUser',
- [{'X': 'CWUser', 'F': 'String'}])],
- None, None, [self.ldap, self.system], {}, []),
- ('OneFetchStep', [('Any X,F WHERE X firstname F, X is Personne',
- [{'X': 'Personne', 'F': 'String'}])],
- None, None, [self.system], {}, []),
- ]),
- ])
-
- def test_complex_ambigous_limit_offset(self):
- """retrieve CWUser X from system and ldap sources, Person X from system source only
- """
- self._test('Any X,F LIMIT 10 OFFSET 10 WHERE X firstname F',
- [('UnionStep', 10, 10, [
- ('OneFetchStep', [('Any X,F WHERE X firstname F, X is CWUser',
- [{'X': 'CWUser', 'F': 'String'}])],
- None, None,
- [self.ldap, self.system], {}, []),
- ('OneFetchStep', [('Any X,F WHERE X firstname F, X is Personne',
- [{'X': 'Personne', 'F': 'String'}])],
- None, None, [self.system], {}, []),
- ]),
- ])
-
- def test_complex_ambigous_ordered(self):
- """
-        1. retrieve CWUser X from system and ldap sources, Personne X from system source only, store
- each result in the same temp table
- 2. return content of the table sorted
- """
- self._test('Any X,F ORDERBY F WHERE X firstname F',
- [('AggrStep', 'SELECT table0.C0, table0.C1 FROM table0\nORDER BY table0.C1', None,
- [('FetchStep', [('Any X,F WHERE X firstname F, X is CWUser',
- [{'X': 'CWUser', 'F': 'String'}])],
- [self.ldap, self.system], {},
- {'X': 'table0.C0', 'X.firstname': 'table0.C1', 'F': 'table0.C1'}, []),
- ('FetchStep', [('Any X,F WHERE X firstname F, X is Personne',
- [{'X': 'Personne', 'F': 'String'}])],
- [self.system], {},
- {'X': 'table0.C0', 'X.firstname': 'table0.C1', 'F': 'table0.C1'}, []),
- ]),
- ])
-
- def test_complex_multiple(self):
- """
- 1. retrieve Any X,A,Y,B WHERE X login A, Y login B from system and ldap sources, store
- cartesian product of results into a temporary table
- 2. return the result of Any X,Y WHERE X login 'syt', Y login 'adim'
- on the system source
- """
- ueid = self.session.user.eid
- self._test('Any X,Y WHERE X login "syt", Y login "adim"',
- [('FetchStep',
- [('Any X WHERE X login "syt", X is CWUser', [{'X': 'CWUser'}])],
- [self.ldap, self.system], None,
- {'X': 'table0.C0'}, []),
- ('FetchStep',
- [('Any Y WHERE Y login "adim", Y is CWUser', [{'Y': 'CWUser'}])],
- [self.ldap, self.system], None,
- {'Y': 'table1.C0'}, []),
- ('OneFetchStep',
- [('Any X,Y WHERE X is CWUser, Y is CWUser', [{'X': 'CWUser', 'Y': 'CWUser'}])],
- None, None, [self.system],
- {'X': 'table0.C0', 'Y': 'table1.C0'}, [])
- ], {'x': ueid})
-
- def test_complex_multiple_limit_offset(self):
- """
- 1. retrieve Any X,A,Y,B WHERE X login A, Y login B from system and ldap sources, store
- cartesian product of results into a temporary table
- 2. return the result of Any X,Y WHERE X login 'syt', Y login 'adim'
- on the system source
- """
- self._test('Any X,Y LIMIT 10 OFFSET 10 WHERE X login "syt", Y login "adim"',
- [('FetchStep',
- [('Any X WHERE X login "syt", X is CWUser', [{'X': 'CWUser'}])],
- [self.ldap, self.system], None, {'X': 'table0.C0'}, []),
- ('FetchStep',
- [('Any Y WHERE Y login "adim", Y is CWUser', [{'Y': 'CWUser'}])],
- [self.ldap, self.system], None, {'Y': 'table1.C0'}, []),
- ('OneFetchStep',
- [('Any X,Y LIMIT 10 OFFSET 10 WHERE X is CWUser, Y is CWUser', [{'X': 'CWUser', 'Y': 'CWUser'}])],
- 10, 10, [self.system],
- {'X': 'table0.C0', 'Y': 'table1.C0'}, [])
- ])
-
- def test_complex_aggregat(self):
- self._test('Any MAX(X)',
- [('OneFetchStep',
- [('Any MAX(X)', X_ALL_SOLS)],
- None, None, [self.system], {}, [])
- ])
-
- def test_complex_typed_aggregat(self):
- self._test('Any MAX(X) WHERE X is Card',
- [('AggrStep', 'SELECT MAX(table0.C0) FROM table0', None,
- [('FetchStep',
- [('Any MAX(X) WHERE X is Card', [{'X': 'Card'}])],
- [self.cards, self.system], {}, {'MAX(X)': 'table0.C0'}, [])
- ])
- ])
-
- def test_complex_greater_eid(self):
- self._test('Any X WHERE X eid > 12',
- [('OneFetchStep',
- [('Any X WHERE X eid > 12', X_ALL_SOLS)],
- None, None, [self.system], {}, [])
- ])
-
- def test_complex_greater_typed_eid(self):
- self._test('Any X WHERE X eid > 12, X is Card',
- [('OneFetchStep',
- [('Any X WHERE X eid > 12, X is Card', [{'X': 'Card'}])],
- None, None, [self.system], {}, [])
- ])
-
- def test_complex_optional(self):
- ueid = self.session.user.eid
- self._test('Any U WHERE WF wf_info_for X, X eid %(x)s, WF owned_by U?, WF from_state FS',
- [('OneFetchStep', [('Any U WHERE WF wf_info_for %s, WF owned_by U?, WF from_state FS' % ueid,
- [{'WF': 'TrInfo', 'FS': 'State', 'U': 'CWUser'}])],
- None, None, [self.system], {}, [])],
- {'x': ueid})
-
-
- def test_3sources_ambigous(self):
- self._test('Any X,T WHERE X owned_by U, U login "syt", X title T, X is IN(Bookmark, Card, EmailThread)',
- [('FetchStep', [('Any X,T WHERE X title T, X is Card', [{'X': 'Card', 'T': 'String'}])],
- [self.cards, self.system], None,
- {'T': 'table0.C1', 'X': 'table0.C0', 'X.title': 'table0.C1'}, []),
- ('FetchStep', [('Any U WHERE U login "syt", U is CWUser', [{'U': 'CWUser'}])],
- [self.ldap, self.system], None,
- {'U': 'table1.C0'}, []),
- ('UnionStep', None, None, [
- ('OneFetchStep', [('Any X,T WHERE X owned_by U, X title T, U is CWUser, X is IN(Bookmark, EmailThread)',
- [{'T': 'String', 'U': 'CWUser', 'X': 'Bookmark'},
- {'T': 'String', 'U': 'CWUser', 'X': 'EmailThread'}])],
- None, None, [self.system], {'U': 'table1.C0'}, []),
- ('OneFetchStep', [('Any X,T WHERE X owned_by U, X title T, U is CWUser, X is Card',
- [{'X': 'Card', 'U': 'CWUser', 'T': 'String'}])],
- None, None, [self.system],
- {'X': 'table0.C0', 'X.title': 'table0.C1', 'T': 'table0.C1', 'U': 'table1.C0'}, []),
- ]),
- ])
-
- def test_restricted_max(self):
- # dumb query to emulate the one generated by svnfile.entities.rql_revision_content
- self._test('Any V, MAX(VR) WHERE V is Card, V creation_date VR, '
- '(V creation_date TODAY OR (V creation_date < TODAY AND NOT EXISTS('
- 'X is Card, X creation_date < TODAY, X creation_date >= VR)))',
- [('FetchStep', [('Any VR WHERE X creation_date < TODAY, X creation_date VR, X is Card',
- [{'X': 'Card', 'VR': 'Datetime'}])],
- [self.cards, self.system], None,
- {'VR': 'table0.C0', 'X.creation_date': 'table0.C0'}, []),
- ('FetchStep', [('Any V,VR WHERE V creation_date VR, V is Card',
- [{'VR': 'Datetime', 'V': 'Card'}])],
- [self.cards, self.system], None,
- {'VR': 'table1.C1', 'V': 'table1.C0', 'V.creation_date': 'table1.C1'}, []),
- ('OneFetchStep', [('Any V,MAX(VR) WHERE V creation_date VR, (V creation_date TODAY) OR (V creation_date < TODAY, NOT EXISTS(X creation_date >= VR, X is Card)), V is Card',
- [{'X': 'Card', 'VR': 'Datetime', 'V': 'Card'}])],
- None, None, [self.system],
- {'VR': 'table1.C1', 'V': 'table1.C0', 'V.creation_date': 'table1.C1', 'X.creation_date': 'table0.C0'}, [])
- ])
-
- def test_outer_supported_rel1(self):
-        # both system and rql sources support all the variables, so a single step can be sent to both sources
- self._test('Any X, R WHERE X is Note, X in_state S, X type R, '
- 'NOT EXISTS(Y is Note, Y in_state S, Y type R, X identity Y)',
- [('OneFetchStep', [('Any X,R WHERE X is Note, X in_state S, X type R, NOT EXISTS(Y is Note, Y in_state S, Y type R, X identity Y), S is State',
- [{'Y': 'Note', 'X': 'Note', 'S': 'State', 'R': 'String'}])],
- None, None,
- [self.cards, self.system], {}, [])
- ])
-
- def test_not_identity(self):
- ueid = self.session.user.eid
- self._test('Any X WHERE NOT X identity U, U eid %s, X is CWUser' % ueid,
- [('OneFetchStep',
- [('Any X WHERE NOT X identity %s, X is CWUser' % ueid, [{'X': 'CWUser'}])],
- None, None,
- [self.ldap, self.system], {}, [])
- ])
-
- def test_outer_supported_rel2(self):
- self._test('Any X, MAX(R) GROUPBY X WHERE X in_state S, X login R, '
- 'NOT EXISTS(Y is Note, Y in_state S, Y type R)',
- [('FetchStep', [('Any A,R WHERE Y in_state A, Y type R, A is State, Y is Note',
- [{'Y': 'Note', 'A': 'State', 'R': 'String'}])],
- [self.cards, self.system], None,
- {'A': 'table0.C0', 'R': 'table0.C1', 'Y.type': 'table0.C1'}, []),
- ('FetchStep', [('Any X,R WHERE X login R, X is CWUser', [{'X': 'CWUser', 'R': 'String'}])],
- [self.ldap, self.system], None,
- {'X': 'table1.C0', 'X.login': 'table1.C1', 'R': 'table1.C1'}, []),
- ('OneFetchStep', [('Any X,MAX(R) GROUPBY X WHERE X in_state S, X login R, NOT EXISTS(Y type R, S identity A, A is State, Y is Note), S is State, X is CWUser',
- [{'Y': 'Note', 'X': 'CWUser', 'S': 'State', 'R': 'String', 'A': 'State'}])],
- None, None, [self.system],
- {'A': 'table0.C0', 'X': 'table1.C0', 'X.login': 'table1.C1', 'R': 'table1.C1', 'Y.type': 'table0.C1'}, [])
- ])
-
- def test_security_has_text(self):
- # use a guest user
- self.session = self.user_groups_session('guests')
- ueid = self.session.user.eid
- self._test('Any X WHERE X has_text "bla"',
- [('FetchStep', [('Any E WHERE E type "X", E is Note', [{'E': 'Note'}])],
- [self.cards, self.system], None, {'E': 'table0.C0'}, []),
- ('UnionStep', None, None,
- [('OneFetchStep',
- [(u'Any X WHERE X has_text "bla", (EXISTS(X owned_by %(ueid)s)) OR ((((EXISTS(D concerne C?, C owned_by %(ueid)s, C type "X", X identity D, C is Division, D is Affaire)) OR (EXISTS(H concerne G?, G owned_by %(ueid)s, G type "X", X identity H, G is SubDivision, H is Affaire))) OR (EXISTS(I concerne F?, F owned_by %(ueid)s, F type "X", X identity I, F is Societe, I is Affaire))) OR (EXISTS(J concerne E?, E owned_by %(ueid)s, X identity J, E is Note, J is Affaire))), X is Affaire' % {'ueid': ueid},
- [{'C': 'Division', 'E': 'Note', 'D': 'Affaire', 'G': 'SubDivision', 'F': 'Societe', 'I': 'Affaire', 'H': 'Affaire', 'J': 'Affaire', 'X': 'Affaire'}])],
- None, None, [self.system], {'E': 'table0.C0'}, []),
- ('OneFetchStep',
- [('Any X WHERE X has_text "bla", EXISTS(X owned_by %s), X is IN(Basket, CWUser)' % ueid,
- [{'X': 'Basket'}, {'X': 'CWUser'}]),
- ('Any X WHERE X has_text "bla", X is IN(Card, Comment, Division, Email, EmailThread, File, Folder, Note, Personne, Societe, SubDivision, Tag)',
- [{'X': 'Card'}, {'X': 'Comment'},
- {'X': 'Division'}, {'X': 'Email'}, {'X': 'EmailThread'},
- {'X': 'File'}, {'X': 'Folder'},
- {'X': 'Note'}, {'X': 'Personne'}, {'X': 'Societe'},
- {'X': 'SubDivision'}, {'X': 'Tag'}]),],
- None, None, [self.system], {}, []),
- ])
- ])
-
- def test_security_has_text_limit_offset(self):
- # use a guest user
- self.session = self.user_groups_session('guests')
- ueid = self.session.user.eid
- # note: same as the above query but because of the subquery usage, the
- # display differs (not printing solutions for each union)
- self._test('Any X LIMIT 10 OFFSET 10 WHERE X has_text "bla"',
- [('FetchStep', [('Any E WHERE E type "X", E is Note', [{'E': 'Note'}])],
- [self.cards, self.system], None, {'E': 'table1.C0'}, []),
- ('UnionFetchStep', [
- ('FetchStep', [('Any X WHERE X has_text "bla", (EXISTS(X owned_by %(ueid)s)) OR ((((EXISTS(D concerne C?, C owned_by %(ueid)s, C type "X", X identity D, C is Division, D is Affaire)) OR (EXISTS(H concerne G?, G owned_by %(ueid)s, G type "X", X identity H, G is SubDivision, H is Affaire))) OR (EXISTS(I concerne F?, F owned_by %(ueid)s, F type "X", X identity I, F is Societe, I is Affaire))) OR (EXISTS(J concerne E?, E owned_by %(ueid)s, X identity J, E is Note, J is Affaire))), X is Affaire' % {'ueid': ueid},
- [{'C': 'Division', 'E': 'Note', 'D': 'Affaire', 'G': 'SubDivision', 'F': 'Societe', 'I': 'Affaire', 'H': 'Affaire', 'J': 'Affaire', 'X': 'Affaire'}])],
- [self.system], {'E': 'table1.C0'}, {'X': 'table0.C0'}, []),
- ('FetchStep',
- [('Any X WHERE X has_text "bla", EXISTS(X owned_by %s), X is IN(Basket, CWUser)' % ueid,
- [{'X': 'Basket'}, {'X': 'CWUser'}]),
- ('Any X WHERE X has_text "bla", X is IN(Card, Comment, Division, Email, EmailThread, File, Folder, Note, Personne, Societe, SubDivision, Tag)',
- [{'X': 'Card'}, {'X': 'Comment'},
- {'X': 'Division'}, {'X': 'Email'}, {'X': 'EmailThread'},
- {'X': 'File'}, {'X': 'Folder'},
- {'X': 'Note'}, {'X': 'Personne'}, {'X': 'Societe'},
- {'X': 'SubDivision'}, {'X': 'Tag'}])],
- [self.system], {}, {'X': 'table0.C0'}, []),
- ]),
- ('OneFetchStep',
- [('Any X LIMIT 10 OFFSET 10',
- [{'X': 'Affaire'}, {'X': 'Basket'},
- {'X': 'CWUser'}, {'X': 'Card'}, {'X': 'Comment'},
- {'X': 'Division'}, {'X': 'Email'}, {'X': 'EmailThread'},
- {'X': 'File'}, {'X': 'Folder'},
- {'X': 'Note'}, {'X': 'Personne'}, {'X': 'Societe'},
- {'X': 'SubDivision'}, {'X': 'Tag'}])],
- 10, 10, [self.system], {'X': 'table0.C0'}, [])
- ])
-
- def test_security_user(self):
- """a guest user trying to see another user: EXISTS(X owned_by U) is automatically inserted"""
- # use a guest user
- self.session = self.user_groups_session('guests')
- ueid = self.session.user.eid
- self._test('Any X WHERE X login "bla"',
- [('FetchStep',
- [('Any X WHERE X login "bla", X is CWUser', [{'X': 'CWUser'}])],
- [self.ldap, self.system], None, {'X': 'table0.C0'}, []),
- ('OneFetchStep',
- [('Any X WHERE EXISTS(X owned_by %s), X is CWUser' % ueid, [{'X': 'CWUser'}])],
- None, None, [self.system], {'X': 'table0.C0'}, [])])
-
- def test_security_complex_has_text(self):
- # use a guest user
- self.session = self.user_groups_session('guests')
- ueid = self.session.user.eid
- self._test('Any X WHERE X has_text "bla", X firstname "bla"',
- [('FetchStep', [('Any X WHERE X firstname "bla", X is CWUser', [{'X': 'CWUser'}])],
- [self.ldap, self.system], None, {'X': 'table0.C0'}, []),
- ('UnionStep', None, None, [
- ('OneFetchStep', [('Any X WHERE X has_text "bla", EXISTS(X owned_by %s), X is CWUser' % ueid, [{'X': 'CWUser'}])],
- None, None, [self.system], {'X': 'table0.C0'}, []),
- ('OneFetchStep', [('Any X WHERE X has_text "bla", X firstname "bla", X is Personne', [{'X': 'Personne'}])],
- None, None, [self.system], {}, []),
- ]),
- ])
-
- def test_security_complex_has_text_limit_offset(self):
- # use a guest user
- self.session = self.user_groups_session('guests')
- ueid = self.session.user.eid
- self._test('Any X LIMIT 10 OFFSET 10 WHERE X has_text "bla", X firstname "bla"',
- [('FetchStep', [('Any X WHERE X firstname "bla", X is CWUser', [{'X': 'CWUser'}])],
- [self.ldap, self.system], None, {'X': 'table1.C0'}, []),
- ('UnionFetchStep', [
- ('FetchStep', [('Any X WHERE X has_text "bla", EXISTS(X owned_by %s), X is CWUser' % ueid, [{'X': 'CWUser'}])],
- [self.system], {'X': 'table1.C0'}, {'X': 'table0.C0'}, []),
- ('FetchStep', [('Any X WHERE X has_text "bla", X firstname "bla", X is Personne', [{'X': 'Personne'}])],
- [self.system], {}, {'X': 'table0.C0'}, []),
- ]),
- ('OneFetchStep',
- [('Any X LIMIT 10 OFFSET 10', [{'X': 'CWUser'}, {'X': 'Personne'}])],
- 10, 10, [self.system], {'X': 'table0.C0'}, [])
- ])
-
- def test_security_complex_aggregat(self):
- # use a guest user
- self.session = self.user_groups_session('guests')
- ueid = self.session.user.eid
- ALL_SOLS = X_ALL_SOLS[:]
- ALL_SOLS.remove({'X': 'CWSourceHostConfig'}) # not authorized
- ALL_SOLS.remove({'X': 'CWSourceSchemaConfig'}) # not authorized
- ALL_SOLS.remove({'X': 'CWDataImport'}) # not authorized
- self._test('Any MAX(X)',
- [('FetchStep', [('Any E WHERE E type "X", E is Note', [{'E': 'Note'}])],
- [self.cards, self.system], None, {'E': 'table1.C0'}, []),
- ('FetchStep', [('Any X WHERE X is IN(CWUser)', [{'X': 'CWUser'}])],
- [self.ldap, self.system], None, {'X': 'table2.C0'}, []),
- ('UnionFetchStep', [
- ('FetchStep', [('Any X WHERE EXISTS(%s use_email X), X is EmailAddress' % ueid,
- [{'X': 'EmailAddress'}])],
- [self.system], {}, {'X': 'table0.C0'}, []),
- ('UnionFetchStep',
- [('FetchStep', [('Any X WHERE X is IN(Card, Note, State)',
- [{'X': 'Card'}, {'X': 'Note'}, {'X': 'State'}])],
- [self.cards, self.system], {}, {'X': 'table0.C0'}, []),
- ('FetchStep',
- [('Any X WHERE X is IN(BaseTransition, Bookmark, CWAttribute, CWCache, CWConstraint, CWConstraintType, CWEType, CWGroup, CWPermission, CWProperty, CWRType, CWRelation, CWSource, CWUniqueTogetherConstraint, Comment, Division, Email, EmailPart, EmailThread, ExternalUri, File, Folder, Old, Personne, RQLExpression, Societe, SubDivision, SubWorkflowExitPoint, Tag, TrInfo, Transition, Workflow, WorkflowTransition)',
- [{'X': 'BaseTransition'}, {'X': 'Bookmark'},
- {'X': 'CWAttribute'}, {'X': 'CWCache'},
- {'X': 'CWConstraint'}, {'X': 'CWConstraintType'},
- {'X': 'CWEType'}, {'X': 'CWGroup'},
- {'X': 'CWPermission'}, {'X': 'CWProperty'},
- {'X': 'CWRType'}, {'X': 'CWRelation'},
- {'X': 'CWSource'},
- {'X': 'CWUniqueTogetherConstraint'},
- {'X': 'Comment'}, {'X': 'Division'},
- {'X': 'Email'},
- {'X': 'EmailPart'}, {'X': 'EmailThread'},
- {'X': 'ExternalUri'}, {'X': 'File'},
- {'X': 'Folder'}, {'X': 'Old'},
- {'X': 'Personne'}, {'X': 'RQLExpression'},
- {'X': 'Societe'}, {'X': 'SubDivision'},
- {'X': 'SubWorkflowExitPoint'}, {'X': 'Tag'},
- {'X': 'TrInfo'}, {'X': 'Transition'},
- {'X': 'Workflow'}, {'X': 'WorkflowTransition'}])],
- [self.system], {}, {'X': 'table0.C0'}, []),
- ]),
- ('FetchStep', [('Any X WHERE (EXISTS(X owned_by %(ueid)s)) OR ((((EXISTS(D concerne C?, C owned_by %(ueid)s, C type "X", X identity D, C is Division, D is Affaire)) OR (EXISTS(H concerne G?, G owned_by %(ueid)s, G type "X", X identity H, G is SubDivision, H is Affaire))) OR (EXISTS(I concerne F?, F owned_by %(ueid)s, F type "X", X identity I, F is Societe, I is Affaire))) OR (EXISTS(J concerne E?, E owned_by %(ueid)s, X identity J, E is Note, J is Affaire))), X is Affaire' % {'ueid': ueid},
- [{'C': 'Division', 'E': 'Note', 'D': 'Affaire', 'G': 'SubDivision', 'F': 'Societe', 'I': 'Affaire', 'H': 'Affaire', 'J': 'Affaire', 'X': 'Affaire'}])],
- [self.system], {'E': 'table1.C0'}, {'X': 'table0.C0'}, []),
- ('UnionFetchStep', [
- ('FetchStep', [('Any X WHERE EXISTS(X owned_by %s), X is Basket' % ueid,
- [{'X': 'Basket'}])],
- [self.system], {}, {'X': 'table0.C0'}, []),
- ('FetchStep', [('Any X WHERE EXISTS(X owned_by %s), X is CWUser' % ueid,
- [{'X': 'CWUser'}])],
- [self.system], {'X': 'table2.C0'}, {'X': 'table0.C0'}, []),
- ]),
- ]),
- ('OneFetchStep', [('Any MAX(X)', ALL_SOLS)],
- None, None, [self.system], {'X': 'table0.C0'}, [])
- ])
-
- def test_security_complex_aggregat2(self):
- # use a guest user
- self.session = self.user_groups_session('guests')
- ueid = self.session.user.eid
- X_ET_ALL_SOLS = []
-        for sol in X_ALL_SOLS:  # 'sol' avoids shadowing the set generator 's'
-            if sol in ({'X': 'CWSourceHostConfig'}, {'X': 'CWSourceSchemaConfig'}, {'X': 'CWDataImport'}):
-                continue # not authorized
-            ets = {'ET': 'CWEType'}
-            ets.update(sol)
-            X_ET_ALL_SOLS.append(ets)
- self._test('Any ET, COUNT(X) GROUPBY ET ORDERBY ET WHERE X is ET',
- [('FetchStep', [('Any X WHERE X is IN(Card, Note, State)',
- [{'X': 'Card'}, {'X': 'Note'}, {'X': 'State'}])],
- [self.cards, self.system], None, {'X': 'table1.C0'}, []),
- ('FetchStep', [('Any E WHERE E type "X", E is Note', [{'E': 'Note'}])],
- [self.cards, self.system], None, {'E': 'table2.C0'}, []),
- ('FetchStep', [('Any X WHERE X is IN(CWUser)', [{'X': 'CWUser'}])],
- [self.ldap, self.system], None, {'X': 'table3.C0'}, []),
- ('UnionFetchStep',
- [('FetchStep', [('Any ET,X WHERE X is ET, EXISTS(%s use_email X), ET is CWEType, X is EmailAddress' % ueid,
- [{'ET': 'CWEType', 'X': 'EmailAddress'}]),
- ],
- [self.system], {}, {'ET': 'table0.C0', 'X': 'table0.C1'}, []),
- # extra UnionFetchStep could be avoided but has no cost, so don't care
- ('UnionFetchStep',
- [('FetchStep', [('Any ET,X WHERE X is ET, ET is CWEType, X is IN(BaseTransition, Bookmark, CWAttribute, CWCache, CWConstraint, CWConstraintType, CWEType, CWGroup, CWPermission, CWProperty, CWRType, CWRelation, CWSource, CWUniqueTogetherConstraint, Comment, Division, Email, EmailPart, EmailThread, ExternalUri, File, Folder, Old, Personne, RQLExpression, Societe, SubDivision, SubWorkflowExitPoint, Tag, TrInfo, Transition, Workflow, WorkflowTransition)',
- [{'X': 'BaseTransition', 'ET': 'CWEType'},
- {'X': 'Bookmark', 'ET': 'CWEType'}, {'X': 'CWAttribute', 'ET': 'CWEType'},
- {'X': 'CWCache', 'ET': 'CWEType'}, {'X': 'CWConstraint', 'ET': 'CWEType'},
- {'X': 'CWConstraintType', 'ET': 'CWEType'},
- {'X': 'CWEType', 'ET': 'CWEType'},
- {'X': 'CWGroup', 'ET': 'CWEType'}, {'X': 'CWPermission', 'ET': 'CWEType'},
- {'X': 'CWProperty', 'ET': 'CWEType'}, {'X': 'CWRType', 'ET': 'CWEType'},
- {'X': 'CWSource', 'ET': 'CWEType'},
- {'X': 'CWRelation', 'ET': 'CWEType'},
- {'X': 'CWUniqueTogetherConstraint', 'ET': 'CWEType'},
- {'X': 'Comment', 'ET': 'CWEType'},
- {'X': 'Division', 'ET': 'CWEType'}, {'X': 'Email', 'ET': 'CWEType'},
- {'X': 'EmailPart', 'ET': 'CWEType'},
- {'X': 'EmailThread', 'ET': 'CWEType'}, {'X': 'ExternalUri', 'ET': 'CWEType'},
- {'X': 'File', 'ET': 'CWEType'}, {'X': 'Folder', 'ET': 'CWEType'},
- {'X': 'Old', 'ET': 'CWEType'}, {'X': 'Personne', 'ET': 'CWEType'},
- {'X': 'RQLExpression', 'ET': 'CWEType'}, {'X': 'Societe', 'ET': 'CWEType'},
- {'X': 'SubDivision', 'ET': 'CWEType'}, {'X': 'SubWorkflowExitPoint', 'ET': 'CWEType'},
- {'X': 'Tag', 'ET': 'CWEType'}, {'X': 'TrInfo', 'ET': 'CWEType'},
- {'X': 'Transition', 'ET': 'CWEType'}, {'X': 'Workflow', 'ET': 'CWEType'},
- {'X': 'WorkflowTransition', 'ET': 'CWEType'}])],
- [self.system], {}, {'ET': 'table0.C0', 'X': 'table0.C1'}, []),
- ('FetchStep',
- [('Any ET,X WHERE X is ET, ET is CWEType, X is IN(Card, Note, State)',
- [{'ET': 'CWEType', 'X': 'Card'},
- {'ET': 'CWEType', 'X': 'Note'},
- {'ET': 'CWEType', 'X': 'State'}])],
- [self.system], {'X': 'table1.C0'}, {'ET': 'table0.C0', 'X': 'table0.C1'}, []),
- ]),
-
- ('FetchStep', [('Any ET,X WHERE X is ET, (EXISTS(X owned_by %(ueid)s)) OR ((((EXISTS(D concerne C?, C owned_by %(ueid)s, C type "X", X identity D, C is Division, D is Affaire)) OR (EXISTS(H concerne G?, G owned_by %(ueid)s, G type "X", X identity H, G is SubDivision, H is Affaire))) OR (EXISTS(I concerne F?, F owned_by %(ueid)s, F type "X", X identity I, F is Societe, I is Affaire))) OR (EXISTS(J concerne E?, E owned_by %(ueid)s, X identity J, E is Note, J is Affaire))), ET is CWEType, X is Affaire' % {'ueid': ueid},
- [{'C': 'Division', 'E': 'Note', 'D': 'Affaire',
- 'G': 'SubDivision', 'F': 'Societe', 'I': 'Affaire',
- 'H': 'Affaire', 'J': 'Affaire', 'X': 'Affaire',
- 'ET': 'CWEType'}])],
- [self.system], {'E': 'table2.C0'}, {'ET': 'table0.C0', 'X': 'table0.C1'},
- []),
- ('UnionFetchStep', [
- ('FetchStep', [('Any ET,X WHERE X is ET, EXISTS(X owned_by %s), ET is CWEType, X is Basket' % ueid,
- [{'ET': 'CWEType', 'X': 'Basket'}])],
- [self.system], {}, {'ET': 'table0.C0', 'X': 'table0.C1'}, []),
- ('FetchStep', [('Any ET,X WHERE X is ET, EXISTS(X owned_by %s), ET is CWEType, X is CWUser' % ueid,
- [{'ET': 'CWEType', 'X': 'CWUser'}])],
- [self.system], {'X': 'table3.C0'}, {'ET': 'table0.C0', 'X': 'table0.C1'}, []),
- ]),
- ]),
- ('OneFetchStep',
- [('Any ET,COUNT(X) GROUPBY ET ORDERBY ET', X_ET_ALL_SOLS)],
- None, None, [self.system], {'ET': 'table0.C0', 'X': 'table0.C1'}, [])
- ])
-
- def test_security_3sources(self):
- # use a guest user
- self.session = self.user_groups_session('guests')
- ueid = self.session.user.eid
- self._test('Any X, XT WHERE X is Card, X owned_by U, X title XT, U login "syt"',
- [('FetchStep',
- [('Any X,XT WHERE X title XT, X is Card', [{'X': 'Card', 'XT': 'String'}])],
- [self.cards, self.system], None, {'X': 'table0.C0', 'X.title': 'table0.C1', 'XT': 'table0.C1'}, []),
- ('FetchStep',
- [('Any U WHERE U login "syt", U is CWUser', [{'U': 'CWUser'}])],
- [self.ldap, self.system], None, {'U': 'table1.C0'}, []),
- ('OneFetchStep',
- [('Any X,XT WHERE X owned_by U, X title XT, EXISTS(U owned_by %s), U is CWUser, X is Card' % ueid,
- [{'X': 'Card', 'U': 'CWUser', 'XT': 'String'}])],
- None, None, [self.system],
- {'X': 'table0.C0', 'X.title': 'table0.C1', 'XT': 'table0.C1', 'U': 'table1.C0'}, [])
- ])
-
- def test_security_3sources_identity(self):
- self.restore_orig_cwuser_security()
- # use a guest user
- self.session = self.user_groups_session('guests')
- ueid = self.session.user.eid
- self._test('Any X, XT WHERE X is Card, X owned_by U, X title XT, U login "syt"',
- [('FetchStep',
- [('Any X,XT WHERE X title XT, X is Card', [{'X': 'Card', 'XT': 'String'}])],
- [self.cards, self.system], None, {'X': 'table0.C0', 'X.title': 'table0.C1', 'XT': 'table0.C1'}, []),
- ('OneFetchStep',
- [('Any X,XT WHERE X owned_by U, X title XT, U login "syt", EXISTS(U identity %s), U is CWUser, X is Card' % ueid,
- [{'U': 'CWUser', 'X': 'Card', 'XT': 'String'}])],
- None, None, [self.system], {'X': 'table0.C0', 'X.title': 'table0.C1', 'XT': 'table0.C1'}, [])
- ])
-
- def test_security_3sources_identity_optional_var(self):
- self.restore_orig_cwuser_security()
- # use a guest user
- self.session = self.user_groups_session('guests')
- ueid = self.session.user.eid
- self._test('Any X,XT,U WHERE X is Card, X owned_by U?, X title XT, U login L',
- [('FetchStep',
- [('Any U,L WHERE U login L, EXISTS(U identity %s), U is CWUser' % ueid,
- [{'L': 'String', u'U': 'CWUser'}])],
- [self.system], {}, {'L': 'table0.C1', 'U': 'table0.C0', 'U.login': 'table0.C1'}, []),
- ('FetchStep',
- [('Any X,XT WHERE X title XT, X is Card', [{'X': 'Card', 'XT': 'String'}])],
- [self.cards, self.system], None, {'X': 'table1.C0', 'X.title': 'table1.C1', 'XT': 'table1.C1'}, []),
- ('OneFetchStep',
- [('Any X,XT,U WHERE X owned_by U?, X title XT, X is Card',
- [{'X': 'Card', 'U': 'CWUser', 'XT': 'String'}])],
- None, None, [self.system], {'L': 'table0.C1',
- 'U': 'table0.C0',
- 'X': 'table1.C0',
- 'X.title': 'table1.C1',
- 'XT': 'table1.C1'}, [])
- ])
-
- def test_security_3sources_limit_offset(self):
- # use a guest user
- self.session = self.user_groups_session('guests')
- ueid = self.session.user.eid
- self._test('Any X, XT LIMIT 10 OFFSET 10 WHERE X is Card, X owned_by U, X title XT, U login "syt"',
- [('FetchStep',
- [('Any X,XT WHERE X title XT, X is Card', [{'X': 'Card', 'XT': 'String'}])],
- [self.cards, self.system], None, {'X': 'table0.C0', 'X.title': 'table0.C1', 'XT': 'table0.C1'}, []),
- ('FetchStep',
- [('Any U WHERE U login "syt", U is CWUser', [{'U': 'CWUser'}])],
- [self.ldap, self.system], None, {'U': 'table1.C0'}, []),
- ('OneFetchStep',
- [('Any X,XT LIMIT 10 OFFSET 10 WHERE X owned_by U, X title XT, EXISTS(U owned_by %s), U is CWUser, X is Card' % ueid,
- [{'X': 'Card', 'U': 'CWUser', 'XT': 'String'}])],
- 10, 10, [self.system],
- {'X': 'table0.C0', 'X.title': 'table0.C1', 'XT': 'table0.C1', 'U': 'table1.C0'}, [])
- ])
-
- def test_exists_base(self):
- self._test('Any X,L,S WHERE X in_state S, X login L, EXISTS(X in_group G, G name "bougloup")',
- [('FetchStep', [('Any X,L WHERE X login L, X is CWUser', [{'X': 'CWUser', 'L': 'String'}])],
- [self.ldap, self.system], None, {'X': 'table0.C0', 'X.login': 'table0.C1', 'L': 'table0.C1'}, []),
- ('OneFetchStep', [("Any X,L,S WHERE X in_state S, X login L, "
- 'EXISTS(X in_group G, G name "bougloup", G is CWGroup), S is State, X is CWUser',
- [{'X': 'CWUser', 'L': 'String', 'S': 'State', 'G': 'CWGroup'}])],
- None, None, [self.system],
- {'X': 'table0.C0', 'X.login': 'table0.C1', 'L': 'table0.C1'}, [])])
-
- def test_exists_complex(self):
- self._test('Any G WHERE X in_group G, G name "managers", EXISTS(X copain T, T login in ("comme", "cochon"))',
- [('FetchStep', [('Any T WHERE T login IN("comme", "cochon"), T is CWUser', [{'T': 'CWUser'}])],
- [self.ldap, self.system], None, {'T': 'table0.C0'}, []),
- ('OneFetchStep',
- [('Any G WHERE X in_group G, G name "managers", EXISTS(X copain T, T is CWUser), G is CWGroup, X is CWUser',
- [{'X': 'CWUser', 'T': 'CWUser', 'G': 'CWGroup'}])],
- None, None, [self.system], {'T': 'table0.C0'}, [])])
-
- def test_exists3(self):
- self._test('Any G,L WHERE X in_group G, X login L, G name "managers", EXISTS(X copain T, T login in ("comme", "cochon"))',
- [('FetchStep',
- [('Any T WHERE T login IN("comme", "cochon"), T is CWUser',
- [{'T': 'CWUser'}])],
- [self.ldap, self.system], None, {'T': 'table0.C0'}, []),
- ('FetchStep',
- [('Any L,X WHERE X login L, X is CWUser', [{'X': 'CWUser', 'L': 'String'}])],
- [self.ldap, self.system], None,
- {'X': 'table1.C1', 'X.login': 'table1.C0', 'L': 'table1.C0'}, []),
- ('OneFetchStep',
- [('Any G,L WHERE X in_group G, X login L, G name "managers", EXISTS(X copain T, T is CWUser), G is CWGroup, X is CWUser',
- [{'G': 'CWGroup', 'L': 'String', 'T': 'CWUser', 'X': 'CWUser'}])],
- None, None,
- [self.system], {'T': 'table0.C0', 'X': 'table1.C1', 'X.login': 'table1.C0', 'L': 'table1.C0'}, [])])
-
- def test_exists4(self):
- self._test('Any G,L WHERE X in_group G, X login L, G name "managers", '
- 'EXISTS(X copain T, T login L, T login in ("comme", "cochon")) OR '
- 'EXISTS(X in_state S, S name "pascontent", NOT X copain T2, T2 login "billy")',
- [('FetchStep',
- [('Any T,L WHERE T login L, T login IN("comme", "cochon"), T is CWUser', [{'T': 'CWUser', 'L': 'String'}])],
- [self.ldap, self.system], None,
- {'T': 'table0.C0', 'T.login': 'table0.C1', 'L': 'table0.C1'}, []),
- ('FetchStep',
- [('Any T2 WHERE T2 login "billy", T2 is CWUser', [{'T2': 'CWUser'}])],
- [self.ldap, self.system], None, {'T2': 'table1.C0'}, []),
- ('FetchStep',
- [('Any L,X WHERE X login L, X is CWUser', [{'X': 'CWUser', 'L': 'String'}])],
- [self.ldap, self.system], None, {'X': 'table2.C1', 'X.login': 'table2.C0', 'L': 'table2.C0'}, []),
- ('OneFetchStep',
- [('Any G,L WHERE X in_group G, X login L, G name "managers", (EXISTS(X copain T, T login L, T is CWUser)) OR (EXISTS(X in_state S, S name "pascontent", NOT EXISTS(X copain T2), S is State)), G is CWGroup, T2 is CWUser, X is CWUser',
- [{'G': 'CWGroup', 'L': 'String', 'S': 'State', 'T': 'CWUser', 'T2': 'CWUser', 'X': 'CWUser'}])],
- None, None, [self.system],
- {'T2': 'table1.C0', 'L': 'table2.C0',
- 'T': 'table0.C0', 'T.login': 'table0.C1', 'X': 'table2.C1', 'X.login': 'table2.C0'}, [])])
-
- def test_exists5(self):
- self._test('Any GN,L WHERE X in_group G, X login L, G name GN, '
- 'EXISTS(X copain T, T login in ("comme", "cochon")) AND '
- 'NOT EXISTS(X copain T2, T2 login "billy")',
- [('FetchStep', [('Any T WHERE T login IN("comme", "cochon"), T is CWUser',
- [{'T': 'CWUser'}])],
- [self.ldap, self.system], None, {'T': 'table0.C0'}, []),
- ('FetchStep', [('Any T2 WHERE T2 login "billy", T2 is CWUser', [{'T2': 'CWUser'}])],
- [self.ldap, self.system], None, {'T2': 'table1.C0'}, []),
- ('FetchStep', [('Any L,X WHERE X login L, X is CWUser', [{'X': 'CWUser', 'L': 'String'}])],
- [self.ldap, self.system], None,
- {'X': 'table2.C1', 'X.login': 'table2.C0', 'L': 'table2.C0'}, []),
- ('OneFetchStep', [('Any GN,L WHERE X in_group G, X login L, G name GN, EXISTS(X copain T, T is CWUser), NOT EXISTS(X copain T2, T2 is CWUser), G is CWGroup, X is CWUser',
- [{'G': 'CWGroup', 'GN': 'String', 'L': 'String', 'T': 'CWUser', 'T2': 'CWUser', 'X': 'CWUser'}])],
- None, None, [self.system],
- {'T': 'table0.C0', 'T2': 'table1.C0',
- 'X': 'table2.C1', 'X.login': 'table2.C0', 'L': 'table2.C0'}, [])])
-
- def test_exists_security_no_invariant(self):
- ueid = self.session.user.eid
- self._test('Any X,AA,AB,AC,AD ORDERBY AA WHERE X is CWUser, X login AA, X firstname AB, X surname AC, X modification_date AD, A eid %(B)s, \
- EXISTS(((X identity A) OR \
- (EXISTS(X in_group C, C name IN("managers", "staff"), C is CWGroup))) OR \
- (EXISTS(X in_group D, A in_group D, NOT D name "users", D is CWGroup)))',
- [('FetchStep', [('Any X,AA,AB,AC,AD WHERE X login AA, X firstname AB, X surname AC, X modification_date AD, X is CWUser',
- [{'AA': 'String', 'AB': 'String', 'AC': 'String', 'AD': 'Datetime',
- 'X': 'CWUser'}])],
- [self.ldap, self.system], None, {'AA': 'table0.C1', 'AB': 'table0.C2',
- 'AC': 'table0.C3', 'AD': 'table0.C4',
- 'X': 'table0.C0',
- 'X.firstname': 'table0.C2',
- 'X.login': 'table0.C1',
- 'X.modification_date': 'table0.C4',
- 'X.surname': 'table0.C3'}, []),
- ('OneFetchStep', [('Any X,AA,AB,AC,AD ORDERBY AA WHERE X login AA, X firstname AB, X surname AC, X modification_date AD, EXISTS(((X identity %(ueid)s) OR (EXISTS(X in_group C, C name IN("managers", "staff"), C is CWGroup))) OR (EXISTS(X in_group D, %(ueid)s in_group D, NOT D name "users", D is CWGroup))), X is CWUser' % {'ueid': ueid},
- [{'AA': 'String', 'AB': 'String', 'AC': 'String', 'AD': 'Datetime',
- 'C': 'CWGroup', 'D': 'CWGroup', 'X': 'CWUser'}])],
- None, None, [self.system],
- {'AA': 'table0.C1', 'AB': 'table0.C2', 'AC': 'table0.C3', 'AD': 'table0.C4',
- 'X': 'table0.C0',
- 'X.firstname': 'table0.C2', 'X.login': 'table0.C1', 'X.modification_date': 'table0.C4', 'X.surname': 'table0.C3'},
- [])],
- {'B': ueid})
-
- def test_relation_need_split(self):
- self._test('Any X, S WHERE X in_state S',
- [('UnionStep', None, None, [
- ('OneFetchStep', [('Any X,S WHERE X in_state S, S is State, X is IN(Affaire, CWUser)',
- [{'X': 'Affaire', 'S': 'State'}, {'X': 'CWUser', 'S': 'State'}])],
- None, None, [self.system], {}, []),
- ('OneFetchStep', [('Any X,S WHERE X in_state S, S is State, X is Note',
- [{'X': 'Note', 'S': 'State'}])],
- None, None, [self.cards, self.system], {}, []),
- ])])
-
- def test_relation_selection_need_split(self):
- self._test('Any X,S,U WHERE X in_state S, X todo_by U',
- [('FetchStep', [('Any X,S WHERE X in_state S, S is State, X is Note',
- [{'X': 'Note', 'S': 'State'}])],
- [self.cards, self.system], None, {'X': 'table0.C0', 'S': 'table0.C1'}, []),
- ('UnionStep', None, None,
- [('OneFetchStep', [('Any X,S,U WHERE X in_state S, X todo_by U, S is State, U is Personne, X is Affaire',
- [{'X': 'Affaire', 'S': 'State', 'U': 'Personne'}])],
- None, None, [self.system], {}, []),
- ('OneFetchStep', [('Any X,S,U WHERE X todo_by U, S is State, U is CWUser, X is Note',
- [{'X': 'Note', 'S': 'State', 'U': 'CWUser'}])],
- None, None, [self.system], {'X': 'table0.C0', 'S': 'table0.C1'}, []),
- ])
- ])
-
- def test_relation_restriction_need_split(self):
- self._test('Any X,U WHERE X in_state S, S name "pending", X todo_by U',
- [('FetchStep', [('Any X WHERE X in_state S, S name "pending", S is State, X is Note',
- [{'X': 'Note', 'S': 'State'}])],
- [self.cards, self.system], None, {'X': 'table0.C0'}, []),
- ('UnionStep', None, None,
- [('OneFetchStep', [('Any X,U WHERE X todo_by U, U is CWUser, X is Note',
- [{'X': 'Note', 'U': 'CWUser'}])],
- None, None, [self.system], {'X': 'table0.C0'}, []),
- ('OneFetchStep', [('Any X,U WHERE X in_state S, S name "pending", X todo_by U, S is State, U is Personne, X is Affaire',
- [{'S': 'State', 'U': 'Personne', 'X': 'Affaire'}])],
- None, None, [self.system], {}, [])
- ])
- ])
-
- def test_relation_restriction_ambigous_need_split(self):
- self._test('Any X,T WHERE X in_state S, S name "pending", T tags X',
- [('FetchStep', [('Any X WHERE X in_state S, S name "pending", S is State, X is Note',
- [{'X': 'Note', 'S': 'State'}])],
- [self.cards, self.system], None, {'X': 'table0.C0'}, []),
- ('UnionStep', None, None, [
- ('OneFetchStep', [('Any X,T WHERE T tags X, T is Tag, X is Note',
- [{'X': 'Note', 'T': 'Tag'}])],
- None, None,
- [self.system], {'X': 'table0.C0'}, []),
- ('OneFetchStep', [('Any X,T WHERE X in_state S, S name "pending", T tags X, S is State, T is Tag, X is IN(Affaire, CWUser)',
- [{'X': 'Affaire', 'S': 'State', 'T': 'Tag'},
- {'X': 'CWUser', 'S': 'State', 'T': 'Tag'}])],
- None, None,
- [self.system], {}, []),
- ])
- ])
-
- def test_not_relation_no_split_internal(self):
- ueid = self.session.user.eid
-        # NOT on a relation supported by both the external and system sources:
-        # we want to get all states (e.g. from both sources) which are not
-        # related to the entity with the given eid. The "NOT X in_state S,
-        # X eid %(x)s" expression is necessarily true in any source %(x)s does
-        # not come from, and will be removed during RQL generation for the
-        # external source.
- self._test('Any SN WHERE NOT X in_state S, X eid %(x)s, S name SN',
- [('OneFetchStep', [('Any SN WHERE NOT EXISTS(%s in_state S), S name SN, S is State' % ueid,
- [{'S': 'State', 'SN': 'String'}])],
- None, None, [self.cards, self.system], {}, [])],
- {'x': ueid})
-
- def test_not_relation_no_split_external(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
-        # Similar to the above test, but with an eid coming from the external
-        # source. The same plan may be used, since we won't find any record in
-        # the system source linking 999999 to a state.
- self._test('Any SN WHERE NOT X in_state S, X eid %(x)s, S name SN',
- [('OneFetchStep', [('Any SN WHERE NOT EXISTS(999999 in_state S), S name SN, S is State',
- [{'S': 'State', 'SN': 'String'}])],
- None, None, [self.cards, self.system], {}, [])],
- {'x': 999999})
-
- def test_not_relation_need_split(self):
- self._test('Any SN WHERE NOT X in_state S, S name SN',
- [('FetchStep', [('Any SN,S WHERE S name SN, S is State',
- [{'S': 'State', 'SN': 'String'}])],
- [self.cards, self.system], None, {'S': 'table0.C1', 'S.name': 'table0.C0', 'SN': 'table0.C0'},
- []),
- ('IntersectStep', None, None,
- [('OneFetchStep',
- [('Any SN WHERE NOT EXISTS(X in_state S, X is Note), S name SN, S is State',
- [{'S': 'State', 'SN': 'String', 'X': 'Note'}])],
- None, None, [self.cards, self.system], {},
- []),
- ('OneFetchStep',
- [('Any SN WHERE NOT EXISTS(X in_state S, X is IN(Affaire, CWUser)), S name SN, S is State',
- [{'S': 'State', 'SN': 'String', 'X': 'Affaire'},
- {'S': 'State', 'SN': 'String', 'X': 'CWUser'}])],
- None, None, [self.system], {'S': 'table0.C1', 'S.name': 'table0.C0', 'SN': 'table0.C0'},
- []),]
- )])
-
- def test_external_attributes_and_relation(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self._test('Any A,B,C,D WHERE A eid %(x)s,A creation_date B,A modification_date C, A todo_by D?',
- [('FetchStep', [('Any A,B,C WHERE A eid 999999, A creation_date B, A modification_date C, A is Note',
- [{'A': 'Note', 'C': 'Datetime', 'B': 'Datetime'}])],
- [self.cards], None,
- {'A': 'table0.C0', 'A.creation_date': 'table0.C1', 'A.modification_date': 'table0.C2', 'C': 'table0.C2', 'B': 'table0.C1'}, []),
- #('FetchStep', [('Any D WHERE D is CWUser', [{'D': 'CWUser'}])],
- # [self.ldap, self.system], None, {'D': 'table1.C0'}, []),
- ('OneFetchStep', [('Any A,B,C,D WHERE A creation_date B, A modification_date C, A todo_by D?, A is Note, D is CWUser',
- [{'A': 'Note', 'C': 'Datetime', 'B': 'Datetime', 'D': 'CWUser'}])],
- None, None, [self.system],
- {'A': 'table0.C0', 'A.creation_date': 'table0.C1', 'A.modification_date': 'table0.C2', 'C': 'table0.C2', 'B': 'table0.C1'}, [])],
- {'x': 999999})
-
-
- def test_simplified_var_1(self):
- ueid = self.session.user.eid
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
-        # access to the cards source is needed since X's table has to be queried because of the outer join
- self._test('Any U WHERE U in_group G, (G name IN ("managers", "logilab") OR '
- '(X require_permission P?, P name "bla", P require_group G)), X eid %(x)s, U eid %(u)s',
- [('FetchStep',
- [('Any 999999', [{}])], [self.cards],
- None, {u'%(x)s': 'table0.C0'}, []),
- ('OneFetchStep',
- [(u'Any 6 WHERE 6 in_group G, (G name IN("managers", "logilab")) OR '
- '(X require_permission P?, P name "bla", P require_group G), '
- 'G is CWGroup, P is CWPermission, X is Note',
- [{'G': 'CWGroup', 'P': 'CWPermission', 'X': 'Note'}])],
- None, None, [self.system], {u'%(x)s': 'table0.C0'}, [])],
- {'x': 999999, 'u': ueid})
-
- def test_simplified_var_2(self):
- ueid = self.session.user.eid
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
-        # no need to access the external source since X is invariant
- self._test('Any U WHERE U in_group G, (G name IN ("managers", "logilab") OR '
- '(X require_permission P, P name "bla", P require_group G)), X eid %(x)s, U eid %(u)s',
- [('OneFetchStep', [('Any %s WHERE %s in_group G, (G name IN("managers", "logilab")) OR (999999 require_permission P, P name "bla", P require_group G)' % (ueid, ueid),
- [{'G': 'CWGroup', 'P': 'CWPermission'}])],
- None, None, [self.system], {}, [])],
- {'x': 999999, 'u': ueid})
-
- def test_has_text(self):
- self._test('Card X WHERE X has_text "toto"',
- [('OneFetchStep', [('Any X WHERE X has_text "toto", X is Card',
- [{'X': 'Card'}])],
- None, None, [self.system], {}, [])])
-
- def test_has_text_3(self):
- self._test('Any X WHERE X has_text "toto", X title "zoubidou", X is IN (Card, EmailThread)',
- [('FetchStep', [(u'Any X WHERE X title "zoubidou", X is Card',
- [{'X': 'Card'}])],
- [self.cards, self.system], None, {'X': 'table0.C0'}, []),
- ('UnionStep', None, None, [
- ('OneFetchStep', [(u'Any X WHERE X has_text "toto", X is Card',
- [{'X': 'Card'}])],
- None, None, [self.system], {'X': 'table0.C0'}, []),
- ('OneFetchStep', [(u'Any X WHERE X has_text "toto", X title "zoubidou", X is EmailThread',
- [{'X': 'EmailThread'}])],
- None, None, [self.system], {}, []),
- ]),
- ])
-
- def test_has_text_orderby_rank(self):
- self._test('Any X ORDERBY FTIRANK(X) WHERE X has_text "bla", X firstname "bla"',
- [('FetchStep', [('Any X WHERE X firstname "bla", X is CWUser', [{'X': 'CWUser'}])],
- [self.ldap, self.system], None, {'X': 'table0.C0'}, []),
- ('AggrStep', 'SELECT table1.C1 FROM table1\nORDER BY table1.C0', None, [
- ('FetchStep', [('Any FTIRANK(X),X WHERE X has_text "bla", X is CWUser',
- [{'X': 'CWUser'}])],
- [self.system], {'X': 'table0.C0'}, {'FTIRANK(X)': 'table1.C0', 'X': 'table1.C1'}, []),
- ('FetchStep', [('Any FTIRANK(X),X WHERE X has_text "bla", X firstname "bla", X is Personne',
- [{'X': 'Personne'}])],
- [self.system], {}, {'FTIRANK(X)': 'table1.C0', 'X': 'table1.C1'}, []),
- ]),
- ])
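-    # Note: in the plan above, the AggrStep materializes both FetchStep
-    # results into table1 and applies the ORDER BY in SQL, since the final
-    # ordering can only be computed once rows from all sources are merged.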
-
- def test_security_has_text_orderby_rank(self):
- # use a guest user
- self.session = self.user_groups_session('guests')
- ueid = self.session.user.eid
- self._test('Any X ORDERBY FTIRANK(X) WHERE X has_text "bla", X firstname "bla"',
- [('FetchStep', [('Any X WHERE X firstname "bla", X is CWUser', [{'X': 'CWUser'}])],
- [self.ldap, self.system], None, {'X': 'table1.C0'}, []),
- ('UnionFetchStep',
- [('FetchStep', [('Any X WHERE X firstname "bla", X is Personne', [{'X': 'Personne'}])],
- [self.system], {}, {'X': 'table0.C0'}, []),
- ('FetchStep', [('Any X WHERE EXISTS(X owned_by %s), X is CWUser' % ueid, [{'X': 'CWUser'}])],
- [self.system], {'X': 'table1.C0'}, {'X': 'table0.C0'}, [])]),
- ('OneFetchStep', [('Any X ORDERBY FTIRANK(X) WHERE X has_text "bla"',
- [{'X': 'CWUser'}, {'X': 'Personne'}])],
- None, None, [self.system], {'X': 'table0.C0'}, []),
- ])
-
- def test_has_text_select_rank(self):
- self._test('Any X, FTIRANK(X) WHERE X has_text "bla", X firstname "bla"',
-                   # XXX unnecessary duplicate selection
- [('FetchStep', [('Any X,X WHERE X firstname "bla", X is CWUser', [{'X': 'CWUser'}])],
- [self.ldap, self.system], None, {'X': 'table0.C1'}, []),
- ('UnionStep', None, None, [
- ('OneFetchStep', [('Any X,FTIRANK(X) WHERE X has_text "bla", X is CWUser', [{'X': 'CWUser'}])],
- None, None, [self.system], {'X': 'table0.C1'}, []),
- ('OneFetchStep', [('Any X,FTIRANK(X) WHERE X has_text "bla", X firstname "bla", X is Personne', [{'X': 'Personne'}])],
- None, None, [self.system], {}, []),
- ]),
- ])
-
- def test_security_has_text_select_rank(self):
- # use a guest user
- self.session = self.user_groups_session('guests')
- ueid = self.session.user.eid
- self._test('Any X, FTIRANK(X) WHERE X has_text "bla", X firstname "bla"',
- [('FetchStep', [('Any X,X WHERE X firstname "bla", X is CWUser', [{'X': 'CWUser'}])],
- [self.ldap, self.system], None, {'X': 'table0.C1'}, []),
- ('UnionStep', None, None, [
- ('OneFetchStep', [('Any X,FTIRANK(X) WHERE X has_text "bla", EXISTS(X owned_by %s), X is CWUser' % ueid, [{'X': 'CWUser'}])],
- None, None, [self.system], {'X': 'table0.C1'}, []),
- ('OneFetchStep', [('Any X,FTIRANK(X) WHERE X has_text "bla", X firstname "bla", X is Personne', [{'X': 'Personne'}])],
- None, None, [self.system], {}, []),
- ]),
- ])
-
- def test_sort_func(self):
- self._test('Note X ORDERBY DUMB_SORT(RF) WHERE X type RF',
- [('AggrStep', 'SELECT table0.C0 FROM table0\nORDER BY DUMB_SORT(table0.C1)', None, [
- ('FetchStep', [('Any X,RF WHERE X type RF, X is Note',
- [{'X': 'Note', 'RF': 'String'}])],
- [self.cards, self.system], {}, {'X': 'table0.C0', 'X.type': 'table0.C1', 'RF': 'table0.C1'}, []),
- ])
- ])
-
- def test_ambigous_sort_func(self):
- self._test('Any X ORDERBY DUMB_SORT(RF) WHERE X title RF, X is IN (Bookmark, Card, EmailThread)',
- [('AggrStep', 'SELECT table0.C0 FROM table0\nORDER BY DUMB_SORT(table0.C1)', None,
- [('FetchStep', [('Any X,RF WHERE X title RF, X is Card',
- [{'X': 'Card', 'RF': 'String'}])],
- [self.cards, self.system], {},
- {'X': 'table0.C0', 'X.title': 'table0.C1', 'RF': 'table0.C1'}, []),
- ('FetchStep', [('Any X,RF WHERE X title RF, X is IN(Bookmark, EmailThread)',
- [{'RF': 'String', 'X': 'Bookmark'},
- {'RF': 'String', 'X': 'EmailThread'}])],
- [self.system], {},
- {'X': 'table0.C0', 'X.title': 'table0.C1', 'RF': 'table0.C1'}, []),
- ]),
- ])
-
- def test_attr_unification_1(self):
- self._test('Any X,Y WHERE X is Bookmark, Y is Card, X title T, Y title T',
- [('FetchStep',
- [('Any Y,T WHERE Y title T, Y is Card', [{'T': 'String', 'Y': 'Card'}])],
- [self.cards, self.system], None,
- {'T': 'table0.C1', 'Y': 'table0.C0', 'Y.title': 'table0.C1'}, []),
- ('OneFetchStep',
- [('Any X,Y WHERE X title T, Y title T, X is Bookmark, Y is Card',
- [{'T': 'String', 'X': 'Bookmark', 'Y': 'Card'}])],
- None, None, [self.system],
- {'T': 'table0.C1', 'Y': 'table0.C0', 'Y.title': 'table0.C1'}, [])
- ])
-
- def test_attr_unification_2(self):
- self._test('Any X,Y WHERE X is Note, Y is Card, X type T, Y title T',
- [('FetchStep',
- [('Any X,T WHERE X type T, X is Note', [{'T': 'String', 'X': 'Note'}])],
- [self.cards, self.system], None,
- {'T': 'table0.C1', 'X': 'table0.C0', 'X.type': 'table0.C1'}, []),
- ('FetchStep',
- [('Any Y,T WHERE Y title T, Y is Card', [{'T': 'String', 'Y': 'Card'}])],
- [self.cards, self.system], None,
- {'T': 'table1.C1', 'Y': 'table1.C0', 'Y.title': 'table1.C1'}, []),
- ('OneFetchStep',
- [('Any X,Y WHERE X type T, Y title T, X is Note, Y is Card',
- [{'T': 'String', 'X': 'Note', 'Y': 'Card'}])],
- None, None, [self.system],
- {'T': 'table1.C1',
- 'X': 'table0.C0', 'X.type': 'table0.C1',
- 'Y': 'table1.C0', 'Y.title': 'table1.C1'}, [])
- ])
-
- def test_attr_unification_neq_1(self):
- self._test('Any X,Y WHERE X is Bookmark, Y is Card, X creation_date D, Y creation_date > D',
- [('FetchStep',
- [('Any Y,D WHERE Y creation_date D, Y is Card',
- [{'D': 'Datetime', 'Y': 'Card'}])],
- [self.cards,self.system], None,
- {'D': 'table0.C1', 'Y': 'table0.C0', 'Y.creation_date': 'table0.C1'}, []),
- ('OneFetchStep',
- [('Any X,Y WHERE X creation_date D, Y creation_date > D, X is Bookmark, Y is Card',
- [{'D': 'Datetime', 'X': 'Bookmark', 'Y': 'Card'}])], None, None,
- [self.system],
- {'D': 'table0.C1', 'Y': 'table0.C0', 'Y.creation_date': 'table0.C1'}, [])
- ])
-
- def test_subquery_1(self):
- ueid = self.session.user.eid
- self._test('DISTINCT Any B,C ORDERBY C WHERE A created_by B, B login C, EXISTS(B owned_by D), D eid %(E)s '
- 'WITH A,N BEING ((Any X,N WHERE X is Tag, X name N) UNION (Any X,T WHERE X is Bookmark, X title T))',
- [('FetchStep', [('Any X,N WHERE X is Tag, X name N', [{'N': 'String', 'X': 'Tag'}]),
- ('Any X,T WHERE X is Bookmark, X title T',
- [{'T': 'String', 'X': 'Bookmark'}])],
- [self.system], {}, {'N': 'table0.C1', 'X': 'table0.C0', 'X.name': 'table0.C1'}, []),
- ('FetchStep',
- [('Any B,C WHERE B login C, B is CWUser', [{'B': 'CWUser', 'C': 'String'}])],
- [self.ldap, self.system], None, {'B': 'table1.C0', 'B.login': 'table1.C1', 'C': 'table1.C1'}, []),
- ('OneFetchStep', [('DISTINCT Any B,C ORDERBY C WHERE A created_by B, B login C, EXISTS(B owned_by %s), B is CWUser, A is IN(Bookmark, Tag)' % ueid,
- [{'A': 'Bookmark', 'B': 'CWUser', 'C': 'String'},
- {'A': 'Tag', 'B': 'CWUser', 'C': 'String'}])],
- None, None, [self.system],
- {'A': 'table0.C0',
- 'B': 'table1.C0', 'B.login': 'table1.C1',
- 'C': 'table1.C1',
- 'N': 'table0.C1'},
- [])],
- {'E': ueid})
-
- def test_subquery_2(self):
- ueid = self.session.user.eid
- self._test('DISTINCT Any B,C ORDERBY C WHERE A created_by B, B login C, EXISTS(B owned_by D), D eid %(E)s '
- 'WITH A,N BEING ((Any X,N WHERE X is Tag, X name N) UNION (Any X,T WHERE X is Card, X title T))',
- [('UnionFetchStep',
- [('FetchStep', [('Any X,N WHERE X is Tag, X name N', [{'N': 'String', 'X': 'Tag'}])],
- [self.system], {},
- {'N': 'table0.C1',
- 'T': 'table0.C1',
- 'X': 'table0.C0',
- 'X.name': 'table0.C1',
- 'X.title': 'table0.C1'}, []),
- ('FetchStep', [('Any X,T WHERE X is Card, X title T',
- [{'T': 'String', 'X': 'Card'}])],
- [self.cards, self.system], {},
- {'N': 'table0.C1',
- 'T': 'table0.C1',
- 'X': 'table0.C0',
- 'X.name': 'table0.C1',
- 'X.title': 'table0.C1'}, []),
- ]),
- ('FetchStep',
- [('Any B,C WHERE B login C, B is CWUser', [{'B': 'CWUser', 'C': 'String'}])],
- [self.ldap, self.system], None, {'B': 'table1.C0', 'B.login': 'table1.C1', 'C': 'table1.C1'}, []),
- ('OneFetchStep', [('DISTINCT Any B,C ORDERBY C WHERE A created_by B, B login C, EXISTS(B owned_by %s), B is CWUser, A is IN(Card, Tag)' % ueid,
- [{'A': 'Card', 'B': 'CWUser', 'C': 'String'},
- {'A': 'Tag', 'B': 'CWUser', 'C': 'String'}])],
- None, None, [self.system],
- {'A': 'table0.C0',
- 'B': 'table1.C0', 'B.login': 'table1.C1',
- 'C': 'table1.C1',
- 'N': 'table0.C1'},
- [])],
- {'E': ueid})
-
- def test_eid_dont_cross_relation_1(self):
- repo._type_source_cache[999999] = ('Personne', 'system', 999999, 'system')
- self._test('Any Y,YT WHERE X eid %(x)s, X fiche Y, Y title YT',
- [('OneFetchStep', [('Any Y,YT WHERE X eid 999999, X fiche Y, Y title YT',
- [{'X': 'Personne', 'Y': 'Card', 'YT': 'String'}])],
- None, None, [self.system], {}, [])],
- {'x': 999999})
-
- def test_eid_dont_cross_relation_2(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self.cards.dont_cross_relations.add('concerne')
- try:
- self._test('Any Y,S,YT,X WHERE Y concerne X, Y in_state S, X eid 999999, Y ref YT',
- [('OneFetchStep', [('Any Y,S,YT,999999 WHERE Y concerne 999999, Y in_state S, Y ref YT',
- [{'Y': 'Affaire', 'YT': 'String', 'S': 'State'}])],
- None, None, [self.system], {}, [])],
- {'x': 999999})
- finally:
- self.cards.dont_cross_relations.remove('concerne')
-
-
- # external source w/ .cross_relations == ['multisource_crossed_rel'] ######
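-    # (the fixture presumably enables crossing the same way the see_also tests
-    # below do inline, i.e. something like:
-    #   self.cards.support_relations['multisource_crossed_rel'] = True
-    #   self.cards.cross_relations.add('multisource_crossed_rel')
-    # so the relation may be queried within either source or across them)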
-
- def test_crossed_relation_eid_1_invariant(self):
- repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
- self._test('Any Y WHERE X eid %(x)s, X multisource_crossed_rel Y',
- [('OneFetchStep', [('Any Y WHERE 999999 multisource_crossed_rel Y', [{u'Y': 'Note'}])],
- None, None, [self.system], {}, [])
- ],
- {'x': 999999,})
-
- def test_crossed_relation_eid_1_needattr(self):
- repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
- self._test('Any Y,T WHERE X eid %(x)s, X multisource_crossed_rel Y, Y type T',
- [('FetchStep', [('Any Y,T WHERE Y type T, Y is Note', [{'T': 'String', 'Y': 'Note'}])],
- [self.cards, self.system], None,
- {'T': 'table0.C1', 'Y': 'table0.C0', 'Y.type': 'table0.C1'}, []),
- ('OneFetchStep', [('Any Y,T WHERE 999999 multisource_crossed_rel Y, Y type T, Y is Note',
- [{'T': 'String', 'Y': 'Note'}])],
- None, None, [self.system],
- {'T': 'table0.C1', 'Y': 'table0.C0', 'Y.type': 'table0.C1'}, []),
- ],
- {'x': 999999,})
-
- def test_crossed_relation_eid_2_invariant(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self._test('Any Y WHERE X eid %(x)s, X multisource_crossed_rel Y',
- [('OneFetchStep', [('Any Y WHERE 999999 multisource_crossed_rel Y, Y is Note', [{'Y': 'Note'}])],
- None, None, [self.cards, self.system], {}, [])
- ],
- {'x': 999999,})
-
- def test_crossed_relation_eid_2_needattr(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self._test('Any Y,T WHERE X eid %(x)s, X multisource_crossed_rel Y, Y type T',
- [('OneFetchStep', [('Any Y,T WHERE 999999 multisource_crossed_rel Y, Y type T, Y is Note',
- [{'T': 'String', 'Y': 'Note'}])],
- None, None, [self.cards, self.system], {},
- []),
- ],
- {'x': 999999,})
-
- def test_crossed_relation_eid_not_1(self):
- repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
- self._test('Any Y WHERE X eid %(x)s, NOT X multisource_crossed_rel Y',
- [('FetchStep', [('Any Y WHERE Y is Note', [{'Y': 'Note'}])],
- [self.cards, self.system], None, {'Y': 'table0.C0'}, []),
- ('OneFetchStep', [('Any Y WHERE NOT EXISTS(999999 multisource_crossed_rel Y), Y is Note',
- [{'Y': 'Note'}])],
- None, None, [self.system],
- {'Y': 'table0.C0'}, [])],
- {'x': 999999,})
-
-# def test_crossed_relation_eid_not_2(self):
-# repo._type_source_cache[999999] = ('Note', 'cards', 999999)
-# self._test('Any Y WHERE X eid %(x)s, NOT X multisource_crossed_rel Y',
-# [],
-# {'x': 999999,})
-
- def test_crossed_relation_base_XXXFIXME(self):
- repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
- self._test('Any X,Y,T WHERE X multisource_crossed_rel Y, Y type T, X type T',
- [('FetchStep', [('Any X,T WHERE X type T, X is Note', [{'T': 'String', 'X': 'Note'}])],
- [self.cards, self.system], None,
- {'T': 'table0.C1', 'X': 'table0.C0', 'X.type': 'table0.C1'}, []),
- ('FetchStep', [('Any Y,T WHERE Y type T, Y is Note', [{'T': 'String', 'Y': 'Note'}])],
- [self.cards, self.system], None,
- {'T': 'table1.C1', 'Y': 'table1.C0', 'Y.type': 'table1.C1'}, []),
- ('FetchStep', [('Any X,Y WHERE X multisource_crossed_rel Y, X is Note, Y is Note',
- [{'X': 'Note', 'Y': 'Note'}])],
- [self.cards, self.system], None,
- {'X': 'table2.C0', 'Y': 'table2.C1'},
- []),
- ('OneFetchStep', [('Any X,Y,T WHERE X multisource_crossed_rel Y, Y type T, X type T, '
- 'X is Note, Y is Note, Y identity A, X identity B, A is Note, B is Note',
- [{u'A': 'Note', u'B': 'Note', 'T': 'String', 'X': 'Note', 'Y': 'Note'}])],
- None, None,
- [self.system],
- {'A': 'table1.C0',
- 'B': 'table0.C0',
- 'T': 'table1.C1',
- 'X': 'table2.C0',
- 'X.type': 'table0.C1',
- 'Y': 'table2.C1',
- 'Y.type': 'table1.C1'},
- []),
- ],
- {'x': 999999,})
-
- def test_crossed_relation_noeid_needattr(self):
- # http://www.cubicweb.org/ticket/1382452
- self._test('DISTINCT Any DEP WHERE DEP is Note, P type "cubicweb-foo", P multisource_crossed_rel DEP, DEP type LIKE "cubicweb%"',
- [('FetchStep', [(u'Any DEP WHERE DEP type LIKE "cubicweb%", DEP is Note',
- [{'DEP': 'Note'}])],
- [self.cards, self.system], None,
- {'DEP': 'table0.C0'},
- []),
- ('FetchStep', [(u'Any P WHERE P type "cubicweb-foo", P is Note', [{'P': 'Note'}])],
- [self.cards, self.system], None, {'P': 'table1.C0'},
- []),
- ('FetchStep', [('Any DEP,P WHERE P multisource_crossed_rel DEP, DEP is Note, P is Note',
- [{'DEP': 'Note', 'P': 'Note'}])],
- [self.cards, self.system], None, {'DEP': 'table2.C0', 'P': 'table2.C1'},
- []),
- ('OneFetchStep',
- [('DISTINCT Any DEP WHERE P multisource_crossed_rel DEP, DEP is Note, '
- 'P is Note, DEP identity A, P identity B, A is Note, B is Note',
- [{u'A': 'Note', u'B': 'Note', 'DEP': 'Note', 'P': 'Note'}])],
- None, None, [self.system],
- {'A': 'table0.C0', 'B': 'table1.C0', 'DEP': 'table2.C0', 'P': 'table2.C1'},
- [])])
-
- def test_crossed_relation_noeid_invariant(self):
- # see comment in http://www.cubicweb.org/ticket/1382452
- self.schema.add_relation_def(
- RelationDefinition(subject='Note', name='multisource_crossed_rel', object='Affaire'))
- self.repo.set_schema(self.schema)
- try:
- self._test('DISTINCT Any P,DEP WHERE P type "cubicweb-foo", P multisource_crossed_rel DEP',
- [('FetchStep',
- [('Any DEP WHERE DEP is Note', [{'DEP': 'Note'}])],
- [self.cards, self.system], None, {'DEP': 'table0.C0'}, []),
- ('FetchStep',
- [(u'Any P WHERE P type "cubicweb-foo", P is Note', [{'P': 'Note'}])],
- [self.cards, self.system], None, {'P': 'table1.C0'}, []),
- ('UnionStep', None, None,
- [('OneFetchStep',
- [('DISTINCT Any P,DEP WHERE P multisource_crossed_rel DEP, DEP is Note, P is Note',
- [{'DEP': 'Note', 'P': 'Note'}])],
- None, None, [self.cards], None, []),
- ('OneFetchStep',
- [('DISTINCT Any P,DEP WHERE P multisource_crossed_rel DEP, DEP is Note, P is Note',
- [{'DEP': 'Note', 'P': 'Note'}])],
- None, None, [self.system],
- {'DEP': 'table0.C0', 'P': 'table1.C0'},
- []),
- ('OneFetchStep',
- [('DISTINCT Any P,DEP WHERE P multisource_crossed_rel DEP, DEP is Affaire, P is Note',
- [{'DEP': 'Affaire', 'P': 'Note'}])],
- None, None, [self.system], {'P': 'table1.C0'},
- [])])
- ])
- finally:
- self.schema.del_relation_def('Note', 'multisource_crossed_rel', 'Affaire')
- self.repo.set_schema(self.schema)
-
- # edition queries tests ###################################################
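-    # For write queries, the planner wraps the read part in an InsertStep /
-    # InsertRelationsStep (resp. DeleteEntitiesStep, DeleteRelationsStep,
-    # UpdateStep): the inner OneFetchStep computes the rows involved, the
-    # outer step then performs the actual write.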
-
- def test_insert_simplified_var_1(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- repo._type_source_cache[999998] = ('State', 'system', None, 'system')
- self._test('INSERT Note X: X in_state S, X type T WHERE S eid %(s)s, N eid %(n)s, N type T',
- [('InsertStep',
- [('InsertRelationsStep',
- [('OneFetchStep', [('Any T WHERE N eid 999999, N type T, N is Note',
- [{'N': 'Note', 'T': 'String'}])],
- None, None, [self.cards], {}, [])])
- ])
- ],
- {'n': 999999, 's': 999998})
-
- def test_insert_simplified_var_2(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- repo._type_source_cache[999998] = ('State', 'system', None, 'system')
- self._test('INSERT Note X: X in_state S, X type T, X migrated_from N WHERE S eid %(s)s, N eid %(n)s, N type T',
- [('InsertStep',
- [('InsertRelationsStep',
- [('OneFetchStep', [('Any T WHERE N eid 999999, N type T, N is Note',
- [{'N': 'Note', 'T': 'String'}])],
- None, None, [self.cards], {}, [])
- ])
- ])
- ],
- {'n': 999999, 's': 999998})
-
- def test_insert_simplified_var_3(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- repo._type_source_cache[999998] = ('State', 'cards', 999998, 'cards')
- self._test('INSERT Note X: X in_state S, X type T WHERE S eid %(s)s, N eid %(n)s, N type T',
- [('InsertStep',
- [('InsertRelationsStep',
- [('OneFetchStep', [('Any T WHERE N eid 999999, N type T, N is Note',
- [{'N': 'Note', 'T': 'String'}])],
- None, None, [self.cards], {}, [])]
- )]
- )],
- {'n': 999999, 's': 999998})
-
- def test_insert_simplified_var_4(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- repo._type_source_cache[999998] = ('State', 'system', None, 'system')
- self._test('INSERT Note X: X in_state S, X type "bla", X migrated_from N WHERE S eid %(s)s, N eid %(n)s',
- [('InsertStep',
- [('InsertRelationsStep',
- [('OneFetchStep',
- [('Any 999999', [{}])],
- None, None,
- [self.system], {},
- [])])]
- )],
- {'n': 999999, 's': 999998})
-
- def test_insert_simplified_var_5(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- repo._type_source_cache[999998] = ('State', 'system', None, 'system')
- self._test('INSERT Note X: X in_state S, X type "bla", X migrated_from N WHERE S eid %(s)s, N eid %(n)s, A concerne N',
- [('InsertStep',
- [('InsertRelationsStep',
- [('OneFetchStep',
- [('Any A WHERE A concerne 999999, A is Affaire',
- [{'A': 'Affaire'}])],
- None, None, [self.system], {}, []),
- ]),
- ])
- ],
- {'n': 999999, 's': 999998})
-
- def test_delete_relation1(self):
- ueid = self.session.user.eid
- self._test('DELETE X created_by Y WHERE X eid %(x)s, NOT Y eid %(y)s',
- [('DeleteRelationsStep', [
- ('OneFetchStep', [('Any %s,Y WHERE %s created_by Y, NOT Y eid %s, Y is CWUser' % (ueid, ueid, ueid),
- [{'Y': 'CWUser'}])],
- None, None, [self.system], {}, []),
- ]),
- ],
- {'x': ueid, 'y': ueid})
-
- def test_delete_relation2(self):
- ueid = self.session.user.eid
- self._test('DELETE X created_by Y WHERE X eid %(x)s, NOT Y login "syt"',
- [('FetchStep', [('Any Y WHERE NOT Y login "syt", Y is CWUser', [{'Y': 'CWUser'}])],
- [self.ldap, self.system], None, {'Y': 'table0.C0'}, []),
- ('DeleteRelationsStep', [
- ('OneFetchStep', [('Any %s,Y WHERE %s created_by Y, Y is CWUser'%(ueid,ueid), [{'Y': 'CWUser'}])],
- None, None, [self.system], {'Y': 'table0.C0'}, []),
- ]),
- ],
- {'x': ueid, 'y': ueid})
-
- def test_delete_relation3(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self.assertRaises(
- BadRQLQuery, self._test,
- 'DELETE Y multisource_inlined_rel X WHERE X eid %(x)s, '
- 'NOT (Y cw_source S, S name %(source)s)', [],
- {'x': 999999, 'source': 'cards'})
-
- def test_delete_relation4(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self.assertRaises(
- BadRQLQuery, self._test,
- 'DELETE X multisource_inlined_rel Y WHERE Y is Note, X eid %(x)s, '
- 'NOT (Y cw_source S, S name %(source)s)', [],
- {'x': 999999, 'source': 'cards'})
-
- def test_delete_entity1(self):
- repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
- self._test('DELETE Note X WHERE X eid %(x)s, NOT Y multisource_rel X',
- [('DeleteEntitiesStep',
- [('OneFetchStep', [('Any 999999 WHERE NOT EXISTS(Y multisource_rel 999999), Y is IN(Card, Note)',
- [{'Y': 'Card'}, {'Y': 'Note'}])],
- None, None, [self.system], {}, [])
- ])
- ],
- {'x': 999999})
-
- def test_delete_entity2(self):
- repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
- self._test('DELETE Note X WHERE X eid %(x)s, NOT X multisource_inlined_rel Y',
- [('DeleteEntitiesStep',
- [('OneFetchStep', [('Any X WHERE X eid 999999, NOT X multisource_inlined_rel Y, X is Note, Y is IN(Affaire, Note)',
- [{'X': 'Note', 'Y': 'Affaire'}, {'X': 'Note', 'Y': 'Note'}])],
- None, None, [self.system], {}, [])
- ])
- ],
- {'x': 999999})
-
- def test_update(self):
- self._test('SET X copain Y WHERE X login "comme", Y login "cochon"',
- [('FetchStep',
- [('Any X WHERE X login "comme", X is CWUser', [{'X': 'CWUser'}])],
- [self.ldap, self.system], None, {'X': 'table0.C0'}, []),
- ('FetchStep',
- [('Any Y WHERE Y login "cochon", Y is CWUser', [{'Y': 'CWUser'}])],
- [self.ldap, self.system], None, {'Y': 'table1.C0'}, []),
- ('UpdateStep',
- [('OneFetchStep',
- [('DISTINCT Any X,Y WHERE X is CWUser, Y is CWUser',
- [{'X': 'CWUser', 'Y': 'CWUser'}])],
- None, None, [self.system], {'X': 'table0.C0', 'Y': 'table1.C0'}, [])
- ])
- ])
-
- def test_update2(self):
- self._test('SET U in_group G WHERE G name ~= "bougloup%", U login "admin"',
- [('FetchStep', [('Any U WHERE U login "admin", U is CWUser', [{'U': 'CWUser'}])],
- [self.ldap, self.system], None, {'U': 'table0.C0'}, []),
- ('UpdateStep', [
- ('OneFetchStep', [('DISTINCT Any U,G WHERE G name ILIKE "bougloup%", G is CWGroup, U is CWUser',
- [{'U': 'CWUser', 'G': 'CWGroup'}])],
- None, None, [self.system], {'U': 'table0.C0'}, []),
- ]),
- ])
-
- def test_update3(self):
- anoneid = self.user_groups_session('guests').user.eid
-        # since we are adding an in_state relation for an entity in the system
-        # source, states should be searched in the system source only
- self._test('SET X in_state S WHERE X eid %(x)s, S name "deactivated"',
- [('UpdateStep', [
- ('OneFetchStep', [('DISTINCT Any S WHERE S name "deactivated", S is State',
- [{'S': 'State'}])],
- None, None, [self.system], {}, []),
- ]),
- ],
- {'x': anoneid})
-
-# def test_update4(self):
-#         # since we are adding an in_state relation with a state from the
-#         # system source, CWUser should be searched only in the system source
-# rset = self.execute('State X WHERE X name "activated"')
-# assert len(rset) == 1, rset
-# activatedeid = rset[0][0]
-# self._test('SET X in_state S WHERE X is CWUser, S eid %s' % activatedeid,
-# [('UpdateStep', [
-# ('OneFetchStep', [('DISTINCT Any X,%s WHERE X is CWUser' % activatedeid,
-# [{'X': 'CWUser'}])],
-# None, None, [self.system], {}, []),
-# ]),
-# ])
-
- def test_ldap_user_related_to_invariant_and_dont_cross_rel(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self.cards.dont_cross_relations.add('created_by')
- try:
- self._test('Any X,XL WHERE E eid %(x)s, E created_by X, X login XL',
- [('FetchStep', [('Any X,XL WHERE X login XL, X is CWUser',
- [{'X': 'CWUser', 'XL': 'String'}])],
- [self.ldap, self.system], None,
- {'X': 'table0.C0', 'X.login': 'table0.C1', 'XL': 'table0.C1'},
- []),
- ('OneFetchStep',
- [('Any X,XL WHERE 999999 created_by X, X login XL, X is CWUser',
- [{'X': 'CWUser', 'XL': 'String'}])],
- None, None,
- [self.system],
- {'X': 'table0.C0', 'X.login': 'table0.C1', 'XL': 'table0.C1'},
- [])],
- {'x': 999999})
- finally:
- self.cards.dont_cross_relations.remove('created_by')
-
- def test_ambigous_cross_relation(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self.cards.support_relations['see_also'] = True
- self.cards.cross_relations.add('see_also')
- try:
- self._test('Any X,AA ORDERBY AA WHERE E eid %(x)s, E see_also X, X modification_date AA',
- [('AggrStep',
- 'SELECT table0.C0, table0.C1 FROM table0\nORDER BY table0.C1',
- None,
- [('FetchStep',
- [('Any X,AA WHERE 999999 see_also X, X modification_date AA, X is Note',
- [{'AA': 'Datetime', 'X': 'Note'}])], [self.cards, self.system], {},
- {'AA': 'table0.C1', 'X': 'table0.C0',
- 'X.modification_date': 'table0.C1'},
- []),
- ('FetchStep',
- [('Any X,AA WHERE 999999 see_also X, X modification_date AA, X is Bookmark',
- [{'AA': 'Datetime', 'X': 'Bookmark'}])],
- [self.system], {},
- {'AA': 'table0.C1', 'X': 'table0.C0',
- 'X.modification_date': 'table0.C1'},
- [])])],
- {'x': 999999})
- finally:
- del self.cards.support_relations['see_also']
- self.cards.cross_relations.remove('see_also')
-
- def test_state_of_cross(self):
- self._test('DELETE State X WHERE NOT X state_of Y',
- [('DeleteEntitiesStep',
- [('OneFetchStep',
- [('Any X WHERE NOT X state_of Y, X is State, Y is Workflow',
- [{'X': 'State', 'Y': 'Workflow'}])],
- None, None, [self.system], {}, [])])]
- )
-
-
- def test_source_specified_0_0(self):
- self._test('Card X WHERE X cw_source S, S eid 1',
- [('OneFetchStep', [('Any X WHERE X cw_source 1, X is Card',
- [{'X': 'Card'}])],
- None, None,
- [self.system],{}, [])
- ])
-
- def test_source_specified_0_1(self):
- self._test('Any X, S WHERE X is Card, X cw_source S, S eid 1',
- [('OneFetchStep', [('Any X,1 WHERE X is Card, X cw_source 1',
- [{'X': 'Card'}])],
- None, None,
- [self.system],{}, [])
- ])
-
- def test_source_specified_1_0(self):
- self._test('Card X WHERE X cw_source S, S name "system"',
- [('OneFetchStep', [('Any X WHERE X cw_source S, S name "system", X is Card',
- [{'X': 'Card', 'S': 'CWSource'}])],
- None, None,
- [self.system],{}, [])
- ])
-
- def test_source_specified_1_1(self):
- self._test('Any X, SN WHERE X is Card, X cw_source S, S name "system", S name SN',
- [('OneFetchStep', [('Any X,SN WHERE X is Card, X cw_source S, S name "system", '
- 'S name SN',
- [{'S': 'CWSource', 'SN': 'String', 'X': 'Card'}])],
- None, None, [self.system], {}, [])
- ])
-
- def test_source_specified_1_2(self):
- self._test('Card X WHERE X cw_source S, S name "datafeed"',
- [('OneFetchStep', [('Any X WHERE X cw_source S, S name "datafeed", X is Card',
- [{'X': 'Card', 'S': 'CWSource'}])],
- None, None,
- [self.system],{}, [])
- ])
-
- def test_source_specified_1_3(self):
- self._test('Any X, SN WHERE X is Card, X cw_source S, S name "datafeed", S name SN',
- [('OneFetchStep', [('Any X,SN WHERE X is Card, X cw_source S, S name "datafeed", '
- 'S name SN',
- [{'S': 'CWSource', 'SN': 'String', 'X': 'Card'}])],
- None, None, [self.system], {}, [])
- ])
-
- def test_source_specified_1_4(self):
- sols = []
- for sol in X_ALL_SOLS:
- sol = sol.copy()
- sol['S'] = 'CWSource'
- sols.append(sol)
- self._test('Any X WHERE X cw_source S, S name "cards"',
- [('OneFetchStep', [('Any X WHERE X cw_source S, S name "cards"',
- sols)],
- None, None,
- [self.system],{}, [])
- ])
-
- def test_source_specified_2_0(self):
- # self._test('Card X WHERE X cw_source S, NOT S eid 1',
- # [('OneFetchStep', [('Any X WHERE X is Card',
- # [{'X': 'Card'}])],
- # None, None,
- # [self.cards],{}, [])
- # ])
- self._test('Card X WHERE NOT X cw_source S, S eid 1',
- [('OneFetchStep', [('Any X WHERE X is Card',
- [{'X': 'Card'}])],
- None, None,
- [self.cards],{}, [])
- ])
-
- def test_source_specified_2_1(self):
- self._test('Card X WHERE X cw_source S, NOT S name "system"',
- [('OneFetchStep', [('Any X WHERE X is Card',
- [{'X': 'Card'}])],
- None, None,
- [self.cards],{}, [])
- ])
- self._test('Card X WHERE NOT X cw_source S, S name "system"',
- [('OneFetchStep', [('Any X WHERE X is Card',
- [{'X': 'Card'}])],
- None, None,
- [self.cards],{}, [])
- ])
-
- def test_source_specified_3_1(self):
- self._test('Any X,XT WHERE X is Card, X title XT, X cw_source S, S name "cards"',
- [('OneFetchStep',
- [('Any X,XT WHERE X is Card, X title XT',
- [{'X': 'Card', 'XT': 'String'}])],
- None, None, [self.cards], {}, [])
- ])
-
- def test_source_specified_3_2(self):
- self._test('Any X,XT WHERE X is Card, X title XT, X cw_source S, S name "datafeed"',
- [('OneFetchStep',
- [('Any X,XT WHERE X is Card, X title XT, X cw_source S, S name "datafeed"',
- [{'X': 'Card', 'XT': 'String', 'S': 'CWSource'}])],
- None, None, [self.system], {}, [])
- ])
-
- def test_source_specified_3_3(self):
- self.skipTest('oops')
- self._test('Any STN WHERE X is Note, X type XT, X in_state ST, ST name STN, X cw_source S, S name "cards"',
- [('OneFetchStep',
- [('Any X,XT WHERE X is Card, X title XT',
- [{'X': 'Card', 'XT': 'String'}])],
- None, None, [self.cards], {}, [])
- ])
-
- def test_source_conflict_1(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- with self.assertRaises(BadRQLQuery) as cm:
- self._test('Any X WHERE X cw_source S, S name "system", X eid %(x)s',
- [], {'x': 999999})
- self.assertEqual(str(cm.exception), 'source conflict for term %(x)s')
-
- def test_source_conflict_2(self):
- with self.assertRaises(BadRQLQuery) as cm:
- self._test('Card X WHERE X cw_source S, S name "systeme"', [])
- self.assertEqual(str(cm.exception), 'source conflict for term X')
-
- def test_source_conflict_3(self):
- self.skipTest('oops')
- self._test('CWSource X WHERE X cw_source S, S name "cards"',
- [('OneFetchStep',
- [(u'Any X WHERE X cw_source S, S name "cards", X is CWSource',
- [{'S': 'CWSource', 'X': 'CWSource'}])],
- None, None,
- [self.system],
- {}, [])])
-
-
- def test_ambigous_cross_relation_source_specified(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self.cards.support_relations['see_also'] = True
- self.cards.cross_relations.add('see_also')
- try:
- self._test('Any X,AA ORDERBY AA WHERE E eid %(x)s, E see_also X, X modification_date AA',
- [('AggrStep',
- 'SELECT table0.C0, table0.C1 FROM table0\nORDER BY table0.C1',
- None,
- [('FetchStep',
- [('Any X,AA WHERE 999999 see_also X, X modification_date AA, X is Note',
- [{'AA': 'Datetime', 'X': 'Note'}])], [self.cards, self.system], {},
- {'AA': 'table0.C1', 'X': 'table0.C0',
- 'X.modification_date': 'table0.C1'},
- []),
- ('FetchStep',
- [('Any X,AA WHERE 999999 see_also X, X modification_date AA, X is Bookmark',
- [{'AA': 'Datetime', 'X': 'Bookmark'}])],
- [self.system], {},
- {'AA': 'table0.C1', 'X': 'table0.C0',
- 'X.modification_date': 'table0.C1'},
- [])])],
- {'x': 999999})
- finally:
- del self.cards.support_relations['see_also']
- self.cards.cross_relations.remove('see_also')
-
-    # non-regression tests ###################################################
-
- def test_nonregr1(self):
- self._test('Any X, Y WHERE X copain Y, X login "syt", Y login "cochon"',
- [('FetchStep',
- [('Any X WHERE X login "syt", X is CWUser', [{'X': 'CWUser'}])],
- [self.ldap, self.system], None, {'X': 'table0.C0'}, []),
- ('FetchStep',
- [('Any Y WHERE Y login "cochon", Y is CWUser', [{'Y': 'CWUser'}])],
- [self.ldap, self.system], None, {'Y': 'table1.C0'}, []),
- ('OneFetchStep',
- [('Any X,Y WHERE X copain Y, X is CWUser, Y is CWUser',
- [{'X': 'CWUser', 'Y': 'CWUser'}])],
- None, None, [self.system], {'X': 'table0.C0', 'Y': 'table1.C0'}, [])
- ])
-
- def test_nonregr2(self):
- iworkflowable = self.session.user.cw_adapt_to('IWorkflowable')
- iworkflowable.fire_transition('deactivate')
- treid = iworkflowable.latest_trinfo().eid
- self._test('Any X ORDERBY D DESC WHERE E eid %(x)s, E wf_info_for X, X modification_date D',
- [('FetchStep', [('Any X,D WHERE X modification_date D, X is Note',
- [{'X': 'Note', 'D': 'Datetime'}])],
- [self.cards, self.system], None, {'X': 'table0.C0', 'X.modification_date': 'table0.C1', 'D': 'table0.C1'}, []),
- ('FetchStep', [('Any X,D WHERE X modification_date D, X is CWUser',
- [{'X': 'CWUser', 'D': 'Datetime'}])],
- [self.ldap, self.system], None, {'X': 'table1.C0', 'X.modification_date': 'table1.C1', 'D': 'table1.C1'}, []),
- ('AggrStep', 'SELECT table2.C0 FROM table2\nORDER BY table2.C1 DESC', None, [
- ('FetchStep', [('Any X,D WHERE E eid %s, E wf_info_for X, X modification_date D, E is TrInfo, X is Affaire'%treid,
- [{'X': 'Affaire', 'E': 'TrInfo', 'D': 'Datetime'}])],
- [self.system],
- {},
- {'X': 'table2.C0', 'X.modification_date': 'table2.C1', 'D': 'table2.C1', 'E.wf_info_for': 'table2.C0'}, []),
- ('FetchStep', [('Any X,D WHERE E eid %s, E wf_info_for X, X modification_date D, E is TrInfo, X is CWUser'%treid,
- [{'X': 'CWUser', 'E': 'TrInfo', 'D': 'Datetime'}])],
- [self.system],
- {'X': 'table1.C0', 'X.modification_date': 'table1.C1', 'D': 'table1.C1'},
- {'X': 'table2.C0', 'X.modification_date': 'table2.C1', 'D': 'table2.C1', 'E.wf_info_for': 'table2.C0'}, []),
- ('FetchStep', [('Any X,D WHERE E eid %s, E wf_info_for X, X modification_date D, E is TrInfo, X is Note'%treid,
- [{'X': 'Note', 'E': 'TrInfo', 'D': 'Datetime'}])],
- [self.system],
- {'X': 'table0.C0', 'X.modification_date': 'table0.C1', 'D': 'table0.C1'},
- {'X': 'table2.C0', 'X.modification_date': 'table2.C1', 'D': 'table2.C1', 'E.wf_info_for': 'table2.C0'}, []),
- ]),
- ],
- {'x': treid})
-
- def test_nonregr3(self):
- # original jpl query:
- # Any X, NOW - CD, P WHERE P is Project, U interested_in P, U is CWUser, U login "sthenault", X concerns P, X creation_date CD ORDERBY CD DESC LIMIT 5
- self._test('Any X, NOW - CD, P ORDERBY CD DESC LIMIT 5 WHERE P bookmarked_by U, U login "admin", P is X, X creation_date CD',
- [('FetchStep', [('Any U WHERE U login "admin", U is CWUser', [{'U': 'CWUser'}])],
- [self.ldap, self.system], None, {'U': 'table0.C0'}, []),
- ('OneFetchStep', [('Any X,(NOW - CD),P ORDERBY CD DESC LIMIT 5 WHERE P bookmarked_by U, P is X, X creation_date CD, P is Bookmark, U is CWUser, X is CWEType',
- [{'P': 'Bookmark', 'U': 'CWUser', 'X': 'CWEType', 'CD': 'Datetime'}])],
- 5, None, [self.system], {'U': 'table0.C0'}, [])]
- )
-
- def test_nonregr4(self):
- ueid = self.session.user.eid
- self._test('Any U ORDERBY D DESC WHERE WF wf_info_for X, WF creation_date D, WF from_state FS, '
- 'WF owned_by U?, X eid %(x)s',
- [#('FetchStep', [('Any U WHERE U is CWUser', [{'U': 'CWUser'}])],
- # [self.ldap, self.system], None, {'U': 'table0.C0'}, []),
- ('OneFetchStep', [('Any U ORDERBY D DESC WHERE WF wf_info_for %s, WF creation_date D, WF from_state FS, WF owned_by U?' % ueid,
- [{'WF': 'TrInfo', 'FS': 'State', 'U': 'CWUser', 'D': 'Datetime'}])],
- None, None,
- [self.system], {}, [])],
- {'x': ueid})
-
- def test_nonregr5(self):
- # original jpl query:
- # DISTINCT Version V WHERE MB done_in MV, MV eid %(x)s,
- # MB depends_on B, B done_in V, V version_of P, NOT P eid %(p)s'
- cardeid = self.execute('INSERT Card X: X title "hop"')[0][0]
- noteeid = self.execute('INSERT Note X')[0][0]
- self._test('DISTINCT Card V WHERE MB documented_by MV, MV eid %(x)s, '
- 'MB depends_on B, B documented_by V, V multisource_rel P, NOT P eid %(p)s',
- [('FetchStep', [('Any V WHERE V multisource_rel P, NOT P eid %s, P is Note, V is Card'%noteeid,
- [{'P': 'Note', 'V': 'Card'}])],
- [self.cards, self.system], None, {'V': 'table0.C0'}, []),
- ('OneFetchStep', [('DISTINCT Any V WHERE MB documented_by %s, MB depends_on B, B documented_by V, B is Affaire, MB is Affaire, V is Card'%cardeid,
- [{'B': 'Affaire', 'MB': 'Affaire', 'V': 'Card'}])],
- None, None, [self.system], {'V': 'table0.C0'}, [])],
- {'x': cardeid, 'p': noteeid})
-
- def test_nonregr6(self):
- self._test('Any X WHERE X concerne Y',
- [('OneFetchStep', [('Any X WHERE X concerne Y',
- [{'Y': 'Division', 'X': 'Affaire'},
- {'Y': 'Note', 'X': 'Affaire'},
- {'Y': 'Societe', 'X': 'Affaire'},
- {'Y': 'SubDivision', 'X': 'Affaire'},
- {'Y': 'Affaire', 'X': 'Personne'}])],
- None, None, [self.system], {}, [])
- ])
- self._test('Any X WHERE X concerne Y, Y is Note',
- [('FetchStep', [('Any Y WHERE Y is Note', [{'Y': 'Note'}])],
- [self.cards, self.system], None, {'Y': 'table0.C0'}, []),
- ('OneFetchStep', [('Any X WHERE X concerne Y, X is Affaire, Y is Note',
- [{'X': 'Affaire', 'Y': 'Note'}])],
- None, None, [self.system], {'Y': 'table0.C0'}, [])
- ])
-
- def test_nonregr7(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self._test('Any S,SUM(DUR),SUM(I),(SUM(I) - SUM(DUR)),MIN(DI),MAX(DI) GROUPBY S ORDERBY S WHERE A is Affaire, A duration DUR, A invoiced I, A modification_date DI, A in_state S, S name SN, (EXISTS(A concerne WP, W multisource_rel WP)) OR (EXISTS(A concerne W)), W eid %(n)s',
- [('FetchStep', [('Any WP WHERE 999999 multisource_rel WP, WP is Note', [{'WP': 'Note'}])],
- [self.cards], None, {'WP': u'table0.C0'}, []),
- ('OneFetchStep', [('Any S,SUM(DUR),SUM(I),(SUM(I) - SUM(DUR)),MIN(DI),MAX(DI) GROUPBY S ORDERBY S WHERE A duration DUR, A invoiced I, A modification_date DI, A in_state S, S name SN, (EXISTS(A concerne WP, WP is Note)) OR (EXISTS(A concerne 999999)), A is Affaire, S is State',
- [{'A': 'Affaire', 'DI': 'Datetime', 'DUR': 'Int', 'I': 'Float', 'S': 'State', 'SN': 'String', 'WP': 'Note'}])],
- None, None, [self.system], {'WP': u'table0.C0'}, [])],
- {'n': 999999})
-
- def test_nonregr8(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self._test('Any X,Z WHERE X eid %(x)s, X multisource_rel Y, Z concerne X',
- [('FetchStep', [('Any 999999 WHERE 999999 multisource_rel Y, Y is Note',
- [{'Y': 'Note'}])],
- [self.cards],
- None, {u'%(x)s': 'table0.C0'},
- []),
- ('OneFetchStep', [('Any 999999,Z WHERE Z concerne 999999, Z is Affaire',
- [{'Z': 'Affaire'}])],
- None, None, [self.system],
- {u'%(x)s': 'table0.C0'}, []),
- ],
- {'x': 999999})
-
- def test_nonregr9(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- repo._type_source_cache[999998] = ('Note', 'cards', 999998, 'cards')
- self._test('SET X migrated_from Y WHERE X eid %(x)s, Y multisource_rel Z, Z eid %(z)s, Y migrated_from Z',
- [('FetchStep', [('Any Y WHERE Y multisource_rel 999998, Y is Note', [{'Y': 'Note'}])],
- [self.cards], None, {'Y': u'table0.C0'}, []),
- ('UpdateStep',
- [('OneFetchStep', [('DISTINCT Any Y WHERE Y migrated_from 999998, Y is Note',
- [{'Y': 'Note'}])],
- None, None, [self.system],
- {'Y': u'table0.C0'}, [])])],
- {'x': 999999, 'z': 999998})
-
- def test_nonregr10(self):
- repo._type_source_cache[999999] = ('CWUser', 'ldap', 999999, 'ldap')
- self._test('Any X,AA,AB ORDERBY AA WHERE E eid %(x)s, E owned_by X, X login AA, X modification_date AB',
- [('FetchStep',
- [('Any X,AA,AB WHERE X login AA, X modification_date AB, X is CWUser',
- [{'AA': 'String', 'AB': 'Datetime', 'X': 'CWUser'}])],
- [self.ldap, self.system], None, {'AA': 'table0.C1', 'AB': 'table0.C2',
- 'X': 'table0.C0', 'X.login': 'table0.C1', 'X.modification_date': 'table0.C2'},
- []),
- ('OneFetchStep',
- [('Any X,AA,AB ORDERBY AA WHERE 999999 owned_by X, X login AA, X modification_date AB, X is CWUser',
- [{'AA': 'String', 'AB': 'Datetime', 'X': 'CWUser'}])],
- None, None, [self.system], {'AA': 'table0.C1', 'AB': 'table0.C2',
- 'X': 'table0.C0', 'X.login': 'table0.C1', 'X.modification_date': 'table0.C2'},
- [])
- ],
- {'x': 999999})
-
- def test_nonregr11(self):
- repo._type_source_cache[999999] = ('Bookmark', 'system', 999999, 'system')
- self._test('SET X bookmarked_by Y WHERE X eid %(x)s, Y login "hop"',
- [('UpdateStep',
- [('OneFetchStep', [('DISTINCT Any Y WHERE Y login "hop", Y is CWUser', [{'Y': 'CWUser'}])],
- None, None, [self.ldap, self.system], {}, [])]
- )],
- {'x': 999999})
-
- def test_nonregr12(self):
- repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self._test('Any X ORDERBY Z DESC WHERE X modification_date Z, E eid %(x)s, E see_also X',
- [('FetchStep', [('Any X,Z WHERE X modification_date Z, X is Note',
- [{'X': 'Note', 'Z': 'Datetime'}])],
- [self.cards, self.system], None, {'X': 'table0.C0', 'X.modification_date': 'table0.C1', 'Z': 'table0.C1'},
- []),
- ('AggrStep', 'SELECT table1.C0 FROM table1\nORDER BY table1.C1 DESC', None,
- [('FetchStep', [('Any X,Z WHERE X modification_date Z, 999999 see_also X, X is Bookmark',
- [{'X': 'Bookmark', 'Z': 'Datetime'}])],
- [self.system], {}, {'X': 'table1.C0', 'X.modification_date': 'table1.C1',
- 'Z': 'table1.C1'},
- []),
- ('FetchStep', [('Any X,Z WHERE X modification_date Z, 999999 see_also X, X is Note',
- [{'X': 'Note', 'Z': 'Datetime'}])],
- [self.system], {'X': 'table0.C0', 'X.modification_date': 'table0.C1',
- 'Z': 'table0.C1'},
- {'X': 'table1.C0', 'X.modification_date': 'table1.C1',
- 'Z': 'table1.C1'},
- [])]
- )],
- {'x': 999999})
-
- def test_nonregr13_1(self):
- ueid = self.session.user.eid
-        # identity wrapped into EXISTS:
-        # shouldn't propagate the constraint that U is in the same source as ME
- self._test('Any B,U,UL GROUPBY B,U,UL WHERE B created_by U?, B is File '
- 'WITH U,UL BEING (Any U,UL WHERE ME eid %(x)s, (EXISTS(U identity ME) '
- 'OR (EXISTS(U in_group G, G name IN("managers", "staff")))) '
- 'OR (EXISTS(U in_group H, ME in_group H, NOT H name "users")), U login UL, U is CWUser)',
- [('FetchStep', [('Any U,UL WHERE U login UL, U is CWUser',
- [{'U': 'CWUser', 'UL': 'String'}])],
- [self.ldap, self.system], None,
- {'U': 'table0.C0', 'U.login': 'table0.C1', 'UL': 'table0.C1'},
- []),
- ('FetchStep', [('Any U,UL WHERE ((EXISTS(U identity %s)) OR (EXISTS(U in_group G, G name IN("managers", "staff"), G is CWGroup))) OR (EXISTS(U in_group H, %s in_group H, NOT H name "users", H is CWGroup)), U login UL, U is CWUser' % (ueid, ueid),
- [{'G': 'CWGroup', 'H': 'CWGroup', 'U': 'CWUser', 'UL': 'String'}])],
- [self.system],
- {'U': 'table0.C0', 'U.login': 'table0.C1', 'UL': 'table0.C1'},
- {'U': 'table1.C0', 'U.login': 'table1.C1', 'UL': 'table1.C1'},
- []),
- ('OneFetchStep', [('Any B,U,UL GROUPBY B,U,UL WHERE B created_by U?, B is File',
- [{'B': 'File', 'U': 'CWUser', 'UL': 'String'}])],
- None, None, [self.system],
- {'U': 'table1.C0', 'UL': 'table1.C1'},
- [])],
- {'x': ueid})
-
- def test_nonregr13_2(self):
- # identity *not* wrapped into exists.
- #
-        # XXX this test fails since, in this case, U and 5 in "U identity 5"
-        # are in the same scope, so constraints are applied (telling that U
-        # should come from the same source as the user with eid 5).
-        #
-        # IMO this is normal, unless we introduce a special case for the
-        # identity relation. BUT I think it's better to leave it as is and to
-        # document the constraint propagation rules, and hence why this should
-        # be wrapped in exists() when used in a multi-source setup
- self.skipTest('take a look at me if you wish')
- ueid = self.session.user.eid
- self._test('Any B,U,UL GROUPBY B,U,UL WHERE B created_by U?, B is File '
- 'WITH U,UL BEING (Any U,UL WHERE ME eid %(x)s, (U identity ME '
- 'OR (EXISTS(U in_group G, G name IN("managers", "staff")))) '
- 'OR (EXISTS(U in_group H, ME in_group H, NOT H name "users")), U login UL, U is CWUser)',
- [('FetchStep', [('Any U,UL WHERE U login UL, U is CWUser',
- [{'U': 'CWUser', 'UL': 'String'}])],
- [self.ldap, self.system], None,
- {'U': 'table0.C0', 'U.login': 'table0.C1', 'UL': 'table0.C1'},
- []),
- ('FetchStep', [('Any U,UL WHERE ((U identity %s) OR (EXISTS(U in_group G, G name IN("managers", "staff"), G is CWGroup))) OR (EXISTS(U in_group H, %s in_group H, NOT H name "users", H is CWGroup)), U login UL, U is CWUser' % (ueid, ueid),
- [{'G': 'CWGroup', 'H': 'CWGroup', 'U': 'CWUser', 'UL': 'String'}])],
- [self.system],
- {'U': 'table0.C0', 'U.login': 'table0.C1', 'UL': 'table0.C1'},
- {'U': 'table1.C0', 'U.login': 'table1.C1', 'UL': 'table1.C1'},
- []),
- ('OneFetchStep', [('Any B,U,UL GROUPBY B,U,UL WHERE B created_by U?, B is File',
- [{'B': 'File', 'U': 'CWUser', 'UL': 'String'}])],
- None, None, [self.system],
- {'U': 'table1.C0', 'UL': 'table1.C1'},
- [])],
- {'x': self.session.user.eid})
-
- def test_nonregr14_1(self):
- repo._type_source_cache[999999] = ('CWUser', 'ldap', 999999, 'ldap')
- self._test('Any X WHERE X eid %(x)s, X owned_by U, U eid %(u)s',
- [('OneFetchStep', [('Any 999999 WHERE 999999 owned_by 999999', [{}])],
- None, None, [self.system], {}, [])],
- {'x': 999999, 'u': 999999})
-
- def test_nonregr14_2(self):
- repo._type_source_cache[999999] = ('CWUser', 'ldap', 999999, 'ldap')
- repo._type_source_cache[999998] = ('Note', 'system', 999998, 'system')
- self._test('Any X WHERE X eid %(x)s, X owned_by U, U eid %(u)s',
- [('OneFetchStep', [('Any 999998 WHERE 999998 owned_by 999999', [{}])],
- None, None, [self.system], {}, [])],
- {'x': 999998, 'u': 999999})
-
- def test_nonregr14_3(self):
- repo._type_source_cache[999999] = ('CWUser', 'system', 999999, 'system')
- repo._type_source_cache[999998] = ('CWUser', 'ldap', 999998, 'ldap')
- self._test('Any X WHERE X eid %(x)s, X owned_by U, U eid %(u)s',
- [('OneFetchStep', [('Any 999998 WHERE 999998 owned_by 999999', [{}])],
- None, None, [self.system], {}, [])],
- {'x': 999998, 'u': 999999})
-
- def test_nonregr_identity_no_source_access_1(self):
- repo._type_source_cache[999999] = ('CWUser', 'ldap', 999998, 'ldap')
- self._test('Any S WHERE S identity U, S eid %(s)s, U eid %(u)s',
- [('OneFetchStep', [('Any 999999 WHERE 999999 identity 999999', [{}])],
- None, None, [self.system], {}, [])],
- {'s': 999999, 'u': 999999})
-
- def test_nonregr_identity_no_source_access_2(self):
- repo._type_source_cache[999999] = ('EmailAddress', 'system', 999999, 'system')
- repo._type_source_cache[999998] = ('CWUser', 'ldap', 999998, 'ldap')
- self._test('Any X WHERE O use_email X, ((EXISTS(O identity U)) OR (EXISTS(O in_group G, G name IN("managers", "staff")))) OR (EXISTS(O in_group G2, U in_group G2, NOT G2 name "users")), X eid %(x)s, U eid %(u)s',
- [('OneFetchStep', [('Any 999999 WHERE O use_email 999999, ((EXISTS(O identity 999998)) OR (EXISTS(O in_group G, G name IN("managers", "staff")))) OR (EXISTS(O in_group G2, 999998 in_group G2, NOT G2 name "users"))',
- [{'G': 'CWGroup', 'G2': 'CWGroup', 'O': 'CWUser'}])],
- None, None, [self.system], {}, [])],
- {'x': 999999, 'u': 999998})
-
- def test_nonregr_similar_subquery(self):
- repo._type_source_cache[999999] = ('Personne', 'system', 999999, 'system')
- self._test('Any T,TD,U,T,UL WITH T,TD,U,UL BEING ('
- '(Any T,TD,U,UL WHERE X eid %(x)s, T comments X, T content TD, T created_by U?, U login UL)'
- ' UNION '
- '(Any T,TD,U,UL WHERE X eid %(x)s, X connait P, T comments P, T content TD, T created_by U?, U login UL))',
- # XXX optimization: use a OneFetchStep with a UNION of both queries
- [('FetchStep', [('Any U,UL WHERE U login UL, U is CWUser',
- [{'U': 'CWUser', 'UL': 'String'}])],
- [self.ldap, self.system], None,
- {'U': 'table0.C0', 'U.login': 'table0.C1', 'UL': 'table0.C1'},
- []),
- ('UnionFetchStep',
- [('FetchStep',
- [('Any T,TD,U,UL WHERE T comments 999999, T content TD, T created_by U?, U login UL, T is Comment, U is CWUser',
- [{'T': 'Comment', 'TD': 'String', 'U': 'CWUser', 'UL': 'String'}])],
- [self.system],
- {'U': 'table0.C0', 'U.login': 'table0.C1', 'UL': 'table0.C1'},
- {'T': 'table1.C0',
- 'T.content': 'table1.C1',
- 'TD': 'table1.C1',
- 'U': 'table1.C2',
- 'U.login': 'table1.C3',
- 'UL': 'table1.C3'},
- []),
- ('FetchStep',
- [('Any T,TD,U,UL WHERE 999999 connait P, T comments P, T content TD, T created_by U?, U login UL, P is Personne, T is Comment, U is CWUser',
- [{'P': 'Personne',
- 'T': 'Comment',
- 'TD': 'String',
- 'U': 'CWUser',
- 'UL': 'String'}])],
- [self.system],
- {'U': 'table0.C0', 'U.login': 'table0.C1', 'UL': 'table0.C1'},
- {'T': 'table1.C0',
- 'T.content': 'table1.C1',
- 'TD': 'table1.C1',
- 'U': 'table1.C2',
- 'U.login': 'table1.C3',
- 'UL': 'table1.C3'},
- [])]),
- ('OneFetchStep',
- [('Any T,TD,U,T,UL',
- [{'T': 'Comment', 'TD': 'String', 'U': 'CWUser', 'UL': 'String'}])],
- None, None,
- [self.system],
- {'T': 'table1.C0', 'TD': 'table1.C1', 'U': 'table1.C2', 'UL': 'table1.C3'},
- [])],
- {'x': 999999})
-
- def test_nonregr_dont_readd_already_processed_relation(self):
- self._test('Any WO,D,SO WHERE WO is Note, D tags WO, WO in_state SO',
- [('FetchStep',
- [('Any WO,SO WHERE WO in_state SO, SO is State, WO is Note',
- [{'SO': 'State', 'WO': 'Note'}])],
- [self.cards, self.system], None,
- {'SO': 'table0.C1', 'WO': 'table0.C0'},
- []),
- ('OneFetchStep',
- [('Any WO,D,SO WHERE D tags WO, D is Tag, SO is State, WO is Note',
- [{'D': 'Tag', 'SO': 'State', 'WO': 'Note'}])],
- None, None, [self.system],
- {'SO': 'table0.C1', 'WO': 'table0.C0'},
- [])
- ])
-
-class MSPlannerTwoSameExternalSourcesTC(BasePlannerTC):
- """test planner related feature on a 3-sources repository:
-
- * 2 rql sources supporting Card
- """
-
- def setUp(self):
- self.__class__.repo = repo
- self.setup()
- self.add_source(FakeCardSource, 'cards')
- self.add_source(FakeCardSource, 'cards2')
- self.planner = MSPlanner(self.o.schema, self.repo.vreg.rqlhelper)
- assert repo.sources_by_uri['cards2'].support_relation('multisource_crossed_rel')
- assert 'multisource_crossed_rel' in repo.sources_by_uri['cards2'].cross_relations
- assert repo.sources_by_uri['cards'].support_relation('multisource_crossed_rel')
- assert 'multisource_crossed_rel' in repo.sources_by_uri['cards'].cross_relations
- _test = test_plan
-
-
- def test_linked_external_entities(self):
- repo._type_source_cache[999999] = ('Tag', 'system', 999999, 'system')
- self._test('Any X,XT WHERE X is Card, X title XT, T tags X, T eid %(t)s',
- [('FetchStep',
- [('Any X,XT WHERE X title XT, X is Card', [{'X': 'Card', 'XT': 'String'}])],
- [self.cards, self.cards2, self.system],
- None, {'X': 'table0.C0', 'X.title': 'table0.C1', 'XT': 'table0.C1'},
- []),
- ('OneFetchStep',
- [('Any X,XT WHERE X title XT, 999999 tags X, X is Card',
- [{'X': 'Card', 'XT': 'String'}])],
- None, None, [self.system],
- {'X': 'table0.C0', 'X.title': 'table0.C1', 'XT': 'table0.C1'},
- [])],
- {'t': 999999})
-
- def test_version_depends_on(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self._test('Any X,AD,AE WHERE E eid %(x)s, E migrated_from X, X in_state AD, AD name AE',
- [('FetchStep', [('Any X,AD,AE WHERE X in_state AD, AD name AE, AD is State, X is Note',
- [{'AD': 'State', 'AE': 'String', 'X': 'Note'}])],
- [self.cards, self.cards2, self.system],
- None, {'AD': 'table0.C1', 'AD.name': 'table0.C2',
- 'AE': 'table0.C2', 'X': 'table0.C0'},
- []),
- ('OneFetchStep', [('Any X,AD,AE WHERE 999999 migrated_from X, AD name AE, AD is State, X is Note',
- [{'AD': 'State', 'AE': 'String', 'X': 'Note'}])],
- None, None, [self.system],
- {'AD': 'table0.C1', 'AD.name': 'table0.C2', 'AE': 'table0.C2', 'X': 'table0.C0'},
- [])],
- {'x': 999999})
-
- def test_version_crossed_depends_on_1(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self._test('Any X,AD,AE WHERE E eid %(x)s, E multisource_crossed_rel X, X in_state AD, AD name AE',
- [('FetchStep', [('Any X,AD,AE WHERE X in_state AD, AD name AE, AD is State, X is Note',
- [{'AD': 'State', 'AE': 'String', 'X': 'Note'}])],
- [self.cards, self.cards2, self.system],
- None, {'AD': 'table0.C1', 'AD.name': 'table0.C2',
- 'AE': 'table0.C2', 'X': 'table0.C0'},
- []),
- ('UnionStep', None, None,
- [('OneFetchStep', [('Any X,AD,AE WHERE 999999 multisource_crossed_rel X, AD name AE, AD is State, X is Note',
- [{'AD': 'State', 'AE': 'String', 'X': 'Note'}])],
- None, None, [self.cards], None,
- []),
- ('OneFetchStep', [('Any X,AD,AE WHERE 999999 multisource_crossed_rel X, AD name AE, AD is State, X is Note',
- [{'AD': 'State', 'AE': 'String', 'X': 'Note'}])],
- None, None, [self.system],
- {'AD': 'table0.C1', 'AD.name': 'table0.C2',
- 'AE': 'table0.C2', 'X': 'table0.C0'},
- [])]
- )],
- {'x': 999999})
-
- def test_version_crossed_depends_on_2(self):
- self.repo._type_source_cache[999999] = ('Note', 'system', 999999, 'system')
- self._test('Any X,AD,AE WHERE E eid %(x)s, E multisource_crossed_rel X, X in_state AD, AD name AE',
- [('FetchStep', [('Any X,AD,AE WHERE X in_state AD, AD name AE, AD is State, X is Note',
- [{'AD': 'State', 'AE': 'String', 'X': 'Note'}])],
- [self.cards, self.cards2, self.system],
- None, {'AD': 'table0.C1', 'AD.name': 'table0.C2',
- 'AE': 'table0.C2', 'X': 'table0.C0'},
- []),
- ('OneFetchStep', [('Any X,AD,AE WHERE 999999 multisource_crossed_rel X, AD name AE, AD is State, X is Note',
- [{'AD': 'State', 'AE': 'String', 'X': 'Note'}])],
- None, None, [self.system],
- {'AD': 'table0.C1', 'AD.name': 'table0.C2', 'AE': 'table0.C2', 'X': 'table0.C0'},
- [])],
- {'x': 999999})
-
- def test_version_crossed_depends_on_3(self):
- self._test('Any X,AD,AE WHERE E multisource_crossed_rel X, X in_state AD, AD name AE, E is Note',
- [('FetchStep', [('Any X,AD,AE WHERE X in_state AD, AD name AE, AD is State, X is Note',
- [{'AD': 'State', 'AE': 'String', 'X': 'Note'}])],
- [self.cards, self.cards2, self.system],
- None, {'AD': 'table0.C1', 'AD.name': 'table0.C2',
- 'AE': 'table0.C2', 'X': 'table0.C0'},
- []),
- ('FetchStep', [('Any E WHERE E is Note', [{'E': 'Note'}])],
- [self.cards, self.cards2, self.system],
- None, {'E': 'table1.C0'},
- []),
- ('UnionStep', None, None,
- [('OneFetchStep', [('Any X,AD,AE WHERE E multisource_crossed_rel X, AD name AE, AD is State, E is Note, X is Note',
- [{'AD': 'State', 'AE': 'String', 'E': 'Note', 'X': 'Note'}])],
- None, None, [self.cards, self.cards2], None,
- []),
- ('OneFetchStep', [('Any X,AD,AE WHERE E multisource_crossed_rel X, AD name AE, AD is State, E is Note, X is Note',
- [{'AD': 'State', 'AE': 'String', 'E': 'Note', 'X': 'Note'}])],
- None, None, [self.system],
- {'AD': 'table0.C1',
- 'AD.name': 'table0.C2',
- 'AE': 'table0.C2',
- 'E': 'table1.C0',
- 'X': 'table0.C0'},
- [])]
- )]
- )
-
- def test_version_crossed_depends_on_4(self):
- self._test('Any X,AD,AE WHERE EXISTS(E multisource_crossed_rel X), X in_state AD, AD name AE, E is Note',
- [('FetchStep',
- [('Any X,AD,AE WHERE X in_state AD, AD name AE, AD is State, X is Note',
- [{'X': 'Note', 'AD': 'State', 'AE': 'String'}])],
- [self.cards, self.cards2, self.system], None,
- {'X': 'table0.C0',
- 'AD': 'table0.C1',
- 'AD.name': 'table0.C2',
- 'AE': 'table0.C2'},
- []),
- ('FetchStep',
- [('Any A WHERE E multisource_crossed_rel A, A is Note, E is Note',
- [{'A': 'Note', 'E': 'Note'}])],
- [self.cards, self.cards2, self.system], None,
- {'A': 'table1.C0'},
- []),
- ('OneFetchStep',
- [('Any X,AD,AE WHERE EXISTS(X identity A), AD name AE, A is Note, AD is State, X is Note',
- [{'A': 'Note', 'AD': 'State', 'AE': 'String', 'X': 'Note'}])],
- None, None,
- [self.system],
- {'A': 'table1.C0',
- 'AD': 'table0.C1',
- 'AD.name': 'table0.C2',
- 'AE': 'table0.C2',
- 'X': 'table0.C0'},
- []
- )]
- )
-
- def test_nonregr_dont_cross_rel_source_filtering_1(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self._test('Any S WHERE E eid %(x)s, E in_state S, NOT S name "moved"',
- [('OneFetchStep', [('Any S WHERE 999999 in_state S, NOT S name "moved", S is State',
- [{'S': 'State'}])],
- None, None, [self.cards], {}, []
- )],
- {'x': 999999})
-
- def test_nonregr_dont_cross_rel_source_filtering_2(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self._test('Any X,AA,AB WHERE E eid %(x)s, E in_state X, X name AA, X modification_date AB',
- [('OneFetchStep', [('Any X,AA,AB WHERE 999999 in_state X, X name AA, X modification_date AB, X is State',
- [{'AA': 'String', 'AB': 'Datetime', 'X': 'State'}])],
- None, None, [self.cards], {}, []
- )],
- {'x': 999999})
-
- def test_nonregr_eid_query(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self._test('Any X WHERE X eid 999999',
- [('OneFetchStep', [('Any 999999', [{}])],
- None, None, [self.system], {}, []
- )],
- {'x': 999999})
-
-
- def test_nonregr_not_is(self):
- self._test("Any X WHERE X owned_by U, U login 'anon', NOT X is Comment",
- [('FetchStep', [('Any X WHERE X is IN(Card, Note, State)',
- [{'X': 'Note'}, {'X': 'State'}, {'X': 'Card'}])],
- [self.cards, self.cards2, self.system],
- None, {'X': 'table0.C0'}, []),
- ('UnionStep', None, None,
- [('OneFetchStep',
- [(u'Any X WHERE X owned_by U, U login "anon", U is CWUser, X is IN(Affaire, BaseTransition, Basket, Bookmark, CWAttribute, CWCache, CWConstraint, CWConstraintType, CWDataImport, CWEType, CWGroup, CWPermission, CWProperty, CWRType, CWRelation, CWSource, CWSourceHostConfig, CWSourceSchemaConfig, CWUniqueTogetherConstraint, CWUser, Division, Email, EmailAddress, EmailPart, EmailThread, ExternalUri, File, Folder, Old, Personne, RQLExpression, Societe, SubDivision, SubWorkflowExitPoint, Tag, TrInfo, Transition, Workflow, WorkflowTransition)',
- [{'U': 'CWUser', 'X': 'Affaire'},
- {'U': 'CWUser', 'X': 'BaseTransition'},
- {'U': 'CWUser', 'X': 'Basket'},
- {'U': 'CWUser', 'X': 'Bookmark'},
- {'U': 'CWUser', 'X': 'CWAttribute'},
- {'U': 'CWUser', 'X': 'CWCache'},
- {'U': 'CWUser', 'X': 'CWConstraint'},
- {'U': 'CWUser', 'X': 'CWConstraintType'},
- {'U': 'CWUser', 'X': 'CWDataImport'},
- {'U': 'CWUser', 'X': 'CWEType'},
- {'U': 'CWUser', 'X': 'CWGroup'},
- {'U': 'CWUser', 'X': 'CWPermission'},
- {'U': 'CWUser', 'X': 'CWProperty'},
- {'U': 'CWUser', 'X': 'CWRType'},
- {'U': 'CWUser', 'X': 'CWRelation'},
- {'U': 'CWUser', 'X': 'CWSource'},
- {'U': 'CWUser', 'X': 'CWSourceHostConfig'},
- {'U': 'CWUser', 'X': 'CWSourceSchemaConfig'},
- {'U': 'CWUser', 'X': 'CWUniqueTogetherConstraint'},
- {'U': 'CWUser', 'X': 'CWUser'},
- {'U': 'CWUser', 'X': 'Division'},
- {'U': 'CWUser', 'X': 'Email'},
- {'U': 'CWUser', 'X': 'EmailAddress'},
- {'U': 'CWUser', 'X': 'EmailPart'},
- {'U': 'CWUser', 'X': 'EmailThread'},
- {'U': 'CWUser', 'X': 'ExternalUri'},
- {'U': 'CWUser', 'X': 'File'},
- {'U': 'CWUser', 'X': 'Folder'},
- {'U': 'CWUser', 'X': 'Old'},
- {'U': 'CWUser', 'X': 'Personne'},
- {'U': 'CWUser', 'X': 'RQLExpression'},
- {'U': 'CWUser', 'X': 'Societe'},
- {'U': 'CWUser', 'X': 'SubDivision'},
- {'U': 'CWUser', 'X': 'SubWorkflowExitPoint'},
- {'U': 'CWUser', 'X': 'Tag'},
- {'U': 'CWUser', 'X': 'TrInfo'},
- {'U': 'CWUser', 'X': 'Transition'},
- {'U': 'CWUser', 'X': 'Workflow'},
- {'U': 'CWUser', 'X': 'WorkflowTransition'}])],
- None, None,
- [self.system], {}, []),
- ('OneFetchStep',
- [(u'Any X WHERE X owned_by U, U login "anon", U is CWUser, X is IN(Card, Note, State)',
- [{'U': 'CWUser', 'X': 'Note'},
- {'U': 'CWUser', 'X': 'State'},
- {'U': 'CWUser', 'X': 'Card'}])],
- None, None,
- [self.system], {'X': 'table0.C0'}, [])
- ])
- ])
-
- def test_remove_from_deleted_source_1(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self._test('Note X WHERE X eid 999999, NOT X cw_source Y',
- [('OneFetchStep',
- [('Any 999999 WHERE NOT EXISTS(999999 cw_source Y)',
- [{'Y': 'CWSource'}])],
- None, None, [self.system], {}, [])
- ])
-
- def test_remove_from_deleted_source_2(self):
- self.repo._type_source_cache[999999] = ('Note', 'cards', 999999, 'cards')
- self.repo._type_source_cache[999998] = ('Note', 'cards', 999998, 'cards')
- self._test('Note X WHERE X eid IN (999998, 999999), NOT X cw_source Y',
- [('FetchStep',
- [('Any X WHERE X eid IN(999998, 999999), X is Note',
- [{'X': 'Note'}])],
- [self.cards], None, {'X': 'table0.C0'}, []),
- ('OneFetchStep',
- [('Any X WHERE NOT EXISTS(X cw_source Y, Y is CWSource), X is Note',
- [{'X': 'Note', 'Y': 'CWSource'}])],
- None, None, [self.system],{'X': 'table0.C0'}, [])
- ])
-
-
-class FakeVCSSource(AbstractSource):
- uri = 'ccc'
- support_entities = {'Card': True, 'Note': True}
- support_relations = {'multisource_inlined_rel': True,
- 'multisource_rel': True}
-
- def syntax_tree_search(self, *args, **kwargs):
- return []
-
-class MSPlannerVCSSource(BasePlannerTC):
-
- def setUp(self):
- self.__class__.repo = repo
- self.setup()
- self.add_source(FakeVCSSource, 'vcs')
- self.planner = MSPlanner(self.o.schema, self.repo.vreg.rqlhelper)
- _test = test_plan
-
- def test_multisource_inlined_rel_skipped(self):
- self._test('Any MAX(VC) '
- 'WHERE VC multisource_inlined_rel R2, R para %(branch)s, VC in_state S, S name "published", '
- '(EXISTS(R identity R2)) OR (EXISTS(R multisource_rel R2))',
- [('FetchStep', [('Any VC WHERE VC multisource_inlined_rel R2, R para "???", (EXISTS(R identity R2)) OR (EXISTS(R multisource_rel R2)), R is Note, R2 is Note, VC is Note',
- [{'R': 'Note', 'R2': 'Note', 'VC': 'Note'}])],
- [self.vcs, self.system], None,
- {'VC': 'table0.C0'},
- []),
- ('OneFetchStep', [(u'Any MAX(VC) WHERE VC in_state S, S name "published", S is State, VC is Note',
- [{'S': 'State', 'VC': 'Note'}])],
- None, None, [self.system],
- {'VC': 'table0.C0'},
- [])
- ])
-
- def test_fully_simplified_extsource(self):
- self.repo._type_source_cache[999998] = ('Note', 'vcs', 999998, 'vcs')
- self.repo._type_source_cache[999999] = ('Note', 'vcs', 999999, 'vcs')
- self._test('Any X, Y WHERE NOT X multisource_rel Y, X eid 999998, Y eid 999999',
- [('OneFetchStep', [('Any 999998,999999 WHERE NOT EXISTS(999998 multisource_rel 999999)', [{}])],
- None, None, [self.vcs], {}, [])
- ])
-
- def test_nonregr_fully_simplified_extsource(self):
- self.repo._type_source_cache[999998] = ('Note', 'vcs', 999998, 'vcs')
- self.repo._type_source_cache[999999] = ('Note', 'vcs', 999999, 'vcs')
- self.repo._type_source_cache[1000000] = ('Note', 'system', 1000000, 'system')
- self._test('DISTINCT Any T,FALSE,L,M WHERE L eid 1000000, M eid 999999, T eid 999998',
- [('OneFetchStep', [('DISTINCT Any 999998,FALSE,1000000,999999', [{}])],
- None, None, [self.system], {}, [])
- ])
-
-
-if __name__ == '__main__':
- from logilab.common.testlib import unittest_main
- unittest_main()
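
The deleted planner tests above all share one shape: an RQL query is paired
with the nested tuple of steps (FetchStep, OneFetchStep, UnionStep, ...) that
the multi-source planner was expected to produce for it. A minimal sketch of
what a FetchStep tuple encodes; the unpacked names below are descriptive
guesses, not CubicWeb API:

    # (step name, [(RQL sent to the sources, candidate solutions)],
    #  sources queried, input column map, output column map, extra args)
    fetch_step = ('FetchStep',
                  [('Any X,XT WHERE X title XT, X is Card',
                    [{'X': 'Card', 'XT': 'String'}])],
                  ['cards', 'cards2', 'system'],  # uris stand in for source objects
                  None,                           # no input map for a first step
                  {'X': 'table0.C0', 'X.title': 'table0.C1', 'XT': 'table0.C1'},
                  [])
    name, queries, sources, inputmap, outputmap, extra = fetch_step
    assert name == 'FetchStep' and len(queries) == 1
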
--- a/server/test/unittest_multisources.py Mon Feb 17 11:13:27 2014 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,394 +0,0 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
-# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
-#
-# This file is part of CubicWeb.
-#
-# CubicWeb is free software: you can redistribute it and/or modify it under the
-# terms of the GNU Lesser General Public License as published by the Free
-# Software Foundation, either version 2.1 of the License, or (at your option)
-# any later version.
-#
-# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
-# details.
-#
-# You should have received a copy of the GNU Lesser General Public License along
-# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-
-from datetime import datetime, timedelta
-from itertools import repeat
-
-from cubicweb.devtools import TestServerConfiguration, init_test_database
-from cubicweb.devtools.testlib import CubicWebTC, Tags
-from cubicweb.devtools.repotest import do_monkey_patch, undo_monkey_patch
-from cubicweb.devtools import get_test_db_handler
-
-class ExternalSource1Configuration(TestServerConfiguration):
- sourcefile = 'sources_extern'
-
-class ExternalSource2Configuration(TestServerConfiguration):
- sourcefile = 'sources_multi'
-
-MTIME = datetime.utcnow() - timedelta(0, 10)
-
-EXTERN_SOURCE_CFG = u'''
-cubicweb-user = admin
-cubicweb-password = gingkow
-base-url=http://extern.org/
-'''
-
-# hi-jacking
-from cubicweb.server.sources.pyrorql import PyroRQLSource
-from cubicweb.dbapi import Connection
-
-PyroRQLSource_get_connection = PyroRQLSource.get_connection
-Connection_close = Connection.close
-
-def add_extern_mapping(source):
- source.init_mapping(zip(('Card', 'Affaire', 'State',
- 'in_state', 'documented_by', 'multisource_inlined_rel'),
- repeat(u'write')))
-
-
-def pre_setup_database_extern(session, config):
- session.execute('INSERT Card X: X title "C3: An external card", X wikiid "aaa"')
- session.execute('INSERT Card X: X title "C4: Ze external card", X wikiid "zzz"')
- session.execute('INSERT Affaire X: X ref "AFFREF"')
- session.commit()
-
-def pre_setup_database_multi(session, config):
- session.create_entity('CWSource', name=u'extern', type=u'pyrorql',
- url=u'pyro:///extern', config=EXTERN_SOURCE_CFG)
- session.commit()
-
-
-class TwoSourcesTC(CubicWebTC):
- """Main repo -> extern-multi -> extern
- \-------------/
- """
-    test_db_id = 'cw-server-multisources'
- tags = CubicWebTC.tags | Tags(('multisources'))
-
- @classmethod
- def setUpClass(cls):
- cls._cfg2 = ExternalSource1Configuration('data', apphome=TwoSourcesTC.datadir)
- cls._cfg3 = ExternalSource2Configuration('data', apphome=TwoSourcesTC.datadir)
- TestServerConfiguration.no_sqlite_wrap = True
-        # hi-jack PyroRQLSource.get_connection to access an existing
-        # connection (no pyro connection)
- PyroRQLSource.get_connection = lambda x: x.uri == 'extern-multi' and cls.cnx3 or cls.cnx2
-        # also necessary since the repository closes its initial connection
-        # pool even though we want to keep cnx2 valid
- Connection.close = lambda x: None
-
- @classmethod
-    def tearDownClass(cls):
- PyroRQLSource.get_connection = PyroRQLSource_get_connection
- Connection.close = Connection_close
- cls.cnx2.close()
- cls.cnx3.close()
- TestServerConfiguration.no_sqlite_wrap = False
-
- @classmethod
- def _init_repo(cls):
- repo2_handler = get_test_db_handler(cls._cfg2)
-        repo2_handler.build_db_cache('4cards-1affaire', pre_setup_func=pre_setup_database_extern)
- cls.repo2, cls.cnx2 = repo2_handler.get_repo_and_cnx('4cards-1affaire')
-
- repo3_handler = get_test_db_handler(cls._cfg3)
-        repo3_handler.build_db_cache('multisource', pre_setup_func=pre_setup_database_multi)
- cls.repo3, cls.cnx3 = repo3_handler.get_repo_and_cnx('multisource')
-
-
- super(TwoSourcesTC, cls)._init_repo()
-
- def setUp(self):
- CubicWebTC.setUp(self)
- self.addCleanup(self.cnx2.close)
- self.addCleanup(self.cnx3.close)
- do_monkey_patch()
-
- def tearDown(self):
- for source in self.repo.sources[1:]:
- self.repo.remove_source(source.uri)
- CubicWebTC.tearDown(self)
- self.cnx2.close()
- self.cnx3.close()
- undo_monkey_patch()
-
- @staticmethod
- def pre_setup_database(session, config):
- for uri, src_config in [('extern', EXTERN_SOURCE_CFG),
- ('extern-multi', '''
-cubicweb-user = admin
-cubicweb-password = gingkow
-''')]:
- source = session.create_entity('CWSource', name=unicode(uri),
- type=u'pyrorql', url=u'pyro:///extern-multi',
- config=unicode(src_config))
- session.commit()
- add_extern_mapping(source)
-
- session.commit()
- # trigger discovery
- session.execute('Card X')
- session.execute('Affaire X')
- session.execute('State X')
-
- def setup_database(self):
- cu2 = self.cnx2.cursor()
- self.ec1 = cu2.execute('Any X WHERE X is Card, X title "C3: An external card", X wikiid "aaa"')[0][0]
- self.aff1 = cu2.execute('Any X WHERE X is Affaire, X ref "AFFREF"')[0][0]
- cu2.close()
- # add some entities
- self.ic1 = self.sexecute('INSERT Card X: X title "C1: An internal card", X wikiid "aaai"')[0][0]
- self.ic2 = self.sexecute('INSERT Card X: X title "C2: Ze internal card", X wikiid "zzzi"')[0][0]
-
- def test_eid_comp(self):
- rset = self.sexecute('Card X WHERE X eid > 1')
- self.assertEqual(len(rset), 4)
- rset = self.sexecute('Any X,T WHERE X title T, X eid > 1')
- self.assertEqual(len(rset), 4)
-
- def test_metainformation(self):
- rset = self.sexecute('Card X ORDERBY T WHERE X title T')
- # 2 added to the system source, 2 added to the external source
- self.assertEqual(len(rset), 4)
-        # since they are ordered by eid, we know the first ones come from the
-        # system source and the others from the external source
- self.assertEqual(rset.get_entity(0, 0).cw_metainformation(),
- {'source': {'type': 'native', 'uri': 'system', 'use-cwuri-as-url': False},
- 'type': u'Card', 'extid': None})
- externent = rset.get_entity(3, 0)
- metainf = externent.cw_metainformation()
- self.assertEqual(metainf['source'], {'type': 'pyrorql', 'base-url': 'http://extern.org/', 'uri': 'extern', 'use-cwuri-as-url': False})
- self.assertEqual(metainf['type'], 'Card')
- self.assert_(metainf['extid'])
- etype = self.sexecute('Any ETN WHERE X is ET, ET name ETN, X eid %(x)s',
- {'x': externent.eid})[0][0]
- self.assertEqual(etype, 'Card')
-
- def test_order_limit_offset(self):
- rsetbase = self.sexecute('Any W,X ORDERBY W,X WHERE X wikiid W')
- self.assertEqual(len(rsetbase), 4)
- self.assertEqual(sorted(rsetbase.rows), rsetbase.rows)
- rset = self.sexecute('Any W,X ORDERBY W,X LIMIT 2 OFFSET 2 WHERE X wikiid W')
- self.assertEqual(rset.rows, rsetbase.rows[2:4])
-
- def test_has_text(self):
- self.repo.sources_by_uri['extern'].synchronize(MTIME) # in case fti_update has been run before
- self.assertTrue(self.sexecute('Any X WHERE X has_text "affref"'))
- self.assertTrue(self.sexecute('Affaire X WHERE X has_text "affref"'))
- self.assertTrue(self.sexecute('Any X ORDERBY FTIRANK(X) WHERE X has_text "affref"'))
- self.assertTrue(self.sexecute('Affaire X ORDERBY FTIRANK(X) WHERE X has_text "affref"'))
-
- def test_anon_has_text(self):
- self.repo.sources_by_uri['extern'].synchronize(MTIME) # in case fti_update has been run before
- self.sexecute('INSERT Affaire X: X ref "no readable card"')[0][0]
- aff1 = self.sexecute('INSERT Affaire X: X ref "card"')[0][0]
- # grant read access
- self.sexecute('SET X owned_by U WHERE X eid %(x)s, U login "anon"', {'x': aff1})
- self.commit()
- cnx = self.login('anon')
- cu = cnx.cursor()
- rset = cu.execute('Any X WHERE X has_text "card"')
- # 5: 4 card + 1 readable affaire
- self.assertEqual(len(rset), 5, zip(rset.rows, rset.description))
- rset = cu.execute('Any X ORDERBY FTIRANK(X) WHERE X has_text "card"')
- self.assertEqual(len(rset), 5, zip(rset.rows, rset.description))
- Connection_close(cnx.cnx) # cnx is a TestCaseConnectionProxy
-
- def test_synchronization(self):
- cu = self.cnx2.cursor()
- assert cu.execute('Any X WHERE X eid %(x)s', {'x': self.aff1})
- cu.execute('SET X ref "BLAH" WHERE X eid %(x)s', {'x': self.aff1})
- aff2 = cu.execute('INSERT Affaire X: X ref "AFFREUX"')[0][0]
- self.cnx2.commit()
- try:
- # force sync
- self.repo.sources_by_uri['extern'].synchronize(MTIME)
- self.assertTrue(self.sexecute('Any X WHERE X has_text "blah"'))
- self.assertTrue(self.sexecute('Any X WHERE X has_text "affreux"'))
- cu.execute('DELETE Affaire X WHERE X eid %(x)s', {'x': aff2})
- self.cnx2.commit()
- self.repo.sources_by_uri['extern'].synchronize(MTIME)
- rset = self.sexecute('Any X WHERE X has_text "affreux"')
- self.assertFalse(rset)
- finally:
- # restore state
- cu.execute('SET X ref "AFFREF" WHERE X eid %(x)s', {'x': self.aff1})
- self.cnx2.commit()
-
- def test_simplifiable_var(self):
- affeid = self.sexecute('Affaire X WHERE X ref "AFFREF"')[0][0]
- rset = self.sexecute('Any X,AA,AB WHERE E eid %(x)s, E in_state X, X name AA, X modification_date AB',
- {'x': affeid})
- self.assertEqual(len(rset), 1)
- self.assertEqual(rset[0][1], "pitetre")
-
- def test_simplifiable_var_2(self):
- affeid = self.sexecute('Affaire X WHERE X ref "AFFREF"')[0][0]
- rset = self.sexecute('Any E WHERE E eid %(x)s, E in_state S, NOT S name "moved"',
- {'x': affeid, 'u': self.session.user.eid})
- self.assertEqual(len(rset), 1)
-
- def test_sort_func(self):
- self.sexecute('Affaire X ORDERBY DUMB_SORT(RF) WHERE X ref RF')
-
- def test_sort_func_ambigous(self):
- self.sexecute('Any X ORDERBY DUMB_SORT(RF) WHERE X title RF')
-
- def test_in_eid(self):
- iec1 = self.repo.extid2eid(self.repo.sources_by_uri['extern'], str(self.ec1),
- 'Card', self.session)
- rset = self.sexecute('Any X WHERE X eid IN (%s, %s)' % (iec1, self.ic1))
- self.assertEqual(sorted(r[0] for r in rset.rows), sorted([iec1, self.ic1]))
-
- def test_greater_eid(self):
- rset = self.sexecute('Any X WHERE X eid > %s' % (self.ic1 - 1))
- self.assertEqual(len(rset.rows), 2) # self.ic1 and self.ic2
- cu = self.cnx2.cursor()
- ec2 = cu.execute('INSERT Card X: X title "glup"')[0][0]
- self.cnx2.commit()
- # 'X eid > something' should not trigger discovery
- rset = self.sexecute('Any X WHERE X eid > %s' % (self.ic1 - 1))
- self.assertEqual(len(rset.rows), 2)
- # trigger discovery using another query
- crset = self.sexecute('Card X WHERE X title "glup"')
- self.assertEqual(len(crset.rows), 1)
- rset = self.sexecute('Any X WHERE X eid > %s' % (self.ic1 - 1))
- self.assertEqual(len(rset.rows), 3)
- rset = self.sexecute('Any MAX(X)')
- self.assertEqual(len(rset.rows), 1)
- self.assertEqual(rset.rows[0][0], crset[0][0])
-
- def test_attr_unification_1(self):
- n1 = self.sexecute('INSERT Note X: X type "AFFREF"')[0][0]
- n2 = self.sexecute('INSERT Note X: X type "AFFREU"')[0][0]
- rset = self.sexecute('Any X,Y WHERE X is Note, Y is Affaire, X type T, Y ref T')
- self.assertEqual(len(rset), 1, rset.rows)
-
- def test_attr_unification_2(self):
- cu = self.cnx2.cursor()
- ec2 = cu.execute('INSERT Card X: X title "AFFREF"')[0][0]
- self.cnx2.commit()
- try:
- c1 = self.sexecute('INSERT Card C: C title "AFFREF"')[0][0]
- rset = self.sexecute('Any X,Y WHERE X is Card, Y is Affaire, X title T, Y ref T')
- self.assertEqual(len(rset), 2, rset.rows)
- finally:
- cu.execute('DELETE Card X WHERE X eid %(x)s', {'x': ec2})
- self.cnx2.commit()
-
- def test_attr_unification_neq_1(self):
- # XXX complete
- self.sexecute('Any X,Y WHERE X is Note, Y is Affaire, X creation_date D, Y creation_date > D')
-
- def test_attr_unification_neq_2(self):
- # XXX complete
- self.sexecute('Any X,Y WHERE X is Card, Y is Affaire, X creation_date D, Y creation_date > D')
-
- def test_union(self):
- afeids = self.sexecute('Affaire X')
- ueids = self.sexecute('CWUser X')
- rset = self.sexecute('(Any X WHERE X is Affaire) UNION (Any X WHERE X is CWUser)')
- self.assertEqual(sorted(r[0] for r in rset.rows),
- sorted(r[0] for r in afeids + ueids))
-
- def test_subquery1(self):
- rsetbase = self.sexecute('Any W,X WITH W,X BEING (Any W,X ORDERBY W,X WHERE X wikiid W)')
- self.assertEqual(len(rsetbase), 4)
- self.assertEqual(sorted(rsetbase.rows), rsetbase.rows)
- rset = self.sexecute('Any W,X LIMIT 2 OFFSET 2 WITH W,X BEING (Any W,X ORDERBY W,X WHERE X wikiid W)')
- self.assertEqual(rset.rows, rsetbase.rows[2:4])
- rset = self.sexecute('Any W,X ORDERBY W,X LIMIT 2 OFFSET 2 WITH W,X BEING (Any W,X WHERE X wikiid W)')
- self.assertEqual(rset.rows, rsetbase.rows[2:4])
- rset = self.sexecute('Any W,X WITH W,X BEING (Any W,X ORDERBY W,X LIMIT 2 OFFSET 2 WHERE X wikiid W)')
- self.assertEqual(rset.rows, rsetbase.rows[2:4])
-
- def test_subquery2(self):
- affeid = self.sexecute('Affaire X WHERE X ref "AFFREF"')[0][0]
- rset = self.sexecute('Any X,AA,AB WITH X,AA,AB BEING (Any X,AA,AB WHERE E eid %(x)s, E in_state X, X name AA, X modification_date AB)',
- {'x': affeid})
- self.assertEqual(len(rset), 1)
- self.assertEqual(rset[0][1], "pitetre")
-
- def test_not_relation(self):
- states = set(tuple(x) for x in self.sexecute('Any S,SN WHERE S is State, S name SN'))
- userstate = self.session.user.in_state[0]
- states.remove((userstate.eid, userstate.name))
- notstates = set(tuple(x) for x in self.sexecute('Any S,SN WHERE S is State, S name SN, NOT X in_state S, X eid %(x)s',
- {'x': self.session.user.eid}))
- self.assertSetEqual(notstates, states)
- aff1 = self.sexecute('Any X WHERE X is Affaire, X ref "AFFREF"')[0][0]
- aff1stateeid, aff1statename = self.sexecute('Any S,SN WHERE X eid %(x)s, X in_state S, S name SN', {'x': aff1})[0]
- self.assertEqual(aff1statename, 'pitetre')
- states.add((userstate.eid, userstate.name))
- states.remove((aff1stateeid, aff1statename))
- notstates = set(tuple(x) for x in self.sexecute('Any S,SN WHERE S is State, S name SN, NOT X in_state S, X eid %(x)s',
- {'x': aff1}))
- self.assertSetEqual(notstates, states)
-
- def test_absolute_url_base_url(self):
- cu = self.cnx2.cursor()
- ceid = cu.execute('INSERT Card X: X title "without wikiid to get eid based url"')[0][0]
- self.cnx2.commit()
- lc = self.sexecute('Card X WHERE X title "without wikiid to get eid based url"').get_entity(0, 0)
- self.assertEqual(lc.absolute_url(), 'http://extern.org/%s' % ceid)
- cu.execute('DELETE Card X WHERE X eid %(x)s', {'x':ceid})
- self.cnx2.commit()
-
- def test_absolute_url_no_base_url(self):
- cu = self.cnx3.cursor()
- ceid = cu.execute('INSERT Card X: X title "without wikiid to get eid based url"')[0][0]
- self.cnx3.commit()
- lc = self.sexecute('Card X WHERE X title "without wikiid to get eid based url"').get_entity(0, 0)
- self.assertEqual(lc.absolute_url(), 'http://testing.fr/cubicweb/%s' % lc.eid)
- cu.execute('DELETE Card X WHERE X eid %(x)s', {'x':ceid})
- self.cnx3.commit()
-
- def test_crossed_relation_noeid_needattr(self):
- """http://www.cubicweb.org/ticket/1382452"""
- aff1 = self.sexecute('INSERT Affaire X: X ref "AFFREF"')[0][0]
- # link within extern source
- ec1 = self.sexecute('Card X WHERE X wikiid "zzz"')[0][0]
-        self.sexecute('SET A documented_by C WHERE A eid %(a)s, C eid %(c)s',
- {'a': aff1, 'c': ec1})
- # link from system to extern source
-        self.sexecute('SET A documented_by C WHERE A eid %(a)s, C eid %(c)s',
- {'a': aff1, 'c': self.ic2})
- rset = self.sexecute('DISTINCT Any DEP WHERE P ref "AFFREF", P documented_by DEP, DEP wikiid LIKE "z%"')
- self.assertEqual(sorted(rset.rows), [[ec1], [self.ic2]])
-
- def test_nonregr1(self):
- ueid = self.session.user.eid
- affaire = self.sexecute('Affaire X WHERE X ref "AFFREF"').get_entity(0, 0)
- self.sexecute('Any U WHERE U in_group G, (G name IN ("managers", "logilab") OR (X require_permission P?, P name "bla", P require_group G)), X eid %(x)s, U eid %(u)s',
- {'x': affaire.eid, 'u': ueid})
-
- def test_nonregr2(self):
- iworkflowable = self.session.user.cw_adapt_to('IWorkflowable')
- iworkflowable.fire_transition('deactivate')
- treid = iworkflowable.latest_trinfo().eid
- rset = self.sexecute('Any X ORDERBY D DESC WHERE E eid %(x)s, E wf_info_for X, X modification_date D',
- {'x': treid})
- self.assertEqual(len(rset), 1)
- self.assertEqual(rset.rows[0], [self.session.user.eid])
-
- def test_nonregr3(self):
- self.sexecute('DELETE Card X WHERE X eid %(x)s, NOT X multisource_inlined_rel Y', {'x': self.ic1})
-
- def test_nonregr4(self):
- self.sexecute('Any X,S,U WHERE X in_state S, X todo_by U')
-
- def test_delete_source(self):
- req = self.request()
- req.execute('DELETE CWSource S WHERE S name "extern"')
- self.commit()
- cu = self.session.system_sql("SELECT * FROM entities WHERE source='extern'")
- self.assertFalse(cu.fetchall())
-
-if __name__ == '__main__':
- from logilab.common.testlib import unittest_main
- unittest_main()
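
The setUpClass above hi-jacks PyroRQLSource.get_connection and Connection.close
at class scope and restores the saved originals at teardown. A minimal sketch
of that save/patch/restore idiom, using an illustrative FakeSource class rather
than the real CubicWeb types:

    class FakeSource(object):
        def get_connection(self):
            return 'real connection'

    _orig = FakeSource.get_connection          # keep a reference to the original
    FakeSource.get_connection = lambda self: 'patched connection'
    try:
        assert FakeSource().get_connection() == 'patched connection'
    finally:
        FakeSource.get_connection = _orig      # always restore the class attribute
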
--- a/server/test/unittest_postgres.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/test/unittest_postgres.py Mon Feb 17 15:32:50 2014 +0100
@@ -16,27 +16,19 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-import socket
from datetime import datetime
from logilab.common.testlib import SkipTest
-from cubicweb.devtools import ApptestConfiguration
+from cubicweb.devtools import PostgresApptestConfiguration
from cubicweb.devtools.testlib import CubicWebTC
from cubicweb.predicates import is_instance
from cubicweb.entities.adapters import IFTIndexableAdapter
-AT_LOGILAB = socket.gethostname().endswith('.logilab.fr') # XXX
-
from unittest_querier import FixedOffset
class PostgresFTITC(CubicWebTC):
- @classmethod
- def setUpClass(cls):
- if not AT_LOGILAB: # XXX here until we can raise SkipTest in setUp to detect we can't connect to the db
- raise SkipTest('XXX %s: require logilab configuration' % cls.__name__)
- cls.config = ApptestConfiguration('data', sourcefile='sources_postgres',
- apphome=cls.datadir)
+ configcls = PostgresApptestConfiguration
def test_occurence_count(self):
req = self.request()
@@ -48,7 +40,7 @@
content=u'cubicweb cubicweb')
self.commit()
self.assertEqual(req.execute('Card X ORDERBY FTIRANK(X) DESC WHERE X has_text "cubicweb"').rows,
- [(c1.eid,), (c3.eid,), (c2.eid,)])
+ [[c1.eid,], [c3.eid,], [c2.eid,]])
def test_attr_weight(self):
@@ -65,7 +57,7 @@
content=u'autre chose')
self.commit()
self.assertEqual(req.execute('Card X ORDERBY FTIRANK(X) DESC WHERE X has_text "cubicweb"').rows,
- [(c3.eid,), (c1.eid,), (c2.eid,)])
+ [[c3.eid,], [c1.eid,], [c2.eid,]])
def test_entity_weight(self):
class PersonneIFTIndexableAdapter(IFTIndexableAdapter):
@@ -78,7 +70,7 @@
c3 = req.create_entity('Comment', content=u'cubicweb cubicweb cubicweb', comments=c1)
self.commit()
self.assertEqual(req.execute('Any X ORDERBY FTIRANK(X) DESC WHERE X has_text "cubicweb"').rows,
- [(c1.eid,), (c3.eid,), (c2.eid,)])
+ [[c1.eid,], [c3.eid,], [c2.eid,]])
def test_tz_datetime(self):
--- a/server/test/unittest_querier.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/test/unittest_querier.py Mon Feb 17 15:32:50 2014 +0100
@@ -27,7 +27,6 @@
from cubicweb import QueryError, Unauthorized, Binary
from cubicweb.server.sqlutils import SQL_PREFIX
from cubicweb.server.utils import crypt_password
-from cubicweb.server.sources.native import make_schema
from cubicweb.server.querier import manual_build_descr, _make_description
from cubicweb.devtools import get_test_db_handler, TestServerConfiguration
from cubicweb.devtools.testlib import CubicWebTC
@@ -60,17 +59,6 @@
SQL_CONNECT_HOOKS['sqlite'].append(init_sqlite_connexion)
-from logilab.database import _GenericAdvFuncHelper
-TYPEMAP = _GenericAdvFuncHelper.TYPE_MAPPING
-
-class MakeSchemaTC(TestCase):
- def test_known_values(self):
- solution = {'A': 'String', 'B': 'CWUser'}
- self.assertEqual(make_schema((Variable('A'), Variable('B')), solution,
- 'table0', TYPEMAP),
- ('C0 text,C1 integer', {'A': 'table0.C0', 'B': 'table0.C1'}))
-
-
def setUpClass(cls, *args):
global repo, cnx
config = TestServerConfiguration(apphome=UtilsTC.datadir)
@@ -139,7 +127,7 @@
def test_preprocess_security(self):
plan = self._prepare_plan('Any ETN,COUNT(X) GROUPBY ETN '
'WHERE X is ET, ET name ETN')
- plan.session = self.user_groups_session('users')
+ plan.cnx = self.user_groups_session('users')
union = plan.rqlst
plan.preprocess(union)
self.assertEqual(len(union.children), 1)
@@ -222,7 +210,7 @@
def test_preprocess_security_aggregat(self):
plan = self._prepare_plan('Any MAX(X)')
- plan.session = self.user_groups_session('users')
+ plan.cnx = self.user_groups_session('users')
union = plan.rqlst
plan.preprocess(union)
self.assertEqual(len(union.children), 1)
@@ -1169,7 +1157,7 @@
#'INSERT Email X: X messageid "<1234>", X subject "test", X sender Y, X recipients Y'
eeid, = self.execute('INSERT Email X: X messageid "<1234>", X subject "test", X sender Y, X recipients Y WHERE Y is EmailAddress')[0]
self.execute("DELETE Email X")
- sqlc = self.session.cnxset['system']
+ sqlc = self.session.cnxset.cu
sqlc.execute('SELECT * FROM recipients_relation')
self.assertEqual(len(sqlc.fetchall()), 0)
sqlc.execute('SELECT * FROM owned_by_relation WHERE eid_from=%s'%eeid)
@@ -1310,7 +1298,7 @@
self.assertEqual(rset.description, [('CWUser',)])
self.assertRaises(Unauthorized,
self.execute, "Any P WHERE X is CWUser, X login 'bob', X upassword P")
- cursor = self.cnxset['system']
+ cursor = self.cnxset.cu
cursor.execute("SELECT %supassword from %sCWUser WHERE %slogin='bob'"
% (SQL_PREFIX, SQL_PREFIX, SQL_PREFIX))
passwd = str(cursor.fetchone()[0])
@@ -1325,7 +1313,7 @@
self.assertEqual(rset.description[0][0], 'CWUser')
rset = self.execute("SET X upassword %(pwd)s WHERE X is CWUser, X login 'bob'",
{'pwd': 'tutu'})
- cursor = self.cnxset['system']
+ cursor = self.cnxset.cu
cursor.execute("SELECT %supassword from %sCWUser WHERE %slogin='bob'"
% (SQL_PREFIX, SQL_PREFIX, SQL_PREFIX))
passwd = str(cursor.fetchone()[0])
--- a/server/test/unittest_repository.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/test/unittest_repository.py Mon Feb 17 15:32:50 2014 +0100
@@ -424,12 +424,8 @@
cnxid = repo.connect(self.admlogin, password=self.admpassword)
session = repo._get_session(cnxid, setcnxset=True)
self.assertEqual(repo.type_and_source_from_eid(2, session),
- ('CWGroup', 'system', None, 'system'))
+ ('CWGroup', None, 'system'))
self.assertEqual(repo.type_from_eid(2, session), 'CWGroup')
- self.assertEqual(repo.source_from_eid(2, session).uri, 'system')
- self.assertEqual(repo.eid2extid(repo.system_source, 2, session), None)
- class dummysource: uri = 'toto'
- self.assertRaises(UnknownEid, repo.eid2extid, dummysource, 2, session)
repo.close(cnxid)
def test_public_api(self):
@@ -445,7 +441,9 @@
repo = self.repo
cnxid = repo.connect(self.admlogin, password=self.admpassword)
self.assertEqual(repo.user_info(cnxid), (6, 'admin', set([u'managers']), {}))
- self.assertEqual(repo.describe(cnxid, 2), (u'CWGroup', u'system', None, 'system'))
+ self.assertEqual({'type': u'CWGroup', 'extid': None, 'source': 'system'},
+ repo.entity_metas(cnxid, 2))
+ self.assertEqual(repo.describe(cnxid, 2), (u'CWGroup', 'system', None, 'system'))
repo.close(cnxid)
self.assertRaises(BadConnectionId, repo.user_info, cnxid)
self.assertRaises(BadConnectionId, repo.describe, cnxid, 1)
@@ -670,15 +668,6 @@
self.session.set_cnxset()
self.assert_(self.repo.system_source.create_eid(self.session))
- def test_source_from_eid(self):
- self.session.set_cnxset()
- self.assertEqual(self.repo.source_from_eid(1, self.session),
- self.repo.sources_by_uri['system'])
-
- def test_source_from_eid_raise(self):
- self.session.set_cnxset()
- self.assertRaises(UnknownEid, self.repo.source_from_eid, -2, self.session)
-
def test_type_from_eid(self):
self.session.set_cnxset()
self.assertEqual(self.repo.type_from_eid(2, self.session), 'CWGroup')
@@ -695,12 +684,8 @@
self.repo.add_info(self.session, entity, self.repo.system_source)
cu = self.session.system_sql('SELECT * FROM entities WHERE eid = -1')
data = cu.fetchall()
- self.assertIsInstance(data[0][4], datetime)
- data[0] = list(data[0])
- data[0][4] = None
- self.assertEqual(tuplify(data), [(-1, 'Personne', 'system', 'system',
- None, None)])
- self.repo.delete_info(self.session, entity, 'system', None)
+ self.assertEqual(tuplify(data), [(-1, 'Personne', 'system', None)])
+ self.repo.delete_info(self.session, entity, 'system')
#self.repo.commit()
cu = self.session.system_sql('SELECT * FROM entities WHERE eid = -1')
data = cu.fetchall()
@@ -709,38 +694,6 @@
class FTITC(CubicWebTC):
- def test_reindex_and_modified_since(self):
- self.repo.system_source.multisources_etypes.add('Personne')
- eidp = self.execute('INSERT Personne X: X nom "toto", X prenom "tutu"')[0][0]
- self.commit()
- ts = datetime.now()
- self.assertEqual(len(self.execute('Personne X WHERE X has_text "tutu"')), 1)
- self.session.set_cnxset()
- cu = self.session.system_sql('SELECT mtime, eid FROM entities WHERE eid = %s' % eidp)
- omtime = cu.fetchone()[0]
-        # our sqlite datetime adapter ignores the seconds fraction, so we have
-        # to ensure the update is done during the next second
- time.sleep(1 - (ts.second - int(ts.second)))
- self.execute('SET X nom "tata" WHERE X eid %(x)s', {'x': eidp})
- self.commit()
- self.assertEqual(len(self.execute('Personne X WHERE X has_text "tutu"')), 1)
- self.session.set_cnxset()
- cu = self.session.system_sql('SELECT mtime FROM entities WHERE eid = %s' % eidp)
- mtime = cu.fetchone()[0]
- self.assertTrue(omtime < mtime)
- self.commit()
- date, modified, deleted = self.repo.entities_modified_since(('Personne',), omtime)
- self.assertEqual(modified, [('Personne', eidp)])
- self.assertEqual(deleted, [])
- date, modified, deleted = self.repo.entities_modified_since(('Personne',), mtime)
- self.assertEqual(modified, [])
- self.assertEqual(deleted, [])
- self.execute('DELETE Personne X WHERE X eid %(x)s', {'x': eidp})
- self.commit()
- date, modified, deleted = self.repo.entities_modified_since(('Personne',), omtime)
- self.assertEqual(modified, [])
- self.assertEqual(deleted, [('Personne', eidp)])
-
def test_fulltext_container_entity(self):
assert self.schema.rschema('use_email').fulltext_container == 'subject'
req = self.request()
--- a/server/test/unittest_schemaserial.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/test/unittest_schemaserial.py Mon Feb 17 15:32:50 2014 +0100
@@ -82,10 +82,6 @@
self.assertListEqual([('SET X specializes ET WHERE X eid %(x)s, ET eid %(et)s',
{'et': None, 'x': None}),
('SET X specializes ET WHERE X eid %(x)s, ET eid %(et)s',
- {'et': None, 'x': None}),
- ('SET X specializes ET WHERE X eid %(x)s, ET eid %(et)s',
- {'et': None, 'x': None}),
- ('SET X specializes ET WHERE X eid %(x)s, ET eid %(et)s',
{'et': None, 'x': None})],
sorted(specialize2rql(schema)))
@@ -184,7 +180,7 @@
'extra_props': '{"jungle_speed": 42}',
'indexed': False,
'oe': None,
- 'ordernum': 19,
+ 'ordernum': 4,
'rt': None,
'se': None})]
--- a/server/test/unittest_session.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/test/unittest_session.py Mon Feb 17 15:32:50 2014 +0100
@@ -17,7 +17,7 @@
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
from cubicweb.devtools.testlib import CubicWebTC
-from cubicweb.server.session import HOOKS_ALLOW_ALL, HOOKS_DENY_ALL
+from cubicweb.server.session import HOOKS_ALLOW_ALL, HOOKS_DENY_ALL, Connection
class InternalSessionTC(CubicWebTC):
def test_dbapi_query(self):
@@ -39,10 +39,16 @@
def test_hooks_control(self):
session = self.session
+        # this test checks the "old" behavior of session with automatic connection management
+        # close the default cnx, we do not want it to interfere with the test
+ self.cnx.close()
+ # open a dedicated one
+ session.set_cnx('Some-random-cnx-unrelated-to-the-default-one')
+ # go test go
self.assertEqual(HOOKS_ALLOW_ALL, session.hooks_mode)
self.assertEqual(set(), session.disabled_hook_categories)
self.assertEqual(set(), session.enabled_hook_categories)
- self.assertEqual(1, len(session._txs))
+ self.assertEqual(1, len(session._cnxs))
with session.deny_all_hooks_but('metadata'):
self.assertEqual(HOOKS_DENY_ALL, session.hooks_mode)
self.assertEqual(set(), session.disabled_hook_categories)
@@ -64,12 +70,35 @@
self.assertEqual(set(('metadata',)), session.enabled_hook_categories)
# leaving context manager with no transaction running should reset the
# transaction local storage (and associated cnxset)
- self.assertEqual({}, session._txs)
+ self.assertEqual({}, session._cnxs)
self.assertEqual(None, session.cnxset)
self.assertEqual(HOOKS_ALLOW_ALL, session.hooks_mode, session.HOOKS_ALLOW_ALL)
self.assertEqual(set(), session.disabled_hook_categories)
self.assertEqual(set(), session.enabled_hook_categories)
+    def test_explicit_connection(self):
+ with self.session.new_cnx() as cnx:
+ rset = cnx.execute('Any X LIMIT 1 WHERE X is CWUser')
+ self.assertEqual(1, len(rset))
+ user = rset.get_entity(0, 0)
+ user.cw_delete()
+ cnx.rollback()
+ new_user = cnx.entity_from_eid(user.eid)
+ self.assertIsNotNone(new_user.login)
+ self.assertFalse(cnx._open)
+
+ def test_internal_cnx(self):
+ with self.repo.internal_cnx() as cnx:
+ rset = cnx.execute('Any X LIMIT 1 WHERE X is CWUser')
+ self.assertEqual(1, len(rset))
+ user = rset.get_entity(0, 0)
+ user.cw_delete()
+ cnx.rollback()
+ new_user = cnx.entity_from_eid(user.eid)
+ self.assertIsNotNone(new_user.login)
+ self.assertFalse(cnx._open)
+
+
if __name__ == '__main__':
from logilab.common.testlib import unittest_main
--- a/server/test/unittest_ssplanner.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/test/unittest_ssplanner.py Mon Feb 17 15:32:50 2014 +0100
@@ -51,8 +51,7 @@
[{'X': 'Basket', 'XN': 'String'},
{'X': 'State', 'XN': 'String'},
{'X': 'Folder', 'XN': 'String'}])],
- None, None,
- [self.system], None, [])])
+ None, [])])
def test_groupeded_ambigous_sol(self):
self._test('Any XN,COUNT(X) GROUPBY XN WHERE X name XN, X is IN (Basket, State, Folder)',
@@ -60,8 +59,7 @@
[{'X': 'Basket', 'XN': 'String'},
{'X': 'State', 'XN': 'String'},
{'X': 'Folder', 'XN': 'String'}])],
- None, None,
- [self.system], None, [])])
+ None, [])])
if __name__ == '__main__':
from logilab.common.testlib import unittest_main
--- a/server/test/unittest_undo.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/test/unittest_undo.py Mon Feb 17 15:32:50 2014 +0100
@@ -20,7 +20,7 @@
from cubicweb import ValidationError
from cubicweb.devtools.testlib import CubicWebTC
import cubicweb.server.session
-from cubicweb.server.session import Transaction as OldTransaction
+from cubicweb.server.session import Connection as OldConnection
from cubicweb.transaction import *
from cubicweb.server.sources.native import UndoTransactionException, _UndoException
@@ -35,14 +35,14 @@
self.txuuid = self.commit()
def setUp(self):
- class Transaction(OldTransaction):
+ class Connection(OldConnection):
"""Force undo feature to be turned on in all case"""
undo_actions = property(lambda tx: True, lambda x, y:None)
- cubicweb.server.session.Transaction = Transaction
+ cubicweb.server.session.Connection = Connection
super(UndoableTransactionTC, self).setUp()
def tearDown(self):
- cubicweb.server.session.Transaction = OldTransaction
+ cubicweb.server.session.Connection = OldConnection
self.restore_connection()
self.session.undo_support = set()
super(UndoableTransactionTC, self).tearDown()
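
The Connection subclass defined in setUp above replaces the undo_actions
attribute with a class-level property whose setter is a no-op, so the undo
feature reads as enabled no matter what the framework assigns. A small
self-contained sketch of that trick, with illustrative names:

    class Base(object):
        def __init__(self):
            self.undo_actions = False    # normally a plain, writable flag

    class AlwaysUndo(Base):
        # data descriptor: getter always returns True, setter silently drops writes
        undo_actions = property(lambda self: True, lambda self, value: None)

    obj = AlwaysUndo()
    obj.undo_actions = False             # intercepted by the no-op setter
    assert obj.undo_actions is True
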
--- a/server/utils.py Mon Feb 17 11:13:27 2014 +0100
+++ b/server/utils.py Mon Feb 17 15:32:50 2014 +0100
@@ -72,24 +72,6 @@
# wrong password
return ''
-def cartesian_product(seqin):
- """returns a generator which returns the cartesian product of `seqin`
-
- for more details, see :
- http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/302478
- """
- def rloop(seqin, comb):
- """recursive looping function"""
- if seqin: # any more sequences to process?
- for item in seqin[0]:
- newcomb = comb + [item] # add next item to current combination
- # call rloop w/ remaining seqs, newcomb
- for item in rloop(seqin[1:], newcomb):
- yield item # seqs and newcomb
- else: # processing last sequence
- yield comb # comb finished, add to list
- return rloop(seqin, [])
-
def eschema_eid(session, eschema):
"""get eid of the CWEType entity for the given yams type. You should use
@@ -126,7 +108,7 @@
return user, passwd
-_MARKER=object()
+_MARKER = object()
def func_name(func):
name = getattr(func, '__name__', _MARKER)
if name is _MARKER:
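
The cartesian_product helper deleted above reimplements what the standard
library already provides: itertools.product yields the same combinations in
the same order (as tuples rather than lists), which is presumably why the
recipe could be dropped:

    from itertools import product

    seqs = [(1, 2), ('a', 'b')]
    combos = [list(c) for c in product(*seqs)]
    assert combos == [[1, 'a'], [1, 'b'], [2, 'a'], [2, 'b']]
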
--- a/sobjects/cwxmlparser.py Mon Feb 17 11:13:27 2014 +0100
+++ b/sobjects/cwxmlparser.py Mon Feb 17 15:32:50 2014 +0100
@@ -31,8 +31,7 @@
"""
-from datetime import datetime, timedelta, time
-from urllib import urlencode
+from datetime import datetime, time
from cgi import parse_qs # in urlparse with python >= 2.6
from logilab.common.date import todate, totime
@@ -57,7 +56,7 @@
DEFAULT_CONVERTERS['Date'] = convert_date
def convert_datetime(ustr):
if '.' in ustr: # assume %Y-%m-%d %H:%M:%S.mmmmmm
- ustr = ustr.split('.',1)[0]
+ ustr = ustr.split('.', 1)[0]
return datetime.strptime(ustr, '%Y-%m-%d %H:%M:%S')
DEFAULT_CONVERTERS['Datetime'] = convert_datetime
# XXX handle timezone, though this will be enough as TZDatetime are
@@ -169,7 +168,7 @@
ttype = schemacfg.schema.stype.name
etyperules = self.source.mapping.setdefault(etype, {})
etyperules.setdefault((rtype, role, action), []).append(
- (ttype, options) )
+ (ttype, options))
self.source.mapping_idx[schemacfg.eid] = (
etype, rtype, role, action, ttype)
@@ -204,7 +203,7 @@
* `rels` is for relations and structured as
{role: {relation: [(related item, related rels)...]}
"""
- entity = self.extid2entity(str(item['cwuri']), item['cwtype'],
+ entity = self.extid2entity(str(item['cwuri']), item['cwtype'],
cwsource=item['cwsource'], item=item)
if entity is None:
return None
@@ -432,7 +431,7 @@
self._related_link(ttype, others, searchattrs)
def _related_link(self, ttype, others, searchattrs):
- def issubset(x,y):
+ def issubset(x, y):
return all(z in y for z in x)
eids = [] # local eids
log = self.parser.import_log
--- a/sobjects/notification.py Mon Feb 17 11:13:27 2014 +0100
+++ b/sobjects/notification.py Mon Feb 17 15:32:50 2014 +0100
@@ -30,7 +30,7 @@
from cubicweb.view import Component, EntityView
from cubicweb.server.hook import SendMailOp
from cubicweb.mail import construct_message_id, format_mail
-from cubicweb.server.session import Session
+from cubicweb.server.session import Session, InternalManager
class RecipientsFinder(Component):
@@ -115,20 +115,20 @@
msgid = None
req = self._cw
self.user_data = req.user_data()
- origlang = req.lang
for something in recipients:
- if isinstance(something, Entity):
- # hi-jack self._cw to get a session for the returned user
- self._cw = Session(something, self._cw.repo)
- self._cw.set_cnxset()
+ if isinstance(something, tuple):
+ emailaddr, lang = something
+ user = InternalManager(lang=lang)
+ else:
emailaddr = something.cw_adapt_to('IEmailable').get_email()
- else:
- emailaddr, lang = something
- self._cw.set_language(lang)
- # since the same view (eg self) may be called multiple time and we
- # need a fresh stream at each iteration, reset it explicitly
- self.w = None
+ user = something
+ # hi-jack self._cw to get a session for the returned user
+ self._cw = Session(user, self._cw.repo)
try:
+ self._cw.set_cnxset()
+                # since the same view (eg self) may be called multiple times and we
+                # need a fresh stream at each iteration, reset it explicitly
+ self.w = None
# XXX call render before subject to set .row/.col attributes on the
# view
try:
@@ -145,25 +145,16 @@
msg = format_mail(self.user_data, [emailaddr], content, subject,
config=self._cw.vreg.config, msgid=msgid, references=refs)
yield [emailaddr], msg
- except:
- if isinstance(something, Entity):
- self._cw.rollback()
- raise
- else:
- if isinstance(something, Entity):
- self._cw.commit()
finally:
- if isinstance(something, Entity):
- self._cw.close()
- self._cw = req
- # restore language
- req.set_language(origlang)
+ self._cw.commit()
+ self._cw.close()
+ self._cw = req
# recipients / email sending ###############################################
def recipients(self):
"""return a list of either 2-uple (email, language) or user entity to
- who this email should be sent
+ whom this email should be sent
"""
finder = self._cw.vreg['components'].select(
'recipients_finder', self._cw, rset=self.cw_rset,
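
The rewritten loop above normalizes each recipient before hi-jacking self._cw:
an (email, language) tuple becomes an InternalManager carrying the language,
while a user entity provides its address through its IEmailable adapter. A
hedged, self-contained sketch of that normalization, with FakeUser standing in
for both kinds of user object:

    class FakeUser(object):
        def __init__(self, email, lang='en'):
            self.email, self.lang = email, lang
        def get_email(self):
            return self.email

    def normalize(recipient):
        # reduce either accepted form to an (email address, user) pair
        if isinstance(recipient, tuple):
            emailaddr, lang = recipient
            return emailaddr, FakeUser(emailaddr, lang)
        return recipient.get_email(), recipient

    assert normalize(('jane@example.org', 'fr'))[0] == 'jane@example.org'
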
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/sobjects/services.py Mon Feb 17 15:32:50 2014 +0100
@@ -0,0 +1,102 @@
+# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""Define server side service provided by cubicweb"""
+
+import threading
+
+from cubicweb.server import Service
+from cubicweb.predicates import match_user_groups
+
+class StatsService(Service):
+ """Return a dictionary containing some statistics about the repository
+ resources usage.
+ """
+
+ __regid__ = 'repo_stats'
+ __select__ = match_user_groups('managers')
+
+ def call(self):
+ repo = self._cw.repo # Services are repo-side only.
+ results = {}
+ querier = repo.querier
+ source = repo.system_source
+ for size, maxsize, hits, misses, title in (
+ (len(querier._rql_cache), repo.config['rql-cache-size'],
+ querier.cache_hit, querier.cache_miss, 'rqlt_st'),
+ (len(source._cache), repo.config['rql-cache-size'],
+ source.cache_hit, source.cache_miss, 'sql'),
+ ):
+ results['%s_cache_size' % title] = '%s / %s' % (size, maxsize)
+ results['%s_cache_hit' % title] = hits
+ results['%s_cache_miss' % title] = misses
+ # guard against a cold cache (hits + misses == 0)
+ results['%s_cache_hit_percent' % title] = (hits * 100) / ((hits + misses) or 1)
+ results['type_source_cache_size'] = len(repo._type_source_cache)
+ results['extid_cache_size'] = len(repo._extid_cache)
+ results['sql_no_cache'] = repo.system_source.no_cache
+ results['nb_open_sessions'] = len(repo._sessions)
+ results['nb_active_threads'] = threading.activeCount()
+ looping_tasks = repo._tasks_manager._looping_tasks
+ results['looping_tasks'] = ', '.join(str(t) for t in looping_tasks)
+ results['available_cnxsets'] = repo._cnxsets_pool.qsize()
+ results['threads'] = ', '.join(sorted(str(t) for t in threading.enumerate()))
+ return results
+
+class GcStatsService(Service):
+ """Return a dictionary containing some statistics about the repository
+ resources usage.
+ """
+
+ __regid__ = 'repo_gc_stats'
+ __select__ = match_user_groups('managers')
+
+ def call(self, nmax=20):
+ """Return a dictionary containing some statistics about the repository
+ memory usage.
+
+ This is a public method, not requiring a session id.
+
+ nmax is the max number of (most) referenced objects returned as
+ the 'referenced' result
+ """
+
+ from cubicweb._gcdebug import gc_info
+ from cubicweb.appobject import AppObject
+ from cubicweb.rset import ResultSet
+ from cubicweb.dbapi import Connection, Cursor
+ from cubicweb.web.request import CubicWebRequestBase
+ from rql.stmts import Union
+
+ lookupclasses = (AppObject,
+ Union, ResultSet,
+ Connection, Cursor,
+ CubicWebRequestBase)
+ try:
+ from cubicweb.server.session import Session, InternalSession
+ lookupclasses += (InternalSession, Session)
+ except ImportError:
+ pass # no server part installed
+
+ results = {}
+ counters, ocounters, garbage = gc_info(lookupclasses,
+ viewreferrersclasses=())
+ values = sorted(counters.iteritems(), key=lambda x: x[1], reverse=True)
+ results['lookupclasses'] = values
+ values = sorted(ocounters.iteritems(), key=lambda x: x[1], reverse=True)[:nmax]
+ results['referenced'] = values
+ results['unreachable'] = len(garbage)
+ return results
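
The two services above are the replacement for calling `repo.stats()` directly (see the `web/views/debug.py` and `web/views/management.py` hunks further down): callers go through `call_service`, and the `match_user_groups('managers')` selector enforces access control at selection time. A usage sketch, assuming a connection or request object that proxies `call_service` as wired later in this changeset:

    stats = cnx.call_service('repo_stats')
    print 'open sessions:', stats['nb_open_sessions']

    gc_stats = cnx.call_service('repo_gc_stats', nmax=10)
    print 'unreachable objects:', gc_stats['unreachable']
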
--- a/sobjects/supervising.py Mon Feb 17 11:13:27 2014 +0100
+++ b/sobjects/supervising.py Mon Feb 17 15:32:50 2014 +0100
@@ -145,7 +145,7 @@
session = self._cw
def describe(eid):
try:
- return session._(session.describe(eid)[0]).lower()
+ return session._(session.entity_metas(eid)['type']).lower()
except UnknownEid:
# may occurs when an entity has been deleted from an external
# source and we're cleaning its relation
--- a/test/unittest_dbapi.py Mon Feb 17 11:13:27 2014 +0100
+++ b/test/unittest_dbapi.py Mon Feb 17 15:32:50 2014 +0100
@@ -22,41 +22,43 @@
from logilab.common import tempattr
from cubicweb import ConnectionError, cwconfig, NoSelectableObject
-from cubicweb.dbapi import ProgrammingError
+from cubicweb.dbapi import ProgrammingError, _repo_connect
from cubicweb.devtools.testlib import CubicWebTC
class DBAPITC(CubicWebTC):
def test_public_repo_api(self):
- cnx = self.login('anon')
+ cnx = _repo_connect(self.repo, login='anon', password='anon')
self.assertEqual(cnx.get_schema(), self.repo.schema)
self.assertEqual(cnx.source_defs(), {'system': {'type': 'native', 'uri': 'system',
'use-cwuri-as-url': False}})
- self.restore_connection() # proper way to close cnx
+ cnx.close()
self.assertRaises(ProgrammingError, cnx.get_schema)
self.assertRaises(ProgrammingError, cnx.source_defs)
def test_db_api(self):
- cnx = self.login('anon')
+ cnx = _repo_connect(self.repo, login='anon', password='anon')
self.assertEqual(cnx.rollback(), None)
self.assertEqual(cnx.commit(), None)
- self.restore_connection() # proper way to close cnx
- #self.assertEqual(cnx.close(), None)
+ cnx.close()
self.assertRaises(ProgrammingError, cnx.rollback)
self.assertRaises(ProgrammingError, cnx.commit)
self.assertRaises(ProgrammingError, cnx.close)
def test_api(self):
- cnx = self.login('anon')
+ cnx = _repo_connect(self.repo, login='anon', password='anon')
self.assertEqual(cnx.user(None).login, 'anon')
+ self.assertEqual({'type': u'CWSource', 'source': u'system', 'extid': None},
+ cnx.entity_metas(1))
self.assertEqual(cnx.describe(1), (u'CWSource', u'system', None))
- self.restore_connection() # proper way to close cnx
+ cnx.close()
self.assertRaises(ProgrammingError, cnx.user, None)
+ self.assertRaises(ProgrammingError, cnx.entity_metas, 1)
self.assertRaises(ProgrammingError, cnx.describe, 1)
def test_shared_data_api(self):
- cnx = self.login('anon')
+ cnx = _repo_connect(self.repo, login='anon', password='anon')
self.assertEqual(cnx.get_shared_data('data'), None)
cnx.set_shared_data('data', 4)
self.assertEqual(cnx.get_shared_data('data'), 4)
@@ -65,16 +67,17 @@
self.assertEqual(cnx.get_shared_data('data'), None)
cnx.set_shared_data('data', 4)
self.assertEqual(cnx.get_shared_data('data'), 4)
- self.restore_connection() # proper way to close cnx
+ cnx.close()
self.assertRaises(ProgrammingError, cnx.check)
self.assertRaises(ProgrammingError, cnx.set_shared_data, 'data', 0)
self.assertRaises(ProgrammingError, cnx.get_shared_data, 'data')
def test_web_compatible_request(self):
config = cwconfig.CubicWebNoAppConfiguration()
- with tempattr(self.cnx.vreg, 'config', config):
- self.cnx.use_web_compatible_requests('http://perdu.com')
- req = self.cnx.request()
+ cnx = _repo_connect(self.repo, login='admin', password='gingkow')
+ with tempattr(cnx.vreg, 'config', config):
+ cnx.use_web_compatible_requests('http://perdu.com')
+ req = cnx.request()
self.assertEqual(req.base_url(), 'http://perdu.com')
self.assertEqual(req.from_controller(), 'view')
self.assertEqual(req.relative_path(), '')
--- a/test/unittest_migration.py Mon Feb 17 11:13:27 2014 +0100
+++ b/test/unittest_migration.py Mon Feb 17 15:32:50 2014 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -99,7 +99,7 @@
def test_db_creation(self):
"""make sure database can be created"""
config = ApptestConfiguration('data', apphome=self.datadir)
- source = config.sources()['system']
+ source = config.system_source_config
self.assertEqual(source['db-driver'], 'sqlite')
handler = get_test_db_handler(config)
handler.init_test_database()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/unittest_repoapi.py Mon Feb 17 15:32:50 2014 +0100
@@ -0,0 +1,88 @@
+# copyright 2013-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""unittest for cubicweb.dbapi"""
+
+
+from cubicweb.devtools.testlib import CubicWebTC
+
+from cubicweb import ProgrammingError
+from cubicweb.repoapi import ClientConnection, connect, anonymous_cnx
+
+
+class REPOAPITC(CubicWebTC):
+
+ def test_clt_cnx_basic_usage(self):
+ """Test that a client connection can be used to access the data base"""
+ cltcnx = ClientConnection(self.session)
+ with cltcnx:
+ # (1) some RQL request
+ rset = cltcnx.execute('Any X WHERE X is CWUser')
+ self.assertTrue(rset)
+ # (2) ORM usage
+ random_user = rset.get_entity(0, 0)
+ # (3) Write operation
+ random_user.cw_set(surname=u'babar')
+ # (4) commit
+ cltcnx.commit()
+ rset = cltcnx.execute('''Any X WHERE X is CWUser,
+ X surname "babar"
+ ''')
+ self.assertTrue(rset)
+ # prepare test for implicit rollback
+ random_user = rset.get_entity(0, 0)
+ random_user.cw_set(surname=u'celestine')
+ # implicit rollback on exit
+ rset = self.session.execute('''Any X WHERE X is CWUser,
+ X surname "babar"
+ ''')
+ self.assertTrue(rset)
+
+ def test_clt_cnx_life_cycle(self):
+ """Check that ClientConnection requires explicite open and close
+ """
+ cltcnx = ClientConnection(self.session)
+ # connection not open yet
+ with self.assertRaises(ProgrammingError):
+ cltcnx.execute('Any X WHERE X is CWUser')
+ # connection open and working
+ with cltcnx:
+ cltcnx.execute('Any X WHERE X is CWUser')
+ # connection closed
+ with self.assertRaises(ProgrammingError):
+ cltcnx.execute('Any X WHERE X is CWUser')
+
+ def test_connect(self):
+ """check that repoapi.connect works and return a usable connection"""
+ clt_cnx = connect(self.repo, login='admin', password='gingkow')
+ self.assertEqual('admin', clt_cnx.user.login)
+ with clt_cnx:
+ rset = clt_cnx.execute('Any X WHERE X is CWUser')
+ self.assertTrue(rset)
+
+ def test_anonymous_connect(self):
+ """check that you can get anonymous connection when the data exist"""
+
+ clt_cnx = anonymous_cnx(self.repo)
+ self.assertEqual('anon', clt_cnx.user.login)
+ with clt_cnx:
+ rset = clt_cnx.execute('Any X WHERE X is CWUser')
+ self.assertTrue(rset)
+
--- a/transaction.py Mon Feb 17 11:13:27 2014 +0100
+++ b/transaction.py Mon Feb 17 15:32:50 2014 +0100
@@ -53,7 +53,17 @@
self.datetime = time
self.user_eid = ueid
# should be set by the dbapi connection
- self.req = None
+ self.req = None # old style
+ self.cnx = None # new style
+
+ def _execute(self, *args, **kwargs):
+ """execute a query using either the req or the cnx"""
+ if self.req is None:
+ execute = self.cnx.execute
+ else:
+ execute = self.req.execute
+ return execute(*args, **kwargs)
+
def __repr__(self):
return '<Transaction %s by %s on %s>' % (
@@ -63,8 +73,8 @@
"""return the user entity which has done the transaction,
none if not found.
"""
- return self.req.execute('Any X WHERE X eid %(x)s',
- {'x': self.user_eid}).get_entity(0, 0)
+ return self._execute('Any X WHERE X eid %(x)s',
+ {'x': self.user_eid}).get_entity(0, 0)
def actions_list(self, public=True):
"""return an ordered list of action effectued during that transaction
@@ -72,7 +82,11 @@
if public is true, return only 'public' action, eg not ones triggered
under the cover by hooks.
"""
- return self.req.cnx.transaction_actions(self.uuid, public)
+ if self.req is not None:
+ cnx = self.req.cnx
+ else:
+ cnx = self.cnx
+ return cnx.transaction_actions(self.uuid, public)
class AbstractAction(object):
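
The `Transaction` changes above let the same object serve both APIs during the transition: `_execute` dispatches to `req.execute` (old dbapi style) or `cnx.execute` (new repoapi style) depending on which attribute was set by whoever created the transaction. A sketch of the new-style binding, assuming `undoable_transactions()` keeps returning such transaction objects:

    for tx in cnx.undoable_transactions():
        tx.cnx = cnx                    # new-style binding; tx.req stays None
        print tx.uuid, tx.user().login  # user() goes through _execute()
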
--- a/web/application.py Mon Feb 17 11:13:27 2014 +0100
+++ b/web/application.py Mon Feb 17 15:32:50 2014 +0100
@@ -35,7 +35,7 @@
ValidationError, Unauthorized, Forbidden,
AuthenticationError, NoSelectableObject,
BadConnectionId, CW_EVENT_MANAGER)
-from cubicweb.dbapi import DBAPISession, anonymous_session
+from cubicweb.repoapi import anonymous_cnx
from cubicweb.web import LOGGER, component
from cubicweb.web import (
StatusResponse, DirectResponse, Redirect, NotFound, LogOut,
@@ -50,20 +50,23 @@
@contextmanager
def anonymized_request(req):
- orig_session = req.session
- req.set_session(anonymous_session(req.vreg))
+ orig_cnx = req.cnx
+ anon_clt_cnx = anonymous_cnx(orig_cnx._session.repo)
+ req.set_cnx(anon_clt_cnx)
try:
- yield req
+ with anon_clt_cnx:
+ yield req
finally:
- req.set_session(orig_session)
+ req.set_cnx(orig_cnx)
class AbstractSessionManager(component.Component):
"""manage session data associated to a session identifier"""
__regid__ = 'sessionmanager'
- def __init__(self, vreg):
+ def __init__(self, repo):
+ vreg = repo.vreg
self.session_time = vreg.config['http-session-time'] or None
- self.authmanager = vreg['components'].select('authmanager', vreg=vreg)
+ self.authmanager = vreg['components'].select('authmanager', repo=repo)
interval = (self.session_time or 0) / 2.
if vreg.config.anonymous_user()[0] is not None:
self.cleanup_anon_session_time = vreg.config['cleanup-anonymous-session-time'] or 5 * 60
@@ -111,8 +114,7 @@
raise NotImplementedError()
def open_session(self, req):
- """open and return a new session for the given request. The session is
- also bound to the request.
+ """open and return a new session for the given request.
raise :exc:`cubicweb.AuthenticationError` if authentication failed
(no authentication info found or wrong user/password)
@@ -130,8 +132,8 @@
"""authenticate user associated to a request and check session validity"""
__regid__ = 'authmanager'
- def __init__(self, vreg):
- self.vreg = vreg
+ def __init__(self, repo):
+ self.vreg = repo.vreg
def validate_session(self, req, session):
"""check session validity, reconnecting it to the repository if the
@@ -159,9 +161,10 @@
"""a session handler using a cookie to store the session identifier"""
def __init__(self, appli):
+ self.repo = appli.repo
self.vreg = appli.vreg
self.session_manager = self.vreg['components'].select('sessionmanager',
- vreg=self.vreg)
+ repo=self.repo)
global SESSION_MANAGER
SESSION_MANAGER = self.session_manager
if self.vreg.config.mode != 'test':
@@ -173,7 +176,7 @@
def reset_session_manager(self):
data = self.session_manager.dump_data()
self.session_manager = self.vreg['components'].select('sessionmanager',
- vreg=self.vreg)
+ repo=self.repo)
self.session_manager.restore_data(data)
global SESSION_MANAGER
SESSION_MANAGER = self.session_manager
@@ -196,66 +199,40 @@
return '__%s_https_session' % self.vreg.config.appid
return '__%s_session' % self.vreg.config.appid
- def set_session(self, req):
- """associate a session to the request
+ def get_session(self, req):
+ """Return a session object corresponding to credentials held by the req
Session id is searched from :
- # form variable
- cookie
- if no session id is found, open a new session for the connected user
- or request authentification as needed
+ If no session id is found, try opening a new session with credentials
+ found in the request.
- :raise Redirect: if authentication has occurred and succeed
+ Raises AuthenticationError if no session can be found or created.
"""
cookie = req.get_cookie()
sessioncookie = self.session_cookie(req)
try:
sessionid = str(cookie[sessioncookie].value)
- except KeyError: # no session cookie
+ session = self.get_session_by_id(req, sessionid)
+ except (KeyError, InvalidSession): # no valid session cookie
session = self.open_session(req)
- else:
- try:
- session = self.get_session(req, sessionid)
- except InvalidSession:
- # try to open a new session, so we get an anonymous session if
- # allowed
- session = self.open_session(req)
- else:
- if not session.cnx:
- # session exists but is not bound to a connection. We should
- # try to authenticate
- loginsucceed = False
- try:
- if self.open_session(req, allow_no_cnx=False):
- loginsucceed = True
- except Redirect:
- # may be raised in open_session (by postlogin mechanism)
- # on successful connection
- loginsucceed = True
- raise
- except AuthenticationError:
- # authentication failed, continue to use this session
- req.set_session(session)
- finally:
- if loginsucceed:
- # session should be replaced by new session created
- # in open_session
- self.session_manager.close_session(session)
+ return session
- def get_session(self, req, sessionid):
+ def get_session_by_id(self, req, sessionid):
session = self.session_manager.get_session(req, sessionid)
session.mtime = time()
return session
- def open_session(self, req, allow_no_cnx=True):
- session = self.session_manager.open_session(req, allow_no_cnx=allow_no_cnx)
+ def open_session(self, req):
+ session = self.session_manager.open_session(req)
sessioncookie = self.session_cookie(req)
secure = req.https and req.base_url().startswith('https://')
req.set_cookie(sessioncookie, session.sessionid,
maxage=None, secure=secure)
if not session.anonymous_session:
- self.session_manager.postlogin(req)
+ self.session_manager.postlogin(req, session)
return session
def logout(self, req, goto_url):
@@ -277,21 +254,20 @@
The http server will call its main entry point ``application.handle_request``.
.. automethod:: cubicweb.web.application.CubicWebPublisher.main_handle_request
+
+ You have to provide both a repository and a web-server config at
+ initialization. In an all-in-one instance both configs are the same.
"""
- def __init__(self, config,
- session_handler_fact=CookieSessionHandler,
- vreg=None):
+ def __init__(self, repo, config, session_handler_fact=CookieSessionHandler):
self.info('starting web instance from %s', config.apphome)
- if vreg is None:
- vreg = cwvreg.CWRegistryStore(config)
- self.vreg = vreg
- # connect to the repository and get instance's schema
- self.repo = config.repository(vreg)
- if not vreg.initialized:
+ self.repo = repo
+ self.vreg = repo.vreg
+ # get instance's schema
+ if not self.vreg.initialized:
config.init_cubes(self.repo.get_cubes())
- vreg.init_properties(self.repo.properties())
- vreg.set_schema(self.repo.get_schema())
+ self.vreg.init_properties(self.repo.properties())
+ self.vreg.set_schema(self.repo.get_schema())
# set the correct publish method
if config['query-log-file']:
from threading import Lock
@@ -310,12 +286,12 @@
self.url_resolver = self.vreg['components'].select('urlpublisher',
vreg=self.vreg)
- def connect(self, req):
- """return a connection for a logged user object according to existing
- sessions (i.e. a new connection may be created or an already existing
- one may be reused
+ def get_session(self, req):
+ """Return a session object corresponding to credentials held by the req
+
+ May raise AuthenticationError.
"""
- self.session_handler.set_session(req)
+ return self.session_handler.get_session(req)
# publish methods #########################################################
@@ -362,7 +338,24 @@
req.set_header('WWW-Authenticate', [('Basic', {'realm' : realm })], raw=False)
content = ''
try:
- self.connect(req)
+ try:
+ session = self.get_session(req)
+ from cubicweb import repoapi
+ cnx = repoapi.ClientConnection(session)
+ req.set_cnx(cnx)
+ except AuthenticationError:
+ # Keep the dummy session set at initialisation. Such a session
+ # will work to some extent but will raise an AuthenticationError
+ # on any database access.
+ import contextlib
+ @contextlib.contextmanager
+ def dummy():
+ yield
+ cnx = dummy()
+ # XXX We want to clean up this approach in the future. But
+ # several cubes like registration or forgotten password rely on
+ # this principle.
+
# DENY https acces for anonymous_user
if (req.https
and req.session.anonymous_session
@@ -373,7 +366,8 @@
# handler
try:
### Try to generate the actual request content
- content = self.core_handle(req, path)
+ with cnx:
+ content = self.core_handle(req, path)
# Handle user log-out
except LogOut as ex:
# When authentification is handled by cookie the code that
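
To summarize the new request-handling flow introduced above: authenticate first, wrap the whole page generation in a connection context, and fall back to a no-op context when authentication fails so the historical dummy-session behaviour survives. A condensed sketch (not the verbatim method body):

    try:
        session = self.get_session(req)          # may raise AuthenticationError
        cnx = repoapi.ClientConnection(session)
        req.set_cnx(cnx)
    except AuthenticationError:
        cnx = dummy()                            # no-op context manager, as above
    with cnx:                                    # connection released on exit
        content = self.core_handle(req, path)
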
--- a/web/formfields.py Mon Feb 17 11:13:27 2014 +0100
+++ b/web/formfields.py Mon Feb 17 15:32:50 2014 +0100
@@ -1033,6 +1033,10 @@
# while it has no value, hence generating a false error.
return list(self.fields)
+ @property
+ def needs_multipart(self):
+ return any(f.needs_multipart for f in self.fields)
+
class RelationField(Field):
"""Use this field to edit a relation of an entity.
--- a/web/request.py Mon Feb 17 11:13:27 2014 +0100
+++ b/web/request.py Mon Feb 17 15:32:50 2014 +0100
@@ -39,6 +39,7 @@
from logilab.common.deprecation import deprecated
from logilab.mtconverter import xml_escape
+from cubicweb.req import RequestSessionBase
from cubicweb.dbapi import DBAPIRequest
from cubicweb.uilib import remove_html_tags, js
from cubicweb.utils import SizeConstrainedList, HTMLHead, make_uid
@@ -82,7 +83,7 @@
-class CubicWebRequestBase(DBAPIRequest):
+class _CubicWebRequestBase(RequestSessionBase):
"""abstract HTTP request, should be extended according to the HTTP backend
Immutable attributes that describe the received query and generic configuration
"""
@@ -94,7 +95,7 @@
:https: boolean, s this a https request
:form: Forms value
"""
- super(CubicWebRequestBase, self).__init__(vreg)
+ super(_CubicWebRequestBase, self).__init__(vreg)
#: (Boolean) Is this an https request.
self.https = https
#: User interface property (vary with https) (see :ref:`uiprops`)
@@ -119,6 +120,19 @@
self.setup_params(form)
#: received body
self.content = StringIO()
+ # use the Accept-Language header to set the default language (it may
+ # be overwritten by the user's own setting later)
+ if vreg.config.get('language-negociation', False):
+ # http negotiated language
+ accepted_languages = self.header_accept_language()
+ else:
+ accepted_languages = ()
+ for lang in accepted_languages:
+ if lang in self.translations:
+ self.set_language(lang)
+ break
+ else:
+ # fall back to the default language
+ self.set_default_language(vreg)
#: dictionary that may be used to store request data that has to be
#: shared among various components used to publish the request (views,
#: controller, application...)
@@ -169,7 +183,7 @@
if secure:
base_url = self.vreg.config.get('https-url')
if base_url is None:
- base_url = super(CubicWebRequestBase, self).base_url()
+ base_url = super(_CubicWebRequestBase, self).base_url()
return base_url
@property
@@ -206,31 +220,6 @@
self.set_page_data('rql_varmaker', varmaker)
return varmaker
- def set_session(self, session, user=None):
- """method called by the session handler when the user is authenticated
- or an anonymous connection is open
- """
- super(CubicWebRequestBase, self).set_session(session, user)
- # set request language
- vreg = self.vreg
- if self.user:
- try:
- # 1. user specified language
- lang = vreg.typed_value('ui.language',
- self.user.properties['ui.language'])
- self.set_language(lang)
- return
- except KeyError:
- pass
- if vreg.config['language-negociation']:
- # 2. http negociated language
- for lang in self.header_accept_language():
- if lang in self.translations:
- self.set_language(lang)
- return
- # 3. default language
- self.set_default_language(vreg)
-
# input form parameters management ########################################
# common form parameters which should be protected against html values
@@ -725,7 +714,13 @@
if '__message' in kwargs:
msg = kwargs.pop('__message')
kwargs['_cwmsgid'] = self.set_redirect_message(msg)
- return super(CubicWebRequestBase, self).build_url(*args, **kwargs)
+ if not args:
+ method = 'view'
+ if (self.from_controller() == 'view'
+ and not '_restpath' in kwargs):
+ method = self.relative_path(includeparams=False) or 'view'
+ args = (method,)
+ return super(_CubicWebRequestBase, self).build_url(*args, **kwargs)
def url(self, includeparams=True):
"""return currently accessed url"""
@@ -987,6 +982,109 @@
return 'text/html'
+class DBAPICubicWebRequestBase(_CubicWebRequestBase, DBAPIRequest):
+
+ def set_session(self, session):
+ """method called by the session handler when the user is authenticated
+ or an anonymous connection is open
+ """
+ super(DBAPICubicWebRequestBase, self).set_session(session)
+ # set request language
+ user_lang = self.user.properties.get('ui.language')
+ if user_lang is not None:
+ lang = self.vreg.typed_value('ui.language', user_lang)
+ self.set_language(lang)
+
+
+
+def _cnx_func(name):
+ def proxy(req, *args, **kwargs):
+ return getattr(req.cnx, name)(*args, **kwargs)
+ return proxy
+
+
+class ConnectionCubicWebRequestBase(_CubicWebRequestBase):
+
+ def __init__(self, vreg, https=False, form=None, headers={}):
+ """"""
+ self.cnx = None
+ self.session = None
+ self.vreg = vreg
+ try:
+ self.translations = vreg.config.translations
+ except AttributeError:
+ # either no vreg or a config which doesn't handle translations
+ self.translations = {}
+ super(ConnectionCubicWebRequestBase, self).__init__(vreg, https=https,
+ form=form, headers=headers)
+ from cubicweb.dbapi import DBAPISession, _NeedAuthAccessMock
+ self.session = DBAPISession(None)
+ self.cnx = self.user = _NeedAuthAccessMock()
+ #: cache entities built during the request
+ self._eid_cache = {}
+
+ def set_cnx(self, cnx):
+ self.cnx = cnx
+ self.session = cnx._session
+ self._set_user(cnx.user)
+ # set user language
+ user_lang = self.user.properties.get('ui.language')
+ if user_lang is not None:
+ lang = self.vreg.typed_value('ui.language', user_lang)
+ self.set_language(lang)
+
+
+ def execute(self, *args, **kwargs):
+ rset = self.cnx.execute(*args, **kwargs)
+ rset.req = self
+ return rset
+
+ def set_default_language(self, vreg):
+ # XXX copy from dbapi
+ try:
+ lang = vreg.property_value('ui.language')
+ except Exception: # property may not be registered
+ lang = 'en'
+ try:
+ self.set_language(lang)
+ except KeyError:
+ # this occurs usually during test execution
+ self._ = self.__ = unicode
+ self.pgettext = lambda x, y: unicode(y)
+
+ entity_metas = _cnx_func('entity_metas')
+ source_defs = _cnx_func('source_defs')
+ get_shared_data = _cnx_func('get_shared_data')
+ set_shared_data = _cnx_func('set_shared_data')
+ describe = _cnx_func('describe') # deprecated XXX
+
+ # server-side service call #################################################
+
+ def call_service(self, regid, **kwargs):
+ return self.cnx.call_service(regid, **kwargs)
+
+ # entities cache management ###############################################
+
+ def entity_cache(self, eid):
+ return self._eid_cache[eid]
+
+ def set_entity_cache(self, entity):
+ self._eid_cache[entity.eid] = entity
+
+ def cached_entities(self):
+ return self._eid_cache.values()
+
+ def drop_entity_cache(self, eid=None):
+ if eid is None:
+ self._eid_cache = {}
+ else:
+ del self._eid_cache[eid]
+
+
+
+CubicWebRequestBase = ConnectionCubicWebRequestBase
+
+
## HTTP-accept parsers / utilies ##############################################
def _mimetype_sort_key(accept_info):
"""accepted mimetypes must be sorted by :
@@ -1083,4 +1181,4 @@
}
from cubicweb import set_log_methods
-set_log_methods(CubicWebRequestBase, LOGGER)
+set_log_methods(_CubicWebRequestBase, LOGGER)
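
Worth noting in the request.py hunks: `_cnx_func` is a small proxy factory, so the request keeps its historical surface (`entity_metas`, `source_defs`, `get_shared_data`, ...) while the actual implementation moves onto the connection. The pattern in isolation:

    def _cnx_func(name):
        # build an unbound method forwarding to the same-named cnx method
        def proxy(req, *args, **kwargs):
            return getattr(req.cnx, name)(*args, **kwargs)
        return proxy

    class MyReq(object):
        def __init__(self, cnx):
            self.cnx = cnx
        describe = _cnx_func('describe')  # MyReq(cnx).describe(1) hits cnx.describe
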
--- a/web/test/data/views.py Mon Feb 17 11:13:27 2014 +0100
+++ b/web/test/data/views.py Mon Feb 17 15:32:50 2014 +0100
@@ -16,32 +16,8 @@
# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
-from cubicweb.web import Redirect
-from cubicweb.web.application import CubicWebPublisher
from cubicweb.web.views.ajaxcontroller import ajaxfunc
-# proof of concept : monkey patch handle method so that if we are in an
-# anonymous session and __fblogin is found is req.form, the user with the
-# given login is created if necessary and then a session is opened for that
-# user
-# NOTE: this require "cookie" authentication mode
-def auto_login_handle_request(self, req, path):
- if (not req.cnx or req.cnx.anonymous_connection) and req.form.get('__fblogin'):
- login = password = req.form.pop('__fblogin')
- self.repo.register_user(login, password)
- req.form['__login'] = login
- req.form['__password'] = password
- if req.cnx:
- req.cnx.close()
- req.cnx = None
- try:
- self.session_handler.set_session(req)
- except Redirect:
- pass
- assert req.user.login == login
- return orig_handle(self, req, path)
-
-
def _recursive_replace_stream_by_content(tree):
""" Search for streams (i.e. object that have a 'read' method) in a tree
(which branches are lists or tuples), and substitute them by their content,
@@ -70,6 +46,3 @@
except Exception, ex:
import traceback as tb
tb.print_exc(ex)
-
-orig_handle = CubicWebPublisher.main_handle_request
-CubicWebPublisher.main_handle_request = auto_login_handle_request
--- a/web/test/unittest_application.py Mon Feb 17 11:13:27 2014 +0100
+++ b/web/test/unittest_application.py Mon Feb 17 15:32:50 2014 +0100
@@ -32,6 +32,8 @@
from cubicweb.web import LogOut, Redirect, INTERNAL_FIELD_VALUE
from cubicweb.web.views.basecontrollers import ViewController
from cubicweb.web.application import anonymized_request
+from cubicweb.dbapi import DBAPISession, _NeedAuthAccessMock
+from cubicweb import repoapi
class FakeMapping:
"""emulates a mapping module"""
@@ -177,15 +179,6 @@
self.execute('DELETE X in_group G WHERE X eid %s, G name "guests"' % user.eid)
self.commit()
- def test_nonregr_publish1(self):
- req = self.request(u'CWEType X WHERE X final FALSE, X meta FALSE')
- self.app.handle_request(req, 'view')
-
- def test_nonregr_publish2(self):
- req = self.request(u'Any count(N) WHERE N todo_by U, N is Note, U eid %s'
- % self.user().eid)
- self.app.handle_request(req, 'view')
-
def test_publish_validation_error(self):
req = self.request()
user = self.user()
@@ -281,9 +274,9 @@
def _test_cleaned(self, kwargs, injected, cleaned):
req = self.request(**kwargs)
- page = self.app.handle_request(req, 'view')
- self.assertFalse(injected in page, (kwargs, injected))
- self.assertTrue(cleaned in page, (kwargs, cleaned))
+ page = self.app_handle_request(req, 'view')
+ self.assertNotIn(injected, page)
+ self.assertIn(cleaned, page)
def test_nonregr_script_kiddies(self):
"""test against current script injection"""
@@ -315,26 +308,14 @@
self.commit()
self.assertEqual(vreg.property_value('ui.language'), 'en')
- def test_fb_login_concept(self):
- """see data/views.py"""
- self.set_auth_mode('cookie', 'anon')
- self.login('anon')
- req = self.request()
- origcnx = req.cnx
- req.form['__fblogin'] = u'turlututu'
- page = self.app.handle_request(req, '')
- self.assertFalse(req.cnx is origcnx)
- self.assertEqual(req.user.login, 'turlututu')
- self.assertTrue('turlututu' in page, page)
- req.cnx.close() # avoid warning
-
# authentication tests ####################################################
def test_http_auth_no_anon(self):
req, origsession = self.init_authentication('http')
self.assertAuthFailure(req)
- self.assertRaises(AuthenticationError, self.app_handle_request, req, 'login')
- self.assertEqual(req.cnx, None)
+ self.app.handle_request(req, 'login')
+ self.assertEqual(401, req.status_out)
+ clear_cache(req, 'get_authorization')
authstr = base64.encodestring('%s:%s' % (self.admlogin, self.admpassword))
req.set_request_header('Authorization', 'basic %s' % authstr)
self.assertAuthSuccess(req, origsession)
@@ -345,12 +326,13 @@
req, origsession = self.init_authentication('cookie')
self.assertAuthFailure(req)
try:
- form = self.app_handle_request(req, 'login')
+ form = self.app.handle_request(req, 'login')
except Redirect as redir:
self.fail('anonymous user should get login form')
+ clear_cache(req, 'get_authorization')
self.assertTrue('__login' in form)
self.assertTrue('__password' in form)
- self.assertEqual(req.cnx, None)
+ self.assertFalse(req.cnx) # mock cnx objects are falsy
req.form['__login'] = self.admlogin
req.form['__password'] = self.admpassword
self.assertAuthSuccess(req, origsession)
@@ -369,7 +351,7 @@
# req.form['__password'] = self.admpassword
# self.assertAuthFailure(req)
# option allow-email-login set
- origsession.login = address
+ #origsession.login = address
self.set_option('allow-email-login', True)
req.form['__login'] = address
req.form['__password'] = self.admpassword
@@ -387,22 +369,27 @@
raw=True)
clear_cache(req, 'get_authorization')
# reset session as if it was a new incoming request
- req.session = req.cnx = None
+ req.session = DBAPISession(None)
+ req.user = req.cnx = _NeedAuthAccessMock()
+
def _test_auth_anon(self, req):
- self.app.connect(req)
- asession = req.session
+ asession = self.app.get_session(req)
+ # important otherwise _reset_cookie will not use the right session
+ req.set_cnx(repoapi.ClientConnection(asession))
self.assertEqual(len(self.open_sessions), 1)
self.assertEqual(asession.login, 'anon')
self.assertTrue(asession.anonymous_session)
self._reset_cookie(req)
def _test_anon_auth_fail(self, req):
- self.assertEqual(len(self.open_sessions), 1)
- self.app.connect(req)
+ self.assertEqual(1, len(self.open_sessions))
+ session = self.app.get_session(req)
+ # important otherwise _reset_cookie will not use the right session
+ req.set_cnx(repoapi.ClientConnection(session))
self.assertEqual(req.message, 'authentication failure')
self.assertEqual(req.session.anonymous_session, True)
- self.assertEqual(len(self.open_sessions), 1)
+ self.assertEqual(1, len(self.open_sessions))
self._reset_cookie(req)
def test_http_auth_anon_allowed(self):
@@ -427,19 +414,19 @@
req.form['__password'] = self.admpassword
self.assertAuthSuccess(req, origsession)
self.assertRaises(LogOut, self.app_handle_request, req, 'logout')
- self.assertEqual(len(self.open_sessions), 0)
+ self.assertEqual(0, len(self.open_sessions))
def test_anonymized_request(self):
req = self.request()
- self.assertEqual(req.session.login, self.admlogin)
+ self.assertEqual(self.admlogin, req.session.user.login)
# admin should see anon + admin
- self.assertEqual(len(list(req.find_entities('CWUser'))), 2)
+ self.assertEqual(2, len(list(req.find_entities('CWUser'))))
with anonymized_request(req):
- self.assertEqual(req.session.login, 'anon')
+ self.assertEqual('anon', req.session.login)
# anon should only see anon user
- self.assertEqual(len(list(req.find_entities('CWUser'))), 1)
- self.assertEqual(req.session.login, self.admlogin)
- self.assertEqual(len(list(req.find_entities('CWUser'))), 2)
+ self.assertEqual(1, len(list(req.find_entities('CWUser'))))
+ self.assertEqual(self.admlogin, req.session.login)
+ self.assertEqual(2, len(list(req.find_entities('CWUser'))))
def test_non_regr_optional_first_var(self):
req = self.request()
--- a/web/test/unittest_formfields.py Mon Feb 17 11:13:27 2014 +0100
+++ b/web/test/unittest_formfields.py Mon Feb 17 15:32:50 2014 +0100
@@ -25,7 +25,7 @@
from cubicweb.devtools.testlib import CubicWebTC
from cubicweb.web.formwidgets import PasswordInput, TextArea, Select, Radio
from cubicweb.web.formfields import *
-from cubicweb.web.views.forms import EntityFieldsForm
+from cubicweb.web.views.forms import EntityFieldsForm, FieldsForm
from cubes.file.entities import File
@@ -160,6 +160,21 @@
field.render(form, renderer)
+class CompoundFieldTC(CubicWebTC):
+
+ def test_multipart(self):
+ """Ensures that compound forms have needs_multipart set if their
+ children require it"""
+ class AForm(FieldsForm):
+ comp = CompoundField([IntField(), StringField()])
+ aform = AForm(self.request(), None)
+ self.assertFalse(aform.needs_multipart)
+ class MForm(FieldsForm):
+ comp = CompoundField([IntField(), FileField()])
+ mform = MForm(self.request(), None)
+ self.assertTrue(mform.needs_multipart)
+
+
class UtilsTC(TestCase):
def test_vocab_sort(self):
self.assertEqual(vocab_sort([('Z', 1), ('A', 2),
--- a/web/test/unittest_session.py Mon Feb 17 11:13:27 2014 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""unit tests for cubicweb.web.application
-
-:organization: Logilab
-:copyright: 2001-2011 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
-:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
-:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
-"""
-from cubicweb.devtools.testlib import CubicWebTC
-from cubicweb.web import InvalidSession
-
-class SessionTC(CubicWebTC):
-
- def test_session_expiration(self):
- sm = self.app.session_handler.session_manager
- # make is if the web session has been opened by the session manager
- sm._sessions[self.cnx.sessionid] = self.websession
- sessionid = self.websession.sessionid
- self.assertEqual(len(sm._sessions), 1)
- self.assertEqual(self.websession.sessionid, self.websession.cnx.sessionid)
- # fake the repo session is expiring
- self.repo.close(sessionid)
- try:
- # fake an incoming http query with sessionid in session cookie
- # don't use self.request() which try to call req.set_session
- req = self.requestcls(self.vreg)
- self.assertRaises(InvalidSession, sm.get_session, req, sessionid)
- self.assertEqual(len(sm._sessions), 0)
- finally:
- # avoid error in tearDown by telling this connection is closed...
- self.cnx._closed = True
-
-if __name__ == '__main__':
- from logilab.common.testlib import unittest_main
- unittest_main()
--- a/web/test/unittest_views_basecontrollers.py Mon Feb 17 11:13:27 2014 +0100
+++ b/web/test/unittest_views_basecontrollers.py Mon Feb 17 15:32:50 2014 +0100
@@ -33,7 +33,7 @@
from cubicweb.uilib import rql_for_eid
from cubicweb.web import INTERNAL_FIELD_VALUE, Redirect, RequestError, RemoteCallFailed
import cubicweb.server.session
-from cubicweb.server.session import Transaction as OldTransaction
+from cubicweb.server.session import Connection as OldConnection
from cubicweb.entities.authobjs import CWUser
from cubicweb.web.views.autoform import get_pending_inserts, get_pending_deletes
from cubicweb.web.views.basecontrollers import JSonController, xhtmlize, jsonize
@@ -916,15 +916,15 @@
class UndoControllerTC(CubicWebTC):
def setUp(self):
- class Transaction(OldTransaction):
+ class Connection(OldConnection):
"""Force undo feature to be turned on in all case"""
undo_actions = property(lambda tx: True, lambda x, y:None)
- cubicweb.server.session.Transaction = Transaction
+ cubicweb.server.session.Connection = Connection
super(UndoControllerTC, self).setUp()
def tearDown(self):
super(UndoControllerTC, self).tearDown()
- cubicweb.server.session.Transaction = OldTransaction
+ cubicweb.server.session.Connection = OldConnection
def setup_database(self):
--- a/web/test/unittest_views_basetemplates.py Mon Feb 17 11:13:27 2014 +0100
+++ b/web/test/unittest_views_basetemplates.py Mon Feb 17 15:32:50 2014 +0100
@@ -18,16 +18,15 @@
from cubicweb.devtools.testlib import CubicWebTC
from cubicweb.devtools.htmlparser import XMLValidator
+from cubicweb.dbapi import DBAPISession
class LogFormTemplateTC(CubicWebTC):
def _login_labels(self):
valid = self.content_type_validators.get('text/html', XMLValidator)()
- req = self.request()
- req.cnx.anonymous_connection = True
- page = valid.parse_string(self.vreg['views'].main_template(self.request(), 'login'))
- req.cnx.anonymous_connection = False
+ req = self.requestcls(self.vreg, url='login')
+ page = valid.parse_string(self.vreg['views'].main_template(req, 'login'))
return page.find_tag('label')
def test_label(self):
--- a/web/views/authentication.py Mon Feb 17 11:13:27 2014 +0100
+++ b/web/views/authentication.py Mon Feb 17 15:32:50 2014 +0100
@@ -105,9 +105,10 @@
class RepositoryAuthenticationManager(AbstractAuthenticationManager):
"""authenticate user associated to a request and check session validity"""
- def __init__(self, vreg):
- super(RepositoryAuthenticationManager, self).__init__(vreg)
- self.repo = vreg.config.repository(vreg)
+ def __init__(self, repo):
+ super(RepositoryAuthenticationManager, self).__init__(repo)
+ self.repo = repo
+ vreg = repo.vreg
self.log_queries = vreg.config['query-log-file']
self.authinforetrievers = sorted(vreg['webauth'].possible_objects(vreg),
key=lambda x: x.order)
@@ -138,13 +139,6 @@
# actual user login
if login and session.login != login:
raise InvalidSession('login mismatch')
- try:
- # calling cnx.user() check connection validity, raise
- # BadConnectionId on failure
- user = session.cnx.user(req)
- except BadConnectionId:
- raise InvalidSession('bad connection id')
- return user
def authenticate(self, req):
"""authenticate user using connection information found in the request,
@@ -160,28 +154,24 @@
except NoAuthInfo:
continue
try:
- cnx = self._authenticate(login, authinfo)
+ session = self._authenticate(login, authinfo)
except AuthenticationError:
retriever.cleanup_authentication_information(req)
continue # the next one may succeed
for retriever_ in self.authinforetrievers:
- retriever_.authenticated(retriever, req, cnx, login, authinfo)
- return cnx, login
+ retriever_.authenticated(retriever, req, session, login, authinfo)
+ return session, login
# false if no authentication info found, eg this is not an
# authentication failure
if 'login' in locals():
req.set_message(req._('authentication failure'))
login, authinfo = self.anoninfo
if login:
- cnx = self._authenticate(login, authinfo)
- cnx.anonymous_connection = True
- return cnx, login
+ session = self._authenticate(login, authinfo)
+ return session, login
raise AuthenticationError()
def _authenticate(self, login, authinfo):
- cnxprops = ConnectionProperties(close=False, log=self.log_queries)
- cnx = _repo_connect(self.repo, login, cnxprops=cnxprops, **authinfo)
- # decorate connection
- cnx.vreg = self.vreg
- return cnx
+ sessionid = self.repo.connect(login, **authinfo)
+ return self.repo._sessions[sessionid]
--- a/web/views/basecomponents.py Mon Feb 17 11:13:27 2014 +0100
+++ b/web/views/basecomponents.py Mon Feb 17 15:32:50 2014 +0100
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -188,25 +188,15 @@
"""
__select__ = yes()
__regid__ = 'applmessages'
- # don't want user to hide this component using an cwproperty
+ # don't want user to hide this component using a cwproperty
cw_property_defs = {}
def call(self, msg=None):
if msg is None:
- msgs = []
- if self._cw.cnx:
- srcmsg = self._cw.get_shared_data('sources_error', pop=True, txdata=True)
- if srcmsg:
- msgs.append(srcmsg)
- reqmsg = self._cw.message # XXX don't call self._cw.message twice
- if reqmsg:
- msgs.append(reqmsg)
- else:
- msgs = [msg]
+ msg = self._cw.message # XXX don't call self._cw.message twice
self.w(u'<div id="appMsg" onclick="%s" class="%s">\n' %
- (toggle_action('appMsg'), (msgs and ' ' or 'hidden')))
- for msg in msgs:
- self.w(u'<div class="message" id="%s">%s</div>' % (self.domid, msg))
+ (toggle_action('appMsg'), (msg and ' ' or 'hidden')))
+ self.w(u'<div class="message" id="%s">%s</div>' % (self.domid, msg))
self.w(u'</div>')
--- a/web/views/cwsources.py Mon Feb 17 11:13:27 2014 +0100
+++ b/web/views/cwsources.py Mon Feb 17 15:32:50 2014 +0100
@@ -99,7 +99,7 @@
cellvids={1: 'editable-final'})
-MAPPED_SOURCE_TYPES = set( ('pyrorql', 'datafeed') )
+MAPPED_SOURCE_TYPES = set( ('datafeed',) )
class CWSourceMappingTab(EntityView):
__regid__ = 'cwsource-mapping'
@@ -117,20 +117,6 @@
'Any X, SCH, XO ORDERBY ET WHERE X options XO, X cw_for_source S, S eid %(s)s, '
'X cw_schema SCH, SCH is ET', {'s': entity.eid})
self.wview('table', rset, 'noresult')
- # self.w('<h3>%s</h3>' % _('Relations that should not be crossed'))
- # self.w('<p>%s</p>' % _(
- # 'By default, when a relation is not supported by a source, it is '
- # 'supposed that a local relation may point to an entity from the '
- # 'external source. Relations listed here won\'t have this '
- # '"crossing" behaviour.'))
- # self.wview('list', entity.related('cw_dont_cross'), 'noresult')
- # self.w('<h3>%s</h3>' % _('Relations that can be crossed'))
- # self.w('<p>%s</p>' % _(
- # 'By default, when a relation is supported by a source, it is '
- # 'supposed that a local relation can\'t point to an entity from the '
- # 'external source. Relations listed here may have this '
- # '"crossing" behaviour anyway.'))
- # self.wview('list', entity.related('cw_may_cross'), 'noresult')
checker = MAPPING_CHECKERS.get(entity.type, MappingChecker)(entity)
checker.check()
if (checker.errors or checker.warnings or checker.infos):
@@ -215,49 +201,6 @@
pass
-class PyroRQLMappingChecker(MappingChecker):
- """pyrorql source mapping checker"""
-
- def init(self):
- self.dontcross = set()
- self.maycross = set()
- super(PyroRQLMappingChecker, self).init()
-
- def init_schemacfg(self, schemacfg):
- options = schemacfg.options or ()
- if 'dontcross' in options:
- self.dontcross.add(schemacfg.schema.name)
- else:
- super(PyroRQLMappingChecker, self).init_schemacfg(schemacfg)
- if 'maycross' in options:
- self.maycross.add(schemacfg.schema.name)
-
- def custom_check(self):
- error = self.errors.append
- info = self.infos.append
- for etype in self.sentities:
- eschema = self.schema[etype]
- for rschema, ttypes, role in eschema.relation_definitions():
- if rschema in META_RTYPES:
- continue
- if not rschema in self.srelations:
- if rschema not in self.dontcross:
- if role == 'subject' and rschema.inlined:
- error(_('inlined relation %(rtype)s of %(etype)s '
- 'should be supported') %
- {'rtype': rschema, 'etype': etype})
- elif (rschema not in self.seen and rschema not in self.maycross):
- info(_('you may want to specify something for %s') %
- rschema)
- self.seen.add(rschema)
- elif rschema in self.maycross and rschema.inlined:
- error(_('you should un-inline relation %s which is '
- 'supported and may be crossed ') % rschema)
-
-MAPPING_CHECKERS = {
- 'pyrorql': PyroRQLMappingChecker,
- }
+# kept empty: display_mapping above still looks source types up in here
+MAPPING_CHECKERS = {}
-
class CWSourceImportsTab(EntityView):
__regid__ = 'cwsource-imports'
--- a/web/views/debug.py Mon Feb 17 11:13:27 2014 +0100
+++ b/web/views/debug.py Mon Feb 17 15:32:50 2014 +0100
@@ -97,7 +97,7 @@
w(u'<h2>%s</h2>' % _('Repository'))
w(u'<h3>%s</h3>' % _('resources usage'))
w(u'<table>')
- stats = repo.stats()
+ stats = self._cw.call_service('repo_stats')
for element in sorted(stats):
w(u'<tr><th align="left">%s</th><td>%s %s</td></tr>'
% (element, xml_escape(unicode(stats[element])),
--- a/web/views/editcontroller.py Mon Feb 17 11:13:27 2014 +0100
+++ b/web/views/editcontroller.py Mon Feb 17 15:32:50 2014 +0100
@@ -252,15 +252,7 @@
formid = 'edition'
form = req.vreg['forms'].select(formid, req, entity=entity)
eid = form.actual_eid(entity.eid)
- try:
- editedfields = formparams['_cw_entity_fields']
- except KeyError:
- try:
- editedfields = formparams['_cw_edited_fields']
- warn('[3.13] _cw_edited_fields has been renamed _cw_entity_fields',
- DeprecationWarning)
- except KeyError:
- raise RequestError(req._('no edited fields specified for entity %s' % entity.eid))
+ editedfields = formparams['_cw_entity_fields']
form.formvalues = {} # init fields value cache
for field in form.iter_modified_fields(editedfields, entity):
self.handle_formfield(form, field, rqlquery)
--- a/web/views/management.py Mon Feb 17 11:13:27 2014 +0100
+++ b/web/views/management.py Mon Feb 17 15:32:50 2014 +0100
@@ -181,7 +181,7 @@
__select__ = none_rset() & match_user_groups('users', 'managers')
def call(self):
- stats = self._cw.vreg.config.repository(None).stats()
+ stats = self._cw.call_service('repo_stats')
results = []
for element in stats:
results.append(u'%s %s' % (element, stats[element]))
--- a/web/views/sessions.py Mon Feb 17 11:13:27 2014 +0100
+++ b/web/views/sessions.py Mon Feb 17 15:32:50 2014 +0100
@@ -26,6 +26,7 @@
from cubicweb.web import InvalidSession, Redirect
from cubicweb.web.application import AbstractSessionManager
from cubicweb.dbapi import ProgrammingError, DBAPISession
+from cubicweb import repoapi
class InMemoryRepositorySessionManager(AbstractSessionManager):
@@ -53,72 +54,59 @@
if sessionid not in self._sessions:
raise InvalidSession()
session = self._sessions[sessionid]
- if session.cnx:
- try:
- user = self.authmanager.validate_session(req, session)
- except InvalidSession:
- # invalid session
- self.close_session(session)
- raise
- # associate the connection to the current request
- req.set_session(session, user)
+ try:
+ user = self.authmanager.validate_session(req, session)
+ except InvalidSession:
+ self.close_session(session)
+ raise
+ if session.closed:
+ self.close_session(session)
+ raise InvalidSession()
return session
- def open_session(self, req, allow_no_cnx=True):
+ def open_session(self, req):
"""open and return a new session for the given request. The session is
also bound to the request.
raise :exc:`cubicweb.AuthenticationError` if authentication failed
(no authentication info found or wrong user/password)
"""
- try:
- cnx, login = self.authmanager.authenticate(req)
- except AuthenticationError:
- if allow_no_cnx:
- session = DBAPISession(None)
- else:
- raise
- else:
- session = DBAPISession(cnx, login)
+ session, login = self.authmanager.authenticate(req)
self._sessions[session.sessionid] = session
- # associate the connection to the current request
- req.set_session(session)
return session
- def postlogin(self, req):
- """postlogin: the user has been authenticated, redirect to the original
- page (index by default) with a welcome message
+ def postlogin(self, req, session):
+ """postlogin: the user have been related to a session
+
+ Both req and session are passed to this function because actually
+ linking the request to the session is not yet done and not the
+ responsability of this object.
"""
# Update last connection date
# XXX: this should be in a post login hook in the repository, but there
# we can't differentiate actual login of automatic session
# reopening. Is it actually a problem?
if 'last_login_time' in req.vreg.schema:
- self._update_last_login_time(req)
- req.set_message(req._('welcome %s!') % req.user.login)
+ self._update_last_login_time(session)
+ req.set_message(req._('welcome %s!') % session.user.login)
- def _update_last_login_time(self, req):
+ def _update_last_login_time(self, session):
# XXX should properly detect missing permission / non writeable source
# and avoid "except (RepositoryError, Unauthorized)" below
try:
- req.execute('SET X last_login_time NOW WHERE X eid %(x)s',
- {'x' : req.user.eid})
- req.cnx.commit()
+ cnx = repoapi.ClientConnection(session)
+ with cnx:
+ cnx.execute('SET X last_login_time NOW WHERE X eid %(x)s',
+ {'x' : session.user.eid})
+ cnx.commit()
except (RepositoryError, Unauthorized):
- req.cnx.rollback()
- except Exception:
- req.cnx.rollback()
- raise
+ pass
def close_session(self, session):
"""close session on logout or on invalid session detected (expired out,
corrupted...)
"""
self.info('closing http session %s' % session.sessionid)
- del self._sessions[session.sessionid]
- if session.cnx:
- try:
- session.cnx.close()
- except (ProgrammingError, BadConnectionId): # expired on the repository side
- pass
- session.cnx = None
+ self._sessions.pop(session.sessionid, None)
+ if not session.closed:
+ session.repo.close(session.id)
--- a/web/views/staticcontrollers.py Mon Feb 17 11:13:27 2014 +0100
+++ b/web/views/staticcontrollers.py Mon Feb 17 15:32:50 2014 +0100
@@ -27,6 +27,7 @@
import hashlib
import mimetypes
import threading
+import tempfile
from time import mktime
from datetime import datetime, timedelta
from logging import getLogger
@@ -145,32 +146,34 @@
def concat_cached_filepath(self, paths):
filepath = self.build_filepath(paths)
if not self._up_to_date(filepath, paths):
- tmpfile = filepath + '.tmp'
- try:
- with self.lock:
- if self._up_to_date(filepath, paths):
- # first check could have raced with some other thread
- # updating the file
- return filepath
- with open(tmpfile, 'wb') as f:
- for path in paths:
- dirpath, rid = self._resource(path)
- if rid is None:
- # In production mode log an error, do not return a 404
- # XXX the erroneous content is cached anyway
- self.logger.error('concatenated data url error: %r file '
- 'does not exist', path)
- if self.config.debugmode:
- raise NotFound(path)
- else:
- with open(osp.join(dirpath, rid), 'rb') as source:
- for line in source:
- f.write(line)
- f.write('\n')
+ with self.lock:
+ if self._up_to_date(filepath, paths):
+ # first check could have raced with some other thread
+ # updating the file
+ return filepath
+ fd, tmpfile = tempfile.mkstemp(dir=os.path.dirname(filepath))
+ try:
+ f = os.fdopen(fd, 'wb')
+ for path in paths:
+ dirpath, rid = self._resource(path)
+ if rid is None:
+ # In production mode log an error, do not return a 404
+ # XXX the erroneous content is cached anyway
+ self.logger.error('concatenated data url error: %r file '
+ 'does not exist', path)
+ if self.config.debugmode:
+ raise NotFound(path)
+ else:
+ with open(osp.join(dirpath, rid), 'rb') as source:
+ for line in source:
+ f.write(line)
+ f.write('\n')
+ f.close()
+ except:
+ os.remove(tmpfile)
+ raise
+ else:
os.rename(tmpfile, filepath)
- except:
- os.remove(tmpfile)
- raise
return filepath
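
The staticcontrollers rewrite above replaces the predictable `filepath + '.tmp'` scratch name with `tempfile.mkstemp` created in the destination directory, so concurrent workers cannot clobber each other's half-written output and the final `os.rename` remains atomic (same filesystem). The underlying write-then-rename pattern, as a standalone sketch:

    import os
    import tempfile

    def atomic_write(filepath, chunks):
        '''write chunks to filepath without readers ever seeing a partial file'''
        fd, tmppath = tempfile.mkstemp(dir=os.path.dirname(filepath))
        try:
            f = os.fdopen(fd, 'wb')
            for chunk in chunks:
                f.write(chunk)
            f.close()
        except:
            os.remove(tmppath)
            raise
        else:
            os.rename(tmppath, filepath)  # atomic on POSIX
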
--- a/web/webconfig.py Mon Feb 17 11:13:27 2014 +0100
+++ b/web/webconfig.py Mon Feb 17 15:32:50 2014 +0100
@@ -274,7 +274,7 @@
try:
return self.__repo
except AttributeError:
- from cubicweb.dbapi import get_repository
+ from cubicweb.repoapi import get_repository
repo = get_repository(config=self, vreg=vreg)
self.__repo = repo
return repo
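
With `get_repository` relocated to `cubicweb.repoapi`, bootstrapping an in-process client becomes a two-step dance; a minimal sketch assuming an all-in-one configuration object `config` and illustrative credentials:

    from cubicweb import repoapi

    repo = repoapi.get_repository(config=config)
    clt_cnx = repoapi.connect(repo, login='admin', password='admin')
    with clt_cnx:
        print clt_cnx.execute('Any COUNT(X) WHERE X is CWUser')[0][0]
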
--- a/wsgi/handler.py Mon Feb 17 11:13:27 2014 +0100
+++ b/wsgi/handler.py Mon Feb 17 15:32:50 2014 +0100
@@ -97,7 +97,7 @@
"""
def __init__(self, config):
- self.appli = CubicWebPublisher(config)
+ self.appli = CubicWebPublisher(config.repository(), config)
self.config = config
self.base_url = self.config['base-url']
self.url_rewriter = self.appli.vreg['components'].select_or_none('urlrewriter')