+
+
+"""
+
+
class HTMLPageInfoTC(TestCase):
    """test cases for PageInfo"""

    def setUp(self):
        # parse the reference HTML page once per test
        parser = htmlparser.HTMLValidator()
        # disable cleanup that would remove doctype
        parser.preprocess_data = lambda data: data
        self.page_info = parser.parse_string(HTML_PAGE2)

    def test_source1(self):
        """make sure source is stored correctly"""
        self.assertEqual(self.page_info.source, HTML_PAGE2)

    def test_source2(self):
        """make sure source is stored correctly - raise exception"""
        # the DTD validator rejects the broken page
        parser = htmlparser.DTDValidator()
        self.assertRaises(AssertionError, parser.parse_string, HTML_PAGE_ERROR)

    def test_has_title_no_level(self):
        """tests h? tags information"""
        # has_title matches the title text exactly (no stripping)
        self.assertEqual(self.page_info.has_title('Test'), True)
        self.assertEqual(self.page_info.has_title('Test '), False)
        self.assertEqual(self.page_info.has_title('Tes'), False)
        self.assertEqual(self.page_info.has_title('Hello world !'), True)

    def test_has_title_level(self):
        """tests h? tags information"""
        # 'Test' only appears as an h1; levels beyond 4 are rejected
        self.assertEqual(self.page_info.has_title('Test', level = 1), True)
        self.assertEqual(self.page_info.has_title('Test', level = 2), False)
        self.assertEqual(self.page_info.has_title('Test', level = 3), False)
        self.assertEqual(self.page_info.has_title('Test', level = 4), False)
        self.assertRaises(IndexError, self.page_info.has_title, 'Test', level = 5)

    def test_has_title_regexp_no_level(self):
        """tests has_title_regexp() with no particular level specified"""
        self.assertEqual(self.page_info.has_title_regexp('h[23] title'), True)

    def test_has_title_regexp_level(self):
        """tests has_title_regexp() with a particular level specified"""
        self.assertEqual(self.page_info.has_title_regexp('h[23] title', 2), True)
        self.assertEqual(self.page_info.has_title_regexp('h[23] title', 3), True)
        self.assertEqual(self.page_info.has_title_regexp('h[23] title', 4), False)

    def test_appears(self):
        """tests PageInfo.appears()"""
        self.assertEqual(self.page_info.appears('CW'), True)
        self.assertEqual(self.page_info.appears('Logilab'), True)
        self.assertEqual(self.page_info.appears('Logilab introduces'), True)
        self.assertEqual(self.page_info.appears('H2 title'), False)

    def test_has_link(self):
        """tests has_link()"""
        # matching is case sensitive; an optional url restricts the match
        self.assertEqual(self.page_info.has_link('Logilab'), True)
        self.assertEqual(self.page_info.has_link('logilab'), False)
        self.assertEqual(self.page_info.has_link('Logilab', 'http://www.logilab.org'), True)
        self.assertEqual(self.page_info.has_link('Logilab', 'http://www.google.com'), False)

    def test_has_link_regexp(self):
        """test has_link_regexp()"""
        self.assertEqual(self.page_info.has_link_regexp('L[oi]gilab'), True)
        self.assertEqual(self.page_info.has_link_regexp('L[ai]gilab'), False)
+
class CWUtilitiesTC(CubicWebTC):
    """Tests for the testing utilities provided by CubicWebTC itself."""

    def test_temporary_permissions_eschema(self):
        eschema = self.schema['CWUser']
        with self.temporary_permissions(CWUser={'read': ()}):
            self.assertEqual(eschema.permissions['read'], ())
            self.assertTrue(eschema.permissions['add'])
        # fixed: a stray `()` second argument was silently swallowed as the
        # assertion *message*; we only want to check permissions are restored
        self.assertTrue(eschema.permissions['read'])

    def test_temporary_permissions_rdef(self):
        rdef = self.schema['CWUser'].rdef('in_group')
        with self.temporary_permissions((rdef, {'read': ()})):
            self.assertEqual(rdef.permissions['read'], ())
            self.assertTrue(rdef.permissions['add'])
        # fixed: dropped spurious `()` msg argument (see above)
        self.assertTrue(rdef.permissions['read'])

    def test_temporary_permissions_rdef_with_exception(self):
        # permissions must be restored even when the block raises
        rdef = self.schema['CWUser'].rdef('in_group')
        try:
            with self.temporary_permissions((rdef, {'read': ()})):
                self.assertEqual(rdef.permissions['read'], ())
                self.assertTrue(rdef.permissions['add'])
                raise ValueError('goto')
        except ValueError:
            # fixed: dropped spurious `()` msg argument (see above)
            self.assertTrue(rdef.permissions['read'])
        else:
            self.fail('exception was caught unexpectedly')

    def test_temporary_appobjects_registered(self):

        class AnAppobject(object):
            __registries__ = ('hip',)
            __regid__ = 'hop'
            __select__ = yes()
            registered = None

            @classmethod
            def __registered__(cls, reg):
                cls.registered = reg

        with self.temporary_appobjects(AnAppobject):
            self.assertEqual(self.vreg['hip'], AnAppobject.registered)
            self.assertIn(AnAppobject, self.vreg['hip']['hop'])
        # the appobject must be unregistered once the context manager exits
        self.assertNotIn(AnAppobject, self.vreg['hip']['hop'])

    def test_login(self):
        """Calling login should not break hook control"""
        with self.admin_access.repo_cnx() as cnx:
            self.hook_executed = False
            self.create_user(cnx, 'babar')
            cnx.commit()

        from cubicweb.server import hook
        from cubicweb.predicates import is_instance

        class MyHook(hook.Hook):
            __regid__ = 'whatever'
            __select__ = hook.Hook.__select__ & is_instance('CWProperty')
            category = 'test-hook'
            events = ('after_add_entity',)
            test = self

            def __call__(self):
                self.test.hook_executed = True

        with self.new_access('babar').repo_cnx() as cnx:
            with self.temporary_appobjects(MyHook):
                with cnx.allow_all_hooks_but('test-hook'):
                    prop = cnx.create_entity('CWProperty', pkey=u'ui.language', value=u'en')
                    cnx.commit()
        # hook was disabled by category, so it must not have run
        self.assertFalse(self.hook_executed)
+
+
class RepoAccessTC(CubicWebTC):
    """Exercise the different connection kinds provided by RepoAccess."""

    def test_repo_connection(self):
        acc = self.new_access('admin')
        with acc.repo_cnx() as cnx:
            rset = cnx.execute('Any X WHERE X is CWUser')
            self.assertTrue(rset)

    def test_client_connection(self):
        acc = self.new_access('admin')
        with acc.client_cnx() as cnx:
            rset = cnx.execute('Any X WHERE X is CWUser')
            self.assertTrue(rset)

    def test_web_request(self):
        acc = self.new_access('admin')
        with acc.web_request(elephant='babar') as req:
            rset = req.execute('Any X WHERE X is CWUser')
            self.assertTrue(rset)
            # extra keyword arguments end up in the request's form
            self.assertEqual('babar', req.form['elephant'])

    def test_close(self):
        acc = self.new_access('admin')
        acc.close()

    def test_admin_access(self):
        with self.admin_access.client_cnx() as cnx:
            self.assertEqual('admin', cnx.user.login)
+
+
# allow running this test module directly
if __name__ == '__main__':
    unittest_main()
diff -r e1caf133b81c -r b23d58050076 cubicweb/devtools/test/unittest_webtest.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/test/unittest_webtest.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,41 @@
+from six.moves import http_client
+
+from logilab.common.testlib import Tags
+from cubicweb.devtools.webtest import CubicWebTestTC
+
+
class CWTTC(CubicWebTestTC):
    """Basic webtest-based checks against a running CubicWeb instance."""

    def test_response(self):
        response = self.webapp.get('/')
        self.assertEqual(200, response.status_int)

    def test_base_url(self):
        # the configured base-url must appear somewhere in the index page
        if self.config['base-url'] not in self.webapp.get('/').text:
            self.fail('no mention of base url in retrieved page')
+
+
class CWTIdentTC(CubicWebTestTC):
    """Authentication-related webtest checks (anonymous access disabled)."""
    test_db_id = 'webtest-ident'
    anonymous_allowed = False
    tags = CubicWebTestTC.tags | Tags(('auth',))

    def test_reponse_denied(self):
        # anonymous browsing is disabled, so '/' must be forbidden
        res = self.webapp.get('/', expect_errors=True)
        self.assertEqual(http_client.FORBIDDEN, res.status_int)

    def test_login(self):
        # anonymous access is denied...
        res = self.webapp.get('/', expect_errors=True)
        self.assertEqual(http_client.FORBIDDEN, res.status_int)

        # ...an authenticated admin gets through...
        self.login(self.admlogin, self.admpassword)
        res = self.webapp.get('/')
        self.assertEqual(http_client.OK, res.status_int)

        # ...and access is denied again after logout
        self.logout()
        res = self.webapp.get('/', expect_errors=True)
        self.assertEqual(http_client.FORBIDDEN, res.status_int)
+
+
# allow running this test module directly
if __name__ == '__main__':
    import unittest
    unittest.main()
diff -r e1caf133b81c -r b23d58050076 cubicweb/devtools/testlib.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/testlib.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,1356 @@
+# copyright 2003-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""this module contains base classes and utilities for cubicweb tests"""
+from __future__ import print_function
+
+import sys
+import re
+import warnings
+from os.path import dirname, join, abspath
+from math import log
+from contextlib import contextmanager
+from inspect import isgeneratorfunction
+from itertools import chain
+
+from six import text_type, string_types
+from six.moves import range
+from six.moves.urllib.parse import urlparse, parse_qs, unquote as urlunquote
+
+import yams.schema
+
+from logilab.common.testlib import Tags, nocoverage
+from logilab.common.debugger import Debugger
+from logilab.common.umessage import message_from_string
+from logilab.common.decorators import cached, classproperty, clear_cache, iclassmethod
+from logilab.common.deprecation import deprecated, class_deprecated
+from logilab.common.shellutils import getlogin
+
+from cubicweb import (ValidationError, NoSelectableObject, AuthenticationError,
+ BadConnectionId)
+from cubicweb import cwconfig, devtools, web, server, repoapi
+from cubicweb.utils import json
+from cubicweb.sobjects import notification
+from cubicweb.web import Redirect, application, eid_param
+from cubicweb.server.hook import SendMailOp
+from cubicweb.server.session import Session
+from cubicweb.devtools import SYSTEM_ENTITIES, SYSTEM_RELATIONS, VIEW_VALIDATORS
+from cubicweb.devtools import fake, htmlparser, DEFAULT_EMPTY_DB_ID
+
+
+if sys.version_info[:2] < (3, 4):
+ from unittest2 import TestCase
+ if not hasattr(TestCase, 'subTest'):
+ raise ImportError('no subTest support in available unittest2')
+else:
+ from unittest import TestCase
+
+# in python 2.7, DeprecationWarning are not shown anymore by default
+warnings.filterwarnings('default', category=DeprecationWarning)
+
+
+# provide a data directory for the test class ##################################
+
class BaseTestCase(TestCase):
    """TestCase subclass adding helpers to locate the test's data directory."""

    @classproperty
    @cached
    def datadir(cls):  # pylint: disable=E0213
        """helper attribute holding the standard test's data directory
        """
        mod = sys.modules[cls.__module__]
        return join(dirname(abspath(mod.__file__)), 'data')
    # cache it (use a class method to cache on class since TestCase is
    # instantiated for each test run)

    @classmethod
    def datapath(cls, *fname):
        """joins the object's datadir and `fname`"""
        return join(cls.datadir, *fname)


# python2 compat: unittest2's assertItemsEqual was renamed assertCountEqual
if hasattr(BaseTestCase, 'assertItemsEqual'):
    BaseTestCase.assertCountEqual = BaseTestCase.assertItemsEqual
+
+
+# low-level utilities ##########################################################
+
class CubicWebDebugger(Debugger):
    """special debugger class providing a 'view' function which saves some
    html into a temporary file and open a web browser to examinate it.
    """

    def do_view(self, arg):
        """Dump the value of `arg` to an HTML file and open it in a browser."""
        import tempfile
        import webbrowser
        data = self._getval(arg)
        # use a private temporary file rather than a fixed, predictable path
        # in /tmp (avoids clobbering/symlink attacks and works on any OS)
        with tempfile.NamedTemporaryFile(mode='w', suffix='.html',
                                         delete=False) as tmp:
            tmp.write(data)
        webbrowser.open('file://' + tmp.name)
+
+
def line_context_filter(line_no, center, before=3, after=None):
    """Tell whether `line_no` lies within a context window around `center`.

    The window spans `before` lines above and `after` lines below `center`;
    when `after` is None it defaults to `before` (symmetric window).
    """
    if after is None:
        after = before
    lower = center - before
    upper = center + after
    return lower <= line_no <= upper
+
+
def unprotected_entities(schema, strict=False):
    """returned a set of each non final entity type, excluding "system" entities
    (eg CWGroup, CWUser...)

    When `strict` is true only yams base types are excluded; otherwise the
    cubicweb SYSTEM_ENTITIES are excluded as well.
    """
    if strict:
        protected_entities = yams.schema.BASE_TYPES
    else:
        protected_entities = yams.schema.BASE_TYPES.union(SYSTEM_ENTITIES)
    return set(schema.entities()) - protected_entities
+
+
class JsonValidator(object):
    """Minimal 'validator' that simply decodes a JSON payload."""

    def parse_string(self, data):
        """Return the object encoded by the JSON bytes `data`."""
        text = data.decode('ascii')
        return json.loads(text)
+
+
@contextmanager
def real_error_handling(app):
    """By default, CubicWebTC `app` attribute (ie the publisher) is monkey
    patched so that unexpected error are raised rather than going through the
    `error_handler` method.

    By using this context manager you disable this monkey-patching temporarily.
    Hence when publishing a request no error will be raised, you'll get
    req.status_out set to an HTTP error status code and the generated page will
    usually hold a traceback as HTML.

    >>> with real_error_handling(app):
    >>>     page = app.handle_request(req)
    """
    # remove the monkey patched error handler (deleting the instance
    # attribute re-exposes the class-level implementation)
    fake_error_handler = app.error_handler
    del app.error_handler
    try:
        # return the app
        yield app
    finally:
        # restore the patched handler even if the body raised, so subsequent
        # tests are not left with real error handling enabled
        app.error_handler = fake_error_handler
+
+
+# email handling, to test emails sent by an application ########################
+
+MAILBOX = []
+
+
class Email(object):
    """you'll get instances of Email into MAILBOX during tests that trigger
    some notification.

    * `msg` is the original message object

    * `recipients` is a list of email address which are the recipients of this
      message
    """
    def __init__(self, fromaddr, recipients, msg):
        self.fromaddr = fromaddr
        self.recipients = recipients
        self.msg = msg

    @property
    def message(self):
        # parse lazily on access
        return message_from_string(self.msg)

    @property
    def subject(self):
        return self.message.get('Subject')

    @property
    def content(self):
        return self.message.get_payload(decode=True)

    def __repr__(self):
        # fixed: the format string had been lost ('' % (a, b) raises
        # TypeError); restore a meaningful representation
        return '<Email to %s with subject %s>' % (','.join(self.recipients),
                                                  self.message.get('Subject'))
+
+
+# the trick to get email into MAILBOX instead of actually sent: monkey patch
+# cwconfig.SMTP object
class MockSMTP:
    """Fake SMTP connection recording outgoing email into MAILBOX."""

    def __init__(self, server, port):
        # server/port are ignored: nothing is actually sent
        pass

    def close(self):
        pass

    def sendmail(self, fromaddr, recipients, msg):
        # store the message instead of delivering it
        MAILBOX.append(Email(fromaddr, recipients, msg))

# install the mock so applications under test never send real email
cwconfig.SMTP = MockSMTP
+
+
+# Repoaccess utility ###############################################3###########
+
class RepoAccess(object):
    """An helper to easily create object to access the repo as a specific user

    Each RepoAccess have it own session.

    A repo access can create three type of object:

    .. automethod:: cubicweb.testlib.RepoAccess.cnx
    .. automethod:: cubicweb.testlib.RepoAccess.web_request
    .. automethod:: cubicweb.testlib.RepoAccess.shell

    The RepoAccess need to be closed to destroy the associated Session.
    TestCase usually take care of this aspect for the user.

    .. automethod:: cubicweb.testlib.RepoAccess.close
    """

    def __init__(self, repo, login, requestcls):
        self._repo = repo
        self._login = login
        self.requestcls = requestcls
        self._session = self._unsafe_connect(login)

    def _unsafe_connect(self, login, **kwargs):
        """ a completely unsafe connect method for the tests """
        # use an internal connection
        with self._repo.internal_cnx() as cnx:
            # try to get a user object
            user = cnx.find('CWUser', login=login).one()
            # touch attributes needed later while the connection is open
            user.groups
            user.properties
            user.login
            session = Session(user, self._repo)
            self._repo._sessions[session.sessionid] = session
            user._cw = user.cw_rset.req = session
        with session.new_cnx() as cnx:
            self._repo.hm.call_hooks('session_open', cnx)
            # commit connection at this point in case write operation has been
            # done during `session_open` hooks
            cnx.commit()
        return session

    @contextmanager
    def cnx(self):
        """Context manager returning a server side connection for the user"""
        with self._session.new_cnx() as cnx:
            yield cnx

    # aliases for bw compat
    client_cnx = repo_cnx = cnx

    @contextmanager
    def web_request(self, url=None, headers=None, method='GET', **kwargs):
        """Context manager returning a web request pre-linked to a client cnx

        To commit and rollback use::

            req.cnx.commit()
            req.cnx.rollback()
        """
        # fixed: `headers` used to default to a shared mutable {}; any
        # mutation by a request class would have leaked into later calls
        if headers is None:
            headers = {}
        req = self.requestcls(self._repo.vreg, url=url, headers=headers,
                              method=method, form=kwargs)
        with self._session.new_cnx() as cnx:
            req.set_cnx(cnx)
            yield req

    def close(self):
        """Close the session associated to the RepoAccess"""
        self._session.close()

    @contextmanager
    def shell(self):
        """Context manager returning a server migration helper (shell)"""
        from cubicweb.server.migractions import ServerMigrationHelper
        with self._session.new_cnx() as cnx:
            mih = ServerMigrationHelper(None, repo=self._repo, cnx=cnx,
                                        interactive=False,
                                        # hack so it don't try to load fs schema
                                        schema=1)
            yield mih
            cnx.commit()
+
+
+# base class for cubicweb tests requiring a full cw environments ###############
+
+class CubicWebTC(BaseTestCase):
+ """abstract class for test using an apptest environment
+
+ attributes:
+
+ * `vreg`, the vregistry
+ * `schema`, self.vreg.schema
+ * `config`, cubicweb configuration
+ * `cnx`, repoapi connection to the repository using an admin user
+ * `session`, server side session associated to `cnx`
+ * `app`, the cubicweb publisher (for web testing)
+ * `repo`, the repository object
+ * `admlogin`, login of the admin user
+ * `admpassword`, password of the admin user
+ * `shell`, create and use shell environment
+ * `anonymous_allowed`: flag telling if anonymous browsing should be allowed
+ """
+ appid = 'data'
+ configcls = devtools.ApptestConfiguration
+ requestcls = fake.FakeRequest
+ tags = Tags('cubicweb', 'cw_repo')
+ test_db_id = DEFAULT_EMPTY_DB_ID
+
+ # anonymous is logged by default in cubicweb test cases
+ anonymous_allowed = True
+
    @classmethod
    def setUpClass(cls):
        # each test class gets its own config instance, created here so that
        # subclasses can tweak it afterwards without leaking between classes
        test_module_file = sys.modules[cls.__module__].__file__
        assert 'config' not in cls.__dict__, (
            '%s has a config class attribute before entering setUpClass. '
            'Let CubicWebTC.setUpClass instantiate it and modify it afterwards.' % cls)
        cls.config = cls.configcls(cls.appid, test_module_file)
        cls.config.mode = 'test'
+
    def __init__(self, *args, **kwargs):
        # repo/session are initialized lazily by _init_repo() during setUp
        self._admin_session = None
        self.repo = None
        # accesses created through new_access(), closed at end of test
        self._open_access = set()
        super(CubicWebTC, self).__init__(*args, **kwargs)

    def run(self, *args, **kwds):
        """run the test, rejecting removed generative-test style"""
        testMethod = getattr(self, self._testMethodName)
        if isgeneratorfunction(testMethod):
            raise RuntimeError(
                '%s appears to be a generative test. This is not handled '
                'anymore, use subTest API instead.' % self)
        return super(CubicWebTC, self).run(*args, **kwds)
+
+ # repository connection handling ###########################################
+
    def new_access(self, login):
        """provide a new RepoAccess object for a given user

        The access is automatically closed at the end of the test."""
        login = text_type(login)
        access = RepoAccess(self.repo, login, self.requestcls)
        self._open_access.add(access)
        return access

    def _close_access(self):
        # close every access created through new_access(); accesses already
        # closed by the test itself are simply skipped
        while self._open_access:
            try:
                self._open_access.pop().close()
            except BadConnectionId:
                continue  # already closed

    @property
    def session(self):
        """return admin session"""
        return self._admin_session

    def _init_repo(self):
        """init the repository and connection to it.
        """
        # get or restore and working db.
        db_handler = devtools.get_test_db_handler(self.config, self.init_config)
        db_handler.build_db_cache(self.test_db_id, self.pre_setup_database)
        db_handler.restore_database(self.test_db_id)
        self.repo = db_handler.get_repo(startup=True)
        # get an admin session (without actual login)
        login = text_type(db_handler.config.default_admin_config['login'])
        self.admin_access = self.new_access(login)
        self._admin_session = self.admin_access._session
+
+ # config management ########################################################
+
+ @classmethod # XXX could be turned into a regular method
+ def init_config(cls, config):
+ """configuration initialization hooks.
+
+ You may only want to override here the configuraton logic.
+
+ Otherwise, consider to use a different :class:`ApptestConfiguration`
+ defined in the `configcls` class attribute.
+
+ This method will be called by the database handler once the config has
+ been properly bootstrapped.
+ """
+ admincfg = config.default_admin_config
+ cls.admlogin = text_type(admincfg['login'])
+ cls.admpassword = admincfg['password']
+ # uncomment the line below if you want rql queries to be logged
+ # config.global_set_option('query-log-file',
+ # '/tmp/test_rql_log.' + `os.getpid()`)
+ config.global_set_option('log-file', None)
+ # set default-dest-addrs to a dumb email address to avoid mailbox or
+ # mail queue pollution
+ config.global_set_option('default-dest-addrs', ['whatever'])
+ send_to = '%s@logilab.fr' % getlogin()
+ config.global_set_option('sender-addr', send_to)
+ config.global_set_option('default-dest-addrs', send_to)
+ config.global_set_option('sender-name', 'cubicweb-test')
+ config.global_set_option('sender-addr', 'cubicweb-test@logilab.fr')
+ # default_base_url on config class isn't enough for TestServerConfiguration
+ config.global_set_option('base-url', config.default_base_url())
+ # web resources
+ try:
+ config.global_set_option('embed-allowed', re.compile('.*'))
+ except Exception: # not in server only configuration
+ pass
+
    @property
    def vreg(self):
        """return the instance's vregistry (shared with the repository)"""
        return self.repo.vreg

    # global resources accessors ###############################################

    @property
    def schema(self):
        """return the application schema"""
        return self.vreg.schema

    def set_option(self, optname, value):
        # shortcut to set a configuration option globally
        self.config.global_set_option(optname, value)

    def set_debug(self, debugmode):
        # toggle server debug mode
        server.set_debug(debugmode)

    def debugged(self, debugmode):
        # context manager counterpart of set_debug
        return server.debugged(debugmode)
+
+ # default test setup and teardown #########################################
+
    def setUp(self):
        assert hasattr(self, 'config'), (
            'It seems that CubicWebTC.setUpClass has not been called. '
            'Missing super() call in %s?' % self.setUpClass)
        # monkey patch send mail operation so emails are sent synchronously
        self._patch_SendMailOp()
        # if repo initialisation already failed for this class, don't retry it
        # for every remaining test method
        previous_failure = self.__class__.__dict__.get('_repo_init_failed')
        if previous_failure is not None:
            self.skipTest('repository is not initialised: %r' % previous_failure)
        try:
            self._init_repo()
        except Exception as ex:
            self.__class__._repo_init_failed = ex
            raise
        self.addCleanup(self._close_access)
        self.config.set_anonymous_allowed(self.anonymous_allowed)
        self.setup_database()
        MAILBOX[:] = []  # reset mailbox
+
    def tearDown(self):
        # XXX hack until logilab.common.testlib is fixed
        if self._admin_session is not None:
            self._admin_session.close()
            self._admin_session = None
        # run cleanups in reverse registration order, mirroring unittest
        while self._cleanups:
            cleanup, args, kwargs = self._cleanups.pop(-1)
            cleanup(*args, **kwargs)
        self.repo.turn_repo_off()

    def _patch_SendMailOp(self):
        # monkey patch send mail operation so emails are sent synchronously
        _old_mail_postcommit_event = SendMailOp.postcommit_event
        SendMailOp.postcommit_event = SendMailOp.sendmails

        def reverse_SendMailOp_monkey_patch():
            SendMailOp.postcommit_event = _old_mail_postcommit_event

        self.addCleanup(reverse_SendMailOp_monkey_patch)
+
    def setup_database(self):
        """add your database setup code by overriding this method"""

    @classmethod
    def pre_setup_database(cls, cnx, config):
        """add your pre database setup code by overriding this method

        Do not forget to set the cls.test_db_id value to enable caching of the
        result.
        """

    # user / session management ###############################################

    @deprecated('[3.19] explicitly use RepoAccess object in test instead')
    def user(self, req=None):
        """return the user of `req`, or of the current session when no request
        is given (deprecated)"""
        if req is None:
            return self.request().user
        else:
            return req.user
+
    @iclassmethod  # XXX turn into a class method
    def create_user(self, req, login=None, groups=('users',), password=None,
                    email=None, commit=True, **kwargs):
        """create and return a new user entity"""
        # password defaults to the login itself
        if password is None:
            password = login
        if login is not None:
            login = text_type(login)
        user = req.create_entity('CWUser', login=login,
                                 upassword=password, **kwargs)
        # group names are known safe identifiers, interpolated into the RQL
        req.execute('SET X in_group G WHERE X eid %%(x)s, G name IN(%s)'
                    % ','.join(repr(str(g)) for g in groups),
                    {'x': user.eid})
        if email is not None:
            req.create_entity('EmailAddress', address=text_type(email),
                              reverse_primary_email=user)
        # drop the stale relation cache filled before in_group was set
        user.cw_clear_relation_cache('in_group', 'subject')
        if commit:
            try:
                req.commit()  # req is a session
            except AttributeError:
                req.cnx.commit()
        return user
+
+ # other utilities #########################################################
+
    @contextmanager
    def temporary_appobjects(self, *appobjects):
        """register `appobjects` in the vregistry for the duration of the
        block, unregistering them on exit (even on error)
        """
        self.vreg._loadedmods.setdefault(self.__module__, {})
        for obj in appobjects:
            self.vreg.register(obj)
            # run the optional __registered__ callback, as vreg loading would
            registered = getattr(obj, '__registered__', None)
            if registered:
                for registry in obj.__registries__:
                    registered(self.vreg[registry])
        try:
            yield
        finally:
            for obj in appobjects:
                self.vreg.unregister(obj)
+
    @contextmanager
    def temporary_permissions(self, *perm_overrides, **perm_kwoverrides):
        """Set custom schema permissions within context.

        There are two ways to call this method, which may be used together :

        * using positional argument(s):

          .. sourcecode:: python

                rdef = self.schema['CWUser'].rdef('login')
                with self.temporary_permissions((rdef, {'read': ()})):
                    ...


        * using named argument(s):

          .. sourcecode:: python

                with self.temporary_permissions(CWUser={'read': ()}):
                    ...

        Usually the former will be preferred to override permissions on a
        relation definition, while the latter is well suited for entity types.

        The allowed keys in the permission dictionary depend on the schema type
        (entity type / relation definition). Resulting permissions will be
        similar to `orig_permissions.update(partial_perms)`.
        """
        torestore = []
        for erschema, etypeperms in chain(perm_overrides, perm_kwoverrides.items()):
            # keyword overrides give the entity type name; resolve it
            if isinstance(erschema, string_types):
                erschema = self.schema[erschema]
            for action, actionperms in etypeperms.items():
                # remember the original permissions before overriding
                origperms = erschema.permissions[action]
                erschema.set_action_permissions(action, actionperms)
                torestore.append([erschema, action, origperms])
        try:
            yield
        finally:
            # restore everything, even if the block raised
            for erschema, action, permissions in torestore:
                if action is None:
                    erschema.permissions = permissions
                else:
                    erschema.set_action_permissions(action, permissions)
+
    def assertModificationDateGreater(self, entity, olddate):
        """assert entity's modification date is more recent than `olddate`"""
        # drop the cached value so the attribute is fetched again
        entity.cw_attr_cache.pop('modification_date', None)
        self.assertGreater(entity.modification_date, olddate)

    def assertMessageEqual(self, req, params, expected_msg):
        """assert the flash message stored in the session matches"""
        msg = req.session.data[params['_cwmsgid']]
        self.assertEqual(expected_msg, msg)

    # workflow utilities #######################################################

    def assertPossibleTransitions(self, entity, expected):
        """assert the workflow transitions reachable from entity's state"""
        transitions = entity.cw_adapt_to('IWorkflowable').possible_transitions()
        self.assertListEqual(sorted(tr.name for tr in transitions),
                             sorted(expected))
+
+ # views and actions registries inspection ##################################
+
    def pviews(self, req, rset):
        """return sorted (regid, class) pairs for views selectable on `rset`"""
        return sorted((a.__regid__, a.__class__)
                      for a in self.vreg['views'].possible_views(req, rset=rset))

    def pactions(self, req, rset,
                 skipcategories=('addrelated', 'siteactions', 'useractions',
                                 'footer', 'manage')):
        """return (regid, class) pairs for possible actions, skipping generic
        categories by default"""
        return [(a.__regid__, a.__class__)
                for a in self.vreg['actions'].poss_visible_objects(req, rset=rset)
                if a.category not in skipcategories]

    def pactions_by_cats(self, req, rset, categories=('addrelated',)):
        """return (regid, class) pairs for possible actions in `categories`"""
        return [(a.__regid__, a.__class__)
                for a in self.vreg['actions'].poss_visible_objects(req, rset=rset)
                if a.category in categories]

    def pactionsdict(self, req, rset,
                     skipcategories=('addrelated', 'siteactions', 'useractions',
                                     'footer', 'manage')):
        """return a category -> action classes mapping of possible actions"""
        res = {}
        for a in self.vreg['actions'].poss_visible_objects(req, rset=rset):
            if a.category not in skipcategories:
                res.setdefault(a.category, []).append(a.__class__)
        return res
+
    def action_submenu(self, req, rset, id):
        """return the submenu items filled by the action `id` for `rset`"""
        return self._test_action(self.vreg['actions'].select(id, req, rset=rset))

    def _test_action(self, action):
        # minimal stand-ins for the menu/box objects fill_menu expects
        class fake_menu(list):
            @property
            def items(self):
                return self

        class fake_box(object):
            def action_link(self, action, **kwargs):
                return (action.title, action.url())
        submenu = fake_menu()
        action.fill_menu(fake_box(), submenu)
        return submenu
+
    def list_views_for(self, rset):
        """returns the list of views that can be applied on `rset`"""
        req = rset.req
        # views meaningful only for a single-row result set
        only_once_vids = ('primary', 'secondary', 'text')
        req.data['ex'] = ValueError("whatever")
        viewsvreg = self.vreg['views']
        for vid, views in viewsvreg.items():
            if vid[0] == '_':
                continue
            if rset.rowcount > 1 and vid in only_once_vids:
                continue
            # skip startup views, notification views and deprecated classes
            views = [view for view in views
                     if view.category != 'startupview'
                     and not issubclass(view, notification.NotificationView)
                     and not isinstance(view, class_deprecated)]
            if views:
                try:
                    view = viewsvreg._select_best(views, req, rset=rset)
                    if view is None:
                        raise NoSelectableObject((req,), {'rset': rset}, views)
                    if view.linkable():
                        yield view
                    else:
                        not_selected(self.vreg, view)
                    # else the view is expected to be used as subview and should
                    # not be tested directly
                except NoSelectableObject:
                    continue

    def list_actions_for(self, rset):
        """returns the list of actions that can be applied on `rset`"""
        req = rset.req
        for action in self.vreg['actions'].possible_objects(req, rset=rset):
            yield action

    def list_boxes_for(self, rset):
        """returns the list of boxes that can be applied on `rset`"""
        req = rset.req
        for box in self.vreg['ctxcomponents'].possible_objects(req, rset=rset):
            yield box

    def list_startup_views(self):
        """returns the list of startup views"""
        with self.admin_access.web_request() as req:
            for view in self.vreg['views'].possible_views(req, None):
                if view.category == 'startupview':
                    yield view.__regid__
                else:
                    not_selected(self.vreg, view)
+
+ # web ui testing utilities #################################################
+
    @property
    @cached
    def app(self):
        """return a cubicweb publisher"""
        publisher = application.CubicWebPublisher(self.repo, self.config)

        def raise_error_handler(*args, **kwargs):
            # re-raise errors instead of rendering an error page, so test
            # failures surface as real tracebacks
            raise

        publisher.error_handler = raise_error_handler
        return publisher

    @deprecated('[3.19] use the .remote_calling method')
    def remote_call(self, fname, *args):
        """remote json call simulation"""
        dump = json.dumps
        args = [dump(arg) for arg in args]
        req = self.request(fname=fname, pageid='123', arg=args)
        ctrl = self.vreg['controllers'].select('ajax', req)
        return ctrl.publish(), req

    @contextmanager
    def remote_calling(self, fname, *args):
        """remote json call simulation"""
        # arguments are json-encoded as they would be on the wire
        args = [json.dumps(arg) for arg in args]
        with self.admin_access.web_request(fname=fname, pageid='123', arg=args) as req:
            ctrl = self.vreg['controllers'].select('ajax', req)
            yield ctrl.publish(), req

    def app_handle_request(self, req, path='view'):
        """let the publisher handle `req` as if it targeted `path`"""
        return self.app.core_handle(req, path)

    @deprecated("[3.15] app_handle_request is the new and better way"
                " (beware of small semantic changes)")
    def app_publish(self, *args, **kwargs):
        return self.app_handle_request(*args, **kwargs)
+
    def ctrl_publish(self, req, ctrl='edit', rset=None):
        """call the publish method of the edit controller"""
        ctrl = self.vreg['controllers'].select(ctrl, req, appli=self.app)
        try:
            result = ctrl.publish(rset)
            req.cnx.commit()
        except web.Redirect:
            # a redirect is a normal outcome: commit, then let it propagate
            req.cnx.commit()
            raise
        return result
+
    @staticmethod
    def fake_form(formid, field_dict=None, entity_field_dicts=()):
        """Build _cw.form dictionnary to fake posting of some standard cubicweb form

        * `formid`, the form id, usually form's __regid__

        * `field_dict`, dictionary of name:value for fields that are not tied to an entity

        * `entity_field_dicts`, list of (entity, dictionary) where dictionary contains name:value
          for fields tied to the given entity
        """
        assert field_dict or entity_field_dicts, \
            'field_dict and entity_field_dicts arguments must not be both unspecified'
        if field_dict is None:
            field_dict = {}
        form = {'__form_id': formid}
        fields = []
        for field, value in field_dict.items():
            fields.append(field)
            form[field] = value

        def _add_entity_field(entity, field, value):
            # record `field` under its eid-qualified name
            entity_fields.append(field)
            form[eid_param(field, entity.eid)] = value

        for entity, field_dict in entity_field_dicts:
            if '__maineid' not in form:
                # the first entity is considered the main edited one
                form['__maineid'] = entity.eid
            entity_fields = []
            form.setdefault('eid', []).append(entity.eid)
            _add_entity_field(entity, '__type', entity.cw_etype)
            for field, value in field_dict.items():
                _add_entity_field(entity, field, value)
            if entity_fields:
                form[eid_param('_cw_entity_fields', entity.eid)] = ','.join(entity_fields)
        if fields:
            form['_cw_fields'] = ','.join(sorted(fields))
        return form
+
    @deprecated('[3.19] use .admin_request_from_url instead')
    def req_from_url(self, url):
        """parses `url` and builds the corresponding CW-web request

        req.form will be setup using the url's query string
        """
        req = self.request(url=url)
        if isinstance(url, unicode):
            url = url.encode(req.encoding)  # req.setup_params() expects encoded strings
        querystring = urlparse(url)[-2]
        params = parse_qs(querystring)
        req.setup_params(params)
        return req
+
    @contextmanager
    def admin_request_from_url(self, url):
        """parses `url` and builds the corresponding CW-web request, yielded
        within an admin access context

        req.form will be setup using the url's query string
        """
        with self.admin_access.web_request(url=url) as req:
            if isinstance(url, unicode):
                url = url.encode(req.encoding)  # req.setup_params() expects encoded strings
            querystring = urlparse(url)[-2]
            params = parse_qs(querystring)
            req.setup_params(params)
            yield req
+
    def url_publish(self, url, data=None):
        """takes `url`, uses application's app_resolver to find the appropriate
        controller and result set, then publishes the result.

        To simulate post of www-form-encoded data, give a `data` dictionary
        containing desired key/value associations.

        This should pretty much correspond to what occurs in a real CW server
        except the apache-rewriter component is not called.
        """
        with self.admin_request_from_url(url) as req:
            if data is not None:
                req.form.update(data)
            ctrlid, rset = self.app.url_resolver.process(req, req.relative_path(False))
            return self.ctrl_publish(req, ctrlid, rset)
+
    def http_publish(self, url, data=None):
        """like `url_publish`, except this returns a http response, even in case
        of errors. You may give form parameters using the `data` argument.

        :returns: a (response content, request) tuple
        """
        with self.admin_request_from_url(url) as req:
            if data is not None:
                req.form.update(data)
            # real_error_handling turns exceptions into error responses
            # instead of letting them propagate
            with real_error_handling(self.app):
                result = self.app_handle_request(req, req.relative_path(False))
            return result, req
+
+ @staticmethod
+ def _parse_location(req, location):
+ try:
+ path, params = location.split('?', 1)
+ except ValueError:
+ path = location
+ params = {}
+ else:
+ cleanup = lambda p: (p[0], urlunquote(p[1]))
+ params = dict(cleanup(p.split('=', 1)) for p in params.split('&') if p)
+ if path.startswith(req.base_url()): # may be relative
+ path = path[len(req.base_url()):]
+ return path, params
+
    def expect_redirect(self, callback, req):
        """call the given callback with req as argument, expecting to get a
        Redirect exception

        :returns: the (path, params) parsed from the redirect location
        """
        try:
            callback(req)
        except Redirect as ex:
            return self._parse_location(req, ex.location)
        else:
            self.fail('expected a Redirect exception')
+
    def expect_redirect_handle_request(self, req, path='edit'):
        """call the publish method of the application publisher, expecting to
        get a Redirect exception

        :returns: the (path, params) parsed from the redirect location
        """
        self.app_handle_request(req, path)
        # a redirect materializes as a 3xx status on the outgoing request
        self.assertTrue(300 <= req.status_out < 400, req.status_out)
        location = req.get_response_header('location')
        return self._parse_location(req, location)
+
    @deprecated("[3.15] expect_redirect_handle_request is the new and better way"
                " (beware of small semantic changes)")
    def expect_redirect_publish(self, *args, **kwargs):
        """deprecated alias for :meth:`expect_redirect_handle_request`"""
        return self.expect_redirect_handle_request(*args, **kwargs)
+
    def set_auth_mode(self, authmode, anonuser=None):
        """configure the instance's authentication mode and anonymous user"""
        self.set_option('auth-mode', authmode)
        self.set_option('anonymous-user', anonuser)
        if anonuser is None:
            self.config.anonymous_credential = None
        else:
            # the anonymous user conventionally uses its login as password
            self.config.anonymous_credential = (anonuser, anonuser)
+
    def init_authentication(self, authmode, anonuser=None):
        """prepare the application for authentication testing

        :returns: a (request, session) tuple
        """
        self.set_auth_mode(authmode, anonuser)
        req = self.requestcls(self.vreg, url='login')
        sh = self.app.session_handler
        authm = sh.session_manager.authmanager
        authm.anoninfo = self.vreg.config.anonymous_user()
        # reshape anoninfo as (login, {'password': ...}) as the auth manager expects
        authm.anoninfo = authm.anoninfo[0], {'password': authm.anoninfo[1]}
        # not properly cleaned between tests
        self.open_sessions = sh.session_manager._sessions = {}
        return req, self.session
+
    def assertAuthSuccess(self, req, origsession, nbsessions=1):
        """assert that authenticating `req` succeeds, yielding a non-anonymous
        session with the same login as `origsession`
        """
        session = self.app.get_session(req)
        cnx = repoapi.Connection(session)
        req.set_cnx(cnx)
        self.assertEqual(len(self.open_sessions), nbsessions, self.open_sessions)
        self.assertEqual(session.login, origsession.login)
        self.assertEqual(session.anonymous_session, False)
+
    def assertAuthFailure(self, req, nbsessions=0):
        """assert that authenticating `req` raises AuthenticationError and
        opens no new session
        """
        with self.assertRaises(AuthenticationError):
            self.app.get_session(req)
        # +0 since we do not track the opened session
        self.assertEqual(len(self.open_sessions), nbsessions)
        clear_cache(req, 'get_authorization')
+
    # content validation #######################################################

    # validators are used to validate (XML, DTD, whatever) view's content
    # available validators are:
    #  DTDValidator : validates XML + declared DTD
    #  SaxOnlyValidator : guarantees XML is well formed
    #  None : do not try to validate anything
    # validators used must be imported from cubicweb.devtools.htmlparser
    content_type_validators = {
        # maps MIME type : validator name
        #
        # do not set html validators here, we need HTMLValidator for html
        # snippets
        # 'text/html': DTDValidator,
        # 'application/xhtml+xml': DTDValidator,
        'application/xml': htmlparser.XMLValidator,
        'text/xml': htmlparser.XMLValidator,
        'application/json': JsonValidator,
        'text/plain': None,
        'text/comma-separated-values': None,
        'text/x-vcard': None,
        'text/calendar': None,
        'image/png': None,
    }
    # maps vid : validator name (override content_type_validators)
    vid_validators = dict((vid, htmlparser.VALMAP[valkey])
                          for vid, valkey in VIEW_VALIDATORS.items())
+
    def view(self, vid, rset=None, req=None, template='main-template',
             **kwargs):
        """This method tests the view `vid` on `rset` using `template`

        If no error occurred while rendering the view, the HTML is analyzed
        and parsed.

        :returns: an instance of `cubicweb.devtools.htmlparser.PageInfo`
          encapsulating the generated HTML
        """
        if req is None:
            assert rset is not None, 'you must supply at least one of rset or req'
            req = rset.req
        req.form['vid'] = vid
        viewsreg = self.vreg['views']
        view = viewsreg.select(vid, req, rset=rset, **kwargs)
        if template is None:  # raw view testing, no template
            viewfunc = view.render
        else:
            kwargs['view'] = view
            viewfunc = lambda **k: viewsreg.main_template(req, template,
                                                          rset=rset, **kwargs)
        return self._test_view(viewfunc, view, template, kwargs)
+
    def _test_view(self, viewfunc, view, template='main-template', kwargs={}):
        """this method does the actual call to the view

        If no error occurred while rendering the view, the HTML is analyzed
        and parsed.

        :returns: an instance of `cubicweb.devtools.htmlparser.PageInfo`
          encapsulating the generated HTML
        """
        # note: the shared default dict `kwargs` is only read, never mutated
        try:
            output = viewfunc(**kwargs)
        except Exception:
            # hijack exception: generative tests stop when the exception
            # is not an AssertionError
            klass, exc, tcbk = sys.exc_info()
            try:
                msg = '[%s in %s] %s' % (klass, view.__regid__, exc)
            except Exception:
                msg = '[%s in %s] undisplayable exception' % (klass, view.__regid__)
            exc = AssertionError(msg)
            exc.__traceback__ = tcbk
            raise exc
        return self._check_html(output, view, template)
+
+ def get_validator(self, view=None, content_type=None, output=None):
+ if view is not None:
+ try:
+ return self.vid_validators[view.__regid__]()
+ except KeyError:
+ if content_type is None:
+ content_type = view.content_type
+ if content_type is None:
+ content_type = 'text/html'
+ if content_type in ('text/html', 'application/xhtml+xml') and output:
+ if output.startswith(b''):
+ # only check XML well-formness since HTMLValidator isn't html5
+ # compatible and won't like various other extensions
+ default_validator = htmlparser.XMLSyntaxValidator
+ elif output.startswith(b' used in progress widget, unknown in html dtd
+ output = re.sub('', '', output)
+ return self.assertWellFormed(validator, output.strip(), context=view.__regid__)
+
    def assertWellFormed(self, validator, content, context=None):
        """parse `content` with `validator`, turning any parse failure into an
        AssertionError whose message embeds the offending lines

        :returns: whatever `validator.parse_string` returns on success
        """
        try:
            return validator.parse_string(content)
        except Exception:
            # hijack exception: generative tests stop when the exception
            # is not an AssertionError
            klass, exc, tcbk = sys.exc_info()
            if context is None:
                msg = u'[%s]' % (klass,)
            else:
                msg = u'[%s in %s]' % (klass, context)
            msg = msg.encode(sys.getdefaultencoding(), 'replace')

            try:
                str_exc = str(exc)
            except Exception:
                str_exc = 'undisplayable exception'
            msg += str_exc.encode(sys.getdefaultencoding(), 'replace')
            if content is not None:
                # some parsers expose the error position; use it to quote the
                # relevant part of the content in the failure message
                position = getattr(exc, "position", (0,))[0]
                if position:
                    # define filter
                    if isinstance(content, str):
                        content = unicode(content, sys.getdefaultencoding(), 'replace')
                    content = validator.preprocess_data(content)
                    content = content.splitlines()
                    # width of the line-number gutter
                    width = int(log(len(content), 10)) + 1
                    line_template = " %" + ("%i" % width) + "i: %s"
                    # XXX no need to iterate the whole file except to get
                    # the line number
                    content = u'\n'.join(line_template % (idx + 1, line)
                                         for idx, line in enumerate(content)
                                         if line_context_filter(idx + 1, position))
                    msg += u'\nfor content:\n%s' % content
            exc = AssertionError(msg)
            exc.__traceback__ = tcbk
            raise exc
+
    def assertDocTestFile(self, testfile):
        """run `testfile` through the migration shell, failing if any of its
        doctests failed
        """
        # doctest returns tuple (failure_count, test_count)
        with self.admin_access.shell() as mih:
            result = mih.process_script(testfile)
        if result[0] and result[1]:
            raise self.failureException("doctest file '%s' failed"
                                        % testfile)
+
+ # notifications ############################################################
+
    def assertSentEmail(self, subject, recipients=None, nb_msgs=None):
        """test recipients in system mailbox for given email subject

        :param subject: email subject to find in mailbox
        :param recipients: list of email recipients
        :param nb_msgs: expected number of entries
        :returns: list of matched emails
        """
        messages = [email for email in MAILBOX
                    if email.message.get('Subject') == subject]
        if recipients is not None:
            sent_to = set()
            for msg in messages:
                sent_to.update(msg.recipients)
            self.assertSetEqual(set(recipients), sent_to)
        if nb_msgs is not None:
            # note: counts the whole mailbox, not only subject-matched messages
            self.assertEqual(len(MAILBOX), nb_msgs)
        return messages
+
+
+# auto-populating test classes and utilities ###################################
+
+from cubicweb.devtools.fill import insert_entity_queries, make_relations_queries
+
+# XXX cleanup unprotected_entities & all mess
+
+
def how_many_dict(schema, cnx, how_many, skip):
    """given a schema, compute how many entities by type we need to be able to
    satisfy relations cardinality.

    The `how_many` argument tells how many entities of which type we want at
    least.

    Return a dictionary with entity types as key, and the number of entities
    for this type as value.
    """
    # maps (rschema, etype) -> list of target etype names whose count
    # constrains etype's count
    relmap = {}
    for rschema in schema.relations():
        if rschema.final:
            continue
        for subj, obj in rschema.rdefs:
            card = rschema.rdef(subj, obj).cardinality
            # if the relation is mandatory, we'll need at least as many subj and
            # obj to satisfy it
            if card[0] in '1+' and card[1] in '1?':
                # subj has to be linked to at least one obj,
                # but obj can be linked to only one subj
                # -> we need at least as many subj as obj to satisfy
                # cardinalities for this relation
                relmap.setdefault((rschema, subj), []).append(str(obj))
            if card[1] in '1+' and card[0] in '1?':
                # reverse subj and obj in the above explanation
                relmap.setdefault((rschema, obj), []).append(str(subj))
    unprotected = unprotected_entities(schema)
    for etype in skip:  # XXX (syt) duh? explain or kill
        unprotected.add(etype)
    howmanydict = {}
    # step 1, compute a base number of each entity types: number of already
    # existing entities of this type + `how_many`
    for etype in unprotected_entities(schema, strict=True):
        howmanydict[str(etype)] = cnx.execute('Any COUNT(X) WHERE X is %s' % etype)[0][0]
        if etype in unprotected:
            howmanydict[str(etype)] += how_many
    # step 2, augment nb entity per types to satisfy cardinality constraints,
    # by recomputing for each relation that constrained an entity type:
    #
    # new num for etype = max(current num, sum(num for possible target etypes))
    #
    # XXX we should first check there is no cycle then propagate changes
    for (rschema, etype), targets in relmap.items():
        relfactor = sum(howmanydict[e] for e in targets)
        howmanydict[str(etype)] = max(relfactor, howmanydict[etype])
    return howmanydict
+
+
class AutoPopulateTest(CubicWebTC):
    """base class for test with auto-populating of the database"""
    __abstract__ = True

    test_db_id = 'autopopulate'

    tags = CubicWebTC.tags | Tags('autopopulated')

    pdbclass = CubicWebDebugger
    # this is a hook to be able to define a list of rql queries
    # that are application dependent and cannot be guessed automatically
    application_rql = []

    # entity types that should not be automatically populated
    no_auto_populate = ()
    # relations that should not be automatically filled
    ignored_relations = set()

    def to_test_etypes(self):
        """return the set of entity types to test"""
        return unprotected_entities(self.schema, strict=True)

    def custom_populate(self, how_many, cnx):
        """hook for application-specific population, called before the
        automatic one"""
        pass

    def post_populate(self, cnx):
        """hook called once automatic population is done"""
        pass

    @nocoverage
    def auto_populate(self, how_many):
        """this method populates the database with `how_many` entities
        of each possible type. It also inserts random relations between them
        """
        with self.admin_access.cnx() as cnx:
            # security is disabled so that population is not limited by
            # permissions
            with cnx.security_enabled(read=False, write=False):
                self._auto_populate(cnx, how_many)
                cnx.commit()

    def _auto_populate(self, cnx, how_many):
        """actual population: insert entities then random relations"""
        self.custom_populate(how_many, cnx)
        vreg = self.vreg
        howmanydict = how_many_dict(self.schema, cnx, how_many, self.no_auto_populate)
        for etype in unprotected_entities(self.schema):
            if etype in self.no_auto_populate:
                continue
            nb = howmanydict.get(etype, how_many)
            for rql, args in insert_entity_queries(etype, self.schema, vreg, nb):
                cnx.execute(rql, args)
        # collect created eids per entity type
        edict = {}
        for etype in unprotected_entities(self.schema, strict=True):
            rset = cnx.execute('%s X' % etype)
            edict[str(etype)] = set(row[0] for row in rset.rows)
        # collect already existing relations to avoid duplicate insertions
        existingrels = {}
        ignored_relations = SYSTEM_RELATIONS | self.ignored_relations
        for rschema in self.schema.relations():
            if rschema.final or rschema in ignored_relations or rschema.rule:
                continue
            rset = cnx.execute('DISTINCT Any X,Y WHERE X %s Y' % rschema)
            existingrels.setdefault(rschema.type, set()).update((x, y) for x, y in rset)
        q = make_relations_queries(self.schema, edict, cnx, ignored_relations,
                                   existingrels=existingrels)
        for rql, args in q:
            try:
                cnx.execute(rql, args)
            except ValidationError as ex:
                # failed to satisfy some constraint
                print('error in automatic db population', ex)
                cnx.commit_state = None  # reset uncommitable flag
        self.post_populate(cnx)

    def iter_individual_rsets(self, etypes=None, limit=None):
        """yield one-row result sets, one per entity for each type in `etypes`
        (at most `limit` per type when specified)
        """
        etypes = etypes or self.to_test_etypes()
        with self.admin_access.web_request() as req:
            for etype in etypes:
                if limit:
                    rql = 'Any X LIMIT %s WHERE X is %s' % (limit, etype)
                else:
                    rql = 'Any X WHERE X is %s' % etype
                rset = req.execute(rql)
                for row in range(len(rset)):
                    if limit and row > limit:
                        break
                    # XXX iirk
                    rset2 = rset.limit(limit=1, offset=row)
                    yield rset2

    def iter_automatic_rsets(self, limit=10):
        """generates basic resultsets for each entity type"""
        etypes = self.to_test_etypes()
        if not etypes:
            return
        with self.admin_access.web_request() as req:
            for etype in etypes:
                yield req.execute('Any X LIMIT %s WHERE X is %s' % (limit, etype))
            etype1 = etypes.pop()
            try:
                etype2 = etypes.pop()
            except KeyError:
                etype2 = etype1
            # test a mixed query (DISTINCT/GROUP to avoid getting duplicate
            # X which make muledit view failing for instance (html validation fails
            # because of some duplicate "id" attributes)
            yield req.execute('DISTINCT Any X, MAX(Y) GROUPBY X WHERE X is %s, Y is %s' %
                              (etype1, etype2))
            # test some application-specific queries if defined
            for rql in self.application_rql:
                yield req.execute(rql)

    def _test_everything_for(self, rset):
        """this method tries to find everything that can be tested
        for `rset` and yields a callable test (as needed in generative tests)
        """
        propdefs = self.vreg['propertydefs']
        # make all components visible
        for k, v in propdefs.items():
            if k.endswith('visible') and not v['default']:
                propdefs[k]['default'] = True
        for view in self.list_views_for(rset):
            backup_rset = rset.copy(rset.rows, rset.description)
            with self.subTest(name=self._testname(rset, view.__regid__, 'view')):
                self.view(view.__regid__, rset,
                          rset.req.reset_headers(), 'main-template')
            # We have to do this because some views modify the
            # resultset's syntax tree
            rset = backup_rset
        for action in self.list_actions_for(rset):
            with self.subTest(name=self._testname(rset, action.__regid__, 'action')):
                self._test_action(action)
        for box in self.list_boxes_for(rset):
            w = [].append
            with self.subTest(self._testname(rset, box.__regid__, 'box')):
                box.render(w)

    @staticmethod
    def _testname(rset, objid, objtype):
        """build a readable sub-test name from the rset's column types"""
        return '%s_%s_%s' % ('_'.join(rset.column_types(0)), objid, objtype)
+
+
+# concrete class for automated application testing ############################
+
class AutomaticWebTest(AutoPopulateTest):
    """import this if you want automatic tests to be run"""

    tags = AutoPopulateTest.tags | Tags('web', 'generated')

    def setUp(self):
        if self.__class__ is AutomaticWebTest:
            # Prevent direct use of AutomaticWebTest to avoid database caching
            # issues.
            return
        super(AutomaticWebTest, self).setUp()

        # access to self.app for proper initialization of the authentication
        # machinery (else some views may fail)
        self.app

    def test_one_each_config(self):
        """run all views/actions/boxes on one entity of each type"""
        self.auto_populate(1)
        for rset in self.iter_automatic_rsets(limit=1):
            self._test_everything_for(rset)

    def test_ten_each_config(self):
        """run all views/actions/boxes on ten entities of each type"""
        self.auto_populate(10)
        for rset in self.iter_automatic_rsets(limit=10):
            self._test_everything_for(rset)

    def test_startup_views(self):
        """render each startup view with an empty result set"""
        for vid in self.list_startup_views():
            with self.admin_access.web_request() as req:
                with self.subTest(vid=vid):
                    self.view(vid, None, req)
+
+
+# registry instrumentization ###################################################
+
def not_selected(vreg, appobject):
    """decrement the instrumentation counter tracking how many times
    `appobject`'s class was selected

    Registries without instrumentation (no `_selected` attribute) or classes
    not yet counted are silently ignored.
    """
    try:
        counters = vreg._selected
        counters[appobject.__class__] -= 1
    except (KeyError, AttributeError):
        pass
+
+
+# def vreg_instrumentize(testclass):
+# # XXX broken
+# from cubicweb.devtools.apptest import TestEnvironment
+# env = testclass._env = TestEnvironment('data', configcls=testclass.configcls)
+# for reg in env.vreg.values():
+# reg._selected = {}
+# try:
+# orig_select_best = reg.__class__.__orig_select_best
+# except Exception:
+# orig_select_best = reg.__class__._select_best
+# def instr_select_best(self, *args, **kwargs):
+# selected = orig_select_best(self, *args, **kwargs)
+# try:
+# self._selected[selected.__class__] += 1
+# except KeyError:
+# self._selected[selected.__class__] = 1
+# except AttributeError:
+# pass # occurs on reg used to restore database
+# return selected
+# reg.__class__._select_best = instr_select_best
+# reg.__class__.__orig_select_best = orig_select_best
+
+
+# def print_untested_objects(testclass, skipregs=('hooks', 'etypes')):
+# for regname, reg in testclass._env.vreg.items():
+# if regname in skipregs:
+# continue
+# for appobjects in reg.values():
+# for appobject in appobjects:
+# if not reg._selected.get(appobject):
+# print 'not tested', regname, appobject
diff -r e1caf133b81c -r b23d58050076 cubicweb/devtools/webtest.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/webtest.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,34 @@
+from __future__ import absolute_import
+
+import webtest
+
+from cubicweb.wsgi import handler
+from cubicweb.devtools.testlib import CubicWebTC
+
+
class CubicWebTestTC(CubicWebTC):
    """test case exercising the application over (simulated) HTTP through a
    webtest.TestApp wrapping the WSGI handler
    """

    def setUp(self):
        super(CubicWebTestTC, self).setUp()
        self.config.global_set_option('base-url', 'http://localhost.local/')
        # call load_configuration again to let the config reset its datadir_url
        self.config.load_configuration()
        webapp = handler.CubicWebWSGIApplication(self.config)
        self.webapp = webtest.TestApp(webapp)

    def tearDown(self):
        del self.webapp
        super(CubicWebTestTC, self).tearDown()

    def login(self, user=None, password=None, **args):
        """perform a login request for `user` (defaults to the admin user);
        extra `args` are sent as additional form parameters
        """
        if user is None:
            user = self.admlogin
        if password is None:
            # admin gets its configured password, other users conventionally
            # use their login as password
            password = self.admpassword if user == self.admlogin else user
        args.update({
            '__login': user,
            '__password': password
        })
        return self.webapp.get('/login', args)

    def logout(self):
        """perform a logout request"""
        return self.webapp.get('/logout')
diff -r e1caf133b81c -r b23d58050076 cubicweb/entities/__init__.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/entities/__init__.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,208 @@
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""base application's entities class implementation: `AnyEntity`"""
+
+__docformat__ = "restructuredtext en"
+
+from warnings import warn
+
+from six import text_type, string_types
+
+from logilab.common.decorators import classproperty
+from logilab.common.deprecation import deprecated
+
+from cubicweb import Unauthorized
+from cubicweb.entity import Entity
+
+
class AnyEntity(Entity):
    """an entity instance has e_schema automagically set on the class and
    instances have access to their issuing cursor
    """
    __regid__ = 'Any'

    @classproperty
    def cw_etype(cls):
        """entity type as a unicode string"""
        return text_type(cls.__regid__)

    @classmethod
    def cw_create_url(cls, req, **kwargs):
        """return the url of the entity creation form for this entity type"""
        return req.build_url('add/%s' % cls.__regid__, **kwargs)

    @classmethod
    @deprecated('[3.22] use cw_fti_index_rql_limit instead')
    def cw_fti_index_rql_queries(cls, req):
        """return the list of rql queries to fetch entities to FT-index

        The default is to fetch all entities at once and to prefetch
        indexable attributes but one could imagine iterating over
        "smaller" resultsets if the table is very big or returning
        a subset of entities that match some business-logic condition.
        """
        restrictions = ['X is %s' % cls.__regid__]
        selected = ['X']
        for attrschema in sorted(cls.e_schema.indexable_attributes()):
            varname = attrschema.type.upper()
            restrictions.append('X %s %s' % (attrschema, varname))
            selected.append(varname)
        return ['Any %s WHERE %s' % (', '.join(selected),
                                     ', '.join(restrictions))]

    @classmethod
    def cw_fti_index_rql_limit(cls, req, limit=1000):
        """generate rsets of entities to FT-index

        By default, each successive result set is limited to 1000 entities
        """
        # honour subclasses still overriding the deprecated hook
        if cls.cw_fti_index_rql_queries.__func__ != AnyEntity.cw_fti_index_rql_queries.__func__:
            warn("[3.22] cw_fti_index_rql_queries is replaced by cw_fti_index_rql_limit",
                 DeprecationWarning)
            for rql in cls.cw_fti_index_rql_queries(req):
                yield req.execute(rql)
            return
        restrictions = ['X is %s' % cls.__regid__]
        selected = ['X']
        start = 0
        for attrschema in sorted(cls.e_schema.indexable_attributes()):
            varname = attrschema.type.upper()
            restrictions.append('X %s %s' % (attrschema, varname))
            selected.append(varname)
        # paginate on eid so each batch resumes after the previous one
        while True:
            q_restrictions = restrictions + ['X eid > %s' % start]
            rset = req.execute('Any %s ORDERBY X LIMIT %s WHERE %s' %
                               (', '.join(selected),
                                limit,
                                ', '.join(q_restrictions)))
            if rset:
                start = rset[-1][0]
                yield rset
            else:
                break

    # meta data api ###########################################################

    def dc_title(self):
        """return a suitable *unicode* title for this entity"""
        # use the first non-meta attribute with a value
        for rschema, attrschema in self.e_schema.attribute_definitions():
            if rschema.meta:
                continue
            value = self.cw_attr_value(rschema.type)
            if value is not None:
                # make the value printable (dates, floats, bytes, etc.)
                return self.printable_value(rschema.type, value, attrschema.type,
                                            format='text/plain')
        return u'%s #%s' % (self.dc_type(), self.eid)

    def dc_long_title(self):
        """return a more detailed title for this entity"""
        return self.dc_title()

    def dc_description(self, format='text/plain'):
        """return a suitable description for this entity"""
        if 'description' in self.e_schema.subjrels:
            return self.printable_value('description', format=format)
        return u''

    def dc_authors(self):
        """return a suitable description for the author(s) of the entity"""
        try:
            return ', '.join(u.name() for u in self.owned_by)
        except Unauthorized:
            return u''

    def dc_creator(self):
        """return a suitable description for the creator of the entity"""
        if self.creator:
            return self.creator.name()
        return u''

    def dc_date(self, date_format=None):  # XXX default to ISO 8601 ?
        """return latest modification date of this entity"""
        return self._cw.format_date(self.modification_date, date_format=date_format)

    def dc_type(self, form=''):
        """return the display name for the type of this entity (translated)"""
        return self.e_schema.display_name(self._cw, form)

    def dc_language(self):
        """return language used by this entity (translated)"""
        # check if entities has internationalizable attributes
        # XXX one is enough or check if all String attributes are internationalizable?
        for rschema, attrschema in self.e_schema.attribute_definitions():
            if rschema.rdef(self.e_schema, attrschema).internationalizable:
                return self._cw._(self._cw.user.property_value('ui.language'))
        return self._cw._(self._cw.vreg.property_value('ui.language'))

    @property
    def creator(self):
        """return the CWUser entity which has created this entity, or None if
        unknown or if the current user doesn't have access to this euser
        """
        try:
            return self.created_by[0]
        except (Unauthorized, IndexError):
            return None

    # abstractions making the whole things (well, some at least) working ######

    def sortvalue(self, rtype=None):
        """return a value which can be used to sort this entity or given
        entity's attribute
        """
        if rtype is None:
            return self.dc_title().lower()
        value = self.cw_attr_value(rtype)
        # do not restrict to `unicode` because Bytes will return a `str` value
        if isinstance(value, string_types):
            return self.printable_value(rtype, format='text/plain').lower()
        return value
+
+
def fetch_config(fetchattrs, mainattr=None, pclass=AnyEntity, order='ASC'):
    """function to ease basic configuration of an entity class ORM. Basic usage
    is:

    .. sourcecode:: python

      class MyEntity(AnyEntity):

          fetch_attrs, cw_fetch_order = fetch_config(['attr1', 'attr2'])
          # uncomment line below if you want the same sorting for 'unrelated' entities
          # cw_fetch_unrelated_order = cw_fetch_order

    Using this, when using ORM methods retrieving this type of entity, 'attr1'
    and 'attr2' will be automatically prefetched and results will be sorted on
    'attr1' ascending (ie the first attribute in the list).

    This function will automatically add to fetched attributes those defined in
    parent class given using the `pclass` argument.

    Also, you can use `mainattr` and `order` argument to have a different
    sorting.

    :param fetchattrs: sequence of attribute names to prefetch
    :param mainattr: attribute to sort on (default: first of `fetchattrs`)
    :param pclass: parent class whose `fetch_attrs` are appended (None to skip)
    :param order: 'ASC' or 'DESC'
    :returns: a (fetchattrs, fetch_order classmethod) tuple
    """
    if pclass is not None:
        # build a new list instead of augmented assignment so a list passed by
        # the caller is not mutated in place as a side effect
        fetchattrs = list(fetchattrs) + list(pclass.fetch_attrs)
    if mainattr is None:
        mainattr = fetchattrs[0]

    @classmethod
    def fetch_order(cls, select, attr, var):
        """add a sort variable for `mainattr` only, honouring `order`"""
        if attr == mainattr:
            select.add_sort_var(var, order == 'ASC')
    return fetchattrs, fetch_order
diff -r e1caf133b81c -r b23d58050076 cubicweb/entities/adapters.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/entities/adapters.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,429 @@
+# copyright 2010-2015 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""some basic entity adapter implementations, for interfaces used in the
+framework itself.
+"""
+from cubicweb import _
+
+from itertools import chain
+
+from logilab.mtconverter import TransformError
+from logilab.common.decorators import cached
+
+from cubicweb import ValidationError, view, ViolatedConstraint, UniqueTogetherError
+from cubicweb.predicates import is_instance, relation_possible, match_exception
+
+
class IEmailableAdapter(view.EntityAdapter):
    """adapter for entities reachable by email, through either a
    `primary_email` or a `use_email` relation"""
    __regid__ = 'IEmailable'
    __select__ = relation_possible('primary_email') | relation_possible('use_email')

    def get_email(self):
        """return the entity's email address, preferring `primary_email` over
        `use_email`, or None when neither relation is set"""
        for rtype in ('primary_email', 'use_email'):
            related = getattr(self.entity, rtype, None)
            if related:
                return related[0].address
        return None

    def allowed_massmail_keys(self):
        """returns a set of allowed email substitution keys

        The default is to return the entity's attribute list but you might
        override this method to allow extra keys. For instance, a Person
        class might want to return a `companyname` key.
        """
        excluded = ('Password', 'Bytes')
        return {rschema.type
                for rschema, attrtype in self.entity.e_schema.attribute_definitions()
                if attrtype.type not in excluded}

    def as_email_context(self):
        """returns the dictionary as used by the sendmail controller to
        build email bodies.

        NOTE: the dictionary keys should match the list returned by the
        `allowed_massmail_keys` method.
        """
        return {attr: getattr(self.entity, attr)
                for attr in self.allowed_massmail_keys()}
+
+
class INotifiableAdapter(view.EntityAdapter):
    """adapter controlling email notification threading for an entity"""
    __regid__ = 'INotifiable'
    __select__ = is_instance('Any')

    def notification_references(self, view):
        """used to control References field of email send on notification
        for this entity. `view` is the notification view.

        Should return a list of eids which can be used to generate message
        identifiers of previously sent email(s)
        """
        tree = self.entity.cw_adapt_to('ITree')
        if tree is not None:
            # thread notifications along the tree ancestry
            return tree.path()[:-1]
        elif view.msgid_timestamp:
            return (self.entity.eid,)
        else:
            return ()
+
+
class IFTIndexableAdapter(view.EntityAdapter):
    """standard adapter to handle fulltext indexing

    .. automethod:: cubicweb.entities.adapters.IFTIndexableAdapter.fti_containers
    .. automethod:: cubicweb.entities.adapters.IFTIndexableAdapter.get_words
    """
    __regid__ = 'IFTIndexable'
    __select__ = is_instance('Any')

    def fti_containers(self, _done=None):
        """return the list of entities to index when handling ``self.entity``

        The actual list of entities depends on ``fulltext_container`` usage
        in the datamodel definition
        """
        if _done is None:
            _done = set()
        entity = self.entity
        # remember visited eids to break cycles in the container graph
        _done.add(entity.eid)
        containers = tuple(entity.e_schema.fulltext_containers())
        if containers:
            # the schema delegates indexing to container entities: recurse
            # into each related container instead of yielding this entity
            for rschema, role in containers:
                if role == 'object':
                    targets = getattr(entity, rschema.type)
                else:
                    targets = getattr(entity, 'reverse_%s' % rschema)
                for target in targets:
                    if target.eid in _done:
                        continue
                    for container in target.cw_adapt_to('IFTIndexable').fti_containers(_done):
                        yield container
        else:
            yield entity

    # weight in ABCD
    # NOTE(review): entity_weight is not read in this class; presumably used
    # by the fulltext indexer or subclasses -- confirm before removing
    entity_weight = 1.0
    # per-attribute weight overrides; get_words falls back to 'C'
    attr_weight = {}

    def get_words(self):
        """used by the full text indexer to get words to index

        this method should only be used on the repository side since it depends
        on the logilab.database package

        :rtype: list
        :return: the list of indexable word of this entity
        """
        from logilab.database.fti import tokenize
        # take care of cases where we're modifying the schema
        entity = self.entity
        pending = self._cw.transaction_data.setdefault('pendingrdefs', set())
        # maps an index weight letter ('A'..'D') to the tokens indexed at
        # that weight
        words = {}
        for rschema in entity.e_schema.indexable_attributes():
            # skip attributes whose relation definition is being removed in
            # this very transaction
            if (entity.e_schema, rschema) in pending:
                continue
            weight = self.attr_weight.get(rschema, 'C')
            try:
                value = entity.printable_value(rschema, format=u'text/plain')
            except TransformError:
                continue
            except Exception:
                self.exception("can't add value of %s to text index for entity %s",
                               rschema, entity.eid)
                continue
            if value:
                words.setdefault(weight, []).extend(tokenize(value))
        # also index words of fulltext-related entities, merging their
        # weight -> tokens mappings into ours
        for rschema, role in entity.e_schema.fulltext_relations():
            if role == 'subject':
                for entity_ in getattr(entity, rschema.type):
                    merge_weight_dict(words, entity_.cw_adapt_to('IFTIndexable').get_words())
            else: # if role == 'object':
                for entity_ in getattr(entity, 'reverse_%s' % rschema.type):
                    merge_weight_dict(words, entity_.cw_adapt_to('IFTIndexable').get_words())
        return words
+
+
def merge_weight_dict(maindict, newdict):
    """merge `newdict` into `maindict` in place, concatenating the token
    lists for weights present in both dictionaries"""
    for weight, tokens in newdict.items():
        if weight in maindict:
            maindict[weight].extend(tokens)
        else:
            maindict[weight] = list(tokens)
+
+
class IDownloadableAdapter(view.EntityAdapter):
    """interface for downloadable entities

    Concrete adapters must implement every method below.
    """
    __regid__ = 'IDownloadable'
    __abstract__ = True

    def download_url(self, **kwargs): # XXX not really part of this interface
        """return a URL to download entity's content

        It should be a unicode object containing url-encoded ASCII.
        """
        raise NotImplementedError

    def download_content_type(self):
        """return the MIME type (unicode) of the downloadable content"""
        raise NotImplementedError

    def download_encoding(self):
        """return the encoding (unicode) of the downloadable content"""
        raise NotImplementedError

    def download_file_name(self):
        """return the file name (unicode) of the downloadable content"""
        raise NotImplementedError

    def download_data(self):
        """return the actual data (bytes) of the downloadable content"""
        raise NotImplementedError
+
+
+# XXX should propose to use two different relations for children/parent
class ITreeAdapter(view.EntityAdapter):
    """This adapter provides a tree interface.

    It has to be overriden to be configured using the tree_relation,
    child_role and parent_role class attributes to benefit from this default
    implementation.

    This class provides the following methods:

    .. automethod: iterparents
    .. automethod: iterchildren
    .. automethod: prefixiter

    .. automethod: is_leaf
    .. automethod: is_root

    .. automethod: root
    .. automethod: parent
    .. automethod: children
    .. automethod: different_type_children
    .. automethod: same_type_children
    .. automethod: children_rql
    .. automethod: path
    """
    __regid__ = 'ITree'
    __abstract__ = True

    # role of the child (resp. parent) in the configured `tree_relation`;
    # `tree_relation` itself must be set by concrete subclasses
    child_role = 'subject'
    parent_role = 'object'

    def children_rql(self):
        """Returns RQL to get the children of the entity."""
        return self.entity.cw_related_rql(self.tree_relation, self.parent_role)

    def different_type_children(self, entities=True):
        """Return children entities of different type as this entity.

        According to the `entities` parameter, return entity objects or the
        equivalent result set.
        """
        res = self.entity.related(self.tree_relation, self.parent_role,
                                  entities=entities)
        eschema = self.entity.e_schema
        if entities:
            return [e for e in res if e.e_schema != eschema]
        return res.filtered_rset(lambda x: x.e_schema != eschema, self.entity.cw_col)

    def same_type_children(self, entities=True):
        """Return children entities of the same type as this entity.

        According to the `entities` parameter, return entity objects or the
        equivalent result set.
        """
        res = self.entity.related(self.tree_relation, self.parent_role,
                                  entities=entities)
        eschema = self.entity.e_schema
        if entities:
            return [e for e in res if e.e_schema == eschema]
        # NOTE(review): uses `is` here while different_type_children uses
        # `!=`; presumably schema objects are singletons -- confirm
        return res.filtered_rset(lambda x: x.e_schema is eschema, self.entity.cw_col)

    def is_leaf(self):
        """Returns True if the entity does not have any children."""
        return len(self.children()) == 0

    def is_root(self):
        """Returns true if the entity is root of the tree (e.g. has no parent).
        """
        return self.parent() is None

    def root(self):
        """Return the root entity of the tree."""
        return self._cw.entity_from_eid(self.path()[0])

    def parent(self):
        """Returns the parent entity if any, else None (e.g. if we are on the
        root).
        """
        try:
            return self.entity.related(self.tree_relation, self.child_role,
                                       entities=True)[0]
        # IndexError: no parent; KeyError presumably raised when the relation
        # doesn't apply to this entity type -- TODO confirm
        except (KeyError, IndexError):
            return None

    def children(self, entities=True, sametype=False):
        """Return children entities.

        According to the `entities` parameter, return entity objects or the
        equivalent result set.
        """
        if sametype:
            return self.same_type_children(entities)
        else:
            return self.entity.related(self.tree_relation, self.parent_role,
                                       entities=entities)

    def iterparents(self, strict=True):
        """Return an iterator on the parents of the entity.

        When `strict` is False, the entity itself is yielded first.
        """
        def _uptoroot(self):
            # walk up the tree, re-adapting at each level since the parent
            # may be of a different entity type
            curr = self
            while True:
                curr = curr.parent()
                if curr is None:
                    break
                yield curr
                curr = curr.cw_adapt_to('ITree')
        if not strict:
            return chain([self.entity], _uptoroot(self))
        return _uptoroot(self)

    def iterchildren(self, _done=None):
        """Return an iterator over the item's children."""
        if _done is None:
            _done = set()
        for child in self.children():
            # guard against cycles in a malformed tree: log and skip
            if child.eid in _done:
                self.error('loop in %s tree: %s', child.cw_etype.lower(), child)
                continue
            yield child
            _done.add(child.eid)

    def prefixiter(self, _done=None):
        """Return an iterator over the item's descendants in a prefixed order."""
        if _done is None:
            _done = set()
        if self.entity.eid in _done:
            return
        _done.add(self.entity.eid)
        # prefix order: the entity itself first, then recursively its
        # same-type children
        yield self.entity
        for child in self.same_type_children():
            for entity in child.cw_adapt_to('ITree').prefixiter(_done):
                yield entity

    @cached
    def path(self):
        """Returns the list of eids from the root object to this object."""
        path = []
        adapter = self
        entity = adapter.entity
        while entity is not None:
            # guard against cycles in a malformed tree: log and truncate
            if entity.eid in path:
                self.error('loop in %s tree: %s', entity.cw_etype.lower(), entity)
                break
            path.append(entity.eid)
            try:
                # check we are not jumping to another tree
                if (adapter.tree_relation != self.tree_relation or
                        adapter.child_role != self.child_role):
                    break
                entity = adapter.parent()
                # AttributeError when the parent has no ITree adapter (or
                # parent() returned None), ending the walk
                adapter = entity.cw_adapt_to('ITree')
            except AttributeError:
                break
        # eids were collected leaf-to-root; return them root-to-leaf
        path.reverse()
        return path
+
+
class ISerializableAdapter(view.EntityAdapter):
    """Adapter to serialize an entity to a bare python structure that may be
    directly serialized to e.g. JSON.
    """

    __regid__ = 'ISerializable'
    __select__ = is_instance('Any')

    def serialize(self):
        """return a dict of the entity's attributes plus eid, entity type and
        source uri"""
        entity = self.entity
        entity.complete()
        meta = entity.cw_metainformation()
        result = {
            'cw_etype': entity.cw_etype,
            'cw_source': meta['source']['uri'],
            'eid': entity.eid,
        }
        for rschema, _attrschema in entity.e_schema.attribute_definitions():
            name = rschema.type
            # attributes absent from the cache (e.g. Bytes) are skipped
            if name in entity.cw_attr_cache:
                result[name] = entity.cw_attr_cache[name]
        return result
+
+
+# error handling adapters ######################################################
+
+
class IUserFriendlyError(view.EntityAdapter):
    """abstract base for adapters turning low-level repository exceptions
    into user-friendly validation errors"""
    __regid__ = 'IUserFriendlyError'
    __abstract__ = True

    def __init__(self, *args, **kwargs):
        # the exception to translate is a mandatory keyword argument
        self.exc = kwargs.pop('exc')
        super(IUserFriendlyError, self).__init__(*args, **kwargs)
+
+
class IUserFriendlyUniqueTogether(IUserFriendlyError):
    """translate a UniqueTogetherError into a ValidationError blaming every
    relation type involved in the violated constraint"""
    __select__ = match_exception(UniqueTogetherError)

    def raise_user_exception(self):
        errors = {'': _('some relations violate a unicity constraint')}
        msgargs = {}
        i18nvalues = []
        for rtype in self.exc.rtypes:
            key = rtype + '-rtype'
            errors[rtype] = _('%(KEY-rtype)s is part of violated unicity constraint')
            msgargs[key] = rtype
            i18nvalues.append(key)
        raise ValidationError(self.entity.eid, errors,
                              msgargs=msgargs, i18nvalues=i18nvalues)
+
+
class IUserFriendlyCheckConstraint(IUserFriendlyError):
    __select__ = match_exception(ViolatedConstraint)

    def raise_user_exception(self):
        """translate a ViolatedConstraint into a ValidationError blaming the
        attribute whose constraint name matches the violated one"""
        cstrname = self.exc.cstrname
        eschema = self.entity.e_schema
        # nested for/else search: scan every attribute's constraints for the
        # one whose generated name matches `cstrname`. The inner `break`
        # (match found) falls through to the outer `break`, leaving `rschema`
        # and `constraint` bound; full exhaustion reaches the outer `else`.
        for rschema, attrschema in eschema.attribute_definitions():
            rdef = rschema.rdef(eschema, attrschema)
            for constraint in rdef.constraints:
                if cstrname == constraint.name_for(rdef):
                    break
            else:
                continue
            break
        else:
            # a constraint was reported as violated but none matches:
            # schema and database are out of sync
            assert 0
        key = rschema.type + '-subject'
        # use .get since a constraint may be associated to an attribute that isn't edited (e.g.
        # constraint between two attributes). This should be the purpose of an api rework at some
        # point, we currently rely on the fact that such constraint will provide a dedicated user
        # message not relying on the `value` argument
        value = self.entity.cw_edited.get(rschema.type)
        msg, args = constraint.failed_message(key, value, self.entity)
        raise ValidationError(self.entity.eid, {key: msg}, args)
diff -r e1caf133b81c -r b23d58050076 cubicweb/entities/authobjs.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/entities/authobjs.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,183 @@
+# copyright 2003-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""entity classes user and group entities"""
+
+__docformat__ = "restructuredtext en"
+
+from six import string_types, text_type
+
+from logilab.common.decorators import cached
+
+from cubicweb import Unauthorized
+from cubicweb.entities import AnyEntity, fetch_config
+
class CWGroup(AnyEntity):
    """customized class for CWGroup entities"""
    __regid__ = 'CWGroup'
    fetch_attrs, cw_fetch_order = fetch_config(['name'])
    cw_fetch_unrelated_order = cw_fetch_order

    def dc_long_title(self):
        """return the group name, with its translation appended when the
        translation differs"""
        name = self.name
        translated = self._cw._(name)
        if translated == name:
            return name
        return '%s (%s)' % (name, translated)

    @cached
    def num_users(self):
        """return the number of users in this group"""
        rql = 'Any COUNT(U) WHERE U in_group G, G eid %(g)s'
        return self._cw.execute(rql, {'g': self.eid})[0][0]
+
+
class CWUser(AnyEntity):
    """customized class for CWUser entities"""
    __regid__ = 'CWUser'
    fetch_attrs, cw_fetch_order = fetch_config(['login', 'firstname', 'surname'])
    cw_fetch_unrelated_order = cw_fetch_order

    # used by repository to check if the user can log in or not
    AUTHENTICABLE_STATES = ('activated',)

    # low level utilities #####################################################
    def __init__(self, *args, **kwargs):
        # `groups` and `properties` may be precomputed by the caller and
        # injected here to avoid the lazy database queries performed by the
        # eponymous properties below
        groups = kwargs.pop('groups', None)
        properties = kwargs.pop('properties', None)
        super(CWUser, self).__init__(*args, **kwargs)
        if groups is not None:
            self._groups = groups
        if properties is not None:
            self._properties = properties

    @property
    def groups(self):
        """set of names of the groups this user belongs to (lazily computed
        from the `in_group` relation, then cached on the instance)"""
        try:
            return self._groups
        except AttributeError:
            self._groups = set(g.name for g in self.in_group)
            return self._groups

    @property
    def properties(self):
        """dict mapping this user's CWProperty keys to their (string) values,
        lazily fetched then cached on the instance"""
        try:
            return self._properties
        except AttributeError:
            self._properties = dict(
                self._cw.execute(
                    'Any K, V WHERE P for_user U, U eid %(userid)s, '
                    'P pkey K, P value V',
                    {'userid': self.eid}))
            return self._properties

    def prefered_language(self, language=None):
        """return language used by this user, if explicitly defined (eg not
        using http negociation)
        """
        language = language or self.property_value('ui.language')
        vreg = self._cw.vreg
        try:
            vreg.config.translations[language]
        except KeyError:
            # no translation catalog for the requested language: fall back to
            # the site-wide configured language
            language = vreg.property_value('ui.language')
            # NOTE(review): this reads as `language in translations[language]`
            # -- membership in the catalog entry itself; it looks like it
            # should be `language in vreg.config.translations`. Confirm.
            assert language in vreg.config.translations[language], language
        return language

    def property_value(self, key):
        """return the typed value of the given property for this user,
        falling back to the site-wide value on missing or invalid entry"""
        try:
            # properties stored on the user aren't correctly typed
            # (e.g. all values are unicode string)
            return self._cw.vreg.typed_value(key, self.properties[key])
        except KeyError:
            pass
        except ValueError:
            self.warning('incorrect value for eproperty %s of user %s',
                         key, self.login)
        return self._cw.vreg.property_value(key)

    def set_property(self, pkey, value):
        """set a CWProperty for this user, creating it when it doesn't exist
        yet (for-user properties are restricted to managers)"""
        value = text_type(value)
        try:
            prop = self._cw.execute(
                'CWProperty X WHERE X pkey %(k)s, X for_user U, U eid %(u)s',
                {'k': pkey, 'u': self.eid}).get_entity(0, 0)
        # broad catch: presumably covers "no such property yet" (get_entity
        # on an empty result set) -- a new property is then created
        except Exception:
            kwargs = dict(pkey=text_type(pkey), value=value)
            if self.is_in_group('managers'):
                kwargs['for_user'] = self
            self._cw.create_entity('CWProperty', **kwargs)
        else:
            prop.cw_set(value=value)

    def matching_groups(self, groups):
        """return the number of the given group(s) in which the user is

        :type groups: str or iterable(str)
        :param groups: a group name or an iterable on group names
        """
        if isinstance(groups, string_types):
            groups = frozenset((groups,))
        elif isinstance(groups, (tuple, list)):
            groups = frozenset(groups)
        return len(groups & self.groups) # XXX return the resulting set instead of its size

    def is_in_group(self, group):
        """convience / shortcut method to test if the user belongs to `group`
        """
        return group in self.groups

    def is_anonymous(self):
        """ checks if user is an anonymous user"""
        # FIXME on the web-side anonymous user is detected according to config['anonymous-user'],
        # we don't have this info on the server side.
        return self.groups == frozenset(('guests', ))

    def owns(self, eid):
        """return a truthy result set when this user owns the entity with the
        given eid, False when the check is not permitted"""
        try:
            return self._cw.execute(
                'Any X WHERE X eid %(x)s, X owned_by U, U eid %(u)s',
                {'x': eid, 'u': self.eid})
        except Unauthorized:
            return False
    # cache per (instance, eid) pair
    owns = cached(owns, keyarg=1)

    # presentation utilities ##################################################

    def name(self):
        """construct a name using firstname / surname or login if not defined"""

        if self.firstname and self.surname:
            return self._cw._('%(firstname)s %(surname)s') % {
                'firstname': self.firstname, 'surname': self.surname}
        if self.firstname:
            return self.firstname
        return self.login

    def dc_title(self):
        return self.login

    # the long title is the user's full name
    dc_long_title = name

    def __call__(self, *args, **kwargs):
        """ugly hack for compatibility between dbapi and repo api

        In the dbapi, Connection and Session have a ``user`` method to
        generate a user for a request. In the repo api, Connection and Session
        have a user attribute inherited from SessionRequestBase prototype. This
        ugly hack allows to not break users of the user method.

        XXX Deprecate me ASAP"""
        return self
diff -r e1caf133b81c -r b23d58050076 cubicweb/entities/lib.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/entities/lib.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,149 @@
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""entity classes for optional library entities"""
+
+__docformat__ = "restructuredtext en"
+from warnings import warn
+from datetime import datetime
+
+from six.moves import range
+from six.moves.urllib.parse import urlsplit, urlunsplit
+
+from logilab.mtconverter import xml_escape
+
+from cubicweb import UnknownProperty
+from cubicweb.entity import _marker
+from cubicweb.entities import AnyEntity, fetch_config
+
def mangle_email(address):
    """obfuscate an email address for public display:
    'joe@a.b' -> 'joe at a dot b'

    Addresses without a '@' are returned unchanged.
    """
    if '@' not in address:
        return address
    name, host = address.split('@', 1)
    return '%s at %s' % (name, host.replace('.', ' dot '))
+
+
class EmailAddress(AnyEntity):
    """customized class for EmailAddress entities"""
    __regid__ = 'EmailAddress'
    fetch_attrs, cw_fetch_order = fetch_config(['address', 'alias'])
    rest_attr = 'eid'

    def dc_title(self):
        """display 'alias <address>' when an alias is set, else the address"""
        if self.alias:
            return '%s <%s>' % (self.alias, self.display_address())
        return self.display_address()

    @property
    def email_of(self):
        # entity this address belongs to (through `use_email`), or None
        return self.reverse_use_email and self.reverse_use_email[0] or None

    @property
    def prefered(self):
        # preferred form of this address, defaulting to the address itself
        return self.prefered_form and self.prefered_form[0] or self

    def related_emails(self, skipeids=None):
        """yield email entities sent to or from this address, most recent
        first; eids in `skipeids` are skipped and the set is updated in place
        with the yielded eids"""
        # XXX move to eemail
        # check email relations are in the schema first
        subjrels = self.e_schema.object_relations()
        if not ('sender' in subjrels and 'recipients' in subjrels):
            return
        rset = self._cw.execute('DISTINCT Any X, S, D ORDERBY D DESC '
                                'WHERE X sender Y or X recipients Y, '
                                'X subject S, X date D, Y eid %(y)s',
                                {'y': self.eid})
        if skipeids is None:
            skipeids = set()
        for i in range(len(rset)):
            eid = rset[i][0]
            if eid in skipeids:
                continue
            skipeids.add(eid)
            yield rset.get_entity(i, 0)

    def display_address(self):
        """return the address, mangled when the instance is configured to
        hide raw emails (`mangle-emails` option)"""
        if self._cw.vreg.config['mangle-emails']:
            return mangle_email(self.address)
        return self.address

    def printable_value(self, attr, value=_marker, attrtype=None,
                        format='text/html'):
        """overriden to return displayable address when necessary"""
        if attr == 'address':
            address = self.display_address()
            if format == 'text/html':
                address = xml_escape(address)
            return address
        return super(EmailAddress, self).printable_value(attr, value, attrtype, format)
+
+
class Bookmark(AnyEntity):
    """customized class for Bookmark entities"""
    __regid__ = 'Bookmark'
    fetch_attrs, cw_fetch_order = fetch_config(['title', 'path'])

    def actual_url(self):
        """return the url this bookmark points to, propagating the bookmark
        title through the `vtitle` query argument when a title is set"""
        url = self._cw.build_url(self.path)
        if not self.title:
            return url
        parts = list(urlsplit(url))
        vtitle = 'vtitle=%s' % self._cw.url_quote(self.title)
        if parts[3]:
            # append to the existing query string
            parts[3] = '%s&%s' % (parts[3], vtitle)
        else:
            parts[3] = vtitle
        return urlunsplit(parts)

    def action_url(self):
        """url redirecting to the bookmarked page"""
        return '%s/follow' % self.absolute_url()
+
+
class CWProperty(AnyEntity):
    """customized class for CWProperty entities"""
    __regid__ = 'CWProperty'

    fetch_attrs, cw_fetch_order = fetch_config(['pkey', 'value'])
    rest_attr = 'pkey'

    def typed_value(self):
        """return the property value converted to its actual python type"""
        return self._cw.vreg.typed_value(self.pkey, self.value)

    def dc_description(self, format='text/plain'):
        """return the translated help message for this property key, or an
        empty string for unregistered keys"""
        try:
            info = self._cw.vreg.property_info(self.pkey)
        except UnknownProperty:
            return u''
        return self._cw._(info['help'])
+
+
class CWCache(AnyEntity):
    """Cache"""
    __regid__ = 'CWCache'
    fetch_attrs, cw_fetch_order = fetch_config(['name'])

    def __init__(self, *args, **kwargs):
        # the whole entity type is deprecated, warn on every instantiation
        warn('[3.19] CWCache entity type is going away soon. '
             'Other caching mechanisms can be used more reliably '
             'to the same effect.',
             DeprecationWarning)
        super(CWCache, self).__init__(*args, **kwargs)

    def touch(self):
        """reset the cache timestamp to the current time"""
        self._cw.execute('SET X timestamp %(t)s WHERE X eid %(x)s',
                         {'t': datetime.now(), 'x': self.eid})

    def valid(self, date):
        """return whether the given date is more recent than the cache
        timestamp; a falsy date is considered invalid"""
        return bool(date) and date > self.timestamp
diff -r e1caf133b81c -r b23d58050076 cubicweb/entities/schemaobjs.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/entities/schemaobjs.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,178 @@
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""schema definition related entities"""
+
+__docformat__ = "restructuredtext en"
+
+from logilab.common.decorators import cached
+
+from yams.schema import role_name
+
+from cubicweb import ValidationError
+from cubicweb.schema import ERQLExpression, RRQLExpression
+
+from cubicweb.entities import AnyEntity, fetch_config
+
+
class CWEType(AnyEntity):
    """customized class for CWEType entities"""
    __regid__ = 'CWEType'
    fetch_attrs, cw_fetch_order = fetch_config(['name'])

    def dc_title(self):
        """entity type name along with its translation"""
        return u'%s (%s)' % (self.name, self._cw._(self.name))

    def dc_long_title(self):
        """title decorated with an UML-like <<final>> stereotype when the
        entity type is final"""
        title = self.dc_title()
        if self.final:
            return u'%s <<%s>>' % (title, self._cw._('final'))
        return title
+
+
class CWRType(AnyEntity):
    """customized class for CWRType entities"""
    __regid__ = 'CWRType'
    fetch_attrs, cw_fetch_order = fetch_config(['name'])

    def dc_title(self):
        """relation type name along with its translation"""
        return u'%s (%s)' % (self.name, self._cw._(self.name))

    def dc_long_title(self):
        """title decorated with UML-like stereotypes reflecting the relation
        type properties (symmetric / inlined / final)"""
        stereotypes = []
        _ = self._cw._
        if self.symmetric:
            stereotypes.append(_('symmetric'))
        if self.inlined:
            stereotypes.append(_('inlined'))
        if self.final:
            stereotypes.append(_('final'))
        if stereotypes:
            return u'%s <<%s>>' % (self.dc_title(), ', '.join(stereotypes))
        return self.dc_title()

    def check_inlined_allowed(self):
        """check inlining is possible, raise ValidationError if not possible
        """
        # don't use the persistent schema, we may miss cardinality changes
        # in the same transaction
        for rdef in self.reverse_relation_type:
            card = rdef.cardinality[0]
            # inlining requires at most one object per subject, i.e. a
            # subject cardinality of '?' or '1'
            if not card in '?1':
                qname = role_name('inlined', 'subject')
                # these local names feed the `msg % locals()` interpolation
                # below; renaming any of them would silently break the message
                rtype = self.name
                stype = rdef.stype
                otype = rdef.otype
                msg = self._cw._("can't set inlined=True, "
                                 "%(stype)s %(rtype)s %(otype)s "
                                 "has cardinality=%(card)s")
                raise ValidationError(self.eid, {qname: msg % locals()})
+
+
class CWRelation(AnyEntity):
    """customized class for CWRelation entities (non-final relation
    definitions)"""
    __regid__ = 'CWRelation'
    fetch_attrs = fetch_config(['cardinality'])[0]

    def dc_title(self):
        """'subject relation object' display of the relation definition"""
        return u'%s %s %s' % (self.stype.name, self.rtype.name, self.otype.name)

    def dc_long_title(self):
        """like dc_title, with non-mandatory cardinalities shown in
        brackets around the relation name"""
        card = self.cardinality
        scard = u'' if card[0] == '1' else '[%s]' % card[0]
        ocard = u'' if card[1] == '1' else '[%s]' % card[1]
        return u'%s %s%s%s %s' % (self.stype.name,
                                  scard, self.rtype.name, ocard,
                                  self.otype.name)

    @property
    def rtype(self):
        """the CWRType end of this relation definition"""
        return self.relation_type[0]

    @property
    def stype(self):
        """the subject CWEType of this relation definition"""
        return self.from_entity[0]

    @property
    def otype(self):
        """the object CWEType of this relation definition"""
        return self.to_entity[0]

    def yams_schema(self):
        """return the yams relation definition matching this entity"""
        rschema = self._cw.vreg.schema.rschema(self.rtype.name)
        return rschema.rdefs[(self.stype.name, self.otype.name)]
+
+
class CWAttribute(CWRelation):
    """customized class for CWAttribute entities (final relation
    definitions)"""
    __regid__ = 'CWAttribute'

    def dc_long_title(self):
        """'subject +attribute type' display, the '+' marking a mandatory
        ('1') subject cardinality"""
        card = self.cardinality
        mandatory = '+' if card[0] == '1' else u''
        return u'%s %s%s %s' % (self.from_entity[0].name,
                                mandatory, self.relation_type[0].name,
                                self.to_entity[0].name)
+
+
class CWConstraint(AnyEntity):
    """customized class for CWConstraint entities"""
    __regid__ = 'CWConstraint'
    fetch_attrs, cw_fetch_order = fetch_config(['value'])

    def dc_title(self):
        """'ConstraintType(value)' display"""
        return '%s(%s)' % (self.type, self.value or u'')

    @property
    def type(self):
        """name of this constraint's type"""
        return self.cstrtype[0].name
+
+
class RQLExpression(AnyEntity):
    """customized class for RQLExpression entities"""
    __regid__ = 'RQLExpression'
    fetch_attrs, cw_fetch_order = fetch_config(['exprtype', 'mainvars', 'expression'])

    def dc_title(self):
        return self.expression or u''

    def dc_long_title(self):
        return '%s(%s)' % (self.exprtype, self.expression or u'')

    @property
    def expression_of(self):
        """the permission or constraint holder this expression belongs to,
        if any"""
        relations = ('read_permission', 'add_permission', 'delete_permission',
                     'update_permission', 'condition')
        for rel in relations:
            related = getattr(self, 'reverse_%s' % rel)
            if related:
                return related[0]
        return None

    @cached
    def _rqlexpr(self):
        """instantiate the actual RQL expression object matching `exprtype`"""
        if self.exprtype == 'ERQLExpression':
            klass = ERQLExpression
        else:
            # only two expression types exist; anything else is an RRQLExpression
            klass = RRQLExpression
        return klass(self.expression, self.mainvars, self.eid)

    def check_expression(self, *args, **kwargs):
        """evaluate the expression; see cubicweb.schema for arguments"""
        return self._rqlexpr().check(*args, **kwargs)
diff -r e1caf133b81c -r b23d58050076 cubicweb/entities/sources.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/entities/sources.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,184 @@
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""data source related entities"""
+
+__docformat__ = "restructuredtext en"
+
+import re
+from socket import gethostname
+import logging
+
+from logilab.common.textutils import text_to_dict
+from logilab.common.configuration import OptionError
+from logilab.mtconverter import xml_escape
+
+from cubicweb.entities import AnyEntity, fetch_config
+
class _CWSourceCfgMixIn(object):
    """Mixin giving access to the text `config` attribute parsed as a
    dictionary, plus a helper to update and persist it."""

    @property
    def dictconfig(self):
        """Source configuration as a dict (empty dict when no config is set)."""
        return self.config and text_to_dict(self.config) or {}

    def update_config(self, skip_unknown=False, **config):
        """Merge *config* into the current configuration and persist it.

        Options unknown to the source type raise `OptionError`, unless
        *skip_unknown* is true in which case they are silently dropped.
        """
        from cubicweb.server import SOURCE_TYPES
        from cubicweb.server.serverconfig import (SourceConfiguration,
                                                  generate_source_config)
        cfg = self.dictconfig
        cfg.update(config)
        options = SOURCE_TYPES[self.type].options
        sconfig = SourceConfiguration(self._cw.vreg.config, options=options)
        for opt, val in cfg.items():
            try:
                sconfig.set_option(opt, val)
            except OptionError:
                if skip_unknown:
                    continue
                raise
        # decode explicitly instead of calling the py2-only `unicode` builtin
        # (which raises NameError under Python 3)
        cfgstr = generate_source_config(sconfig)
        if isinstance(cfgstr, bytes):
            cfgstr = cfgstr.decode(self._cw.encoding)
        self.cw_set(config=cfgstr)
+
+
class CWSource(_CWSourceCfgMixIn, AnyEntity):
    """Data source declaration entity."""
    __regid__ = 'CWSource'
    fetch_attrs, cw_fetch_order = fetch_config(['name', 'type'])

    @property
    def host_config(self):
        """Source configuration merged with overrides from every host
        configuration whose pattern matches the local host name."""
        cfg = self.dictconfig
        hostname = gethostname()
        for hostcfg in self.host_configs:
            if hostcfg.match(hostname):
                self.info('matching host config %s for source %s',
                          hostcfg.match_host, self.name)
                cfg.update(hostcfg.dictconfig)
        return cfg

    @property
    def host_configs(self):
        """CWSourceHostConfig entities attached to this source."""
        return self.reverse_cw_host_config_of

    def init_mapping(self, mapping):
        """Create a CWSourceSchemaConfig entity for each (key, options) pair.

        A key may be a (subject, relation, object) triple — '*' acting as a
        wildcard on either end — an entity type name (capitalized) or a
        relation type name.
        """
        for key, options in mapping:
            if isinstance(key, tuple):  # relation definition
                assert len(key) == 3
                clauses = ['X relation_type RT, RT name %(rt)s']
                qargs = {'rt': key[1]}
                if key[0] != '*':
                    clauses.append('X from_entity FT, FT name %(ft)s')
                    qargs['ft'] = key[0]
                if key[2] != '*':
                    clauses.append('X to_entity TT, TT name %(tt)s')
                    qargs['tt'] = key[2]
                rset = self._cw.execute('Any X WHERE %s' % ','.join(clauses),
                                        qargs)
            elif key[0].isupper():  # entity type
                rset = self._cw.execute('CWEType X WHERE X name %(et)s',
                                        {'et': key})
            else:  # relation type
                rset = self._cw.execute('CWRType X WHERE X name %(rt)s',
                                        {'rt': key})
            for target in rset.entities():
                self._cw.create_entity('CWSourceSchemaConfig',
                                       cw_for_source=self,
                                       cw_schema=target,
                                       options=options)

    @property
    def repo_source(self):
        """repository only property, not available from the web side (eg
        self._cw is expected to be a server session)
        """
        return self._cw.repo.sources_by_eid[self.eid]
+
+
class CWSourceHostConfig(_CWSourceCfgMixIn, AnyEntity):
    """Per-host configuration overrides for a data source."""
    __regid__ = 'CWSourceHostConfig'
    fetch_attrs, cw_fetch_order = fetch_config(['match_host', 'config'])

    @property
    def cwsource(self):
        """The CWSource entity this host configuration belongs to."""
        return self.cw_host_config_of[0]

    def match(self, hostname):
        # `match_host` is used as a regular expression anchored at the start
        # of *hostname* (re.match semantics); returns a match object or None
        return re.match(self.match_host, hostname)
+
+
class CWSourceSchemaConfig(AnyEntity):
    """Mapping between a data source and one element of the schema."""
    __regid__ = 'CWSourceSchemaConfig'
    fetch_attrs, cw_fetch_order = fetch_config(['cw_for_source', 'cw_schema', 'options'])

    def dc_title(self):
        """Return the translated entity type followed by the eid."""
        return self._cw._(self.cw_etype) + ' #%s' % self.eid

    @property
    def schema(self):
        """The mapped schema entity (entity type, relation type or rdef)."""
        return self.cw_schema[0]

    @property
    def cwsource(self):
        """The CWSource entity this mapping belongs to."""
        return self.cw_for_source[0]
+
+
class CWDataImport(AnyEntity):
    """Log of a single data import performed by a source.

    Messages are accumulated in memory through the record_* methods, then
    flushed into the entity's `log` attribute by `write_log`.
    """
    __regid__ = 'CWDataImport'
    repo_source = _logs = None  # please pylint

    def init(self):
        """Prepare in-memory log storage and bind the repository-side source."""
        self._logs = []
        self.repo_source = self.cwsource.repo_source

    def dc_title(self):
        """Return `<start timestamp> [<status>]`."""
        return '%s [%s]' % (self.printable_value('start_timestamp'),
                            self.printable_value('status'))

    @property
    def cwsource(self):
        """The CWSource entity this import belongs to."""
        return self.cw_import_of[0]

    def record_debug(self, msg, path=None, line=None):
        """Record *msg* at DEBUG level and forward it to the source logger."""
        self._log(logging.DEBUG, msg, path, line)
        self.repo_source.debug(msg)

    def record_info(self, msg, path=None, line=None):
        """Record *msg* at INFO level and forward it to the source logger."""
        self._log(logging.INFO, msg, path, line)
        self.repo_source.info(msg)

    def record_warning(self, msg, path=None, line=None):
        """Record *msg* at WARNING level and forward it to the source logger."""
        self._log(logging.WARNING, msg, path, line)
        self.repo_source.warning(msg)

    def record_error(self, msg, path=None, line=None):
        """Record *msg* at ERROR level; also marks the whole import as failed."""
        self._status = u'failed'
        self._log(logging.ERROR, msg, path, line)
        self.repo_source.error(msg)

    def record_fatal(self, msg, path=None, line=None):
        """Record *msg* at FATAL level; also marks the whole import as failed."""
        self._status = u'failed'
        self._log(logging.FATAL, msg, path, line)
        self.repo_source.fatal(msg)

    def _log(self, severity, msg, path=None, line=None):
        # tab-separated fields: severity, path, line, escaped message
        # NOTE(review): the entry/record separators look like they may have
        # been markup (e.g. '<br/>') stripped from this copy — confirm against
        # the consumer of the `log` attribute
        encodedmsg = u'%s\t%s\t%s\t%s ' % (severity, path or u'',
                                           line or u'', xml_escape(msg))
        self._logs.append(encodedmsg)

    def write_log(self, session, **kwargs):
        """Persist accumulated entries into the `log` attribute and reset
        the buffer; status defaults to 'success' unless an error was recorded."""
        if 'status' not in kwargs:
            kwargs['status'] = getattr(self, '_status', u'success')
        self.cw_set(log=u' '.join(self._logs), **kwargs)
        self._logs = []
diff -r e1caf133b81c -r b23d58050076 cubicweb/entities/test/data/migration/postcreate.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/entities/test/data/migration/postcreate.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,19 @@
+# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
# give the Bookmark entity type a minimal workflow with a single initial
# state, so the entity tests can exercise workflowable behaviour
wf = add_workflow(u'bmk wf', 'Bookmark')
wf.add_state(u'hop', initial=True)
diff -r e1caf133b81c -r b23d58050076 cubicweb/entities/test/data/schema.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/entities/test/data/schema.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,37 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""entities tests schema"""
+
+from yams.buildobjs import EntityType, String, RichString, Int
+from cubicweb.schema import make_workflowable
+
# minimal entity type hierarchy used by the entity unit tests
# (no docstrings on yams classes: they would become schema descriptions)
class Company(EntityType):
    order = Int()
    name = String()
    description = RichString()

# Division specializes Company in the yams schema
class Division(Company):
    __specializes_schema__ = True

class SubDivision(Division):
    __specializes_schema__ = True


from cubicweb.schemas import bootstrap, Bookmark
# make two builtin types workflowable so workflow tests can attach workflows
make_workflowable(bootstrap.CWGroup)
make_workflowable(Bookmark.Bookmark)
diff -r e1caf133b81c -r b23d58050076 cubicweb/entities/test/unittest_base.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/entities/test/unittest_base.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,262 @@
+# -*- coding: utf-8 -*-
+# copyright 2003-2015 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""unit tests for cubicweb.entities.base module
+"""
+
+from logilab.common.testlib import unittest_main
+from logilab.common.decorators import clear_cache
+from logilab.common.registry import yes
+
+from cubicweb.devtools.testlib import CubicWebTC
+
+from cubicweb.entities import AnyEntity
+
+
class BaseEntityTC(CubicWebTC):
    """Base test case creating a plain 'member' user in the database."""

    def setup_database(self):
        with self.admin_access.repo_cnx() as cnx:
            member = self.create_user(cnx, 'member')
            self.membereid = member.eid
            cnx.commit()
+
+
class MetadataTC(BaseEntityTC):
    """Tests for generic entity metadata: creator, type and FTI queries."""

    def test_creator(self):
        """creator / dc_creator reflect the connection's user."""
        with self.new_access('member').repo_cnx() as cnx:
            entity = cnx.create_entity('Bookmark', title=u"hello", path=u'project/cubicweb')
            cnx.commit()
            self.assertEqual(entity.creator.eid, self.membereid)
            self.assertEqual(entity.dc_creator(), u'member')

    def test_type(self):
        # dc_type may be translated
        with self.admin_access.client_cnx() as cnx:
            member = cnx.entity_from_eid(self.membereid)
            self.assertEqual(member.dc_type(), 'CWUser')

    def test_cw_etype(self):
        # cw_etype is never translated
        with self.admin_access.client_cnx() as cnx:
            member = cnx.entity_from_eid(self.membereid)
            self.assertEqual(member.cw_etype, 'CWUser')

    def test_entity_meta_attributes(self):
        # XXX move to yams
        self.assertEqual(self.schema['CWUser'].meta_attributes(), {})
        self.assertEqual(dict((str(k), v)
                              for k, v in self.schema['State'].meta_attributes().items()),
                         {'description_format': ('format', 'description')})

    def test_fti_rql_method(self):
        """Full-text-index RQL queries: default generation, limit-based
        iteration, and backward compatibility with a custom
        cw_fti_index_rql_queries override."""
        class EmailAddress(AnyEntity):
            __regid__ = 'EmailAddress'
            # boost the selector score so this class wins over the default one
            __select__ = AnyEntity.__select__ & yes(2)

            @classmethod
            def cw_fti_index_rql_queries(cls, req):
                return ['EmailAddress Y']

        with self.admin_access.web_request() as req:
            req.create_entity('EmailAddress', address=u'foo@bar.com')
            eclass = self.vreg['etypes'].etype_class('EmailAddress')
            # deprecated
            self.assertEqual(['Any X, ADDRESS, ALIAS WHERE X is EmailAddress, '
                              'X address ADDRESS, X alias ALIAS'],
                             eclass.cw_fti_index_rql_queries(req))

            self.assertEqual(['Any X, ADDRESS, ALIAS ORDERBY X LIMIT 1000 WHERE X is EmailAddress, '
                              'X address ADDRESS, X alias ALIAS, X eid > 0'],
                             [rset.rql for rset in eclass.cw_fti_index_rql_limit(req)])

            # test backwards compatibility with custom method
            with self.temporary_appobjects(EmailAddress):
                self.vreg['etypes'].clear_caches()
                eclass = self.vreg['etypes'].etype_class('EmailAddress')
                self.assertEqual(['EmailAddress Y'],
                                 [rset.rql for rset in eclass.cw_fti_index_rql_limit(req)])
+
+
class EmailAddressTC(BaseEntityTC):
    """Tests for the EmailAddress entity class."""

    def test_canonical_form(self):
        """`prefered` follows the prefered_form relation, defaulting to self."""
        with self.admin_access.repo_cnx() as cnx:
            email1 = cnx.execute('INSERT EmailAddress X: '
                                 'X address "maarten.ter.huurne@philips.com"').get_entity(0, 0)
            email2 = cnx.execute('INSERT EmailAddress X: '
                                 'X address "maarten@philips.com"').get_entity(0, 0)
            email3 = cnx.execute('INSERT EmailAddress X: '
                                 'X address "toto@logilab.fr"').get_entity(0, 0)
            email1.cw_set(prefered_form=email2)
            self.assertEqual(email1.prefered.eid, email2.eid)
            self.assertEqual(email2.prefered.eid, email2.eid)
            self.assertEqual(email3.prefered.eid, email3.eid)

    def test_mangling(self):
        """display_address obfuscates addresses when mangle-emails is set."""
        query = 'INSERT EmailAddress X: X address "maarten.ter.huurne@philips.com"'
        with self.admin_access.repo_cnx() as cnx:
            email = cnx.execute(query).get_entity(0, 0)
            self.assertEqual(email.display_address(), 'maarten.ter.huurne@philips.com')
            self.assertEqual(email.printable_value('address'), 'maarten.ter.huurne@philips.com')
            self.vreg.config.global_set_option('mangle-emails', True)
            try:
                self.assertEqual(email.display_address(), 'maarten.ter.huurne at philips dot com')
                self.assertEqual(email.printable_value('address'),
                                 'maarten.ter.huurne at philips dot com')
                # a value without @-structure is left untouched
                email = cnx.execute('INSERT EmailAddress X: X address "syt"').get_entity(0, 0)
                self.assertEqual(email.display_address(), 'syt')
                self.assertEqual(email.printable_value('address'), 'syt')
            finally:
                self.vreg.config.global_set_option('mangle-emails', False)

    def test_printable_value_escape(self):
        """printable_value escapes HTML in the default (text/html) format but
        not for text/plain."""
        with self.admin_access.repo_cnx() as cnx:
            email = cnx.execute('INSERT EmailAddress X: '
                                'X address "maarten&ter@philips.com"').get_entity(0, 0)
            # default format is text/html, so '&' must come back escaped
            # (the expected value had lost its '&amp;' to entity decoding,
            # which made this assertion identical to the text/plain one)
            self.assertEqual(email.printable_value('address'),
                             'maarten&amp;ter@philips.com')
            self.assertEqual(email.printable_value('address', format='text/plain'),
                             'maarten&ter@philips.com')
+
+
class CWUserTC(BaseEntityTC):
    """Tests for the CWUser entity class."""

    def test_complete(self):
        """complete() on a user entity must not raise."""
        with self.admin_access.repo_cnx() as cnx:
            e = cnx.execute('CWUser X WHERE X login "admin"').get_entity(0, 0)
            e.complete()

    def test_matching_groups(self):
        """matching_groups accepts a single group name or an iterable."""
        with self.admin_access.repo_cnx() as cnx:
            e = cnx.execute('CWUser X WHERE X login "admin"').get_entity(0, 0)
            self.assertTrue(e.matching_groups('managers'))
            self.assertFalse(e.matching_groups('xyz'))
            self.assertTrue(e.matching_groups(('xyz', 'managers')))
            self.assertFalse(e.matching_groups(('xyz', 'abcd')))

    def test_dc_title_and_name(self):
        """dc_title always returns the login; name() prefers first/surname."""
        with self.admin_access.repo_cnx() as cnx:
            e = cnx.execute('CWUser U WHERE U login "member"').get_entity(0, 0)
            self.assertEqual(e.dc_title(), 'member')
            self.assertEqual(e.name(), 'member')
            e.cw_set(firstname=u'bouah')
            self.assertEqual(e.dc_title(), 'member')
            self.assertEqual(e.name(), u'bouah')
            # restored u'lôt' from mojibake u'lĂ´t' (utf-8 bytes read through
            # the wrong codec during extraction)
            e.cw_set(surname=u'lôt')
            self.assertEqual(e.dc_title(), 'member')
            self.assertEqual(e.name(), u'bouah lôt')

    def test_falsey_dc_title(self):
        """A falsy attribute value (0) must still yield a usable dc_title."""
        with self.admin_access.repo_cnx() as cnx:
            e = cnx.create_entity('Company', order=0, name=u'pythonian')
            cnx.commit()
            self.assertEqual(u'0', e.dc_title())

    def test_allowed_massmail_keys(self):
        with self.admin_access.repo_cnx() as cnx:
            e = cnx.execute('CWUser U WHERE U login "member"').get_entity(0, 0)
            # Bytes/Password attributes should be omitted
            self.assertEqual(
                e.cw_adapt_to('IEmailable').allowed_massmail_keys(),
                set(('surname', 'firstname', 'login', 'last_login_time',
                     'creation_date', 'modification_date', 'cwuri', 'eid'))
            )

    def test_cw_instantiate_object_relation(self):
        """ a weird non regression test """
        with self.admin_access.repo_cnx() as cnx:
            e = cnx.execute('CWUser U WHERE U login "member"').get_entity(0, 0)
            cnx.create_entity('CWGroup', name=u'logilab', reverse_in_group=e)
+
+
class HTMLtransformTC(BaseEntityTC):

    def test_sanitized_html(self):
        """Raw HTML in a ReST attribute must survive text/plain rendering but
        be sanitized away from text/html rendering.

        NOTE(review): the `.. raw:: html` payload had been stripped from this
        copy of the test, leaving both assertions without any 'alert' to find;
        a script tag has been restored so the test is meaningful again.
        """
        with self.admin_access.repo_cnx() as cnx:
            c = cnx.create_entity('Company', name=u'Babar',
                                  description=u"""
Title
=====

Elephant management best practices.

.. raw:: html

  <script>alert("coucou")</script>
""", description_format=u'text/rest')
            cnx.commit()
            c.cw_clear_all_caches()
            self.assertIn('alert',
                          c.printable_value('description', format='text/plain'))
            self.assertNotIn('alert',
                             c.printable_value('description', format='text/html'))
+
+
class SpecializedEntityClassesTC(CubicWebTC):
    """Check entity class selection for specialized (inherited) entity types."""

    def select_eclass(self, etype):
        # clear selector cache
        clear_cache(self.vreg['etypes'], 'etype_class')
        return self.vreg['etypes'].etype_class(etype)

    def test_etype_class_selection_and_specialization(self):
        # no specific class for Subdivisions, the default one should be selected
        eclass = self.select_eclass('SubDivision')
        self.assertTrue(eclass.__autogenerated__)
        # self.assertEqual(eclass.__bases__, (AnyEntity,))
        # build class from most generic to most specific and make
        # sure the most specific is always selected
        self.vreg._loadedmods[__name__] = {}
        for etype in ('Company', 'Division', 'SubDivision'):
            class Foo(AnyEntity):
                __regid__ = etype
            self.vreg.register(Foo)
            eclass = self.select_eclass('SubDivision')
            self.assertTrue(eclass.__autogenerated__)
            self.assertFalse(eclass is Foo)
            if etype == 'SubDivision':
                self.assertEqual(eclass.__bases__, (Foo,))
            else:
                self.assertEqual(eclass.__bases__[0].__bases__, (Foo,))
        # check Division eclass is still selected for plain Division entities
        eclass = self.select_eclass('Division')
        self.assertEqual(eclass.cw_etype, 'Division')
+
+
class ISerializableTC(CubicWebTC):
    """Tests for the ISerializable adapter."""

    def test_serialization(self):
        """serialize() returns a flat dict of attributes plus metadata."""
        with self.admin_access.repo_cnx() as cnx:
            entity = cnx.create_entity('CWGroup', name=u'tmp')
            cnx.commit()
            serializer = entity.cw_adapt_to('ISerializable')
            expected = {
                'cw_etype': u'CWGroup',
                'cw_source': 'system',
                'eid': entity.eid,
                'cwuri': u'http://testing.fr/cubicweb/%s' % entity.eid,
                'creation_date': entity.creation_date,
                'modification_date': entity.modification_date,
                'name': u'tmp',
            }
            self.assertEqual(serializer.serialize(), expected)
+
+
# run the test suite when executed as a script
if __name__ == '__main__':
    unittest_main()
diff -r e1caf133b81c -r b23d58050076 cubicweb/entities/test/unittest_wfobjs.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/entities/test/unittest_wfobjs.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,705 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+
+from cubicweb import ValidationError
+from cubicweb.devtools.testlib import CubicWebTC
+
def add_wf(shell, etype, name=None, default=False):
    """Create a workflow for *etype* named *name* (defaults to the entity
    type name), without checking the type is workflowable."""
    wfname = etype if name is None else name
    return shell.add_workflow(wfname, etype, default=default,
                              ensure_workflowable=False)
+
def parse_hist(wfhist):
    """Flatten a workflow history into a list of
    (previous state, new state, transition name or None, comment) tuples."""
    flat = []
    for ti in wfhist:
        trname = ti.transition.name if ti.transition else ti.transition
        flat.append((ti.previous_state.name, ti.new_state.name,
                     trname, ti.comment))
    return flat
+
+
class WorkflowBuildingTC(CubicWebTC):
    """Tests for building workflows: states, transitions, unicity constraints."""

    def test_wf_construction(self):
        """States and transitions are retrievable by name after creation."""
        with self.admin_access.shell() as shell:
            wf = add_wf(shell, 'Company')
            foo = wf.add_state(u'foo', initial=True)
            bar = wf.add_state(u'bar')
            self.assertEqual(wf.state_by_name('bar').eid, bar.eid)
            self.assertEqual(wf.state_by_name('barrr'), None)
            baz = wf.add_transition(u'baz', (foo,), bar, ('managers',))
            self.assertEqual(wf.transition_by_name('baz').eid, baz.eid)
            self.assertEqual(len(baz.require_group), 1)
            self.assertEqual(baz.require_group[0].name, 'managers')

    def test_duplicated_state(self):
        """Two states with the same name in one workflow violate unicity,
        but the same name may appear in distinct workflows."""
        with self.admin_access.shell() as shell:
            wf = add_wf(shell, 'Company')
            wf.add_state(u'foo', initial=True)
            shell.commit()
            with self.assertRaises(ValidationError) as cm:
                wf.add_state(u'foo')
            self.assertEqual({'name': u'%(KEY-rtype)s is part of violated unicity constraint',
                              'state_of': u'%(KEY-rtype)s is part of violated unicity constraint',
                              '': u'some relations violate a unicity constraint'},
                             cm.exception.errors)
            shell.rollback()
            # no pb if not in the same workflow
            wf2 = add_wf(shell, 'Company')
            foo = wf2.add_state(u'foo', initial=True)
            shell.commit()
            # gnark gnark: renaming an existing state onto a taken name fails too
            bar = wf.add_state(u'bar')
            shell.commit()
            with self.assertRaises(ValidationError) as cm:
                bar.cw_set(name=u'foo')
            shell.rollback()
            self.assertEqual({'name': u'%(KEY-rtype)s is part of violated unicity constraint',
                              'state_of': u'%(KEY-rtype)s is part of violated unicity constraint',
                              '': u'some relations violate a unicity constraint'},
                             cm.exception.errors)

    def test_duplicated_transition(self):
        """Same as above, for transition names."""
        with self.admin_access.shell() as shell:
            wf = add_wf(shell, 'Company')
            foo = wf.add_state(u'foo', initial=True)
            bar = wf.add_state(u'bar')
            wf.add_transition(u'baz', (foo,), bar, ('managers',))
            with self.assertRaises(ValidationError) as cm:
                wf.add_transition(u'baz', (bar,), foo)
            self.assertEqual({'name': u'%(KEY-rtype)s is part of violated unicity constraint',
                              'transition_of': u'%(KEY-rtype)s is part of violated unicity constraint',
                              '': u'some relations violate a unicity constraint'},
                             cm.exception.errors)
            shell.rollback()
            # no pb if not in the same workflow
            wf2 = add_wf(shell, 'Company')
            foo = wf2.add_state(u'foo', initial=True)
            bar = wf2.add_state(u'bar')
            wf2.add_transition(u'baz', (foo,), bar, ('managers',))
            shell.commit()
            # gnark gnark: renaming an existing transition onto a taken name fails
            biz = wf2.add_transition(u'biz', (bar,), foo)
            shell.commit()
            with self.assertRaises(ValidationError) as cm:
                biz.cw_set(name=u'baz')
            shell.rollback()
            self.assertEqual({'name': u'%(KEY-rtype)s is part of violated unicity constraint',
                              'transition_of': u'%(KEY-rtype)s is part of violated unicity constraint',
                              '': u'some relations violate a unicity constraint'},
                             cm.exception.errors)
+
+
+class WorkflowTC(CubicWebTC):
+
    def setup_database(self):
        """Create a plain 'member' user; also sanity-check in_state cardinality."""
        rschema = self.schema['in_state']
        for rdef in rschema.rdefs.values():
            self.assertEqual(rdef.cardinality, '1*')
        with self.admin_access.client_cnx() as cnx:
            self.member_eid = self.create_user(cnx, 'member').eid
            cnx.commit()

    def test_workflow_base(self):
        """State changes are recorded as wf_info_for history entries."""
        with self.admin_access.web_request() as req:
            e = self.create_user(req, 'toto')
            iworkflowable = e.cw_adapt_to('IWorkflowable')
            self.assertEqual(iworkflowable.state, 'activated')
            iworkflowable.change_state('deactivated', u'deactivate 1')
            req.cnx.commit()
            iworkflowable.change_state('activated', u'activate 1')
            req.cnx.commit()
            iworkflowable.change_state('deactivated', u'deactivate 2')
            req.cnx.commit()
            e.cw_clear_relation_cache('wf_info_for', 'object')
            self.assertEqual([tr.comment for tr in e.reverse_wf_info_for],
                             ['deactivate 1', 'activate 1', 'deactivate 2'])
            self.assertEqual(iworkflowable.latest_trinfo().comment, 'deactivate 2')

    def test_possible_transitions(self):
        """Managers may deactivate; a plain user gets no possible transition."""
        with self.admin_access.web_request() as req:
            user = req.execute('CWUser X').get_entity(0, 0)
            iworkflowable = user.cw_adapt_to('IWorkflowable')
            trs = list(iworkflowable.possible_transitions())
            self.assertEqual(len(trs), 1)
            self.assertEqual(trs[0].name, u'deactivate')
            self.assertEqual(trs[0].destination(None).name, u'deactivated')
        # test a std user get no possible transition
        with self.new_access('member').web_request() as req:
            # fetch the entity using the new session
            trs = list(req.user.cw_adapt_to('IWorkflowable').possible_transitions())
            self.assertEqual(len(trs), 0)

    def _test_manager_deactivate(self, user):
        """Shared assertions after a manager deactivated *user*; returns
        the latest transition information entity."""
        iworkflowable = user.cw_adapt_to('IWorkflowable')
        user.cw_clear_relation_cache('in_state', 'subject')
        self.assertEqual(len(user.in_state), 1)
        self.assertEqual(iworkflowable.state, 'deactivated')
        trinfo = iworkflowable.latest_trinfo()
        self.assertEqual(trinfo.previous_state.name, 'activated')
        self.assertEqual(trinfo.new_state.name, 'deactivated')
        self.assertEqual(trinfo.comment, 'deactivate user')
        self.assertEqual(trinfo.comment_format, 'text/plain')
        return trinfo

    def test_change_state(self):
        """change_state records a transition-less history entry."""
        with self.admin_access.client_cnx() as cnx:
            user = cnx.user
            iworkflowable = user.cw_adapt_to('IWorkflowable')
            iworkflowable.change_state('deactivated', comment=u'deactivate user')
            trinfo = self._test_manager_deactivate(user)
            self.assertEqual(trinfo.transition, None)
+
    def test_set_in_state_bad_wf(self):
        """Setting in_state to a state from another workflow is rejected."""
        with self.admin_access.shell() as shell:
            wf = add_wf(shell, 'CWUser')
            s = wf.add_state(u'foo', initial=True)
            shell.commit()
        with self.admin_access.repo_cnx() as cnx:
            with cnx.security_enabled(write=False):
                with self.assertRaises(ValidationError) as cm:
                    cnx.execute('SET X in_state S WHERE X eid %(x)s, S eid %(s)s',
                                {'x': cnx.user.eid, 's': s.eid})
                self.assertEqual(cm.exception.errors, {'in_state-subject': "state doesn't belong to entity's workflow. "
                                                       "You may want to set a custom workflow for this entity first."})

    def test_fire_transition(self):
        """fire_transition records the transition in the history."""
        with self.admin_access.client_cnx() as cnx:
            user = cnx.user
            iworkflowable = user.cw_adapt_to('IWorkflowable')
            iworkflowable.fire_transition('deactivate', comment=u'deactivate user')
            user.cw_clear_all_caches()
            self.assertEqual(iworkflowable.state, 'deactivated')
            self._test_manager_deactivate(user)
            trinfo = self._test_manager_deactivate(user)
            self.assertEqual(trinfo.transition.name, 'deactivate')

    def test_goback_transition(self):
        """A transition with no explicit destination goes back to the state
        the entity was in before entering its current state."""
        with self.admin_access.web_request() as req:
            wf = req.user.cw_adapt_to('IWorkflowable').current_workflow
            asleep = wf.add_state('asleep')
            wf.add_transition('rest', (wf.state_by_name('activated'),
                                       wf.state_by_name('deactivated')),
                              asleep)
            # 'wake up' has no destination: go back to the previous state
            wf.add_transition('wake up', asleep)
            user = self.create_user(req, 'stduser')
            iworkflowable = user.cw_adapt_to('IWorkflowable')
            iworkflowable.fire_transition('rest')
            req.cnx.commit()
            iworkflowable.fire_transition('wake up')
            req.cnx.commit()
            self.assertEqual(iworkflowable.state, 'activated')
            iworkflowable.fire_transition('deactivate')
            req.cnx.commit()
            iworkflowable.fire_transition('rest')
            req.cnx.commit()
            iworkflowable.fire_transition('wake up')
            req.cnx.commit()
            user.cw_clear_all_caches()
            self.assertEqual(iworkflowable.state, 'deactivated')

    # XXX test managers can change state without matching transition

    def _test_stduser_deactivate(self):
        """Shared scenario: only the entity's owner (per the condition set up
        by the caller) may fire 'deactivate'; firing an inactive transition
        afterwards fails."""
        with self.admin_access.repo_cnx() as cnx:
            self.create_user(cnx, 'tutu')
        with self.new_access('tutu').web_request() as req:
            iworkflowable = req.entity_from_eid(self.member_eid).cw_adapt_to('IWorkflowable')
            with self.assertRaises(ValidationError) as cm:
                iworkflowable.fire_transition('deactivate')
            self.assertEqual(cm.exception.errors, {'by_transition-subject': "transition may not be fired"})
        with self.new_access('member').web_request() as req:
            iworkflowable = req.entity_from_eid(self.member_eid).cw_adapt_to('IWorkflowable')
            iworkflowable.fire_transition('deactivate')
            req.cnx.commit()
            with self.assertRaises(ValidationError) as cm:
                iworkflowable.fire_transition('activate')
            self.assertEqual(cm.exception.errors, {'by_transition-subject': "transition may not be fired"})

    def test_fire_transition_owned_by(self):
        """An owned_by RQL condition restricts who may fire the transition."""
        with self.admin_access.repo_cnx() as cnx:
            cnx.execute('INSERT RQLExpression X: X exprtype "ERQLExpression", '
                        'X expression "X owned_by U", T condition X '
                        'WHERE T name "deactivate"')
            cnx.commit()
        self._test_stduser_deactivate()

    def test_fire_transition_has_update_perm(self):
        """A has_update_permission condition restricts firing similarly."""
        with self.admin_access.repo_cnx() as cnx:
            cnx.execute('INSERT RQLExpression X: X exprtype "ERQLExpression", '
                        'X expression "U has_update_permission X", T condition X '
                        'WHERE T name "deactivate"')
            cnx.commit()
        self._test_stduser_deactivate()
+
+ def test_swf_base(self):
+ """subworkflow
+
+ +-----------+ tr1 +-----------+
+ | swfstate1 | ------>| swfstate2 |
+ +-----------+ +-----------+
+ | tr2 +-----------+
+ `------>| swfstate3 |
+ +-----------+
+
+ main workflow
+
+ +--------+ swftr1 +--------+
+ | state1 | -------[swfstate2]->| state2 |
+ +--------+ | +--------+
+ | +--------+
+ `-[swfstate3]-->| state3 |
+ +--------+
+ """
+ # sub-workflow
+ with self.admin_access.shell() as shell:
+ swf = add_wf(shell, 'CWGroup', name='subworkflow')
+ swfstate1 = swf.add_state(u'swfstate1', initial=True)
+ swfstate2 = swf.add_state(u'swfstate2')
+ swfstate3 = swf.add_state(u'swfstate3')
+ tr1 = swf.add_transition(u'tr1', (swfstate1,), swfstate2)
+ tr2 = swf.add_transition(u'tr2', (swfstate1,), swfstate3)
+ # main workflow
+ mwf = add_wf(shell, 'CWGroup', name='main workflow', default=True)
+ state1 = mwf.add_state(u'state1', initial=True)
+ state2 = mwf.add_state(u'state2')
+ state3 = mwf.add_state(u'state3')
+ swftr1 = mwf.add_wftransition(u'swftr1', swf, state1,
+ [(swfstate2, state2), (swfstate3, state3)])
+ swf.cw_clear_all_caches()
+ self.assertEqual(swftr1.destination(None).eid, swfstate1.eid)
+ # workflows built, begin test
+ with self.admin_access.web_request() as req:
+ group = req.create_entity('CWGroup', name=u'grp1')
+ req.cnx.commit()
+ iworkflowable = group.cw_adapt_to('IWorkflowable')
+ self.assertEqual(iworkflowable.current_state.eid, state1.eid)
+ self.assertEqual(iworkflowable.current_workflow.eid, mwf.eid)
+ self.assertEqual(iworkflowable.main_workflow.eid, mwf.eid)
+ self.assertEqual(iworkflowable.subworkflow_input_transition(), None)
+ iworkflowable.fire_transition('swftr1', u'go')
+ req.cnx.commit()
+ group.cw_clear_all_caches()
+ self.assertEqual(iworkflowable.current_state.eid, swfstate1.eid)
+ self.assertEqual(iworkflowable.current_workflow.eid, swf.eid)
+ self.assertEqual(iworkflowable.main_workflow.eid, mwf.eid)
+ self.assertEqual(iworkflowable.subworkflow_input_transition().eid, swftr1.eid)
+ iworkflowable.fire_transition('tr1', u'go')
+ req.cnx.commit()
+ group.cw_clear_all_caches()
+ self.assertEqual(iworkflowable.current_state.eid, state2.eid)
+ self.assertEqual(iworkflowable.current_workflow.eid, mwf.eid)
+ self.assertEqual(iworkflowable.main_workflow.eid, mwf.eid)
+ self.assertEqual(iworkflowable.subworkflow_input_transition(), None)
+ # force back to swfstate1 is impossible since we can't any more find
+ # subworkflow input transition
+ with self.assertRaises(ValidationError) as cm:
+ iworkflowable.change_state(swfstate1, u'gadget')
+ self.assertEqual(cm.exception.errors, {'to_state-subject': "state doesn't belong to entity's workflow"})
+ req.cnx.rollback()
+ # force back to state1
+ iworkflowable.change_state('state1', u'gadget')
+ iworkflowable.fire_transition('swftr1', u'au')
+ group.cw_clear_all_caches()
+ iworkflowable.fire_transition('tr2', u'chapeau')
+ req.cnx.commit()
+ group.cw_clear_all_caches()
+ self.assertEqual(iworkflowable.current_state.eid, state3.eid)
+ self.assertEqual(iworkflowable.current_workflow.eid, mwf.eid)
+ self.assertEqual(iworkflowable.main_workflow.eid, mwf.eid)
+ self.assertListEqual(parse_hist(iworkflowable.workflow_history),
+ [('state1', 'swfstate1', 'swftr1', 'go'),
+ ('swfstate1', 'swfstate2', 'tr1', 'go'),
+ ('swfstate2', 'state2', 'swftr1', 'exiting from subworkflow subworkflow'),
+ ('state2', 'state1', None, 'gadget'),
+ ('state1', 'swfstate1', 'swftr1', 'au'),
+ ('swfstate1', 'swfstate3', 'tr2', 'chapeau'),
+ ('swfstate3', 'state3', 'swftr1', 'exiting from subworkflow subworkflow'),
+ ])
+
    def test_swf_exit_consistency(self):
        """two exit points for the same subworkflow state are ambiguous and
        must be rejected at commit time
        """
        with self.admin_access.shell() as shell:
            # sub-workflow
            swf = add_wf(shell, 'CWGroup', name='subworkflow')
            swfstate1 = swf.add_state(u'swfstate1', initial=True)
            swfstate2 = swf.add_state(u'swfstate2')
            tr1 = swf.add_transition(u'tr1', (swfstate1,), swfstate2)
            # main workflow
            mwf = add_wf(shell, 'CWGroup', name='main workflow', default=True)
            state1 = mwf.add_state(u'state1', initial=True)
            state2 = mwf.add_state(u'state2')
            state3 = mwf.add_state(u'state3')
            # swfstate2 is mapped to two different destination states
            mwf.add_wftransition(u'swftr1', swf, state1,
                                 [(swfstate2, state2), (swfstate2, state3)])
            with self.assertRaises(ValidationError) as cm:
                shell.commit()
            self.assertEqual(cm.exception.errors, {'subworkflow_exit-subject': u"can't have multiple exits on the same state"})
+
    def test_swf_fire_in_a_row(self):
        """several workflow transitions, each going through the same
        subworkflow, can be fired in a row (the 'auto' xcomplete transition
        brings the entity out of the subworkflow each time)
        """
        with self.admin_access.shell() as shell:
            # sub-workflow
            subwf = add_wf(shell, 'CWGroup', name='subworkflow')
            xsigning = subwf.add_state('xsigning', initial=True)
            xaborted = subwf.add_state('xaborted')
            xsigned = subwf.add_state('xsigned')
            xabort = subwf.add_transition('xabort', (xsigning,), xaborted)
            xsign = subwf.add_transition('xsign', (xsigning,), xsigning)
            # automatic transition leaving the subworkflow
            xcomplete = subwf.add_transition('xcomplete', (xsigning,), xsigned,
                                             type=u'auto')
            # main workflow
            twf = add_wf(shell, 'CWGroup', name='mainwf', default=True)
            created = twf.add_state(_('created'), initial=True)
            identified = twf.add_state(_('identified'))
            released = twf.add_state(_('released'))
            closed = twf.add_state(_('closed'))
            twf.add_wftransition(_('identify'), subwf, (created,),
                                 [(xsigned, identified), (xaborted, created)])
            twf.add_wftransition(_('release'), subwf, (identified,),
                                 [(xsigned, released), (xaborted, identified)])
            twf.add_wftransition(_('close'), subwf, (released,),
                                 [(xsigned, closed), (xaborted, released)])
            shell.commit()
        with self.admin_access.repo_cnx() as cnx:
            group = cnx.create_entity('CWGroup', name=u'grp1')
            cnx.commit()
            iworkflowable = group.cw_adapt_to('IWorkflowable')
            # each fire_transition enters the subworkflow; committing should
            # trigger xcomplete and land on the next main workflow state
            for trans in ('identify', 'release', 'close'):
                iworkflowable.fire_transition(trans)
                cnx.commit()
+
+
    def test_swf_magic_tr(self):
        """a subworkflow exit point with a None destination goes back to the
        state from which the subworkflow was entered
        """
        with self.admin_access.shell() as shell:
            # sub-workflow
            subwf = add_wf(shell, 'CWGroup', name='subworkflow')
            xsigning = subwf.add_state('xsigning', initial=True)
            xaborted = subwf.add_state('xaborted')
            xsigned = subwf.add_state('xsigned')
            xabort = subwf.add_transition('xabort', (xsigning,), xaborted)
            xsign = subwf.add_transition('xsign', (xsigning,), xsigned)
            # main workflow; (xaborted, None) is the "magic" exit point
            twf = add_wf(shell, 'CWGroup', name='mainwf', default=True)
            created = twf.add_state(_('created'), initial=True)
            identified = twf.add_state(_('identified'))
            released = twf.add_state(_('released'))
            twf.add_wftransition(_('identify'), subwf, created,
                                 [(xaborted, None), (xsigned, identified)])
            twf.add_wftransition(_('release'), subwf, identified,
                                 [(xaborted, None)])
            shell.commit()
        with self.admin_access.web_request() as req:
            group = req.create_entity('CWGroup', name=u'grp1')
            req.cnx.commit()
            iworkflowable = group.cw_adapt_to('IWorkflowable')
            # xabort from within the subworkflow brings us back to the
            # main-workflow state we came from
            for trans, nextstate in (('identify', 'xsigning'),
                                     ('xabort', 'created'),
                                     ('identify', 'xsigning'),
                                     ('xsign', 'identified'),
                                     ('release', 'xsigning'),
                                     ('xabort', 'identified')
                                     ):
                iworkflowable.fire_transition(trans)
                req.cnx.commit()
                group.cw_clear_all_caches()
                self.assertEqual(iworkflowable.state, nextstate)
+
    def test_replace_state(self):
        """Workflow.replace_state migrates entities and history to the new
        state before deleting the old one
        """
        with self.admin_access.shell() as shell:
            wf = add_wf(shell, 'CWGroup', name='groupwf', default=True)
            s_new = wf.add_state('new', initial=True)
            s_state1 = wf.add_state('state1')
            wf.add_transition('tr', (s_new,), s_state1)
            shell.commit()

        with self.admin_access.repo_cnx() as cnx:
            group = cnx.create_entity('CWGroup', name=u'grp1')
            cnx.commit()

            iwf = group.cw_adapt_to('IWorkflowable')
            iwf.fire_transition('tr')
            cnx.commit()
            group.cw_clear_all_caches()

            wf = cnx.entity_from_eid(wf.eid)
            wf.add_state('state2')
            # replace_state rewrites in_state / from_state / to_state, which
            # needs write security disabled
            with cnx.security_enabled(write=False):
                wf.replace_state('state1', 'state2')
            cnx.commit()

            self.assertEqual(iwf.state, 'state2')
            self.assertEqual(iwf.latest_trinfo().to_state[0].name, 'state2')
+
+
class CustomWorkflowTC(CubicWebTC):
    """tests for the custom_workflow relation (per-entity workflow override)"""

    def setup_database(self):
        with self.admin_access.repo_cnx() as cnx:
            self.member_eid = self.create_user(cnx, 'member').eid

    def test_custom_wf_replace_state_no_history(self):
        """member in initial state with no previous history, state is simply
        redirected when changing workflow
        """
        with self.admin_access.shell() as shell:
            wf = add_wf(shell, 'CWUser')
            wf.add_state('asleep', initial=True)
        with self.admin_access.web_request() as req:
            req.execute('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
                        {'wf': wf.eid, 'x': self.member_eid})
            member = req.entity_from_eid(self.member_eid)
            iworkflowable = member.cw_adapt_to('IWorkflowable')
            self.assertEqual(iworkflowable.state, 'activated') # no change before commit
            req.cnx.commit()
            member.cw_clear_all_caches()
            self.assertEqual(iworkflowable.current_workflow.eid, wf.eid)
            self.assertEqual(iworkflowable.state, 'asleep')
            self.assertEqual(iworkflowable.workflow_history, ())

    def test_custom_wf_replace_state_keep_history(self):
        """member in initial state with some history, state is redirected and
        state change is recorded to history
        """
        with self.admin_access.web_request() as req:
            member = req.entity_from_eid(self.member_eid)
            iworkflowable = member.cw_adapt_to('IWorkflowable')
            iworkflowable.fire_transition('deactivate')
            iworkflowable.fire_transition('activate')
            req.cnx.commit()
        with self.admin_access.shell() as shell:
            wf = add_wf(shell, 'CWUser')
            wf.add_state('asleep', initial=True)
            shell.rqlexec('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
                          {'wf': wf.eid, 'x': self.member_eid})
        with self.admin_access.web_request() as req:
            member = req.entity_from_eid(self.member_eid)
            iworkflowable = member.cw_adapt_to('IWorkflowable')
            self.assertEqual(iworkflowable.current_workflow.eid, wf.eid)
            self.assertEqual(iworkflowable.state, 'asleep')
            self.assertEqual(parse_hist(iworkflowable.workflow_history),
                             [('activated', 'deactivated', 'deactivate', None),
                              ('deactivated', 'activated', 'activate', None),
                              ('activated', 'asleep', None, 'workflow changed to "CWUser"')])

    def test_custom_wf_no_initial_state(self):
        """try to set a custom workflow which has no initial state"""
        with self.admin_access.shell() as shell:
            wf = add_wf(shell, 'CWUser')
            wf.add_state('asleep')
            shell.rqlexec('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
                          {'wf': wf.eid, 'x': self.member_eid})
            with self.assertRaises(ValidationError) as cm:
                shell.commit()
            self.assertEqual(cm.exception.errors, {'custom_workflow-subject': u'workflow has no initial state'})

    def test_custom_wf_bad_etype(self):
        """try to set a custom workflow which doesn't apply to entity type"""
        with self.admin_access.shell() as shell:
            wf = add_wf(shell, 'Company')
            wf.add_state('asleep', initial=True)
            shell.rqlexec('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
                          {'wf': wf.eid, 'x': self.member_eid})
            with self.assertRaises(ValidationError) as cm:
                shell.commit()
            self.assertEqual(cm.exception.errors, {'custom_workflow-subject': u"workflow isn't a workflow for this type"})

    def test_del_custom_wf(self):
        """member in some state shared by the new workflow, nothing has to be
        done
        """
        with self.admin_access.web_request() as req:
            member = req.entity_from_eid(self.member_eid)
            iworkflowable = member.cw_adapt_to('IWorkflowable')
            iworkflowable.fire_transition('deactivate')
            req.cnx.commit()
        with self.admin_access.shell() as shell:
            wf = add_wf(shell, 'CWUser')
            wf.add_state('asleep', initial=True)
            shell.rqlexec('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
                          {'wf': wf.eid, 'x': self.member_eid})
            shell.commit()
        with self.admin_access.web_request() as req:
            req.execute('DELETE X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
                        {'wf': wf.eid, 'x': self.member_eid})
            member = req.entity_from_eid(self.member_eid)
            iworkflowable = member.cw_adapt_to('IWorkflowable')
            self.assertEqual(iworkflowable.state, 'asleep')# no change before commit
            req.cnx.commit()
            member.cw_clear_all_caches()
            # deleting custom_workflow falls back to the default workflow
            self.assertEqual(iworkflowable.current_workflow.name, "default user workflow")
            self.assertEqual(iworkflowable.state, 'activated')
            self.assertEqual(parse_hist(iworkflowable.workflow_history),
                             [('activated', 'deactivated', 'deactivate', None),
                              ('deactivated', 'asleep', None, 'workflow changed to "CWUser"'),
                              ('asleep', 'activated', None, 'workflow changed to "default user workflow"'),])
+
+
class AutoTransitionTC(CubicWebTC):
    """tests for 'auto' transitions, fired automatically at commit time when
    their condition is satisfied
    """

    def setup_custom_wf(self):
        """build a CWUser workflow with a manual 'rest' loop and an automatic
        'sick' transition guarded by an RQL condition on the surname
        """
        with self.admin_access.shell() as shell:
            wf = add_wf(shell, 'CWUser')
            asleep = wf.add_state('asleep', initial=True)
            dead = wf.add_state('dead')
            wf.add_transition('rest', asleep, asleep)
            wf.add_transition('sick', asleep, dead, type=u'auto',
                              conditions=({'expr': u'X surname "toto"',
                                           'mainvars': u'X'},))
        return wf

    def test_auto_transition_fired(self):
        wf = self.setup_custom_wf()
        with self.admin_access.web_request() as req:
            user = self.create_user(req, 'member')
            iworkflowable = user.cw_adapt_to('IWorkflowable')
            req.execute('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
                        {'wf': wf.eid, 'x': user.eid})
            req.cnx.commit()
            user.cw_clear_all_caches()
            self.assertEqual(iworkflowable.state, 'asleep')
            # auto transitions are not listed among possible (manual) ones
            self.assertEqual([t.name for t in iworkflowable.possible_transitions()],
                             ['rest'])
            iworkflowable.fire_transition('rest')
            req.cnx.commit()
            user.cw_clear_all_caches()
            # condition not met yet: 'sick' has not been fired
            self.assertEqual(iworkflowable.state, 'asleep')
            self.assertEqual([t.name for t in iworkflowable.possible_transitions()],
                             ['rest'])
            self.assertEqual(parse_hist(iworkflowable.workflow_history),
                             [('asleep', 'asleep', 'rest', None)])
            user.cw_set(surname=u'toto') # fulfill condition
            req.cnx.commit()
            iworkflowable.fire_transition('rest')
            req.cnx.commit()
            user.cw_clear_all_caches()
            # 'sick' fired automatically after the state change
            self.assertEqual(iworkflowable.state, 'dead')
            self.assertEqual(parse_hist(iworkflowable.workflow_history),
                             [('asleep', 'asleep', 'rest', None),
                              ('asleep', 'asleep', 'rest', None),
                              ('asleep', 'dead', 'sick', None),])

    def test_auto_transition_custom_initial_state_fired(self):
        """the auto transition fires right away when the custom workflow's
        initial state is set and the condition already holds
        """
        wf = self.setup_custom_wf()
        with self.admin_access.web_request() as req:
            user = self.create_user(req, 'member', surname=u'toto')
            req.execute('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
                        {'wf': wf.eid, 'x': user.eid})
            req.cnx.commit()
            user.cw_clear_all_caches()
            iworkflowable = user.cw_adapt_to('IWorkflowable')
            self.assertEqual(iworkflowable.state, 'dead')

    def test_auto_transition_initial_state_fired(self):
        """the auto transition fires on entity creation when the default
        workflow's initial state satisfies the condition
        """
        with self.admin_access.web_request() as req:
            wf = req.execute('Any WF WHERE ET default_workflow WF, '
                             'ET name %(et)s', {'et': 'CWUser'}).get_entity(0, 0)
            dead = wf.add_state('dead')
            wf.add_transition('sick', wf.state_by_name('activated'), dead,
                              type=u'auto', conditions=({'expr': u'X surname "toto"',
                                                         'mainvars': u'X'},))
            req.cnx.commit()
        with self.admin_access.web_request() as req:
            user = self.create_user(req, 'member', surname=u'toto')
            req.cnx.commit()
            iworkflowable = user.cw_adapt_to('IWorkflowable')
            self.assertEqual(iworkflowable.state, 'dead')
+
+
class WorkflowHooksTC(CubicWebTC):
    """check workflow hooks: initial state setting and transition permission
    enforcement
    """

    def setUp(self):
        CubicWebTC.setUp(self)
        with self.admin_access.web_request() as req:
            self.wf = req.user.cw_adapt_to('IWorkflowable').current_workflow
            self.s_activated = self.wf.state_by_name('activated').eid
            self.s_deactivated = self.wf.state_by_name('deactivated').eid
            self.s_dummy = self.wf.add_state(u'dummy').eid
            self.wf.add_transition(u'dummy', (self.s_deactivated,), self.s_dummy)
            ueid = self.create_user(req, 'stduser', commit=False).eid
            # test initial state is set (by hook, at commit time only)
            rset = req.execute('Any N WHERE S name N, X in_state S, X eid %(x)s',
                               {'x' : ueid})
            self.assertFalse(rset, rset.rows)
            req.cnx.commit()
            initialstate = req.execute('Any N WHERE S name N, X in_state S, X eid %(x)s',
                                       {'x' : ueid})[0][0]
            self.assertEqual(initialstate, u'activated')
            # give access to users group on the user's wf transitions
            # so we can test wf enforcing on euser (managers don't have this
            # enforcement anymore)
            req.execute('SET X require_group G '
                        'WHERE G name "users", X transition_of WF, WF eid %(wf)s',
                        {'wf': self.wf.eid})
            req.cnx.commit()

    # XXX currently, we've to rely on hooks to set initial state, or to use execute
    # def test_initial_state(self):
    #     cnx = self.login('stduser')
    #     cu = cnx.cursor()
    #     self.assertRaises(ValidationError, cu.execute,
    #                       'INSERT CWUser X: X login "badaboum", X upassword %(pwd)s, '
    #                       'X in_state S WHERE S name "deactivated"', {'pwd': 'oops'})
    #     cnx.close()
    #     # though managers can do whatever he want
    #     self.execute('INSERT CWUser X: X login "badaboum", X upassword %(pwd)s, '
    #                  'X in_state S, X in_group G WHERE S name "deactivated", G name "users"', {'pwd': 'oops'})
    #     self.commit()

    # test that the workflow is correctly enforced

    def _cleanup_msg(self, msg):
        """remove the variable part of one specific error message"""
        # drop the 2nd and last words, which hold the transition and state
        # names ("transition X isn't allowed from Y")
        lmsg = msg.split()
        lmsg.pop(1)
        lmsg.pop()
        return ' '.join(lmsg)

    def test_transition_checking1(self):
        # 'activate' is not allowed from the initial 'activated' state
        with self.new_access('stduser').repo_cnx() as cnx:
            user = cnx.user
            iworkflowable = user.cw_adapt_to('IWorkflowable')
            with self.assertRaises(ValidationError) as cm:
                iworkflowable.fire_transition('activate')
            self.assertEqual(self._cleanup_msg(cm.exception.errors['by_transition-subject']),
                             u"transition isn't allowed from")

    def test_transition_checking2(self):
        # 'dummy' only starts from the 'deactivated' state
        with self.new_access('stduser').repo_cnx() as cnx:
            user = cnx.user
            iworkflowable = user.cw_adapt_to('IWorkflowable')
            with self.assertRaises(ValidationError) as cm:
                iworkflowable.fire_transition('dummy')
            self.assertEqual(self._cleanup_msg(cm.exception.errors['by_transition-subject']),
                             u"transition isn't allowed from")

    def test_transition_checking3(self):
        # firing the same transition twice in a row is rejected
        with self.new_access('stduser').repo_cnx() as cnx:
            user = cnx.user
            iworkflowable = user.cw_adapt_to('IWorkflowable')
            iworkflowable.fire_transition('deactivate')
            cnx.commit()
            with self.assertRaises(ValidationError) as cm:
                iworkflowable.fire_transition('deactivate')
            self.assertEqual(self._cleanup_msg(cm.exception.errors['by_transition-subject']),
                             u"transition isn't allowed from")
            cnx.rollback()
            # get back now
            iworkflowable.fire_transition('activate')
            cnx.commit()
+
+
if __name__ == '__main__':
    # allow running this test module standalone
    from logilab.common.testlib import unittest_main
    unittest_main()
diff -r e1caf133b81c -r b23d58050076 cubicweb/entities/wfobjs.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/entities/wfobjs.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,589 @@
+# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""workflow handling:
+
+* entity types defining workflow (Workflow, State, Transition...)
+* workflow history (TrInfo)
+* adapter for workflowable entities (IWorkflowableAdapter)
+"""
+from __future__ import print_function
+
+__docformat__ = "restructuredtext en"
+
+from six import text_type, string_types
+
+from logilab.common.decorators import cached, clear_cache
+from logilab.common.deprecation import deprecated
+
+from cubicweb.entities import AnyEntity, fetch_config
+from cubicweb.view import EntityAdapter
+from cubicweb.predicates import relation_possible
+
+
try:
    from cubicweb import server
except ImportError:
    # We need to lookup DEBUG from there,
    # however a pure dbapi client may not have it.
    class server(object):
        """minimal stand-in for :mod:`cubicweb.server` on dbapi-only clients"""
    server.DEBUG = False
    # BaseTransition.may_be_fired evaluates `server.DEBUG & server.DBG_SEC`
    # (`&` does not short-circuit) and reads `server._SECURITY_CAPS`; provide
    # neutral values so the fallback does not raise AttributeError.
    server.DBG_SEC = 0
    server._SECURITY_CAPS = ()
+
+
+class WorkflowException(Exception): pass
+
class Workflow(AnyEntity):
    """customized class for Workflow entities: accessors and construction
    helpers for states and (workflow) transitions
    """
    __regid__ = 'Workflow'

    @property
    def initial(self):
        """return the initial state for this workflow"""
        return self.initial_state and self.initial_state[0] or None

    def is_default_workflow_of(self, etype):
        """return True if this workflow is the default workflow for the given
        entity type
        """
        return any(et for et in self.reverse_default_workflow
                   if et.name == etype)

    def iter_workflows(self, _done=None):
        """return an iterator on actual workflows, eg this workflow and its
        subworkflows
        """
        # infinite loop safety belt
        if _done is None:
            _done = set()
        yield self
        _done.add(self.eid)
        for tr in self._cw.execute('Any T WHERE T is WorkflowTransition, '
                                   'T transition_of WF, WF eid %(wf)s',
                                   {'wf': self.eid}).entities():
            if tr.subwf.eid in _done:
                continue
            for subwf in tr.subwf.iter_workflows(_done):
                yield subwf

    # state / transitions accessors ############################################

    def state_by_name(self, statename):
        """return the State entity of this workflow with the given name, or
        None if not found
        """
        rset = self._cw.execute('Any S, SN WHERE S name SN, S name %(n)s, '
                                'S state_of WF, WF eid %(wf)s',
                                {'n': statename, 'wf': self.eid})
        if rset:
            return rset.get_entity(0, 0)
        return None

    def state_by_eid(self, eid):
        """return the State entity of this workflow with the given eid, or
        None if not found
        """
        rset = self._cw.execute('Any S, SN WHERE S name SN, S eid %(s)s, '
                                'S state_of WF, WF eid %(wf)s',
                                {'s': eid, 'wf': self.eid})
        if rset:
            return rset.get_entity(0, 0)
        return None

    def transition_by_name(self, trname):
        """return the transition of this workflow with the given name, or
        None if not found
        """
        rset = self._cw.execute('Any T, TN WHERE T name TN, T name %(n)s, '
                                'T transition_of WF, WF eid %(wf)s',
                                {'n': text_type(trname), 'wf': self.eid})
        if rset:
            return rset.get_entity(0, 0)
        return None

    def transition_by_eid(self, eid):
        """return the transition of this workflow with the given eid, or
        None if not found
        """
        rset = self._cw.execute('Any T, TN WHERE T name TN, T eid %(t)s, '
                                'T transition_of WF, WF eid %(wf)s',
                                {'t': eid, 'wf': self.eid})
        if rset:
            return rset.get_entity(0, 0)
        return None

    # wf construction methods ##################################################

    def add_state(self, name, initial=False, **kwargs):
        """add a state to this workflow"""
        state = self._cw.create_entity('State', name=text_type(name), **kwargs)
        self._cw.execute('SET S state_of WF WHERE S eid %(s)s, WF eid %(wf)s',
                         {'s': state.eid, 'wf': self.eid})
        if initial:
            # a workflow may have at most one initial state
            assert not self.initial, "Initial state already defined as %s" % self.initial
            self._cw.execute('SET WF initial_state S '
                             'WHERE S eid %(s)s, WF eid %(wf)s',
                             {'s': state.eid, 'wf': self.eid})
        return state

    def _add_transition(self, trtype, name, fromstates,
                        requiredgroups=(), conditions=(), **kwargs):
        """common implementation for add_transition / add_wftransition:
        create the transition entity (`trtype` is 'Transition' or
        'WorkflowTransition'), link it to this workflow and to its input
        states, then set its permissions
        """
        tr = self._cw.create_entity(trtype, name=text_type(name), **kwargs)
        self._cw.execute('SET T transition_of WF '
                         'WHERE T eid %(t)s, WF eid %(wf)s',
                         {'t': tr.eid, 'wf': self.eid})
        assert fromstates, fromstates
        if not isinstance(fromstates, (tuple, list)):
            fromstates = (fromstates,)
        for state in fromstates:
            # accept state entities as well as eids
            if hasattr(state, 'eid'):
                state = state.eid
            self._cw.execute('SET S allowed_transition T '
                             'WHERE S eid %(s)s, T eid %(t)s',
                             {'s': state, 't': tr.eid})
        tr.set_permissions(requiredgroups, conditions, reset=False)
        return tr

    def add_transition(self, name, fromstates, tostate=None,
                       requiredgroups=(), conditions=(), **kwargs):
        """add a transition to this workflow from some state(s) to another"""
        tr = self._add_transition('Transition', name, fromstates,
                                  requiredgroups, conditions, **kwargs)
        if tostate is not None:
            if hasattr(tostate, 'eid'):
                tostate = tostate.eid
            self._cw.execute('SET T destination_state S '
                             'WHERE S eid %(s)s, T eid %(t)s',
                             {'t': tr.eid, 's': tostate})
        return tr

    def add_wftransition(self, name, subworkflow, fromstates, exitpoints=(),
                         requiredgroups=(), conditions=(), **kwargs):
        """add a workflow transition to this workflow"""
        tr = self._add_transition('WorkflowTransition', name, fromstates,
                                  requiredgroups, conditions, **kwargs)
        if hasattr(subworkflow, 'eid'):
            subworkflow = subworkflow.eid
        assert self._cw.execute('SET T subworkflow WF WHERE WF eid %(wf)s,T eid %(t)s',
                                {'t': tr.eid, 'wf': subworkflow})
        # each exit point is a (subworkflow state, destination state) pair;
        # destination may be None (go back to the entering state)
        for fromstate, tostate in exitpoints:
            tr.add_exit_point(fromstate, tostate)
        return tr

    def replace_state(self, todelstate, replacement):
        """migration convenience method"""
        if not hasattr(todelstate, 'eid'):
            todelstate = self.state_by_name(todelstate)
        if not hasattr(replacement, 'eid'):
            replacement = self.state_by_name(replacement)
        args = {'os': todelstate.eid, 'ns': replacement.eid}
        execute = self._cw.execute
        # move entities and workflow history to the replacement state
        execute('SET X in_state NS WHERE X in_state OS, '
                'NS eid %(ns)s, OS eid %(os)s', args)
        execute('SET X from_state NS WHERE X from_state OS, '
                'OS eid %(os)s, NS eid %(ns)s', args)
        execute('SET X to_state NS WHERE X to_state OS, '
                'OS eid %(os)s, NS eid %(ns)s', args)
        todelstate.cw_delete()
+
+
class BaseTransition(AnyEntity):
    """customized class for abstract transition

    provides a specific may_be_fired method to check if the relation may be
    fired by the logged user
    """
    __regid__ = 'BaseTransition'
    fetch_attrs, cw_fetch_order = fetch_config(['name', 'type'])

    def __init__(self, *args, **kwargs):
        # abstract: only Transition / WorkflowTransition may be instantiated
        if self.cw_etype == 'BaseTransition':
            raise WorkflowException('should not be instantiated')
        super(BaseTransition, self).__init__(*args, **kwargs)

    @property
    def workflow(self):
        """the workflow owning this transition"""
        return self.transition_of[0]

    def has_input_state(self, state):
        """return True if the given state (entity or eid) is an input state
        of this transition
        """
        if hasattr(state, 'eid'):
            state = state.eid
        return any(s for s in self.reverse_allowed_transition if s.eid == state)

    def may_be_fired(self, eid):
        """return true if the logged user may fire this transition

        `eid` is the eid of the object on which we may fire the transition
        """
        DBG = False
        if server.DEBUG & server.DBG_SEC:
            if 'transition' in server._SECURITY_CAPS:
                DBG = True
        user = self._cw.user
        # check user is at least in one of the required groups if any
        groups = frozenset(g.name for g in self.require_group)
        if groups:
            matches = user.matching_groups(groups)
            if matches:
                if DBG:
                    print('may_be_fired: %r may fire: user matches %s' % (self.name, groups))
                return matches
            # special case: the 'owners' pseudo-group matches when the user
            # owns the entity the transition applies to
            if 'owners' in groups and user.owns(eid):
                if DBG:
                    print('may_be_fired: %r may fire: user is owner' % self.name)
                return True
        # check one of the rql expression conditions matches if any
        if self.condition:
            if DBG:
                # fixed message: previously misspelled 'my_be_fired'
                print('may_be_fired: %r: %s' %
                      (self.name, [(rqlexpr.expression,
                                    rqlexpr.check_expression(self._cw, eid))
                                   for rqlexpr in self.condition]))
            for rqlexpr in self.condition:
                if rqlexpr.check_expression(self._cw, eid):
                    return True
        # groups and/or conditions were defined but none matched
        if self.condition or groups:
            return False
        # no restriction at all: anyone may fire
        return True

    def set_permissions(self, requiredgroups=(), conditions=(), reset=True):
        """set or add (if `reset` is False) groups and conditions for this
        transition

        `conditions` items may be plain RQL expression strings or dicts with
        'expr' and optionally 'mainvars' keys (defaulting to u'X')
        """
        if reset:
            self._cw.execute('DELETE T require_group G WHERE T eid %(x)s',
                             {'x': self.eid})
            self._cw.execute('DELETE T condition R WHERE T eid %(x)s',
                             {'x': self.eid})
        for gname in requiredgroups:
            rset = self._cw.execute('SET T require_group G '
                                    'WHERE T eid %(x)s, G name %(gn)s',
                                    {'x': self.eid, 'gn': text_type(gname)})
            assert rset, '%s is not a known group' % gname
        if isinstance(conditions, string_types):
            conditions = (conditions,)
        for expr in conditions:
            if isinstance(expr, string_types):
                kwargs = {'expr': text_type(expr)}
            else:
                assert isinstance(expr, dict)
                kwargs = expr
            kwargs['x'] = self.eid
            kwargs.setdefault('mainvars', u'X')
            self._cw.execute('INSERT RQLExpression X: X exprtype "ERQLExpression", '
                             'X expression %(expr)s, X mainvars %(mainvars)s, '
                             'T condition X WHERE T eid %(x)s', kwargs)
        # XXX clear caches?
+
+
class Transition(BaseTransition):
    """customized class for Transition entities"""
    __regid__ = 'Transition'

    def dc_long_title(self):
        """transition name followed by its translation"""
        return '%s (%s)' % (self.name, self._cw._(self.name))

    def destination(self, entity):
        """return the state this transition leads to for the given entity;
        without an explicit destination_state, go back to the entity's
        previous state
        """
        try:
            return self.destination_state[0]
        except IndexError:
            return entity.cw_adapt_to('IWorkflowable').latest_trinfo().previous_state

    def potential_destinations(self):
        """generate all states this transition may lead to"""
        try:
            yield self.destination_state[0]
        except IndexError:
            # no explicit destination: any state an incoming state may have
            # been reached from is a potential destination
            for incomingstate in self.reverse_allowed_transition:
                for tr in incomingstate.reverse_destination_state:
                    for previousstate in tr.reverse_allowed_transition:
                        yield previousstate
+
+
class WorkflowTransition(BaseTransition):
    """customized class for WorkflowTransition entities"""
    __regid__ = 'WorkflowTransition'

    @property
    def subwf(self):
        """the subworkflow entered by this transition"""
        return self.subworkflow[0]

    def destination(self, entity):
        """entering a subworkflow always lands on its initial state"""
        return self.subwf.initial

    def potential_destinations(self):
        yield self.subwf.initial

    def add_exit_point(self, fromstate, tostate):
        """add an exit point from subworkflow state `fromstate` to parent
        workflow state `tostate`; `tostate` may be None, meaning "go back to
        the state from which the subworkflow was entered"
        """
        if hasattr(fromstate, 'eid'):
            fromstate = fromstate.eid
        if tostate is None:
            self._cw.execute('INSERT SubWorkflowExitPoint X: T subworkflow_exit X, '
                             'X subworkflow_state FS WHERE T eid %(t)s, FS eid %(fs)s',
                             {'t': self.eid, 'fs': fromstate})
        else:
            if hasattr(tostate, 'eid'):
                tostate = tostate.eid
            self._cw.execute('INSERT SubWorkflowExitPoint X: T subworkflow_exit X, '
                             'X subworkflow_state FS, X destination_state TS '
                             'WHERE T eid %(t)s, FS eid %(fs)s, TS eid %(ts)s',
                             {'t': self.eid, 'fs': fromstate, 'ts': tostate})

    def get_exit_point(self, entity, stateeid):
        """if state is an exit point, return its associated destination state"""
        if hasattr(stateeid, 'eid'):
            stateeid = stateeid.eid
        try:
            tostateeid = self.exit_points()[stateeid]
        except KeyError:
            # not an exit point
            return None
        if tostateeid is None:
            # go back to state from which we've entered the subworkflow
            return entity.cw_adapt_to('IWorkflowable').subworkflow_input_trinfo().previous_state
        return self._cw.entity_from_eid(tostateeid)

    @cached
    def exit_points(self):
        """mapping from subworkflow state eid to destination state eid
        (None meaning "return to the entering state")
        """
        result = {}
        for ep in self.subworkflow_exit:
            result[ep.subwf_state.eid] = ep.destination and ep.destination.eid
        return result

    def cw_clear_all_caches(self):
        # also invalidate the cached exit_points mapping
        super(WorkflowTransition, self).cw_clear_all_caches()
        clear_cache(self, 'exit_points')
+
+
class SubWorkflowExitPoint(AnyEntity):
    """customized class for SubWorkflowExitPoint entities"""
    __regid__ = 'SubWorkflowExitPoint'

    @property
    def subwf_state(self):
        """the subworkflow state this exit point leaves from"""
        return self.subworkflow_state[0]

    @property
    def destination(self):
        """destination state in the parent workflow, or None when the exit
        point goes back to the state the subworkflow was entered from
        """
        if self.destination_state:
            return self.destination_state[0]
        return None
+
+
class State(AnyEntity):
    """customized class for State entities"""
    __regid__ = 'State'
    fetch_attrs, cw_fetch_order = fetch_config(['name'])
    rest_attr = 'eid'

    def dc_long_title(self):
        """state name followed by its translation"""
        name = self.name
        translated = self._cw._(name)
        return '%s (%s)' % (name, translated)

    @property
    def workflow(self):
        """the workflow this state belongs to

        take care, may be missing in multi-sources configuration
        """
        if self.state_of:
            return self.state_of[0]
        return None
+
+
class TrInfo(AnyEntity):
    """customized class for Transition information entities
    """
    __regid__ = 'TrInfo'
    fetch_attrs, cw_fetch_order = fetch_config(['creation_date', 'comment'],
                                               pclass=None) # don't want modification_date

    @property
    def for_entity(self):
        """the workflowable entity this history record is about"""
        return self.wf_info_for[0]

    @property
    def previous_state(self):
        """the state the entity was in before the transition"""
        return self.from_state[0]

    @property
    def new_state(self):
        """the state the entity reached through the transition"""
        return self.to_state[0]

    @property
    def transition(self):
        """the fired transition, or None for a forced state change
        (change_state with no transition)
        """
        return self.by_transition and self.by_transition[0] or None
+
+
+
class IWorkflowableAdapter(EntityAdapter):
    """base adapter providing workflow helper methods for workflowable entities.
    """
    __regid__ = 'IWorkflowable'
    __select__ = relation_possible('in_state')

    @cached
    def cwetype_workflow(self):
        """return the default workflow for entities of this type"""
        # XXX CWEType method
        wfrset = self._cw.execute('Any WF WHERE ET default_workflow WF, '
                                  'ET name %(et)s', {'et': text_type(self.entity.cw_etype)})
        if wfrset:
            return wfrset.get_entity(0, 0)
        self.warning("can't find any workflow for %s", self.entity.cw_etype)
        return None

    @property
    def main_workflow(self):
        """return current workflow applied to this entity

        the custom_workflow relation takes precedence over the entity type's
        default workflow
        """
        if self.entity.custom_workflow:
            return self.entity.custom_workflow[0]
        return self.cwetype_workflow()

    @property
    def current_workflow(self):
        """return current workflow applied to this entity

        while in a subworkflow this is the current state's workflow, not the
        main one
        """
        return self.current_state and self.current_state.workflow or self.main_workflow

    @property
    def current_state(self):
        """return current state entity"""
        return self.entity.in_state and self.entity.in_state[0] or None

    @property
    def state(self):
        """return current state name"""
        try:
            return self.current_state.name
        except AttributeError:
            # current_state is None: no state set (yet)
            self.warning('entity %s has no state', self.entity)
            return None

    @property
    def printable_state(self):
        """return current state name translated to context's language"""
        state = self.current_state
        if state:
            return self._cw._(state.name)
        return u''

    @property
    def workflow_history(self):
        """return the workflow history for this entity (eg ordered list of
        TrInfo entities)
        """
        return self.entity.reverse_wf_info_for

    def latest_trinfo(self):
        """return the latest transition information for this entity"""
        try:
            return self.workflow_history[-1]
        except IndexError:
            return None

    def possible_transitions(self, type='normal'):
        """generates transition that MAY be fired for the given entity,
        expected to be in this state
        used only by the UI
        """
        if self.current_state is None or self.current_workflow is None:
            return
        rset = self._cw.execute(
            'Any T,TT, TN WHERE S allowed_transition T, S eid %(x)s, '
            'T type TT, T type %(type)s, '
            'T name TN, T transition_of WF, WF eid %(wfeid)s',
            {'x': self.current_state.eid, 'type': text_type(type),
             'wfeid': self.current_workflow.eid})
        # only yield transitions the logged user is allowed to fire
        for tr in rset.entities():
            if tr.may_be_fired(self.entity.eid):
                yield tr

    def subworkflow_input_trinfo(self):
        """return the TrInfo which has be recorded when this entity went into
        the current sub-workflow
        """
        if self.main_workflow.eid == self.current_workflow.eid:
            return # doesn't make sense
        subwfentries = []
        # scan the history, pairing each subworkflow exit with its entry;
        # the last unmatched entry is the one for the current subworkflow
        for trinfo in self.workflow_history:
            if (trinfo.transition and
                trinfo.previous_state.workflow.eid != trinfo.new_state.workflow.eid):
                # entering or leaving a subworkflow
                if (subwfentries and
                    subwfentries[-1].new_state.workflow.eid == trinfo.previous_state.workflow.eid and
                    subwfentries[-1].previous_state.workflow.eid == trinfo.new_state.workflow.eid):
                    # leave
                    del subwfentries[-1]
                else:
                    # enter
                    subwfentries.append(trinfo)
        if not subwfentries:
            return None
        return subwfentries[-1]

    def subworkflow_input_transition(self):
        """return the transition which has went through the current sub-workflow
        """
        return getattr(self.subworkflow_input_trinfo(), 'transition', None)

    def _add_trinfo(self, comment, commentformat, treid=None, tseid=None):
        """record a TrInfo history entry; the actual state change is expected
        to be handled by hooks reacting to TrInfo creation
        """
        kwargs = {}
        if comment is not None:
            kwargs['comment'] = comment
        if commentformat is not None:
            kwargs['comment_format'] = commentformat
        kwargs['wf_info_for'] = self.entity
        if treid is not None:
            kwargs['by_transition'] = self._cw.entity_from_eid(treid)
        if tseid is not None:
            kwargs['to_state'] = self._cw.entity_from_eid(tseid)
        return self._cw.create_entity('TrInfo', **kwargs)

    def _get_transition(self, tr):
        """resolve a transition given by name (or return it unchanged if it
        is already an entity)
        """
        assert self.current_workflow
        if isinstance(tr, string_types):
            _tr = self.current_workflow.transition_by_name(tr)
            assert _tr is not None, 'not a %s transition: %s' % (
                self.__regid__, tr)
            tr = _tr
        return tr

    def fire_transition(self, tr, comment=None, commentformat=None):
        """change the entity's state by firing given transition (name or entity)
        in entity's workflow
        """
        tr = self._get_transition(tr)
        return self._add_trinfo(comment, commentformat, tr.eid)

    def fire_transition_if_possible(self, tr, comment=None, commentformat=None):
        """change the entity's state by firing given transition (name or entity)
        in entity's workflow if this transition is possible
        """
        tr = self._get_transition(tr)
        if any(tr_ for tr_ in self.possible_transitions()
               if tr_.eid == tr.eid):
            self.fire_transition(tr, comment, commentformat)

    def change_state(self, statename, comment=None, commentformat=None, tr=None):
        """change the entity's state to the given state (name or entity) in
        entity's workflow. This method should only by used by manager to fix an
        entity's state when their is no matching transition, otherwise
        fire_transition should be used.
        """
        assert self.current_workflow
        if hasattr(statename, 'eid'):
            stateeid = statename.eid
        else:
            state = self.current_workflow.state_by_name(statename)
            if state is None:
                raise WorkflowException('not a %s state: %s' % (self.__regid__,
                                                                statename))
            stateeid = state.eid
        # XXX try to find matching transition?
        return self._add_trinfo(comment, commentformat, tr and tr.eid, stateeid)

    def set_initial_state(self, statename):
        """set a newly created entity's state to the given state (name or entity)
        in entity's workflow. This is useful if you don't want it to be the
        workflow's initial state.
        """
        assert self.current_workflow
        if hasattr(statename, 'eid'):
            stateeid = statename.eid
        else:
            state = self.current_workflow.state_by_name(statename)
            if state is None:
                raise WorkflowException('not a %s state: %s' % (self.__regid__,
                                                                statename))
            stateeid = state.eid
        # direct in_state update, no TrInfo recorded
        self._cw.execute('SET X in_state S WHERE X eid %(x)s, S eid %(s)s',
                         {'x': self.entity.eid, 's': stateeid})
diff -r e1caf133b81c -r b23d58050076 cubicweb/entity.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/entity.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,1426 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""Base class for entity objects manipulated in clients"""
+
+__docformat__ = "restructuredtext en"
+
+from warnings import warn
+
+from six import text_type, string_types, integer_types
+from six.moves import range
+
+from logilab.common.decorators import cached
+from logilab.common.deprecation import deprecated
+from logilab.common.registry import yes
+from logilab.mtconverter import TransformData, xml_escape
+
+from rql.utils import rqlvar_maker
+from rql.stmts import Select
+from rql.nodes import (Not, VariableRef, Constant, make_relation,
+ Relation as RqlRelation)
+
+from cubicweb import Unauthorized, neg_role
+from cubicweb.utils import support_args
+from cubicweb.rset import ResultSet
+from cubicweb.appobject import AppObject
+from cubicweb.schema import (RQLVocabularyConstraint, RQLConstraint,
+ GeneratedConstraint)
+from cubicweb.rqlrewrite import RQLRewriter
+
+from cubicweb.uilib import soup2xhtml
+from cubicweb.mttransforms import ENGINE
+
# sentinel distinguishing "argument not supplied" from an explicit None
_marker = object()
+
def greater_card(rschema, subjtypes, objtypes, index):
    """Return the first multi-valued cardinality ('+' or '*') found among the
    relation definitions linking `subjtypes` to `objtypes`, or '1' when every
    definition is single-valued.

    `index` selects which side of the cardinality string is inspected
    (0: subject side, 1: object side).
    """
    for styp in subjtypes:
        for otyp in objtypes:
            cardinality = rschema.rdef(styp, otyp).cardinality[index]
            if cardinality in ('+', '*'):
                return cardinality
    return '1'
+
def can_use_rest_path(value):
    """Return True if `value` may safely be used as the last segment of a
    REST URL path.

    Rejects None and empty strings, as well as any value containing '?', '/'
    or '&', which would be interpreted as URL structure (notably when running
    behind Apache mod_proxy).
    """
    if value is None:
        return False
    text = text_type(value)
    if not text:
        return False
    return not any(char in text for char in u'?/&')
+
def rel_vars(rel):
    """Return the (subject, object) variables of an RQL relation node, using
    None for any side that is not a plain variable reference.
    """
    subj_node = rel.children[0]
    obj_node = rel.children[1].children[0]
    return (subj_node.variable if isinstance(subj_node, VariableRef) else None,
            obj_node.variable if isinstance(obj_node, VariableRef) else None)
+
def rel_matches(rel, rtype, role, varname, operator='='):
    """If `rel` is a relation of type `rtype` using `operator`, whose `role`
    side is the variable named `varname`, return the variable on the opposite
    side; otherwise return None.
    """
    if rel.r_type != rtype or rel.children[1].operator != operator:
        return None
    own_idx = 0 if role == 'subject' else 1
    variables = rel_vars(rel)
    if variables[own_idx].name == varname:
        return variables[1 - own_idx]
    return None
+
def build_cstr_with_linkto_infos(cstr, args, searchedvar, evar,
                                 lt_infos, eidvars):
    """restrict vocabulary as much as possible in entity creation,
    based on infos provided by __linkto form param.

    Example based on following schema:

      class works_in(RelationDefinition):
          subject = 'CWUser'
          object = 'Lab'
          cardinality = '1*'
          constraints = [RQLConstraint('S in_group G, O welcomes G')]

      class welcomes(RelationDefinition):
          subject = 'Lab'
          object = 'CWGroup'

    If you create a CWUser in the "scientists" CWGroup you can show
    only the labs that welcome them using:

      lt_infos = {('in_group', 'subject'): 321}

    You get following restriction: 'O welcomes G, G eid 321'

    Returns a GeneratedConstraint, or None when the rewritten snippet still
    references `evar` (in which case the constraint must be discarded).
    """
    st = cstr.snippet_rqlst.copy()
    # replace relations in ST by eid infos from linkto where possible
    for (info_rtype, info_role), eids in lt_infos.items():
        eid = eids[0]  # NOTE: we currently assume a pruned lt_info with only 1 eid
        for rel in st.iget_nodes(RqlRelation):
            targetvar = rel_matches(rel, info_rtype, info_role, evar.name)
            if targetvar is not None:
                if targetvar.name in eidvars:
                    # variable already bound to an eid: drop redundant relation
                    rel.parent.remove(rel)
                else:
                    # bind the target variable to the linked entity's eid
                    eidrel = make_relation(
                        targetvar, 'eid', (targetvar.name, 'Substitute'),
                        Constant)
                    rel.parent.replace(rel, eidrel)
                    args[targetvar.name] = eid
                    eidvars.add(targetvar.name)
    # if modified ST still contains evar references we must discard the
    # constraint, otherwise evar is unknown in the final rql query which can
    # lead to a SQL table cartesian product and multiple occurences of solutions
    evarname = evar.name
    for rel in st.iget_nodes(RqlRelation):
        for variable in rel_vars(rel):
            if variable and evarname == variable.name:
                return
    # else insert snippets into the global tree
    return GeneratedConstraint(st, cstr.mainvars - set(evarname))
+
def pruned_lt_info(eschema, lt_infos):
    """Filter `lt_infos` (__linkto information) down to the entries usable
    for vocabulary restriction: exactly one target eid, and a cardinality of
    '1' or '?' towards the linked entity.
    """
    card_index = {'subject': 0, 'object': 1}
    pruned = {}
    for (rtype, role), eids in lt_infos.items():
        # several target entities: cannot be used for restriction
        if len(eids) != 1:
            continue
        cardinality = eschema.rdef(rtype, role).cardinality[card_index[role]]
        if cardinality in '?1':
            pruned[(rtype, role)] = eids
    return pruned
+
+
class Entity(AppObject):
    """an entity instance has e_schema automagically set on
    the class and instances have access to their issuing cursor.

    A property is set for each attribute and relation on each entity's type
    class. Beware that among attributes, 'eid' is *NEITHER* stored in the
    dict containment (which acts as a cache for other attributes dynamically
    fetched)

    :type e_schema: `cubicweb.schema.EntitySchema`
    :ivar e_schema: the entity's schema

    :type rest_attr: str
    :cvar rest_attr: indicates which attribute should be used to build REST urls
       If `None` is specified (the default), the first unique attribute will
       be used ('eid' if none found)

    :type cw_skip_copy_for: list
    :cvar cw_skip_copy_for: a list of couples (rtype, role) for each relation
       that should be skipped when copying this kind of entity. Note that some
       relations such as composite relations or relations that have '?1' as
       object cardinality are always skipped.
    """
    __registry__ = 'etypes'
    __select__ = yes()

    # class attributes that must be set in class definition
    rest_attr = None
    fetch_attrs = None
    skip_copy_for = ()  # bw compat (< 3.14), use cw_skip_copy_for instead
    cw_skip_copy_for = [('in_state', 'subject')]
    # class attributes set automatically at registration time
    e_schema = None
+
+ @classmethod
+ def __initialize__(cls, schema):
+ """initialize a specific entity class by adding descriptors to access
+ entity type's attributes and relations
+ """
+ etype = cls.__regid__
+ assert etype != 'Any', etype
+ cls.e_schema = eschema = schema.eschema(etype)
+ for rschema, _ in eschema.attribute_definitions():
+ if rschema.type == 'eid':
+ continue
+ setattr(cls, rschema.type, Attribute(rschema.type))
+ mixins = []
+ for rschema, _, role in eschema.relation_definitions():
+ if role == 'subject':
+ attr = rschema.type
+ else:
+ attr = 'reverse_%s' % rschema.type
+ setattr(cls, attr, Relation(rschema, role))
+
    # attributes prefetched by default by the ORM fetch_* methods;
    # subclasses usually override/extend this tuple
    fetch_attrs = ('modification_date',)
+
    @classmethod
    def cw_fetch_order(cls, select, attr, var):
        """This class method may be used to control sort order when multiple
        entities of this type are fetched through ORM methods. Its arguments
        are:

        * `select`, the RQL syntax tree

        * `attr`, the attribute being watched

        * `var`, the variable through which this attribute's value may be
          accessed in the query

        When you want to do some sorting on the given attribute, you should
        modify the syntax tree accordingly. For instance:

        .. sourcecode:: python

          from rql import nodes

          class Version(AnyEntity):
              __regid__ = 'Version'

              fetch_attrs = ('num', 'description', 'in_state')

              @classmethod
              def cw_fetch_order(cls, select, attr, var):
                  if attr == 'num':
                      func = nodes.Function('version_sort_value')
                      func.append(nodes.variable_ref(var))
                      sterm = nodes.SortTerm(func, asc=False)
                      select.add_sort_term(sterm)

        The default implementation calls
        :meth:`~cubicweb.entity.Entity.cw_fetch_unrelated_order`
        """
        cls.cw_fetch_unrelated_order(select, attr, var)
+
    @classmethod
    def cw_fetch_unrelated_order(cls, select, attr, var):
        """This class method may be used to control sort order when multiple entities of
        this type are fetched to use in edition (e.g. propose them to create a
        new relation on an edited entity).

        See :meth:`~cubicweb.entity.Entity.cw_fetch_order` for a
        description of its arguments and usage.

        By default entities will be listed on their modification date descending,
        i.e. you'll get entities recently modified first.
        """
        if attr == 'modification_date':
            select.add_sort_var(var, asc=False)
+
    @classmethod
    def fetch_rql(cls, user, restriction=None, fetchattrs=None, mainvar='X',
                  settype=True, ordermethod='fetch_order'):
        """Return (as a string) the RQL query fetching entities of this type
        together with their `fetch_attrs`, built through :meth:`fetch_rqlst`.
        The `restriction` parameter is deprecated.
        """
        st = cls.fetch_rqlst(user, mainvar=mainvar, fetchattrs=fetchattrs,
                             settype=settype, ordermethod=ordermethod)
        rql = st.as_string()
        if restriction:
            # cannot use RQLRewriter API to insert 'X rtype %(x)s' restriction
            warn('[3.14] fetch_rql: use of `restriction` parameter is '
                 'deprecated, please use fetch_rqlst and supply a syntax'
                 'tree with your restriction instead', DeprecationWarning)
            # splice the extra restriction into the generated WHERE clause
            insert = ' WHERE ' + ','.join(restriction)
            if ' WHERE ' in rql:
                select, where = rql.split(' WHERE ', 1)
                rql = select + insert + ',' + where
            else:
                rql += insert
        return rql
+
    @classmethod
    def fetch_rqlst(cls, user, select=None, mainvar='X', fetchattrs=None,
                    settype=True, ordermethod='fetch_order'):
        """Build (or complete) an RQL Select syntax tree fetching entities of
        this type along with their `fetch_attrs`, readable by `user`.
        """
        if select is None:
            select = Select()
            mainvar = select.get_variable(mainvar)
            select.add_selected(mainvar)
        elif isinstance(mainvar, string_types):
            assert mainvar in select.defined_vars
            mainvar = select.get_variable(mainvar)
        # eases string -> syntax tree test transition: please remove once stable
        select._varmaker = rqlvar_maker(defined=select.defined_vars,
                                        aliases=select.aliases, index=26)
        if settype:
            rel = select.add_type_restriction(mainvar, cls.__regid__)
            # should use 'is_instance_of' instead of 'is' so we retrieve
            # subclasses instances as well
            rel.r_type = 'is_instance_of'
        if fetchattrs is None:
            fetchattrs = cls.fetch_attrs
        cls._fetch_restrictions(mainvar, select, fetchattrs, user, ordermethod)
        return select
+
    @classmethod
    def _fetch_ambiguous_rtypes(cls, select, var, fetchattrs, subjtypes, schema):
        """find rtypes in `fetchattrs` that relate different subject etypes
        taken from (`subjtypes`) to different target etypes; these so called
        "ambiguous" relations, are added directly to the `select` syntax tree
        selection but removed from `fetchattrs` to avoid the fetch recursion
        because we have to choose only one targettype for the recursion and
        adding its own fetch attrs to the selection -when we recurse- would
        filter out the other possible target types from the result set

        `fetchattrs` is mutated in place.
        """
        for attr in fetchattrs.copy():
            rschema = schema.rschema(attr)
            if rschema.final:
                continue
            ttypes = None
            # compare target type sets across all subject types
            for subjtype in subjtypes:
                cur_ttypes = set(rschema.objects(subjtype))
                if ttypes is None:
                    ttypes = cur_ttypes
                elif cur_ttypes != ttypes:
                    # we found an ambiguous relation: remove it from fetchattrs
                    fetchattrs.remove(attr)
                    # ... and add it to the selection
                    targetvar = select.make_variable()
                    select.add_selected(targetvar)
                    rel = make_relation(var, attr, (targetvar,), VariableRef)
                    select.add_restriction(rel)
                    break
+
    @classmethod
    def _fetch_restrictions(cls, mainvar, select, fetchattrs,
                            user, ordermethod='fetch_order', visited=None):
        """Complete `select` with a selection/restriction per attribute in
        `fetchattrs`, recursing into target entity types for non-final
        relations. `visited` guards against infinite recursion on cyclic
        schemas; `ordermethod` names the (class) method driving sort order.
        """
        eschema = cls.e_schema
        if visited is None:
            visited = set((eschema.type,))
        elif eschema.type in visited:
            # avoid infinite recursion
            return
        else:
            visited.add(eschema.type)
        for attr in sorted(fetchattrs):
            try:
                rschema = eschema.subjrels[attr]
            except KeyError:
                cls.warning('skipping fetch_attr %s defined in %s (not found in schema)',
                            attr, cls.__regid__)
                continue
            # XXX takefirst=True to remove warning triggered by ambiguous inlined relations
            rdef = eschema.rdef(attr, takefirst=True)
            if not user.matching_groups(rdef.get_groups('read')):
                # user can't read this attribute/relation: don't fetch it
                continue
            if rschema.final or rdef.cardinality[0] in '?1':
                var = select.make_variable()
                select.add_selected(var)
                rel = make_relation(mainvar, attr, (var,), VariableRef)
                select.add_restriction(rel)
            else:
                cls.warning('bad relation %s specified in fetch attrs for %s',
                            attr, cls)
                continue
            if not rschema.final:
                # XXX we need outer join in case the relation is not mandatory
                # (card == '?') *or if the entity is being added*, since in
                # that case the relation may still be missing. As we miss this
                # later information here, systematically add it.
                rel.change_optional('right')
                targettypes = rschema.objects(eschema.type)
                vreg = user._cw.vreg  # XXX user._cw.vreg iiiirk
                etypecls = vreg['etypes'].etype_class(targettypes[0])
                if len(targettypes) > 1:
                    # find fetch_attrs common to all destination types
                    fetchattrs = vreg['etypes'].fetch_attrs(targettypes)
                    # ... and handle ambiguous relations
                    cls._fetch_ambiguous_rtypes(select, var, fetchattrs,
                                                targettypes, vreg.schema)
                else:
                    fetchattrs = etypecls.fetch_attrs
                # recurse on the target type's own fetch attributes
                etypecls._fetch_restrictions(var, select, fetchattrs,
                                             user, None, visited=visited)
            if ordermethod is not None:
                # prefer the deprecated non-prefixed method when defined,
                # falling back to the cw_ prefixed one
                try:
                    cmeth = getattr(cls, ordermethod)
                    warn('[3.14] %s %s class method should be renamed to cw_%s'
                         % (cls.__regid__, ordermethod, ordermethod),
                         DeprecationWarning)
                except AttributeError:
                    cmeth = getattr(cls, 'cw_' + ordermethod)
                if support_args(cmeth, 'select'):
                    # modern signature: the method mutates the syntax tree
                    cmeth(select, attr, var)
                else:
                    warn('[3.14] %s should now take (select, attr, var) and '
                         'modify the syntax tree when desired instead of '
                         'returning something' % cmeth, DeprecationWarning)
                    # legacy signature: parse the returned "VAR ORDER" string
                    orderterm = cmeth(attr, var.name)
                    if orderterm is not None:
                        try:
                            var, order = orderterm.split()
                        except ValueError:
                            if '(' in orderterm:
                                cls.error('ignore %s until %s is upgraded',
                                          orderterm, cmeth)
                                orderterm = None
                            elif not ' ' in orderterm.strip():
                                var = orderterm
                                order = 'ASC'
                        if orderterm is not None:
                            select.add_sort_var(select.get_variable(var),
                                                order == 'ASC')
+
+ @classmethod
+ @cached
+ def cw_rest_attr_info(cls):
+ """this class method return an attribute name to be used in URL for
+ entities of this type and a boolean flag telling if its value should be
+ checked for uniqness.
+
+ The attribute returned is, in order of priority:
+
+ * class's `rest_attr` class attribute
+ * an attribute defined as unique in the class'schema
+ * 'eid'
+ """
+ mainattr, needcheck = 'eid', True
+ if cls.rest_attr:
+ mainattr = cls.rest_attr
+ needcheck = not cls.e_schema.has_unique_values(mainattr)
+ else:
+ for rschema in cls.e_schema.subject_relations():
+ if (rschema.final
+ and rschema not in ('eid', 'cwuri')
+ and cls.e_schema.has_unique_values(rschema)
+ and cls.e_schema.rdef(rschema.type).cardinality[0] == '1'):
+ mainattr = str(rschema)
+ needcheck = False
+ break
+ if mainattr == 'eid':
+ needcheck = False
+ return mainattr, needcheck
+
    @classmethod
    def _cw_build_entity_query(cls, kwargs):
        """Build the RQL fragment, query arguments and deferred relations for
        an entity creation from `kwargs` (as given to `cw_instantiate`).

        Returns (rql, qargs, pendingrels, attrcache) where `rql` is the
        relations/restrictions part of an INSERT query, `qargs` its substitution
        dict, `pendingrels` a list of (attr, role, values) triples to handle
        separately (None values and multi-valued relations), and `attrcache`
        the attribute values used to prime the new entity's cache.
        """
        relations = []
        restrictions = set()
        pendingrels = []
        eschema = cls.e_schema
        qargs = {}
        attrcache = {}
        for attr, value in kwargs.items():
            # 'reverse_<rtype>' keys denote object-role relations
            if attr.startswith('reverse_'):
                attr = attr[len('reverse_'):]
                role = 'object'
            else:
                role = 'subject'
            assert eschema.has_relation(attr, role), '%s %s not found on %s' % (attr, role, eschema)
            rschema = eschema.subjrels[attr] if role == 'subject' else eschema.objrels[attr]
            if not rschema.final and isinstance(value, (tuple, list, set, frozenset)):
                if len(value) == 0:
                    continue  # avoid crash with empty IN clause
                elif len(value) == 1:
                    # single element: handle it as a plain value below
                    value = next(iter(value))
                else:
                    # prepare IN clause
                    pendingrels.append( (attr, role, value) )
                    continue
            if rschema.final:  # attribute
                relations.append('X %s %%(%s)s' % (attr, attr))
                attrcache[attr] = value
            elif value is None:
                # None means "delete the relation": defer to pending handling
                pendingrels.append( (attr, role, value) )
            else:
                rvar = attr.upper()
                if role == 'object':
                    relations.append('%s %s X' % (rvar, attr))
                else:
                    relations.append('X %s %s' % (attr, rvar))
                restriction = '%s eid %%(%s)s' % (rvar, attr)
                if not restriction in restrictions:
                    restrictions.add(restriction)
                if hasattr(value, 'eid'):
                    value = value.eid
                qargs[attr] = value
        rql = u''
        if relations:
            rql += ', '.join(relations)
        if restrictions:
            rql += ' WHERE %s' % ', '.join(restrictions)
        return rql, qargs, pendingrels, attrcache
+
+ @classmethod
+ def _cw_handle_pending_relations(cls, eid, pendingrels, execute):
+ for attr, role, values in pendingrels:
+ if role == 'object':
+ restr = 'Y %s X' % attr
+ else:
+ restr = 'X %s Y' % attr
+ if values is None:
+ execute('DELETE %s WHERE X eid %%(x)s' % restr, {'x': eid})
+ continue
+ execute('SET %s WHERE X eid %%(x)s, Y eid IN (%s)' % (
+ restr, ','.join(str(getattr(r, 'eid', r)) for r in values)),
+ {'x': eid}, build_descr=False)
+
    @classmethod
    def cw_instantiate(cls, execute, **kwargs):
        """add a new entity of this given type

        Example (in a shell session):

        >>> companycls = vreg['etypes'].etype_class('Company')
        >>> personcls = vreg['etypes'].etype_class('Person')
        >>> c = companycls.cw_instantiate(session.execute, name=u'Logilab')
        >>> p = personcls.cw_instantiate(session.execute, firstname=u'John', lastname=u'Doe',
        ...                              works_for=c)

        You can also set relations where the entity has 'object' role by
        prefixing the relation name by 'reverse_'. Also, relation values may be
        an entity or eid, a list of entities or eids.
        """
        rql, qargs, pendingrels, attrcache = cls._cw_build_entity_query(kwargs)
        if rql:
            rql = 'INSERT %s X: %s' % (cls.__regid__, rql)
        else:
            rql = 'INSERT %s X' % (cls.__regid__)
        try:
            created = execute(rql, qargs).get_entity(0, 0)
        except IndexError:
            raise Exception('could not create a %r with %r (%r)' %
                            (cls.__regid__, rql, qargs))
        # prime the attribute cache, then apply deferred relation statements
        created._cw_update_attr_cache(attrcache)
        cls._cw_handle_pending_relations(created.eid, pendingrels, execute)
        return created
+
    def __init__(self, req, rset=None, row=None, col=0):
        """Initialize an entity, optionally bound to the result set cell
        holding its eid."""
        AppObject.__init__(self, req, rset=rset, row=row, col=col)
        self._cw_related_cache = {}   # related entities cache
        self._cw_adapters_cache = {}  # interface name -> adapter (or None)
        if rset is not None:
            self.eid = rset[row][col]
        else:
            # entity not yet created: no eid available
            self.eid = None
        self._cw_is_saved = True
        self.cw_attr_cache = {}
+
+ def __repr__(self):
+ return '' % (
+ self.e_schema, self.eid, list(self.cw_attr_cache), id(self))
+
    def __lt__(self, other):
        # entities define no meaningful ordering; returning NotImplemented
        # makes '<' comparisons raise TypeError instead of silently ordering
        return NotImplemented
+
+ def __eq__(self, other):
+ if isinstance(self.eid, integer_types):
+ return self.eid == other.eid
+ return self is other
+
+ def __hash__(self):
+ if isinstance(self.eid, integer_types):
+ return self.eid
+ return super(Entity, self).__hash__()
+
    def _cw_update_attr_cache(self, attrcache):
        """Merge `attrcache` into the entity's attribute cache, dropping any
        attribute flagged (via `_cw_dont_cache_attribute`) as not cacheable
        for the current transaction."""
        trdata = self._cw.transaction_data
        uncached_attrs = trdata.get('%s.storage-special-process-attrs' % self.eid, set())
        uncached_attrs.update(trdata.get('%s.dont-cache-attrs' % self.eid, set()))
        for attr in uncached_attrs:
            # evict from both the incoming values and the existing cache
            attrcache.pop(attr, None)
            self.cw_attr_cache.pop(attr, None)
        self.cw_attr_cache.update(attrcache)
+
    def _cw_dont_cache_attribute(self, attr, repo_side=False):
        """Called when some attribute has been transformed by a *storage*,
        hence the original value should not be cached **by anyone**.

        For example we have a special "fs_importing" mode in BFSS
        where a file path is given as attribute value and stored as is
        in the data base. Later access to the attribute will provide
        the content of the file at the specified path. We do not want
        the "filepath" value to be cached.

        :param repo_side: when True, also flag the attribute so repository
          side caches are bypassed (storage special processing).
        """
        trdata = self._cw.transaction_data
        trdata.setdefault('%s.dont-cache-attrs' % self.eid, set()).add(attr)
        if repo_side:
            trdata.setdefault('%s.storage-special-process-attrs' % self.eid, set()).add(attr)
+
+ def __json_encode__(self):
+ """custom json dumps hook to dump the entity's eid
+ which is not part of dict structure itself
+ """
+ dumpable = self.cw_attr_cache.copy()
+ dumpable['eid'] = self.eid
+ return dumpable
+
+ def cw_adapt_to(self, interface):
+ """return an adapter the entity to the given interface name.
+
+ return None if it can not be adapted.
+ """
+ cache = self._cw_adapters_cache
+ try:
+ return cache[interface]
+ except KeyError:
+ adapter = self._cw.vreg['adapters'].select_or_none(
+ interface, self._cw, entity=self)
+ cache[interface] = adapter
+ return adapter
+
+ def has_eid(self): # XXX cw_has_eid
+ """return True if the entity has an attributed eid (False
+ meaning that the entity has to be created
+ """
+ try:
+ int(self.eid)
+ return True
+ except (ValueError, TypeError):
+ return False
+
+ def cw_is_saved(self):
+ """during entity creation, there is some time during which the entity
+ has an eid attributed though it's not saved (eg during
+ 'before_add_entity' hooks). You can use this method to ensure the entity
+ has an eid *and* is saved in its source.
+ """
+ return self.has_eid() and self._cw_is_saved
+
    @cached
    def cw_metainformation(self):
        """Return this entity's meta information dict (at least 'source' and
        'extid' keys), with 'source' replaced by its full source definition."""
        metas = self._cw.entity_metas(self.eid)
        metas['source'] = self._cw.source_defs()[metas['source']]
        return metas
+
    def cw_check_perm(self, action):
        """Check (via the schema) that the user may perform `action` on this
        entity; presumably raises on denial -- see e_schema.check_perm."""
        self.e_schema.check_perm(self._cw, action, eid=self.eid)
+
    def cw_has_perm(self, action):
        """Return whether the user may perform `action` on this entity,
        delegating to the schema permission check."""
        return self.e_schema.has_perm(self._cw, action, eid=self.eid)
+
    def view(self, __vid, __registry='views', w=None, initargs=None, **kwargs): # XXX cw_view
        """shortcut to apply a view on this entity

        Extra keyword arguments are used both for view selection (merged into
        `initargs` -- note the caller's dict is mutated when provided) and
        passed to render().
        """
        if initargs is None:
            initargs = kwargs
        else:
            initargs.update(kwargs)
        view = self._cw.vreg[__registry].select(__vid, self._cw, rset=self.cw_rset,
                                                row=self.cw_row, col=self.cw_col,
                                                **initargs)
        return view.render(row=self.cw_row, col=self.cw_col, w=w, **kwargs)
+
    def absolute_url(self, *args, **kwargs): # XXX cw_url
        """return an absolute url to view this entity

        The optional single positional argument is the url "method" (view id);
        remaining keyword arguments are forwarded to build_url.
        """
        # use *args since we don't want first argument to be "anonymous" to
        # avoid potential clash with kwargs
        if args:
            assert len(args) == 1, 'only 0 or 1 non-named-argument expected'
            method = args[0]
        else:
            method = None
        # in linksearch mode, we don't want external urls else selecting
        # the object for use in the relation is tricky
        # XXX search_state is web specific
        use_ext_id = False
        if 'base_url' not in kwargs and \
               getattr(self._cw, 'search_state', ('normal',))[0] == 'normal':
            sourcemeta = self.cw_metainformation()['source']
            if sourcemeta.get('use-cwuri-as-url'):
                return self.cwuri # XXX consider kwargs?
            if sourcemeta.get('base-url'):
                kwargs['base_url'] = sourcemeta['base-url']
                use_ext_id = True
        if method in (None, 'view'):
            kwargs['_restpath'] = self.rest_path(use_ext_id)
        else:
            # non-view methods address the entity through an rql query
            kwargs['rql'] = 'Any X WHERE X eid %s' % self.eid
        return self._cw.build_url(method, **kwargs)
+
+ def rest_path(self, use_ext_eid=False): # XXX cw_rest_path
+ """returns a REST-like (relative) path for this entity"""
+ mainattr, needcheck = self.cw_rest_attr_info()
+ etype = str(self.e_schema)
+ path = etype.lower()
+ fallback = False
+ if mainattr != 'eid':
+ value = getattr(self, mainattr)
+ if not can_use_rest_path(value):
+ mainattr = 'eid'
+ path = None
+ elif needcheck:
+ # make sure url is not ambiguous
+ try:
+ nbresults = self.__unique
+ except AttributeError:
+ rql = 'Any COUNT(X) WHERE X is %s, X %s %%(value)s' % (
+ etype, mainattr)
+ nbresults = self.__unique = self._cw.execute(rql, {'value' : value})[0][0]
+ if nbresults != 1: # ambiguity?
+ mainattr = 'eid'
+ path = None
+ if mainattr == 'eid':
+ if use_ext_eid:
+ value = self.cw_metainformation()['extid']
+ else:
+ value = self.eid
+ if path is None:
+ # fallback url: / url is used as cw entities uri,
+ # prefer it to //eid/
+ return text_type(value)
+ return u'%s/%s' % (path, self._cw.url_quote(value))
+
+ def cw_attr_metadata(self, attr, metadata):
+ """return a metadata for an attribute (None if unspecified)"""
+ value = getattr(self, '%s_%s' % (attr, metadata), None)
+ if value is None and metadata == 'encoding':
+ value = self._cw.vreg.property_value('ui.encoding')
+ return value
+
    def printable_value(self, attr, value=_marker, attrtype=None,
                        format='text/html', displaytime=True): # XXX cw_printable_value
        """return a displayable value (i.e. unicode string) which may contains
        html tags

        When `value` is not given, it is fetched from the entity; strings are
        stripped, and formatted String/Bytes attributes are converted to
        `format` through the MIME type converter engine.
        """
        attr = str(attr)
        if value is _marker:
            value = getattr(self, attr)
        if isinstance(value, string_types):
            value = value.strip()
        if value is None or value == '': # don't use "not", 0 is an acceptable value
            return u''
        if attrtype is None:
            attrtype = self.e_schema.destination(attr)
        props = self.e_schema.rdef(attr)
        if attrtype == 'String':
            # internationalized *and* formatted string such as schema
            # description...
            if props.internationalizable:
                value = self._cw._(value)
            attrformat = self.cw_attr_metadata(attr, 'format')
            if attrformat:
                return self._cw_mtc_transform(value, attrformat, format,
                                              self._cw.encoding)
        elif attrtype == 'Bytes':
            attrformat = self.cw_attr_metadata(attr, 'format')
            if attrformat:
                encoding = self.cw_attr_metadata(attr, 'encoding')
                return self._cw_mtc_transform(value.getvalue(), attrformat, format,
                                              encoding)
            # unformatted binary data cannot be displayed
            return u''
        value = self._cw.printable_value(attrtype, value, props,
                                         displaytime=displaytime)
        if format == 'text/html':
            value = xml_escape(value)
        return value
+
+ def _cw_mtc_transform(self, data, format, target_format, encoding,
+ _engine=ENGINE):
+ trdata = TransformData(data, format, encoding, appobject=self)
+ data = _engine.convert(trdata, target_format).decode()
+ if target_format == 'text/html':
+ data = soup2xhtml(data, self._cw.encoding)
+ return data
+
+ # entity cloning ##########################################################
+
+ def copy_relations(self, ceid): # XXX cw_copy_relations
+ """copy relations of the object with the given eid on this
+ object (this method is called on the newly created copy, and
+ ceid designates the original entity).
+
+ By default meta and composite relations are skipped.
+ Overrides this if you want another behaviour
+ """
+ assert self.has_eid()
+ execute = self._cw.execute
+ skip_copy_for = {'subject': set(), 'object': set()}
+ for rtype in self.skip_copy_for:
+ skip_copy_for['subject'].add(rtype)
+ warn('[3.14] skip_copy_for on entity classes (%s) is deprecated, '
+ 'use cw_skip_for instead with list of couples (rtype, role)' % self.cw_etype,
+ DeprecationWarning)
+ for rtype, role in self.cw_skip_copy_for:
+ assert role in ('subject', 'object'), role
+ skip_copy_for[role].add(rtype)
+ for rschema in self.e_schema.subject_relations():
+ if rschema.type in skip_copy_for['subject']:
+ continue
+ if rschema.final or rschema.meta or rschema.rule:
+ continue
+ # skip already defined relations
+ if getattr(self, rschema.type):
+ continue
+ # XXX takefirst=True to remove warning triggered by ambiguous relations
+ rdef = self.e_schema.rdef(rschema, takefirst=True)
+ # skip composite relation
+ if rdef.composite:
+ continue
+ # skip relation with card in ?1 else we either change the copied
+ # object (inlined relation) or inserting some inconsistency
+ if rdef.cardinality[1] in '?1':
+ continue
+ rql = 'SET X %s V WHERE X eid %%(x)s, Y eid %%(y)s, Y %s V' % (
+ rschema.type, rschema.type)
+ execute(rql, {'x': self.eid, 'y': ceid})
+ self.cw_clear_relation_cache(rschema.type, 'subject')
+ for rschema in self.e_schema.object_relations():
+ if rschema.meta or rschema.rule:
+ continue
+ # skip already defined relations
+ if self.related(rschema.type, 'object'):
+ continue
+ if rschema.type in skip_copy_for['object']:
+ continue
+ # XXX takefirst=True to remove warning triggered by ambiguous relations
+ rdef = self.e_schema.rdef(rschema, 'object', takefirst=True)
+ # skip composite relation
+ if rdef.composite:
+ continue
+ # skip relation with card in ?1 else we either change the copied
+ # object (inlined relation) or inserting some inconsistency
+ if rdef.cardinality[0] in '?1':
+ continue
+ rql = 'SET V %s X WHERE X eid %%(x)s, Y eid %%(y)s, V %s Y' % (
+ rschema.type, rschema.type)
+ execute(rql, {'x': self.eid, 'y': ceid})
+ self.cw_clear_relation_cache(rschema.type, 'object')
+
+ # data fetching methods ###################################################
+
    @cached
    def as_rset(self): # XXX .cw_as_rset
        """returns a resultset containing `self` information"""
        rset = ResultSet([(self.eid,)], 'Any X WHERE X eid %(x)s',
                         {'x': self.eid}, [(self.cw_etype,)])
        # bind the rset to the current request/session
        rset.req = self._cw
        return rset
+
    def _cw_to_complete_relations(self):
        """Yield (rschema, 'subject') couples for the inlined subject
        relations that .complete() should fetch, restricted to those whose
        every target entity type is readable by the user."""
        for rschema in self.e_schema.subject_relations():
            if rschema.final:
                continue
            targets = rschema.objects(self.e_schema)
            if rschema.inlined:
                matching_groups = self._cw.user.matching_groups
                # NOTE(review): the second operand only checks that the rdef
                # *has* read groups, not that the user belongs to them; this
                # may be a missing matching_groups(...) wrap -- confirm
                if all(matching_groups(e.get_groups('read')) and
                       rschema.rdef(self.e_schema, e).get_groups('read')
                       for e in targets):
                    yield rschema, 'subject'
+
    def _cw_to_complete_attributes(self, skip_bytes=True, skip_pwd=True):
        """Yield the attribute names that .complete() should fetch, skipping
        Bytes/Password attributes on demand and caching None for attributes
        the user may not read."""
        for rschema, attrschema in self.e_schema.attribute_definitions():
            # skip binary data by default
            if skip_bytes and attrschema.type == 'Bytes':
                continue
            attr = rschema.type
            if attr == 'eid':
                continue
            # password retrieval is blocked at the repository server level
            rdef = rschema.rdef(self.e_schema, attrschema)
            if not self._cw.user.matching_groups(rdef.get_groups('read')) \
                   or (attrschema.type == 'Password' and skip_pwd):
                # unreadable: cache None so later access won't query again
                self.cw_attr_cache[attr] = None
                continue
            yield attr
+
    # set once complete() has fetched everything, to avoid redundant queries
    _cw_completed = False
    def complete(self, attributes=None, skip_bytes=True, skip_pwd=True): # XXX cw_complete
        """complete this entity by adding missing attributes (i.e. query the
        repository to fill the entity)

        :type skip_bytes: bool
        :param skip_bytes:
          if true, attribute of type Bytes won't be considered

        When `attributes` is None, all completable attributes plus inlined
        0..1 relations are fetched in a single query.
        """
        assert self.has_eid()
        if self._cw_completed:
            return
        if attributes is None:
            self._cw_completed = True
        varmaker = rqlvar_maker()
        V = next(varmaker)
        rql = ['WHERE %s eid %%(x)s' % V]
        selected = []
        for attr in (attributes or self._cw_to_complete_attributes(skip_bytes, skip_pwd)):
            # if attribute already in entity, nothing to do
            if attr in self.cw_attr_cache:
                continue
            # case where attribute must be completed, but is not yet in entity
            var = next(varmaker)
            rql.append('%s %s %s' % (V, attr, var))
            selected.append((attr, var))
        # +1 since this doesn't include the main variable
        lastattr = len(selected) + 1
        # don't fetch extra relation if attributes specified or if the entity is
        # coming from an external source (may lead to error)
        if attributes is None and self.cw_metainformation()['source']['uri'] == 'system':
            # fetch additional relations (restricted to 0..1 relations)
            for rschema, role in self._cw_to_complete_relations():
                rtype = rschema.type
                if self.cw_relation_cached(rtype, role):
                    continue
                # at this point we suppose that:
                # * this is a inlined relation
                # * entity (self) is the subject
                # * user has read perm on the relation and on the target entity
                assert rschema.inlined
                assert role == 'subject'
                var = next(varmaker)
                # keep outer join anyway, we don't want .complete to crash on
                # missing mandatory relation (see #1058267)
                rql.append('%s %s %s?' % (V, rtype, var))
                selected.append(((rtype, role), var))
        if selected:
            # select V, we need it as the left most selected variable
            # if some outer join are included to fetch inlined relations
            rql = 'Any %s,%s %s' % (V, ','.join(var for attr, var in selected),
                                    ','.join(rql))
            try:
                rset = self._cw.execute(rql, {'x': self.eid}, build_descr=False)[0]
            except IndexError:
                raise Exception('unable to fetch attributes for entity with eid %s'
                                % self.eid)
            # handle attributes
            for i in range(1, lastattr):
                self.cw_attr_cache[str(selected[i-1][0])] = rset[i]
            # handle relations
            for i in range(lastattr, len(rset)):
                rtype, role = selected[i-1][0]
                value = rset[i]
                if value is None:
                    # missing optional relation: cache an empty result set
                    rrset = ResultSet([], rql, {'x': self.eid})
                    rrset.req = self._cw
                else:
                    rrset = self._cw.eid_rset(value)
                self.cw_set_relation_cache(rtype, role, rrset)
+
+    def cw_attr_value(self, name):
+        """get value for the attribute relation `name`, query the repository
+        to get the value if necessary.
+
+        :type name: str
+        :param name: name of the attribute to get
+        """
+        # fast path: value already present in the attribute cache
+        try:
+            return self.cw_attr_cache[name]
+        except KeyError:
+            # entity not saved yet: there is nothing to fetch from the repo
+            if not self.cw_is_saved():
+                return None
+            rql = "Any A WHERE X eid %%(x)s, X %s A" % name
+            try:
+                rset = self._cw.execute(rql, {'x': self.eid})
+            except Unauthorized:
+                # user may not read this attribute: cache None so we don't
+                # re-query on every access
+                self.cw_attr_cache[name] = value = None
+            else:
+                assert rset.rowcount <= 1, (self, rql, rset.rowcount)
+                try:
+                    self.cw_attr_cache[name] = value = rset.rows[0][0]
+                except IndexError:
+                    # probably a multisource error
+                    self.critical("can't get value for attribute %s of entity with eid %s",
+                                  name, self.eid)
+                    if self.e_schema.destination(name) == 'String':
+                        self.cw_attr_cache[name] = value = self._cw._('unaccessible')
+                    else:
+                        self.cw_attr_cache[name] = value = None
+            return value
+
+    def related(self, rtype, role='subject', limit=None, entities=False, # XXX .cw_related
+                safe=False, targettypes=None):
+        """returns a resultset of related entities
+
+        :param rtype:
+          the name of the relation, aka relation type
+        :param role:
+          the role played by 'self' in the relation ('subject' or 'object')
+        :param limit:
+          resultset's maximum size
+        :param entities:
+          if True, the entities are returned; if False, a result set is returned
+        :param safe:
+          if True, an empty rset/list of entities will be returned in case of
+          :exc:`Unauthorized`, else (the default), the exception is propagated
+        :param targettypes:
+          a tuple of target entity types to restrict the query
+        """
+        rtype = str(rtype)
+        # Caching restricted/limited results is best avoided.
+        cacheable = limit is None and targettypes is None
+        if cacheable:
+            cache_key = '%s_%s' % (rtype, role)
+            if cache_key in self._cw_related_cache:
+                return self._cw_related_cache[cache_key][entities]
+        if not self.has_eid():
+            # unsaved entity can't be related to anything yet
+            if entities:
+                return []
+            return self._cw.empty_rset()
+        rql = self.cw_related_rql(rtype, role, limit=limit, targettypes=targettypes)
+        try:
+            rset = self._cw.execute(rql, {'x': self.eid})
+        except Unauthorized:
+            if not safe:
+                raise
+            rset = self._cw.empty_rset()
+        if entities:
+            if cacheable:
+                # fill the cache, then recurse so the cached entity list is
+                # what gets returned
+                self.cw_set_relation_cache(rtype, role, rset)
+                return self.related(rtype, role, entities=entities)
+            return list(rset.entities())
+        else:
+            return rset
+
+    def cw_related_rql(self, rtype, role='subject', targettypes=None, limit=None):
+        """return the RQL query string fetching entities related through
+        `rtype` (see :meth:`cw_related_rqlst` for parameters)"""
+        return self.cw_related_rqlst(
+            rtype, role=role, targettypes=targettypes, limit=limit).as_string()
+
+    def cw_related_rqlst(self, rtype, role='subject', targettypes=None,
+                         limit=None, sort_terms=None):
+        """Return the select node of the RQL query of entities related through
+        `rtype` with this entity as `role`, possibly filtered by
+        `targettypes`.
+
+        The RQL query can be given a `limit` and sort terms with `sort_terms`
+        arguments being a sequence of ``(<relation name>, <sort ascending>)``
+        (e.g. ``[('name', True), ('modification_date', False)]`` would lead to
+        a sorting by ``name``, ascending and then by ``modification_date``,
+        descending). If `sort_terms` is not specified the default sorting is by
+        ``modification_date``, descending.
+        """
+        vreg = self._cw.vreg
+        rschema = vreg.schema[rtype]
+        select = Select()
+        # X: related entities searched for, E: this entity
+        mainvar, evar = select.get_variable('X'), select.get_variable('E')
+        select.add_selected(mainvar)
+        if limit is not None:
+            select.set_limit(limit)
+        select.add_eid_restriction(evar, 'x', 'Substitute')
+        if role == 'subject':
+            rel = make_relation(evar, rtype, (mainvar,), VariableRef)
+            select.add_restriction(rel)
+            if targettypes is None:
+                targettypes = rschema.objects(self.e_schema)
+            else:
+                select.add_constant_restriction(mainvar, 'is',
+                                                targettypes, 'etype')
+            gcard = greater_card(rschema, (self.e_schema,), targettypes, 0)
+        else:
+            rel = make_relation(mainvar, rtype, (evar,), VariableRef)
+            select.add_restriction(rel)
+            if targettypes is None:
+                targettypes = rschema.subjects(self.e_schema)
+            else:
+                select.add_constant_restriction(mainvar, 'is', targettypes,
+                                                'etype')
+            gcard = greater_card(rschema, targettypes, (self.e_schema,), 1)
+        etypecls = vreg['etypes'].etype_class(targettypes[0])
+        if len(targettypes) > 1:
+            fetchattrs = vreg['etypes'].fetch_attrs(targettypes)
+            self._fetch_ambiguous_rtypes(select, mainvar, fetchattrs,
+                                         targettypes, vreg.schema)
+        else:
+            fetchattrs = etypecls.fetch_attrs
+        etypecls.fetch_rqlst(self._cw.user, select, mainvar, fetchattrs,
+                             settype=False)
+        # optimisation: remove ORDERBY if cardinality is 1 or ? (though
+        # greater_card return 1 for those both cases)
+        if gcard == '1':
+            select.remove_sort_terms()
+        elif not select.orderby:
+            # Build a mapping (rtype, node) for relations usable for sorting.
+            sorting_relations = {}
+            for r in select.where.get_nodes(RqlRelation):
+                lhs, rhs = r.children
+                if lhs.variable != mainvar:
+                    continue
+                if r.operator() != '=':
+                    continue
+                rhs_term = rhs.children[0]
+                if not isinstance(rhs_term, VariableRef):
+                    continue
+                sorting_relations[r.r_type] = r
+            sort_terms = sort_terms or [('modification_date', False)]
+            for term, order in sort_terms:
+                # Add a relation for sorting only if it is not only retrieved
+                # (e.g. modification_date) instead of adding another variable
+                # for sorting. This should not be problematic, but it is with
+                # sqlserver, see ticket #694445.
+                rel = sorting_relations.get(term)
+                if rel is None:
+                    mdvar = select.make_variable()
+                    rel = make_relation(mainvar, term, (mdvar,), VariableRef)
+                    select.add_restriction(rel)
+                var = rel.children[1].children[0].variable
+                select.add_sort_var(var, asc=order)
+        return select
+
+ # generic vocabulary methods ##############################################
+
+ def cw_linkable_rql(self, rtype, targettype, role, ordermethod=None,
+ vocabconstraints=True, lt_infos={}, limit=None):
+ """build a rql to fetch targettype entities either related or unrelated
+ to this entity using (rtype, role) relation.
+
+ Consider relation permissions so that returned entities may be actually
+ linked by `rtype`.
+
+ `lt_infos` are supplementary informations, usually coming from __linkto
+ parameter, that can help further restricting the results in case current
+ entity is not yet created. It is a dict describing entities the current
+ entity will be linked to, which keys are (rtype, role) tuples and values
+ are a list of eids.
+ """
+ return self._cw_compute_linkable_rql(rtype, targettype, role, ordermethod=None,
+ vocabconstraints=vocabconstraints,
+ lt_infos=lt_infos, limit=limit,
+ unrelated_only=False)
+
+ def cw_unrelated_rql(self, rtype, targettype, role, ordermethod=None,
+ vocabconstraints=True, lt_infos={}, limit=None):
+ """build a rql to fetch `targettype` entities unrelated to this entity
+ using (rtype, role) relation.
+
+ Consider relation permissions so that returned entities may be actually
+ linked by `rtype`.
+
+ `lt_infos` are supplementary informations, usually coming from __linkto
+ parameter, that can help further restricting the results in case current
+ entity is not yet created. It is a dict describing entities the current
+ entity will be linked to, which keys are (rtype, role) tuples and values
+ are a list of eids.
+ """
+ return self._cw_compute_linkable_rql(rtype, targettype, role, ordermethod=None,
+ vocabconstraints=vocabconstraints,
+ lt_infos=lt_infos, limit=limit,
+ unrelated_only=True)
+
+    def _cw_compute_linkable_rql(self, rtype, targettype, role, ordermethod=None,
+                                 vocabconstraints=True, lt_infos={}, limit=None,
+                                 unrelated_only=False):
+        """build a rql to fetch `targettype` entities that may be related to
+        this entity using the (rtype, role) relation.
+
+        By default (unrelated_only=False), this includes the already linked
+        entities as well as the unrelated ones. If `unrelated_only` is True, the
+        rql filters out the already related entities.
+        """
+        ordermethod = ordermethod or 'fetch_unrelated_order'
+        rschema = self._cw.vreg.schema.rschema(rtype)
+        rdef = rschema.role_rdef(self.e_schema, targettype, role)
+        rewriter = RQLRewriter(self._cw)
+        select = Select()
+        # initialize some variables according to the `role` of `self` in the
+        # relation (variable names must respect constraints conventions):
+        # * variable for myself (`evar`)
+        # * variable for searched entities (`searchedvar`)
+        if role == 'subject':
+            evar = subjvar = select.get_variable('S')
+            searchedvar = objvar = select.get_variable('O')
+        else:
+            searchedvar = subjvar = select.get_variable('S')
+            evar = objvar = select.get_variable('O')
+        select.add_selected(searchedvar)
+        if limit is not None:
+            select.set_limit(limit)
+        # initialize some variables according to `self` existence
+        if rdef.role_cardinality(neg_role(role)) in '?1':
+            # if cardinality in '1?', we want a target entity which isn't
+            # already linked using this relation
+            variable = select.make_variable()
+            if role == 'subject':
+                rel = make_relation(variable, rtype, (searchedvar,), VariableRef)
+            else:
+                rel = make_relation(searchedvar, rtype, (variable,), VariableRef)
+            select.add_restriction(Not(rel))
+        elif self.has_eid() and unrelated_only:
+            # elif we have an eid, we don't want a target entity which is
+            # already linked to ourself through this relation
+            rel = make_relation(subjvar, rtype, (objvar,), VariableRef)
+            select.add_restriction(Not(rel))
+        if self.has_eid():
+            rel = make_relation(evar, 'eid', ('x', 'Substitute'), Constant)
+            select.add_restriction(rel)
+            args = {'x': self.eid}
+            if role == 'subject':
+                sec_check_args = {'fromeid': self.eid}
+            else:
+                sec_check_args = {'toeid': self.eid}
+            existant = None # instead of 'SO', improve perfs
+        else:
+            args = {}
+            sec_check_args = {}
+            existant = searchedvar.name
+            # undefine unused evar, or the type resolver will consider it
+            select.undefine_variable(evar)
+        # retrieve entity class for targettype to compute base rql
+        etypecls = self._cw.vreg['etypes'].etype_class(targettype)
+        etypecls.fetch_rqlst(self._cw.user, select, searchedvar,
+                             ordermethod=ordermethod)
+        # from now on, we need variable type resolving
+        self._cw.vreg.solutions(self._cw, select, args)
+        # insert RQL expressions for schema constraints into the rql syntax tree
+        if vocabconstraints:
+            cstrcls = (RQLVocabularyConstraint, RQLConstraint)
+        else:
+            cstrcls = RQLConstraint
+        lt_infos = pruned_lt_info(self.e_schema, lt_infos or {})
+        # if there are still lt_infos, use set to keep track of added eid
+        # relations (adding twice the same eid relation is incorrect RQL)
+        eidvars = set()
+        for cstr in rdef.constraints:
+            # consider constraint.mainvars to check if constraint apply
+            if isinstance(cstr, cstrcls) and searchedvar.name in cstr.mainvars:
+                if not self.has_eid():
+                    if lt_infos:
+                        # we can perhaps further restrict with linkto infos using
+                        # a custom constraint built from cstr and lt_infos
+                        cstr = build_cstr_with_linkto_infos(
+                            cstr, args, searchedvar, evar, lt_infos, eidvars)
+                        if cstr is None:
+                            continue # could not build constraint -> discard
+                    elif evar.name in cstr.mainvars:
+                        continue
+                # compute a varmap suitable to RQLRewriter.rewrite argument
+                varmap = dict((v, v) for v in (searchedvar.name, evar.name)
+                              if v in select.defined_vars and v in cstr.mainvars)
+                # rewrite constraint by constraint since we want a AND between
+                # expressions.
+                rewriter.rewrite(select, [(varmap, (cstr,))], args, existant)
+        # insert security RQL expressions granting the permission to 'add' the
+        # relation into the rql syntax tree, if necessary
+        rqlexprs = rdef.get_rqlexprs('add')
+        if not self.has_eid():
+            rqlexprs = [rqlexpr for rqlexpr in rqlexprs
+                        if searchedvar.name in rqlexpr.mainvars]
+        if rqlexprs and not rdef.has_perm(self._cw, 'add', **sec_check_args):
+            # compute a varmap suitable to RQLRewriter.rewrite argument
+            varmap = dict((v, v) for v in (searchedvar.name, evar.name)
+                          if v in select.defined_vars)
+            # rewrite all expressions at once since we want a OR between them.
+            rewriter.rewrite(select, [(varmap, rqlexprs)], args, existant)
+        # ensure we have an order defined
+        if not select.orderby:
+            select.add_sort_var(select.defined_vars[searchedvar.name])
+        # we're done, turn the rql syntax tree as a string
+        rql = select.as_string()
+        return rql, args
+
+    def unrelated(self, rtype, targettype, role='subject', limit=None,
+                  ordermethod=None, lt_infos={}): # XXX .cw_unrelated
+        """return a result set of target type objects that may be related
+        by a given relation, with self as subject or object
+        """
+        try:
+            rql, args = self.cw_unrelated_rql(rtype, targettype, role, limit=limit,
+                                              ordermethod=ordermethod, lt_infos=lt_infos)
+        except Unauthorized:
+            # user isn't allowed to read the relation: nothing to propose
+            return self._cw.empty_rset()
+        return self._cw.execute(rql, args)
+
+ # relations cache handling #################################################
+
+    def cw_relation_cached(self, rtype, role):
+        """return None if the given relation isn't already cached on the
+        instance, else the content of the cache (a 2-tuple (rset, entities)).
+        """
+        return self._cw_related_cache.get('%s_%s' % (rtype, role))
+
+    def cw_set_relation_cache(self, rtype, role, rset):
+        """set cached values for the given relation"""
+        if rset:
+            related = list(rset.entities(0))
+            rschema = self._cw.vreg.schema.rschema(rtype)
+            if role == 'subject':
+                rcard = rschema.rdef(self.e_schema, related[0].e_schema).cardinality[1]
+                target = 'object'
+            else:
+                rcard = rschema.rdef(related[0].e_schema, self.e_schema).cardinality[0]
+                target = 'subject'
+            # reverse cardinality '?' or '1' means self is the only entity
+            # related to each target, so their reverse cache can be warmed too
+            if rcard in '?1':
+                for rentity in related:
+                    rentity._cw_related_cache['%s_%s' % (rtype, target)] = (
+                        self.as_rset(), (self,))
+        else:
+            related = ()
+        self._cw_related_cache['%s_%s' % (rtype, role)] = (rset, related)
+
+    def cw_clear_relation_cache(self, rtype=None, role=None):
+        """clear cached values for the given relation or the entire cache if
+        no relation is given
+        """
+        if rtype is None:
+            self._cw_related_cache.clear()
+            self._cw_adapters_cache.clear()
+        else:
+            # role is mandatory as soon as a relation type is given
+            assert role
+            self._cw_related_cache.pop('%s_%s' % (rtype, role), None)
+
+    def cw_clear_all_caches(self):
+        """flush all caches on this entity. Further attributes/relations access
+        will trigger new database queries to get back values.
+
+        If you use custom caches on your entity class (take care to @cached!),
+        you should override this method to clear them as well.
+        """
+        # clear attributes cache
+        self._cw_completed = False
+        self.cw_attr_cache.clear()
+        # clear relations cache
+        self.cw_clear_relation_cache()
+        # rest path unique cache
+        try:
+            del self.__unique
+        except AttributeError:
+            pass
+
+ # raw edition utilities ###################################################
+
+    def cw_set(self, **kwargs):
+        """update this entity using given attributes / relation, working in the
+        same fashion as :meth:`cw_instantiate`.
+
+        Example (in a shell session):
+
+        >>> c = rql('Any X WHERE X is Company').get_entity(0, 0)
+        >>> p = rql('Any X WHERE X is Person').get_entity(0, 0)
+        >>> c.cw_set(name=u'Logilab')
+        >>> p.cw_set(firstname=u'John', lastname=u'Doe', works_for=c)
+
+        You can also set relations where the entity has 'object' role by
+        prefixing the relation name by 'reverse_'. Also, relation values may be
+        an entity or eid, a list of entities or eids, or None (meaning that all
+        relations of the given type from or to this object should be deleted).
+        """
+        assert kwargs
+        assert self.cw_is_saved(), "should not call set_attributes while entity "\
+               "hasn't been saved yet"
+        rql, qargs, pendingrels, attrcache = self._cw_build_entity_query(kwargs)
+        if rql:
+            rql = 'SET ' + rql
+            qargs['x'] = self.eid
+            # graft the eid restriction onto the generated query
+            if ' WHERE ' in rql:
+                rql += ', X eid %(x)s'
+            else:
+                rql += ' WHERE X eid %(x)s'
+            self._cw.execute(rql, qargs)
+        # update current local object _after_ the rql query to avoid
+        # interferences between the query execution itself and the cw_edited /
+        # skip_security machinery
+        self._cw_update_attr_cache(attrcache)
+        self._cw_handle_pending_relations(self.eid, pendingrels, self._cw.execute)
+        # XXX update relation cache
+
+    def cw_delete(self, **kwargs):
+        """delete this entity from the database; `kwargs` are forwarded to the
+        RQL execute call"""
+        assert self.has_eid(), self.eid
+        self._cw.execute('DELETE %s X WHERE X eid %%(x)s' % self.e_schema,
+                         {'x': self.eid}, **kwargs)
+
+ # server side utilities ####################################################
+
+    def _cw_clear_local_perm_cache(self, action):
+        """drop cached permission evaluations for `action` on this entity"""
+        for rqlexpr in self.e_schema.get_rqlexprs(action):
+            self._cw.local_perm_cache.pop((rqlexpr.eid, (('x', self.eid),)), None)
+
+ # deprecated stuff #########################################################
+
+    @deprecated('[3.16] use cw_set() instead of set_attributes()')
+    def set_attributes(self, **kwargs): # XXX cw_set_attributes
+        """deprecated backward compatibility wrapper around :meth:`cw_set`"""
+        if kwargs:
+            self.cw_set(**kwargs)
+
+    @deprecated('[3.16] use cw_set() instead of set_relations()')
+    def set_relations(self, **kwargs): # XXX cw_set_relations
+        """add relations to the given object. To set a relation where this entity
+        is the object of the relation, use 'reverse_' as argument name.
+
+        Values may be an entity or eid, a list of entities or eids, or None
+        (meaning that all relations of the given type from or to this object
+        should be deleted).
+        """
+        if kwargs:
+            self.cw_set(**kwargs)
+
+    @deprecated('[3.13] use entity.cw_clear_all_caches()')
+    def clear_all_caches(self):
+        """deprecated backward compatibility wrapper around :meth:`cw_clear_all_caches`"""
+        return self.cw_clear_all_caches()
+
+
+# attribute and relation descriptors ##########################################
+
+class Attribute(object):
+    """descriptor that controls schema attribute access"""
+
+    def __init__(self, attrname):
+        # 'eid' must not be accessed through this descriptor
+        assert attrname != 'eid'
+        self._attrname = attrname
+
+    def __get__(self, eobj, eclass):
+        # class-level access returns the descriptor itself
+        if eobj is None:
+            return self
+        return eobj.cw_attr_value(self._attrname)
+
+    @deprecated('[3.10] assign to entity.cw_attr_cache[attr] or entity.cw_edited[attr]')
+    def __set__(self, eobj, value):
+        # while the entity is being edited, go through cw_edited; otherwise
+        # write straight into the attribute cache
+        if hasattr(eobj, 'cw_edited') and not eobj.cw_edited.saved:
+            eobj.cw_edited[self._attrname] = value
+        else:
+            eobj.cw_attr_cache[self._attrname] = value
+
+
+class Relation(object):
+    """descriptor that controls schema relation access"""
+
+    def __init__(self, rschema, role):
+        self._rtype = rschema.type
+        self._role = role
+
+    def __get__(self, eobj, eclass):
+        # unlike Attribute, class-level access is an error
+        if eobj is None:
+            raise AttributeError('%s can only be accessed from instances'
+                                 % self._rtype)
+        return eobj.related(self._rtype, self._role, entities=True)
+
+    def __set__(self, eobj, value):
+        # relations are edited through cw_set / cw_instantiate, not assignment
+        raise NotImplementedError
+
+
+from logging import getLogger
+from cubicweb import set_log_methods
+set_log_methods(Entity, getLogger('cubicweb.entity'))
diff -r e1caf133b81c -r b23d58050076 cubicweb/etwist/__init__.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/etwist/__init__.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,20 @@
+# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+""" CW - nevow/twisted client
+
+"""
diff -r e1caf133b81c -r b23d58050076 cubicweb/etwist/http.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/etwist/http.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,43 @@
+"""twisted server for CubicWeb web instances
+
+:organization: Logilab
+:copyright: 2001-2011 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
+:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
+:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
+"""
+
+__docformat__ = "restructuredtext en"
+
+class HTTPResponse(object):
+ """An object representing an HTTP Response to be sent to the client.
+ """
+ def __init__(self, twisted_request, code=None, headers=None, stream=None):
+ self._headers_out = headers
+ self._twreq = twisted_request
+ self._stream = stream
+ self._code = code
+
+ self._init_headers()
+ self._finalize()
+
+ def _init_headers(self):
+ if self._headers_out is None:
+ return
+ # initialize headers
+ for k, values in self._headers_out.getAllRawHeaders():
+ self._twreq.responseHeaders.setRawHeaders(k, values)
+ # add content-length if not present
+ if (self._headers_out.getHeader('content-length') is None
+ and self._stream is not None):
+ self._twreq.setHeader('content-length', len(self._stream))
+
+ def _finalize(self):
+ # we must set code before writing anything, else it's too late
+ if self._code is not None:
+ self._twreq.setResponseCode(self._code)
+ if self._stream is not None:
+ self._twreq.write(str(self._stream))
+ self._twreq.finish()
+
+ def __repr__(self):
+ return "<%s.%s code=%d>" % (self.__module__, self.__class__.__name__, self._code)
diff -r e1caf133b81c -r b23d58050076 cubicweb/etwist/request.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/etwist/request.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,59 @@
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""Twisted request handler for CubicWeb"""
+
+__docformat__ = "restructuredtext en"
+
+
+from cubicweb.web.request import CubicWebRequestBase
+
+
+class CubicWebTwistedRequestAdapter(CubicWebRequestBase):
+    """ from twisted .req to cubicweb .form
+    req.files are put into .form[<filefield>]
+    """
+    def __init__(self, req, vreg, https):
+        self._twreq = req
+        super(CubicWebTwistedRequestAdapter, self).__init__(
+            vreg, https, req.args, headers=req.received_headers)
+        for key, name_stream_list in req.files.items():
+            for name, stream in name_stream_list:
+                if name is not None:
+                    # NOTE(review): `unicode` implies python 2 only -- confirm
+                    # before running this under python 3
+                    name = unicode(name, self.encoding)
+                self.form.setdefault(key, []).append((name, stream))
+            # 3.16.4 backward compat
+            if len(self.form[key]) == 1:
+                self.form[key] = self.form[key][0]
+        self.content = self._twreq.content # stream
+
+    def http_method(self):
+        """returns 'POST', 'GET', 'HEAD', etc."""
+        return self._twreq.method
+
+    def relative_path(self, includeparams=True):
+        """return the normalized path of the request (ie at least relative to
+        the instance's root, but some other normalization may be needed so that
+        the returned path may be used to compare to generated urls
+
+        :param includeparams:
+           boolean indicating if GET form parameters should be kept in the path
+        """
+        path = self._twreq.uri[1:] # remove the root '/'
+        if not includeparams:
+            # drop the query string
+            path = path.split('?', 1)[0]
+        return path
diff -r e1caf133b81c -r b23d58050076 cubicweb/etwist/server.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/etwist/server.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,298 @@
+# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""twisted server for CubicWeb web instances"""
+__docformat__ = "restructuredtext en"
+
+import sys
+import select
+import traceback
+import threading
+from cgi import FieldStorage, parse_header
+
+from six.moves.urllib.parse import urlsplit, urlunsplit
+
+from cubicweb.statsd_logger import statsd_timeit
+
+from twisted.internet import reactor, task, threads
+from twisted.web import http, server
+from twisted.web import resource
+from twisted.web.server import NOT_DONE_YET
+
+
+from logilab.mtconverter import xml_escape
+from logilab.common.decorators import monkeypatch
+
+from cubicweb import ConfigurationError, CW_EVENT_MANAGER
+from cubicweb.utils import json_dumps
+from cubicweb.web import DirectResponse
+from cubicweb.web.application import CubicWebPublisher
+from cubicweb.etwist.request import CubicWebTwistedRequestAdapter
+from cubicweb.etwist.http import HTTPResponse
+
+def start_task(interval, func):
+    """register `func` as a looping task called every `interval` seconds"""
+    lc = task.LoopingCall(func)
+    # wait until interval has expired to actually start the task, else we have
+    # to wait all tasks to be finished for the server to be actually started
+    lc.start(interval, now=False)
+
+
+class CubicWebRootResource(resource.Resource):
+ def __init__(self, config, repo):
+ resource.Resource.__init__(self)
+ self.config = config
+ # instantiate publisher here and not in init_publisher to get some
+ # checks done before daemonization (eg versions consistency)
+ self.appli = CubicWebPublisher(repo, config)
+ self.base_url = config['base-url']
+ self.https_url = config['https-url']
+ global MAX_POST_LENGTH
+ MAX_POST_LENGTH = config['max-post-length']
+
+ def init_publisher(self):
+ config = self.config
+ # when we have an in-memory repository, clean unused sessions every XX
+ # seconds and properly shutdown the server
+ if config['repository-uri'] == 'inmemory://':
+ if config.mode != 'test':
+ reactor.addSystemEventTrigger('before', 'shutdown',
+ self.shutdown_event)
+ self.appli.repo.start_looping_tasks()
+ self.set_url_rewriter()
+ CW_EVENT_MANAGER.bind('after-registry-reload', self.set_url_rewriter)
+
+ def start_service(self):
+ start_task(self.appli.session_handler.clean_sessions_interval,
+ self.appli.session_handler.clean_sessions)
+
+ def set_url_rewriter(self):
+ self.url_rewriter = self.appli.vreg['components'].select_or_none('urlrewriter')
+
+ def shutdown_event(self):
+ """callback fired when the server is shutting down to properly
+ clean opened sessions
+ """
+ self.appli.repo.shutdown()
+
+ def getChild(self, path, request):
+ """Indicate which resource to use to process down the URL's path"""
+ return self
+
+ def render(self, request):
+ """Render a page from the root resource"""
+ # reload modified files in debug mode
+ if self.config.debugmode:
+ self.config.uiprops.reload_if_needed()
+ if self.https_url:
+ self.config.https_uiprops.reload_if_needed()
+ self.appli.vreg.reload_if_needed()
+ if self.config['profile']: # default profiler don't trace threads
+ return self.render_request(request)
+ else:
+ deferred = threads.deferToThread(self.render_request, request)
+ return NOT_DONE_YET
+
+ @statsd_timeit
+ def render_request(self, request):
+ try:
+ # processing HUGE files (hundred of megabytes) in http.processReceived
+ # blocks other HTTP requests processing
+ # due to the clumsy & slow parsing algorithm of cgi.FieldStorage
+ # so we deferred that part to the cubicweb thread
+ request.process_multipart()
+ return self._render_request(request)
+ except Exception:
+ trace = traceback.format_exc()
+ return HTTPResponse(stream='
%s
' % xml_escape(trace),
+ code=500, twisted_request=request)
+
+ def _render_request(self, request):
+ origpath = request.path
+ host = request.host
+ # dual http/https access handling: expect a rewrite rule to prepend
+ # 'https' to the path to detect https access
+ https = False
+ if origpath.split('/', 2)[1] == 'https':
+ origpath = origpath[6:]
+ request.uri = request.uri[6:]
+ https = True
+ if self.url_rewriter is not None:
+ # XXX should occur before authentication?
+ path = self.url_rewriter.rewrite(host, origpath, request)
+ request.uri.replace(origpath, path, 1)
+ else:
+ path = origpath
+ req = CubicWebTwistedRequestAdapter(request, self.appli.vreg, https)
+ try:
+ ### Try to generate the actual request content
+ content = self.appli.handle_request(req, path)
+ except DirectResponse as ex:
+ return ex.response
+ # at last: create twisted object
+ return HTTPResponse(code = req.status_out,
+ headers = req.headers_out,
+ stream = content,
+ twisted_request=req._twreq)
+
+ # these are overridden by set_log_methods below
+ # only defining here to prevent pylint from complaining
+ @classmethod
+ def debug(cls, msg, *a, **kw):
+ pass
+ info = warning = error = critical = exception = debug
+
+
+JSON_PATHS = set(('json',))
+FRAME_POST_PATHS = set(('validateform',))
+
+orig_gotLength = http.Request.gotLength
+@monkeypatch(http.Request)
+def gotLength(self, length):
+ orig_gotLength(self, length)
+ if length > MAX_POST_LENGTH: # length is 0 on GET
+ path = self.channel._path.split('?', 1)[0].rstrip('/').rsplit('/', 1)[-1]
+ self.clientproto = 'HTTP/1.1' # not yet initialized
+ self.channel.persistent = 0 # force connection close on cleanup
+ self.setResponseCode(http.REQUEST_ENTITY_TOO_LARGE)
+ if path in JSON_PATHS: # XXX better json path detection
+ self.setHeader('content-type',"application/json")
+ body = json_dumps({'reason': 'request max size exceeded'})
+ elif path in FRAME_POST_PATHS: # XXX better frame post path detection
+ self.setHeader('content-type',"text/html")
+ body = ('' % json_dumps( (False, 'request max size exceeded', None) ))
+ else:
+ self.setHeader('content-type',"text/html")
+ body = ("Processing Failed"
+ "request max size exceeded")
+ self.setHeader('content-length', str(len(body)))
+ self.write(body)
+ # see request.finish(). Done here since we get error due to not full
+ # initialized request
+ self.finished = 1
+ if not self.queued:
+ self._cleanup()
+ for d in self.notifications:
+ d.callback(None)
+ self.notifications = []
+
+@monkeypatch(http.Request)
+def requestReceived(self, command, path, version):
+    """Called by channel when all data has been received.
+
+    This method is not intended for users.
+    """
+    self.content.seek(0, 0)
+    self.args = {}
+    self.files = {}
+    self.stack = []
+    self.method, self.uri = command, path
+    self.clientproto = version
+    x = self.uri.split('?', 1)
+    if len(x) == 1:
+        # no query string
+        self.path = self.uri
+    else:
+        self.path, argstring = x
+        self.args = http.parse_qs(argstring, 1)
+    # cache the client and server information, we'll need this later to be
+    # serialized and sent with the request so CGIs will work remotely
+    self.client = self.channel.transport.getPeer()
+    self.host = self.channel.transport.getHost()
+    # Argument processing
+    ctype = self.getHeader('content-type')
+    self._do_process_multipart = False
+    if self.method == "POST" and ctype:
+        key, pdict = parse_header(ctype)
+        if key == 'application/x-www-form-urlencoded':
+            self.args.update(http.parse_qs(self.content.read(), 1))
+            self.content.seek(0)
+        elif key == 'multipart/form-data':
+            # defer this as it can be extremely time consuming
+            # with big files
+            self._do_process_multipart = True
+    self.process()
+
+@monkeypatch(http.Request)
+def process_multipart(self):
+    """parse multipart/form-data content into the request's `files` and
+    `args` attributes (deferred from requestReceived, see comment there)"""
+    if not self._do_process_multipart:
+        return
+    form = FieldStorage(self.content, self.received_headers,
+                        environ={'REQUEST_METHOD': 'POST'},
+                        keep_blank_values=1,
+                        strict_parsing=1)
+    for key in form:
+        values = form[key]
+        if not isinstance(values, list):
+            values = [values]
+        for value in values:
+            if value.filename:
+                if value.done != -1: # -1 is transfer has been interrupted
+                    self.files.setdefault(key, []).append((value.filename, value.file))
+                else:
+                    self.files.setdefault(key, []).append((None, None))
+            else:
+                self.args.setdefault(key, []).append(value.value)
+
+from logging import getLogger
+from cubicweb import set_log_methods
+LOGGER = getLogger('cubicweb.twisted')
+set_log_methods(CubicWebRootResource, LOGGER)
+
+def run(config, debug=None, repo=None):
+    """start the twisted server for the given instance configuration.
+
+    `repo` may be passed during tests: the test has already created a repo
+    object so we should not create a new one. Explicitly passing the repo
+    object avoids relying on the fragile config.repository() cache. We could
+    imagine making repo a mandatory argument and receive it from the starting
+    command directly.
+    """
+    if debug is not None:
+        config.debugmode = debug
+    config.check_writeable_uid_directory(config.appdatahome)
+    # create the site
+    if repo is None:
+        repo = config.repository()
+    root_resource = CubicWebRootResource(config, repo)
+    website = server.Site(root_resource)
+    # serve it via standard HTTP on port set in the configuration
+    port = config['port'] or 8080
+    interface = config['interface']
+    reactor.suggestThreadPoolSize(config['webserver-threadpool-size'])
+    reactor.listenTCP(port, website, interface=interface)
+    if not config.debugmode:
+        if sys.platform == 'win32':
+            raise ConfigurationError("Under windows, you must use the service management "
+                                     "commands (e.g : 'net start my_instance)'")
+        from logilab.common.daemon import daemonize
+        LOGGER.info('instance started in the background on %s', root_resource.base_url)
+        whichproc = daemonize(config['pid-file'], umask=config['umask'])
+        if whichproc: # 1 = orig process, 2 = first fork, None = second fork (eg daemon process)
+            return whichproc # parent process
+    root_resource.init_publisher() # before changing uid
+    if config['uid'] is not None:
+        from logilab.common.daemon import setugid
+        setugid(config['uid'])
+    root_resource.start_service()
+    LOGGER.info('instance started on %s', root_resource.base_url)
+    # avoid annoying warning if not in Main Thread
+    signals = threading.currentThread().getName() == 'MainThread'
+    if config['profile']:
+        import cProfile
+        cProfile.runctx('reactor.run(installSignalHandlers=%s)' % signals,
+                        globals(), locals(), config['profile'])
+    else:
+        reactor.run(installSignalHandlers=signals)
diff -r e1caf133b81c -r b23d58050076 cubicweb/etwist/service.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/etwist/service.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,99 @@
+# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import print_function
+
+import os
+import sys
+
+try:
+ import win32serviceutil
+ import win32service
+except ImportError:
+ print('Win32 extensions for Python are likely not installed.')
+ sys.exit(3)
+
+from os.path import join
+
+from cubicweb.etwist.server import (CubicWebRootResource, reactor, server)
+
+from logilab.common.shellutils import rm
+
+import logging
+from logging import getLogger, handlers
+from cubicweb import set_log_methods
+from cubicweb.cwconfig import CubicWebConfiguration as cwcfg
+
+def _check_env(env):
+ env_vars = ('CW_INSTANCES_DIR', 'CW_INSTANCES_DATA_DIR', 'CW_RUNTIME_DIR')
+ for var in env_vars:
+ if var not in env:
+ raise Exception('The environment variables %s must be set.' % \
+ ', '.join(env_vars))
+ if not env.get('USERNAME'):
+ env['USERNAME'] = 'cubicweb'
+
+class CWService(object, win32serviceutil.ServiceFramework):
+ _svc_name_ = None
+ _svc_display_name_ = None
+ instance = None
+
+ def __init__(self, *args, **kwargs):
+ win32serviceutil.ServiceFramework.__init__(self, *args, **kwargs)
+ cwcfg.load_cwctl_plugins()
+ logger = getLogger('cubicweb')
+ set_log_methods(CubicWebRootResource, logger)
+
+ def SvcStop(self):
+ self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
+ logger = getLogger('cubicweb.twisted')
+ logger.info('stopping %s service' % self.instance)
+ reactor.stop()
+ self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
+
+ def SvcDoRun(self):
+ self.ReportServiceStatus(win32service.SERVICE_START_PENDING)
+ logger = getLogger('cubicweb.twisted')
+ handler = handlers.NTEventLogHandler('cubicweb')
+ handler.setLevel(logging.INFO)
+ logger.addHandler(handler)
+ logger.info('starting %s service' % self.instance)
+ try:
+ _check_env(os.environ)
+ # create the site
+ config = cwcfg.config_for(self.instance)
+ config.init_log(force=True)
+ config.debugmode = False
+ logger.info('starting cubicweb instance %s ', self.instance)
+ config.info('clear ui caches')
+ for cachedir in ('uicache', 'uicachehttps'):
+ rm(join(config.appdatahome, cachedir, '*'))
+ root_resource = CubicWebRootResource(config, config.repository())
+ website = server.Site(root_resource)
+ # serve it via standard HTTP on port set in the configuration
+ port = config['port'] or 8080
+ logger.info('listening on port %s' % port)
+ reactor.listenTCP(port, website)
+ root_resource.init_publisher()
+ root_resource.start_service()
+ logger.info('instance started on %s', root_resource.base_url)
+ self.ReportServiceStatus(win32service.SERVICE_RUNNING)
+ reactor.run()
+ except Exception as e:
+ logger.error('service %s stopped (cause: %s)' % (self.instance, e))
+ logger.exception('what happened ...')
+ self.ReportServiceStatus(win32service.SERVICE_STOPPED)
diff -r e1caf133b81c -r b23d58050076 cubicweb/etwist/test/data/views.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/etwist/test/data/views.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,29 @@
+# copyright 2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""only for unit tests !"""
+
+from cubicweb.view import View
+from cubicweb.predicates import match_http_method
+
+class PutView(View):
+ __regid__ = 'put'
+ __select__ = match_http_method('PUT') | match_http_method('POST')
+ binary = True
+
+ def call(self):
+ self.w(self._cw.content.read())
diff -r e1caf133b81c -r b23d58050076 cubicweb/etwist/test/unittest_server.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/etwist/test/unittest_server.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,38 @@
+# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+
+import os, os.path as osp, glob
+import urllib
+
+from cubicweb.devtools.httptest import CubicWebServerTC
+
+
+class ETwistHTTPTC(CubicWebServerTC):
+ def test_put_content(self):
+ data = {'hip': 'hop'}
+ headers = {'Content-Type': 'application/x-www-form-urlencoded'}
+ body = urllib.urlencode(data)
+ response = self.web_request('?vid=put', method='PUT', body=body)
+ self.assertEqual(body, response.body)
+ response = self.web_request('?vid=put', method='POST', body=body,
+ headers=headers)
+ self.assertEqual(body, response.body)
+
+if __name__ == '__main__':
+ from logilab.common.testlib import unittest_main
+ unittest_main()
diff -r e1caf133b81c -r b23d58050076 cubicweb/etwist/twconfig.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/etwist/twconfig.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,115 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""twisted server configurations:
+
+* the "all-in-one" configuration to get a web instance running in a twisted
+ web server integrating a repository server in the same process (only available
+ if the repository part of the software is installed)
+"""
+__docformat__ = "restructuredtext en"
+
+from os.path import join
+
+from logilab.common.configuration import Method, merge_options
+
+from cubicweb.cwconfig import CONFIGURATIONS
+from cubicweb.web.webconfig import WebConfiguration
+
+
+class WebConfigurationBase(WebConfiguration):
+ """web instance (in a twisted web server) client of a RQL server"""
+
+ options = merge_options((
+ # ctl configuration
+ ('port',
+ {'type' : 'int',
+ 'default': None,
+ 'help': 'http server port number (default to 8080)',
+ 'group': 'web', 'level': 0,
+ }),
+ ('interface',
+ {'type' : 'string',
+ 'default': "",
+ 'help': 'http server address on which to listen (default to everywhere)',
+ 'group': 'web', 'level': 1,
+ }),
+ ('max-post-length',
+ {'type' : 'bytes',
+ 'default': '100MB',
+ 'help': 'maximum length of HTTP request. Default to 100 MB.',
+ 'group': 'web', 'level': 1,
+ }),
+ ('profile',
+ {'type' : 'string',
+ 'default': None,
+ 'help': 'profile code and use the specified file to store stats if this option is set',
+ 'group': 'web', 'level': 3,
+ }),
+ ('host',
+ {'type' : 'string',
+ 'default': None,
+ 'help': 'host name if not correctly detectable through gethostname',
+ 'group': 'main', 'level': 1,
+ }),
+ ('pid-file',
+ {'type' : 'string',
+ 'default': Method('default_pid_file'),
+ 'help': 'repository\'s pid file',
+ 'group': 'main', 'level': 2,
+ }),
+ ('uid',
+ {'type' : 'string',
+ 'default': None,
+ 'help': 'if this option is set, use the specified user to start \
+the repository rather than the user running the command',
+ 'group': 'main', 'level': WebConfiguration.mode == 'system'
+ }),
+ ('webserver-threadpool-size',
+ {'type': 'int',
+ 'default': 4,
+ 'help': "size of twisted's reactor threadpool. It should probably be not too \
+much greater than connection-poolsize",
+ 'group': 'web', 'level': 3,
+ }),
+ ) + WebConfiguration.options)
+
+ def server_file(self):
+ return join(self.apphome, '%s-%s.py' % (self.appid, self.name))
+
+ def default_base_url(self):
+ from socket import getfqdn
+ return 'http://%s:%s/' % (self['host'] or getfqdn().lower(), self['port'] or 8080)
+
+
+try:
+ from cubicweb.server.serverconfig import ServerConfiguration
+
+ class AllInOneConfiguration(WebConfigurationBase, ServerConfiguration):
+ """repository and web instance in the same twisted process"""
+ name = 'all-in-one'
+ options = merge_options(WebConfigurationBase.options
+ + ServerConfiguration.options)
+
+ cubicweb_appobject_path = WebConfigurationBase.cubicweb_appobject_path | ServerConfiguration.cubicweb_appobject_path
+ cube_appobject_path = WebConfigurationBase.cube_appobject_path | ServerConfiguration.cube_appobject_path
+
+
+ CONFIGURATIONS.append(AllInOneConfiguration)
+
+except ImportError:
+ pass
diff -r e1caf133b81c -r b23d58050076 cubicweb/etwist/twctl.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/etwist/twctl.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,79 @@
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""cubicweb-clt handlers for twisted"""
+
+from cubicweb.toolsutils import CommandHandler
+from cubicweb.web.webctl import WebCreateHandler, WebUpgradeHandler
+
+# trigger configuration registration
+import cubicweb.etwist.twconfig # pylint: disable=W0611
+
+class TWCreateHandler(WebCreateHandler):
+ cfgname = 'twisted'
+
+class TWStartHandler(CommandHandler):
+ cmdname = 'start'
+ cfgname = 'twisted'
+
+ def start_server(self, config):
+ from cubicweb.etwist import server
+ return server.run(config)
+
+class TWStopHandler(CommandHandler):
+ cmdname = 'stop'
+ cfgname = 'twisted'
+
+ def poststop(self):
+ pass
+
+class TWUpgradeHandler(WebUpgradeHandler):
+ cfgname = 'twisted'
+
+
+try:
+ from cubicweb.server import serverctl
+ class AllInOneCreateHandler(serverctl.RepositoryCreateHandler,
+ TWCreateHandler):
+ """configuration to get an instance running in a twisted web server
+ integrating a repository server in the same process
+ """
+ cfgname = 'all-in-one'
+
+ def bootstrap(self, cubes, automatic=False, inputlevel=0):
+ """bootstrap this configuration"""
+ serverctl.RepositoryCreateHandler.bootstrap(self, cubes, automatic, inputlevel)
+ TWCreateHandler.bootstrap(self, cubes, automatic, inputlevel)
+
+ class AllInOneStartHandler(TWStartHandler):
+ cmdname = 'start'
+ cfgname = 'all-in-one'
+ subcommand = 'cubicweb-twisted'
+
+ class AllInOneStopHandler(CommandHandler):
+ cmdname = 'stop'
+ cfgname = 'all-in-one'
+ subcommand = 'cubicweb-twisted'
+
+ def poststop(self):
+ pass
+
+ class AllInOneUpgradeHandler(TWUpgradeHandler):
+ cfgname = 'all-in-one'
+
+except ImportError:
+ pass
diff -r e1caf133b81c -r b23d58050076 cubicweb/ext/__init__.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/ext/__init__.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,17 @@
+# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
diff -r e1caf133b81c -r b23d58050076 cubicweb/ext/html4zope.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/ext/html4zope.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,174 @@
+# Author: David Goodger
+# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+"""
+# Contact: goodger@users.sourceforge.net
+# Revision: $Revision: 1.2 $
+# Date: $Date: 2005-07-04 16:36:50 $
+# Copyright: This module has been placed in the public domain.
+
+"""
+Simple HyperText Markup Language document tree Writer.
+
+The output conforms to the HTML 4.01 Transitional DTD and to the Extensible
+HTML version 1.0 Transitional DTD (*almost* strict). The output contains a
+minimum of formatting information. A cascading style sheet ("default.css" by
+default) is required for proper viewing with a modern graphical browser.
+
+http://cvs.zope.org/Zope/lib/python/docutils/writers/Attic/html4zope.py?rev=1.1.2.2&only_with_tag=ajung-restructuredtext-integration-branch&content-type=text/vnd.viewcvs-markup
+"""
+
+__docformat__ = 'reStructuredText'
+
+import os
+
+from logilab.mtconverter import xml_escape
+
+from docutils import nodes
+from docutils.writers.html4css1 import Writer as CSS1Writer
+from docutils.writers.html4css1 import HTMLTranslator as CSS1HTMLTranslator
+
+default_level = int(os.environ.get('STX_DEFAULT_LEVEL', 3))
+
+class Writer(CSS1Writer):
+ """css writer using our html translator"""
+ def __init__(self, base_url):
+ CSS1Writer.__init__(self)
+ self.translator_class = URLBinder(base_url, HTMLTranslator)
+
+ def apply_template(self):
+ """overriding this is necessary with docutils >= 0.5"""
+ return self.visitor.astext()
+
+class URLBinder:
+ def __init__(self, url, klass):
+ self.base_url = url
+ self.translator_class = HTMLTranslator
+
+ def __call__(self, document):
+ translator = self.translator_class(document)
+ translator.base_url = self.base_url
+ return translator
+
+class HTMLTranslator(CSS1HTMLTranslator):
+ """ReST tree to html translator"""
+
+ def astext(self):
+ """return the extracted html"""
+ return ''.join(self.body)
+
+ def visit_title(self, node):
+ """Only 6 section levels are supported by HTML."""
+ if isinstance(node.parent, nodes.topic):
+ self.body.append(
+ self.starttag(node, 'p', '', CLASS='topic-title'))
+ if node.parent.hasattr('id'):
+ self.body.append(
+ self.starttag({}, 'a', '', name=node.parent['id']))
+ self.context.append('
"""
+ def depart_document(self, node):
+ """syt: i don't want the enclosing
"""
+
+ def visit_reference(self, node):
+ """syt: i want absolute urls"""
+ if 'refuri' in node:
+ href = node['refuri']
+ if ( self.settings.cloak_email_addresses
+ and href.startswith('mailto:')):
+ href = self.cloak_mailto(href)
+ self.in_mailto = 1
+ else:
+ assert 'refid' in node, \
+ 'References must have "refuri" or "refid" attribute.'
+ href = '%s#%s' % (self.base_url, node['refid'])
+ atts = {'href': href, 'class': 'reference'}
+ if not isinstance(node.parent, nodes.TextElement):
+ assert len(node) == 1 and isinstance(node[0], nodes.image)
+ atts['class'] += ' image-reference'
+ self.body.append(self.starttag(node, 'a', '', **atts))
+
+ ## override error messages to avoid XHTML problems ########################
+ def visit_problematic(self, node):
+ pass
+
+ def depart_problematic(self, node):
+ pass
+
+ def visit_system_message(self, node):
+ backref_text = ''
+ if len(node['backrefs']):
+ backrefs = node['backrefs']
+ if len(backrefs) == 1:
+ backref_text = '; backlink'
+ else:
+ i = 1
+ backlinks = []
+ for backref in backrefs:
+ backlinks.append(str(i))
+ i += 1
+ backref_text = ('; backlinks: %s'
+ % ', '.join(backlinks))
+ if node.hasattr('line'):
+ line = ', line %s' % node['line']
+ else:
+ line = ''
+ a_start = a_end = ''
+ error = u'System Message: %s%s/%s%s (%s %s)%s\n' % (
+ a_start, node['type'], node['level'], a_end,
+ self.encode(node['source']), line, backref_text)
+ self.body.append(u'<div class="system-message"><b>ReST / HTML errors:</b>%s</div>' % xml_escape(error))
+
+ def depart_system_message(self, node):
+ pass
diff -r e1caf133b81c -r b23d58050076 cubicweb/ext/markdown.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/ext/markdown.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,27 @@
+from __future__ import absolute_import
+import markdown
+
+import logging
+
+log = logging.getLogger(__name__)
+
+
+def markdown_publish(context, data):
+ """publish a string formatted as MarkDown Text to HTML
+
+ :type context: a cubicweb application object
+
+ :type data: str
+ :param data: some MarkDown text
+
+ :rtype: unicode
+ :return:
+ the data formatted as HTML or the original data if an error occurred
+ """
+ md = markdown.Markdown()
+ try:
+ return md.convert(data)
+ except:
+ import traceback; traceback.print_exc()
+ log.exception("Error while converting Markdown to HTML")
+ return data
diff -r e1caf133b81c -r b23d58050076 cubicweb/ext/rest.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/ext/rest.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,469 @@
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""rest publishing functions
+
+contains some functions and setup of docutils for cubicweb. Provides the
+following ReST directives:
+
+* `eid`, create link to entity in the repository by their eid
+
+* `card`, create link to card entity in the repository by their wikiid
+ (proposing to create it when the referred card doesn't exist yet)
+
+* `winclude`, reference to a web documentation file (in wdoc/ directories)
+
+* `sourcecode` (if pygments is installed), source code colorization
+
+* `rql-table`, create a table from a RQL query
+
+"""
+__docformat__ = "restructuredtext en"
+
+import sys
+from itertools import chain
+from logging import getLogger
+from os.path import join
+
+from six import text_type
+from six.moves.urllib.parse import urlsplit
+
+from docutils import statemachine, nodes, utils, io
+from docutils.core import Publisher
+from docutils.parsers.rst import Parser, states, directives, Directive
+from docutils.parsers.rst.roles import register_canonical_role, set_classes
+
+from logilab.mtconverter import ESC_UCAR_TABLE, ESC_CAR_TABLE, xml_escape
+
+from cubicweb import UnknownEid
+from cubicweb.ext.html4zope import Writer
+
+from cubicweb.web.views import vid_from_rset # XXX better not to import c.w.views here...
+
+# We provide our own parser as an attempt to get rid of
+# state machine reinstantiation
+
+import re
+# compile states.Body patterns
+for k, v in states.Body.patterns.items():
+ if isinstance(v, str):
+ states.Body.patterns[k] = re.compile(v)
+
+# register ReStructured Text mimetype / extensions
+import mimetypes
+mimetypes.add_type('text/rest', '.rest')
+mimetypes.add_type('text/rest', '.rst')
+
+
+LOGGER = getLogger('cubicweb.rest')
+
+
+def eid_reference_role(role, rawtext, text, lineno, inliner,
+ options={}, content=[]):
+ try:
+ try:
+ eid_num, rest = text.split(u':', 1)
+ except ValueError:
+ eid_num, rest = text, '#'+text
+ eid_num = int(eid_num)
+ if eid_num < 0:
+ raise ValueError
+ except ValueError:
+ msg = inliner.reporter.error(
+ 'EID number must be a positive number; "%s" is invalid.'
+ % text, line=lineno)
+ prb = inliner.problematic(rawtext, rawtext, msg)
+ return [prb], [msg]
+ # Base URL mainly used by inliner.pep_reference; so this is correct:
+ context = inliner.document.settings.context
+ try:
+ refedentity = context._cw.entity_from_eid(eid_num)
+ except UnknownEid:
+ ref = '#'
+ rest += u' ' + context._cw._('(UNEXISTANT EID)')
+ else:
+ ref = refedentity.absolute_url()
+ set_classes(options)
+ return [nodes.reference(rawtext, utils.unescape(rest), refuri=ref,
+ **options)], []
+
+
+def rql_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
+ """``:rql:`<rql-expression>``` or ``:rql:`<rql-expression>:<vid>```
+
+ Example: ``:rql:`Any X,Y WHERE X is CWUser, X login Y:table```
+
+ Replace the directive with the output of applying the view to the resultset
+ returned by the query.
+
+ "X eid %(userid)s" can be used in the RQL query for this query will be
+ executed with the argument {'userid': _cw.user.eid}.
+ """
+ _cw = inliner.document.settings.context._cw
+ text = text.strip()
+ if ':' in text:
+ rql, vid = text.rsplit(u':', 1)
+ rql = rql.strip()
+ else:
+ rql, vid = text, None
+ _cw.ensure_ro_rql(rql)
+ try:
+ rset = _cw.execute(rql, {'userid': _cw.user.eid})
+ if rset:
+ if vid is None:
+ vid = vid_from_rset(_cw, rset, _cw.vreg.schema)
+ else:
+ vid = 'noresult'
+ view = _cw.vreg['views'].select(vid, _cw, rset=rset)
+ content = view.render()
+ except Exception as exc:
+ content = 'an error occurred while interpreting this rql directive: %r' % exc
+ set_classes(options)
+ return [nodes.raw('', content, format='html')], []
+
+
+def bookmark_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
+ """``:bookmark:`<eid>``` or ``:bookmark:`<eid>:<vid>```
+
+ Example: ``:bookmark:`1234:table```
+
+ Replace the directive with the output of applying the view to the resultset
+ returned by the query stored in the bookmark. By default, the view is the one
+ stored in the bookmark, but it can be overridden by the directive as in the
+ example above.
+
+ "X eid %(userid)s" can be used in the RQL query stored in the Bookmark, for
+ this query will be executed with the argument {'userid': _cw.user.eid}.
+ """
+ _cw = inliner.document.settings.context._cw
+ text = text.strip()
+ try:
+ if ':' in text:
+ eid, vid = text.rsplit(u':', 1)
+ eid = int(eid)
+ else:
+ eid, vid = int(text), None
+ except ValueError:
+ msg = inliner.reporter.error(
+ 'EID number must be a positive number; "%s" is invalid.'
+ % text, line=lineno)
+ prb = inliner.problematic(rawtext, rawtext, msg)
+ return [prb], [msg]
+ try:
+ bookmark = _cw.entity_from_eid(eid)
+ except UnknownEid:
+ msg = inliner.reporter.error('Unknown EID %s.' % text, line=lineno)
+ prb = inliner.problematic(rawtext, rawtext, msg)
+ return [prb], [msg]
+ try:
+ params = dict(_cw.url_parse_qsl(urlsplit(bookmark.path).query))
+ rql = params['rql']
+ if vid is None:
+ vid = params.get('vid')
+ except (ValueError, KeyError) as exc:
+ msg = inliner.reporter.error('Could not parse bookmark path %s [%s].'
+ % (bookmark.path, exc), line=lineno)
+ prb = inliner.problematic(rawtext, rawtext, msg)
+ return [prb], [msg]
+ try:
+ rset = _cw.execute(rql, {'userid': _cw.user.eid})
+ if rset:
+ if vid is None:
+ vid = vid_from_rset(_cw, rset, _cw.vreg.schema)
+ else:
+ vid = 'noresult'
+ view = _cw.vreg['views'].select(vid, _cw, rset=rset)
+ content = view.render()
+ except Exception as exc:
+ content = 'An error occurred while interpreting directive bookmark: %r' % exc
+ set_classes(options)
+ return [nodes.raw('', content, format='html')], []
+
+
+def winclude_directive(name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ """Include a reST file as part of the content of this reST file.
+
+ same as standard include directive but using config.locate_doc_resource to
+ get actual file to include.
+
+ Most part of this implementation is copied from `include` directive defined
+ in `docutils.parsers.rst.directives.misc`
+ """
+ context = state.document.settings.context
+ cw = context._cw
+ source = state_machine.input_lines.source(
+ lineno - state_machine.input_offset - 1)
+ #source_dir = os.path.dirname(os.path.abspath(source))
+ fid = arguments[0]
+ for lang in chain((cw.lang, cw.vreg.property_value('ui.language')),
+ cw.vreg.config.available_languages()):
+ rid = '%s_%s.rst' % (fid, lang)
+ resourcedir = cw.vreg.config.locate_doc_file(rid)
+ if resourcedir:
+ break
+ else:
+ severe = state_machine.reporter.severe(
+ 'Problems with "%s" directive path:\nno resource matching %s.'
+ % (name, fid),
+ nodes.literal_block(block_text, block_text), line=lineno)
+ return [severe]
+ path = join(resourcedir, rid)
+ encoding = options.get('encoding', state.document.settings.input_encoding)
+ try:
+ state.document.settings.record_dependencies.add(path)
+ include_file = io.FileInput(
+ source_path=path, encoding=encoding,
+ error_handler=state.document.settings.input_encoding_error_handler,
+ handle_io_errors=None)
+ except IOError as error:
+ severe = state_machine.reporter.severe(
+ 'Problems with "%s" directive path:\n%s: %s.'
+ % (name, error.__class__.__name__, error),
+ nodes.literal_block(block_text, block_text), line=lineno)
+ return [severe]
+ try:
+ include_text = include_file.read()
+ except UnicodeError as error:
+ severe = state_machine.reporter.severe(
+ 'Problem with "%s" directive:\n%s: %s'
+ % (name, error.__class__.__name__, error),
+ nodes.literal_block(block_text, block_text), line=lineno)
+ return [severe]
+ if 'literal' in options:
+ literal_block = nodes.literal_block(include_text, include_text,
+ source=path)
+ literal_block.line = 1
+ return literal_block
+ else:
+ include_lines = statemachine.string2lines(include_text,
+ convert_whitespace=1)
+ state_machine.insert_input(include_lines, path)
+ return []
+
+winclude_directive.arguments = (1, 0, 1)
+winclude_directive.options = {'literal': directives.flag,
+ 'encoding': directives.encoding}
+
+
+class RQLTableDirective(Directive):
+ """rql-table directive
+
+ Example:
+
+ .. rql-table::
+ :vid: mytable
+ :headers: , , progress
+ :colvids: 2=progress
+
+ Any X,U,X WHERE X is Project, X url U
+
+ All fields but the RQL string are optional. The ``:headers:`` option can
+ contain empty column names.
+ """
+
+ required_arguments = 0
+ optional_arguments = 0
+ has_content= True
+ final_argument_whitespace = True
+ option_spec = {'vid': directives.unchanged,
+ 'headers': directives.unchanged,
+ 'colvids': directives.unchanged}
+
+ def run(self):
+ errid = "rql-table directive"
+ self.assert_has_content()
+ if self.arguments:
+ raise self.warning('%s does not accept arguments' % errid)
+ rql = ' '.join([l.strip() for l in self.content])
+ _cw = self.state.document.settings.context._cw
+ _cw.ensure_ro_rql(rql)
+ try:
+ rset = _cw.execute(rql)
+ except Exception as exc:
+ raise self.error("fail to execute RQL query in %s: %r" %
+ (errid, exc))
+ if not rset:
+ raise self.warning("empty result set")
+ vid = self.options.get('vid', 'table')
+ try:
+ view = _cw.vreg['views'].select(vid, _cw, rset=rset)
+ except Exception as exc:
+ raise self.error("fail to select '%s' view in %s: %r" %
+ (vid, errid, exc))
+ headers = None
+ if 'headers' in self.options:
+ headers = [h.strip() for h in self.options['headers'].split(',')]
+ while headers.count(''):
+ headers[headers.index('')] = None
+ if len(headers) != len(rset[0]):
+ raise self.error("the number of 'headers' does not match the "
+ "number of columns in %s" % errid)
+ cellvids = None
+ if 'colvids' in self.options:
+ cellvids = {}
+ for f in self.options['colvids'].split(','):
+ try:
+ idx, vid = f.strip().split('=')
+ except ValueError:
+ raise self.error("malformatted 'colvids' option in %s" %
+ errid)
+ cellvids[int(idx.strip())] = vid.strip()
+ try:
+ content = view.render(headers=headers, cellvids=cellvids)
+ except Exception as exc:
+ raise self.error("Error rendering %s (%s)" % (errid, exc))
+ return [nodes.raw('', content, format='html')]
+
+
+try:
+ from pygments import highlight
+ from pygments.lexers import get_lexer_by_name
+ from pygments.formatters.html import HtmlFormatter
+except ImportError:
+ pygments_directive = None
+else:
+ _PYGMENTS_FORMATTER = HtmlFormatter()
+
+ def pygments_directive(name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ try:
+ lexer = get_lexer_by_name(arguments[0])
+ except ValueError:
+ # no lexer found
+ lexer = get_lexer_by_name('text')
+ parsed = highlight(u'\n'.join(content), lexer, _PYGMENTS_FORMATTER)
+ # don't fail if no context set on the sourcecode directive
+ try:
+ context = state.document.settings.context
+ context._cw.add_css('pygments.css')
+ except AttributeError:
+ # used outside cubicweb XXX use hasattr instead
+ pass
+ return [nodes.raw('', parsed, format='html')]
+
+ pygments_directive.arguments = (1, 0, 1)
+ pygments_directive.content = 1
+
+
+class CubicWebReSTParser(Parser):
+ """The (customized) reStructuredText parser."""
+
+ def __init__(self):
+ self.initial_state = 'Body'
+ self.state_classes = states.state_classes
+ self.inliner = states.Inliner()
+ self.statemachine = states.RSTStateMachine(
+ state_classes=self.state_classes,
+ initial_state=self.initial_state,
+ debug=0)
+
+ def parse(self, inputstring, document):
+ """Parse `inputstring` and populate `document`, a document tree."""
+ self.setup_parse(inputstring, document)
+ inputlines = statemachine.string2lines(inputstring,
+ convert_whitespace=1)
+ self.statemachine.run(inputlines, document, inliner=self.inliner)
+ self.finish_parse()
+
+
+# XXX docutils keep a ref on context, can't find a correct way to remove it
+class CWReSTPublisher(Publisher):
+ def __init__(self, context, settings, **kwargs):
+ Publisher.__init__(self, **kwargs)
+ self.set_components('standalone', 'restructuredtext', 'pseudoxml')
+ self.process_programmatic_settings(None, settings, None)
+ self.settings.context = context
+
+
+def rest_publish(context, data):
+ """publish a string formatted as ReStructured Text to HTML
+
+ :type context: a cubicweb application object
+
+ :type data: str
+ :param data: some ReST text
+
+ :rtype: unicode
+ :return:
+ the data formatted as HTML or the original data if an error occurred
+ """
+ req = context._cw
+ if isinstance(data, text_type):
+ encoding = 'unicode'
+ # remove unprintable characters unauthorized in xml
+ data = data.translate(ESC_UCAR_TABLE)
+ else:
+ encoding = req.encoding
+ # remove unprintable characters unauthorized in xml
+ data = data.translate(ESC_CAR_TABLE)
+ settings = {'input_encoding': encoding, 'output_encoding': 'unicode',
+ 'warning_stream': False,
+ 'traceback': True, # don't sys.exit
+ 'stylesheet': None, # don't try to embed stylesheet (may cause
+ # obscure bug due to docutils computing
+ # relative path according to the directory
+ # used *at import time*
+ # dunno what's the max, severe is 4, and we never want a crash
+ # (though try/except may be a better option...). May be the
+ # above traceback option will avoid this?
+ 'halt_level': 10,
+ # disable stupid switch to colspan=2 if field name is above a size limit
+ 'field_name_limit': sys.maxsize,
+ }
+ if context:
+ if hasattr(req, 'url'):
+ base_url = req.url()
+ elif hasattr(context, 'absolute_url'):
+ base_url = context.absolute_url()
+ else:
+ base_url = req.base_url()
+ else:
+ base_url = None
+ try:
+ pub = CWReSTPublisher(context, settings,
+ parser=CubicWebReSTParser(),
+ writer=Writer(base_url=base_url),
+ source_class=io.StringInput,
+ destination_class=io.StringOutput)
+ pub.set_source(data)
+ pub.set_destination()
+ res = pub.publish(enable_exit_status=None)
+ # necessary for proper garbage collection, else a ref is kept somewhere in docutils...
+ del pub.settings.context
+ return res
+ except BaseException:
+ LOGGER.exception('error while publishing ReST text')
+ if not isinstance(data, text_type):
+ data = text_type(data, encoding, 'replace')
+ return xml_escape(req._('error while publishing ReST text')
+ + '\n\n' + data)
+
+
+_INITIALIZED = False
+def cw_rest_init():
+ global _INITIALIZED
+ if _INITIALIZED:
+ return
+ _INITIALIZED = True
+ register_canonical_role('eid', eid_reference_role)
+ register_canonical_role('rql', rql_role)
+ register_canonical_role('bookmark', bookmark_role)
+ directives.register_directive('winclude', winclude_directive)
+ if pygments_directive is not None:
+ directives.register_directive('sourcecode', pygments_directive)
+ directives.register_directive('rql-table', RQLTableDirective)
diff -r e1caf133b81c -r b23d58050076 cubicweb/ext/tal.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/ext/tal.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,273 @@
+# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""provides simpleTAL extensions for CubicWeb
+
+"""
+
+__docformat__ = "restructuredtext en"
+
+import sys
+import re
+from os.path import exists, isdir, join
+from logging import getLogger
+from StringIO import StringIO
+
+from simpletal import simpleTAL, simpleTALES
+
+from logilab.common.decorators import cached
+
+LOGGER = getLogger('cubicweb.tal')
+
+
+class LoggerAdapter(object):
+ def __init__(self, tal_logger):
+ self.tal_logger = tal_logger
+
+ def debug(self, msg):
+ LOGGER.debug(msg)
+
+ def warn(self, msg):
+ LOGGER.warning(msg)
+
+ def __getattr__(self, attrname):
+ return getattr(self.tal_logger, attrname)
+
+
+class CubicWebContext(simpleTALES.Context):
+ """add facilities to access entity / resultset"""
+
+ def __init__(self, options=None, allowPythonPath=1):
+ simpleTALES.Context.__init__(self, options, allowPythonPath)
+ self.log = LoggerAdapter(self.log)
+
+ def update(self, context):
+ for varname, value in context.items():
+ self.addGlobal(varname, value)
+
+ def addRepeat(self, name, var, initialValue):
+ simpleTALES.Context.addRepeat(self, name, var, initialValue)
+
+# XXX FIXME need to find a clean way to define OPCODE values for extensions
+I18N_CONTENT = 18
+I18N_REPLACE = 19
+RQL_EXECUTE = 20
+# simpleTAL uses the OPCODE values to define priority over commands.
+# TAL_ITER should have the same priority as TAL_REPEAT (i.e. 3), but
+# we can't use the same OPCODE for two different commands without changing
+# the simpleTAL implementation. Another solution would be to totally override
+# the REPEAT implementation with the ITER one, but some specific operations
+# (involving len() for instance) are not implemented for ITER, so we prefer
+# to keep both implementations for now, and to fool simpleTAL by using a float
+# number between 3 and 4
+TAL_ITER = 3.1
+
+
+# FIX simpleTAL HTML 4.01 stupidity
+# (simpleTAL never closes tags like INPUT, IMG, HR ...)
+simpleTAL.HTML_FORBIDDEN_ENDTAG.clear()
+
+class CubicWebTemplateCompiler(simpleTAL.HTMLTemplateCompiler):
+ """extends default compiler by adding i18n:content commands"""
+
+ def __init__(self):
+ simpleTAL.HTMLTemplateCompiler.__init__(self)
+ self.commandHandler[I18N_CONTENT] = self.compile_cmd_i18n_content
+ self.commandHandler[I18N_REPLACE] = self.compile_cmd_i18n_replace
+ self.commandHandler[RQL_EXECUTE] = self.compile_cmd_rql
+ self.commandHandler[TAL_ITER] = self.compile_cmd_tal_iter
+
+ def setTALPrefix(self, prefix):
+ simpleTAL.TemplateCompiler.setTALPrefix(self, prefix)
+ self.tal_attribute_map['i18n:content'] = I18N_CONTENT
+ self.tal_attribute_map['i18n:replace'] = I18N_REPLACE
+ self.tal_attribute_map['rql:execute'] = RQL_EXECUTE
+ self.tal_attribute_map['tal:iter'] = TAL_ITER
+
+ def compile_cmd_i18n_content(self, argument):
+ # XXX tal:content structure=, text= should we support this ?
+ structure_flag = 0
+ return (I18N_CONTENT, (argument, False, structure_flag, self.endTagSymbol))
+
+ def compile_cmd_i18n_replace(self, argument):
+ # XXX tal:content structure=, text= should we support this ?
+ structure_flag = 0
+ return (I18N_CONTENT, (argument, True, structure_flag, self.endTagSymbol))
+
+ def compile_cmd_rql(self, argument):
+ return (RQL_EXECUTE, (argument, self.endTagSymbol))
+
+ def compile_cmd_tal_iter(self, argument):
+ original_id, (var_name, expression, end_tag_symbol) = \
+ simpleTAL.HTMLTemplateCompiler.compileCmdRepeat(self, argument)
+ return (TAL_ITER, (var_name, expression, self.endTagSymbol))
+
+ def getTemplate(self):
+ return CubicWebTemplate(self.commandList, self.macroMap, self.symbolLocationTable)
+
+ def compileCmdAttributes (self, argument):
+ """XXX modified to support single attribute
+ definition ending by a ';'
+
+ backport this to simpleTAL
+ """
+ # Compile tal:attributes into attribute command
+ # Argument: [(attributeName, expression)]
+
+ # Break up the list of attribute settings first
+ commandArgs = []
+ # We only want to match semi-colons that are not escaped
+ argumentSplitter = re.compile(r'(?.
+
+
+from cubicweb.web.views import tableview
+
+class CustomRsetTableView(tableview.RsetTableView):
+ __regid__ = 'mytable'
diff -r e1caf133b81c -r b23d58050076 cubicweb/ext/test/unittest_rest.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/ext/test/unittest_rest.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,244 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+from six import PY3
+
+from logilab.common.testlib import unittest_main
+from cubicweb.devtools.testlib import CubicWebTC
+
+from cubicweb.ext.rest import rest_publish
+
+class RestTC(CubicWebTC):
+
+ def context(self, req):
+ return req.execute('CWUser X WHERE X login "admin"').get_entity(0, 0)
+
+ def test_eid_role(self):
+ with self.admin_access.web_request() as req:
+ context = self.context(req)
+ self.assertEqual(rest_publish(context, ':eid:`%s`' % context.eid),
+ '
\n')
+
+ def test_bad_rest_no_crash(self):
+ with self.admin_access.web_request() as req:
+ rest_publish(self.context(req), '''
+| card | implication |
+--------------------------
+| 1-1 | N1 = N2 |
+| 1-? | N1 <= N2 |
+| 1-+ | N1 >= N2 |
+| 1-* | N1>0 => N2>0 |
+--------------------------
+| ?-? | N1 # N2 |
+| ?-+ | N1 >= N2 |
+| ?-* | N1 # N2 |
+--------------------------
+| +-+ | N1>0 => N2>0 et |
+| | N2>0 => N1>0 |
+| +-* | N1>+ => N2>0 |
+--------------------------
+| *-* | N1#N2 |
+--------------------------
+
+''')
+
+ def test_disable_field_name_colspan(self):
+ with self.admin_access.web_request() as req:
+ context = self.context(req)
+ value = rest_publish(context, '''my field list:
+
+:a long dumb param name: value
+''')
+ self.assertNotIn('colspan', value)
+
+ def test_rql_role_with_vid(self):
+ with self.admin_access.web_request() as req:
+ context = self.context(req)
+ out = rest_publish(context, ':rql:`Any X ORDERBY XL WHERE X is CWUser, X login XL:table`')
+ self.assertTrue(out.endswith('anon\n'
+ '
\n'))
+
+ def test_rql_role_with_vid_empty_rset(self):
+ with self.admin_access.web_request() as req:
+ context = self.context(req)
+ out = rest_publish(context, ':rql:`Any X WHERE X is CWUser, X login "nono":table`')
+ self.assertTrue(out.endswith('
'
+ 'No result matching query
\n\n'))
+
+ def test_rql_role_with_unknown_vid(self):
+ with self.admin_access.web_request() as req:
+ context = self.context(req)
+ out = rest_publish(context, ':rql:`Any X WHERE X is CWUser:toto`')
+ self.assertTrue(out.startswith("
an error occurred while interpreting this "
+ "rql directive: ObjectNotFound(%s'toto',)
" %
+ ('' if PY3 else 'u')),
+ out)
+
+ def test_rql_role_without_vid(self):
+ with self.admin_access.web_request() as req:
+ context = self.context(req)
+ out = rest_publish(context, ':rql:`Any X,XL ORDERBY XL WHERE X is CWUser, X login XL`')
+ self.assertEqual(out, u'
\n')
+
+ def test_rqltable_nocontent(self):
+ with self.admin_access.web_request() as req:
+ context = self.context(req)
+ out = rest_publish(context, """.. rql-table::""")
+ self.assertIn("System Message: ERROR", out)
+ self.assertIn("Content block expected for the &quot;rql-table&quot; "
+ "directive; none found" , out)
+
+ def test_rqltable_norset(self):
+ with self.admin_access.web_request() as req:
+ context = self.context(req)
+ rql = "Any X WHERE X is CWUser, X firstname 'franky'"
+ out = rest_publish(
+ context, """\
+.. rql-table::
+
+ %(rql)s""" % {'rql': rql})
+ self.assertIn("System Message: WARNING", out)
+ self.assertIn("empty result set", out)
+
+ def test_rqltable_nooptions(self):
+ with self.admin_access.web_request() as req:
+ rql = "Any S,F,L WHERE X is CWUser, X surname S, X firstname F, X login L"
+ out = rest_publish(
+ self.context(req), """\
+.. rql-table::
+
+ %(rql)s
+ """ % {'rql': rql})
+ view = self.vreg['views'].select('table', req, rset=req.execute(rql))
+ self.assertEqual(view.render(w=None)[49:], out[49:])
+
+ def test_rqltable_vid(self):
+ with self.admin_access.web_request() as req:
+ rql = "Any S,F,L WHERE X is CWUser, X surname S, X firstname F, X login L"
+ vid = 'mytable'
+ out = rest_publish(
+ self.context(req), """\
+.. rql-table::
+ :vid: %(vid)s
+
+ %(rql)s
+ """ % {'rql': rql, 'vid': vid})
+ view = self.vreg['views'].select(vid, req, rset=req.execute(rql))
+ self.assertEqual(view.render(w=None)[49:], out[49:])
+ self.assertIn(vid, out[:49])
+
+ def test_rqltable_badvid(self):
+ with self.admin_access.web_request() as req:
+ rql = "Any S,F,L WHERE X is CWUser, X surname S, X firstname F, X login L"
+ vid = 'mytabel'
+ out = rest_publish(
+ self.context(req), """\
+.. rql-table::
+ :vid: %(vid)s
+
+ %(rql)s
+ """ % {'rql': rql, 'vid': vid})
+ self.assertIn("fail to select '%s' view" % vid, out)
+
+ def test_rqltable_headers(self):
+ with self.admin_access.web_request() as req:
+ rql = "Any S,F,L WHERE X is CWUser, X surname S, X firstname F, X login L"
+ headers = ["nom", "prenom", "identifiant"]
+ out = rest_publish(
+ self.context(req), """\
+.. rql-table::
+ :headers: %(headers)s
+
+ %(rql)s
+ """ % {'rql': rql, 'headers': ', '.join(headers)})
+ view = self.vreg['views'].select('table', req, rset=req.execute(rql))
+ view.headers = headers
+ self.assertEqual(view.render(w=None)[49:], out[49:])
+
+ def test_rqltable_headers_missing(self):
+ with self.admin_access.web_request() as req:
+ rql = "Any S,F,L WHERE X is CWUser, X surname S, X firstname F, X login L"
+ headers = ["nom", "", "identifiant"]
+ out = rest_publish(
+ self.context(req), """\
+.. rql-table::
+ :headers: %(headers)s
+
+ %(rql)s
+ """ % {'rql': rql, 'headers': ', '.join(headers)})
+ view = self.vreg['views'].select('table', req, rset=req.execute(rql))
+ view.headers = [headers[0], None, headers[2]]
+ self.assertEqual(view.render(w=None)[49:], out[49:])
+
+ def test_rqltable_headers_missing_edges(self):
+ with self.admin_access.web_request() as req:
+ rql = "Any S,F,L WHERE X is CWUser, X surname S, X firstname F, X login L"
+ headers = [" ", "prenom", ""]
+ out = rest_publish(
+ self.context(req), """\
+.. rql-table::
+ :headers: %(headers)s
+
+ %(rql)s
+ """ % {'rql': rql, 'headers': ', '.join(headers)})
+ view = self.vreg['views'].select('table', req, rset=req.execute(rql))
+ view.headers = [None, headers[1], None]
+ self.assertEqual(view.render(w=None)[49:], out[49:])
+
+ def test_rqltable_colvids(self):
+ with self.admin_access.web_request() as req:
+ rql = "Any X,S,F,L WHERE X is CWUser, X surname S, X firstname F, X login L"
+ colvids = {0: "oneline"}
+ out = rest_publish(
+ self.context(req), """\
+.. rql-table::
+ :colvids: %(colvids)s
+
+ %(rql)s
+ """ % {'rql': rql,
+ 'colvids': ', '.join(["%d=%s" % (k, v)
+ for k, v in colvids.items()])
+ })
+ view = self.vreg['views'].select('table', req, rset=req.execute(rql))
+ view.cellvids = colvids
+ self.assertEqual(view.render(w=None)[49:], out[49:])
+
+
+if __name__ == '__main__':
+ unittest_main()
diff -r e1caf133b81c -r b23d58050076 cubicweb/hooks/__init__.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/__init__.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,84 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""core hooks registering some maintenance tasks at server startup time"""
+
+__docformat__ = "restructuredtext en"
+
+from datetime import timedelta, datetime
+
+from cubicweb.server import hook
+
+class TransactionsCleanupStartupHook(hook.Hook):
+ """start task to cleanup transaction data"""
+ __regid__ = 'cw.looping-tasks.transactions-cleanup'
+ events = ('server_startup',)
+
+ def __call__(self):
+ # XXX use named args and inner functions to avoid referencing globals
+ # which may cause reloading pb
+ lifetime = timedelta(days=self.repo.config['keep-transaction-lifetime'])
+ def cleanup_old_transactions(repo=self.repo, lifetime=lifetime):
+ mindate = datetime.utcnow() - lifetime
+ with repo.internal_cnx() as cnx:
+ cnx.system_sql(
+ 'DELETE FROM transactions WHERE tx_time < %(time)s',
+ {'time': mindate})
+ cnx.commit()
+ if self.repo.config['undo-enabled']:
+ self.repo.looping_task(60*60*24, cleanup_old_transactions,
+ self.repo)
+
+class UpdateFeedsStartupHook(hook.Hook):
+ """start task to update datafeed based sources"""
+ __regid__ = 'cw.looping-tasks.update-feeds'
+ events = ('server_startup',)
+
+ def __call__(self):
+ def update_feeds(repo):
+ # take a list to avoid iterating on a dictionary whose size may
+ # change
+ for uri, source in list(repo.sources_by_uri.items()):
+ if (uri == 'system'
+ or not repo.config.source_enabled(source)
+ or not source.config['synchronize']):
+ continue
+ with repo.internal_cnx() as cnx:
+ try:
+ source.pull_data(cnx)
+ except Exception as exc:
+ cnx.exception('while trying to update feed %s', source)
+ self.repo.looping_task(60, update_feeds, self.repo)
+
+
+class DataImportsCleanupStartupHook(hook.Hook):
+ """start task to cleanup old data imports (ie datafeed import logs)"""
+ __regid__ = 'cw.looping-tasks.dataimports-cleanup'
+ events = ('server_startup',)
+
+ def __call__(self):
+ def expire_dataimports(repo=self.repo):
+ for uri, source in repo.sources_by_uri.items():
+ if (uri == 'system'
+ or not repo.config.source_enabled(source)):
+ continue
+ with repo.internal_cnx() as cnx:
+ mindate = datetime.utcnow() - timedelta(seconds=source.config['logs-lifetime'])
+ cnx.execute('DELETE CWDataImport X WHERE X start_timestamp < %(time)s',
+ {'time': mindate})
+ cnx.commit()
+ self.repo.looping_task(60*60*24, expire_dataimports, self.repo)
diff -r e1caf133b81c -r b23d58050076 cubicweb/hooks/bookmark.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/bookmark.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,42 @@
+# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""bookmark related hooks"""
+
+__docformat__ = "restructuredtext en"
+
+from cubicweb.server import hook
+
+
+class AutoDeleteBookmarkOp(hook.Operation):
+ bookmark = None # make pylint happy
+ def precommit_event(self):
+ if not self.cnx.deleted_in_transaction(self.bookmark.eid):
+ if not self.bookmark.bookmarked_by:
+ self.bookmark.cw_delete()
+
+
+class DelBookmarkedByHook(hook.Hook):
+ """auto-delete a bookmark when its last bookmarked_by relation is removed"""
+ __regid__ = 'autodelbookmark'
+ __select__ = hook.Hook.__select__ & hook.match_rtype('bookmarked_by',)
+ category = 'bookmark'
+ events = ('after_delete_relation',)
+
+ def __call__(self):
+ AutoDeleteBookmarkOp(self._cw,
+ bookmark=self._cw.entity_from_eid(self.eidfrom))
diff -r e1caf133b81c -r b23d58050076 cubicweb/hooks/email.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/email.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,80 @@
+# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""hooks to ensure use_email / primary_email relations consistency"""
+
+__docformat__ = "restructuredtext en"
+
+from cubicweb.server import hook
+
+
+class SetUseEmailRelationOp(hook.Operation):
+ """delay this operation to commit to avoid conflict with a late rql query
+ already setting the relation
+ """
+ rtype = 'use_email'
+ entity = email = None # make pylint happy
+
+ def condition(self):
+ """check entity has use_email set for the email address"""
+ return not any(e for e in self.entity.use_email
+ if self.email.eid == e.eid)
+
+ def precommit_event(self):
+ if self.cnx.deleted_in_transaction(self.entity.eid):
+ return
+ if self.cnx.deleted_in_transaction(self.email.eid):
+ return
+ if self.condition():
+ self.cnx.execute(
+ 'SET X %s Y WHERE X eid %%(x)s, Y eid %%(y)s' % self.rtype,
+ {'x': self.entity.eid, 'y': self.email.eid})
+
+
+class SetPrimaryEmailRelationOp(SetUseEmailRelationOp):
+ rtype = 'primary_email'
+
+ def condition(self):
+ """check entity has no primary_email set"""
+ return not self.entity.primary_email
+
+
+class SetPrimaryEmailHook(hook.Hook):
+ """set the primary_email relation when a use_email relation is added"""
+ __regid__ = 'setprimaryemail'
+ __select__ = hook.Hook.__select__ & hook.match_rtype('use_email')
+ category = 'email'
+ events = ('after_add_relation',)
+
+ def __call__(self):
+ entity = self._cw.entity_from_eid(self.eidfrom)
+ if 'primary_email' in entity.e_schema.subject_relations():
+ SetPrimaryEmailRelationOp(self._cw, entity=entity,
+ email=self._cw.entity_from_eid(self.eidto))
+
+class SetUseEmailHook(hook.Hook):
+ """set the use_email relation when a primary_email relation is added"""
+ __regid__ = 'setprimaryemail'
+ __select__ = hook.Hook.__select__ & hook.match_rtype('primary_email')
+ category = 'email'
+ events = ('after_add_relation',)
+
+ def __call__(self):
+ entity = self._cw.entity_from_eid(self.eidfrom)
+ if 'use_email' in entity.e_schema.subject_relations():
+ SetUseEmailRelationOp(self._cw, entity=entity,
+ email=self._cw.entity_from_eid(self.eidto))
diff -r e1caf133b81c -r b23d58050076 cubicweb/hooks/integrity.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/integrity.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,328 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""Core hooks: check for data integrity according to the instance's schema
+validity
+"""
+
+__docformat__ = "restructuredtext en"
+from cubicweb import _
+
+from threading import Lock
+
+from six import text_type
+
+from cubicweb import validation_error, neg_role
+from cubicweb.schema import (META_RTYPES, WORKFLOW_RTYPES,
+ RQLConstraint, RQLUniqueConstraint)
+from cubicweb.predicates import is_instance, composite_etype
+from cubicweb.uilib import soup2xhtml
+from cubicweb.server import hook
+
+# special relations that don't have to be checked for integrity, usually
+# because they are handled internally by hooks (so we trust ourselves)
+DONT_CHECK_RTYPES_ON_ADD = META_RTYPES | WORKFLOW_RTYPES
+DONT_CHECK_RTYPES_ON_DEL = META_RTYPES | WORKFLOW_RTYPES
+
+_UNIQUE_CONSTRAINTS_LOCK = Lock()
+_UNIQUE_CONSTRAINTS_HOLDER = None
+
+
+def _acquire_unique_cstr_lock(cnx):
+ """acquire the _UNIQUE_CONSTRAINTS_LOCK for the cnx.
+
+ This lock is used to avoid potential integrity problems when checking
+ RQLUniqueConstraint in two different transactions, as explained in
+ https://extranet.logilab.fr/3577926
+ """
+ if 'uniquecstrholder' in cnx.transaction_data:
+ return
+ _UNIQUE_CONSTRAINTS_LOCK.acquire()
+ cnx.transaction_data['uniquecstrholder'] = True
+ # register operation responsible to release the lock on commit/rollback
+ _ReleaseUniqueConstraintsOperation(cnx)
+
+def _release_unique_cstr_lock(cnx):
+ if 'uniquecstrholder' in cnx.transaction_data:
+ del cnx.transaction_data['uniquecstrholder']
+ _UNIQUE_CONSTRAINTS_LOCK.release()
+
+class _ReleaseUniqueConstraintsOperation(hook.Operation):
+ def postcommit_event(self):
+ _release_unique_cstr_lock(self.cnx)
+ def rollback_event(self):
+ _release_unique_cstr_lock(self.cnx)
+
+
+class _CheckRequiredRelationOperation(hook.DataOperationMixIn,
+ hook.LateOperation):
+ """checking relation cardinality has to be done after commit in case the
+ relation is being replaced
+ """
+ containercls = list
+ role = key = base_rql = None
+
+ def precommit_event(self):
+ cnx = self.cnx
+ pendingeids = cnx.transaction_data.get('pendingeids', ())
+ pendingrtypes = cnx.transaction_data.get('pendingrtypes', ())
+ for eid, rtype in self.get_data():
+ # recheck pending eids / relation types
+ if eid in pendingeids:
+ continue
+ if rtype in pendingrtypes:
+ continue
+ if not cnx.execute(self.base_rql % rtype, {'x': eid}):
+ etype = cnx.entity_metas(eid)['type']
+ msg = _('at least one relation %(rtype)s is required on '
+ '%(etype)s (%(eid)s)')
+ raise validation_error(eid, {(rtype, self.role): msg},
+ {'rtype': rtype, 'etype': etype, 'eid': eid},
+ ['rtype', 'etype'])
+
+
+class _CheckSRelationOp(_CheckRequiredRelationOperation):
+ """check required subject relation"""
+ role = 'subject'
+ base_rql = 'Any O WHERE S eid %%(x)s, S %s O'
+
+class _CheckORelationOp(_CheckRequiredRelationOperation):
+ """check required object relation"""
+ role = 'object'
+ base_rql = 'Any S WHERE O eid %%(x)s, S %s O'
+
+
+class IntegrityHook(hook.Hook):
+ __abstract__ = True
+ category = 'integrity'
+
+
+class _EnsureSymmetricRelationsAdd(hook.Hook):
+ """ ensure X r Y => Y r X iff r is symmetric """
+ __regid__ = 'cw.add_ensure_symmetry'
+ __abstract__ = True
+ category = 'activeintegrity'
+ events = ('after_add_relation',)
+ # __select__ is set in the registration callback
+
+ def __call__(self):
+ self._cw.repo.system_source.add_relation(self._cw, self.eidto,
+ self.rtype, self.eidfrom)
+
+
+class _EnsureSymmetricRelationsDelete(hook.Hook):
+ """ ensure X r Y => Y r X iff r is symmetric """
+ __regid__ = 'cw.delete_ensure_symmetry'
+ __abstract__ = True
+ category = 'activeintegrity'
+ events = ('after_delete_relation',)
+ # __select__ is set in the registration callback
+
+ def __call__(self):
+ self._cw.repo.system_source.delete_relation(self._cw, self.eidto,
+ self.rtype, self.eidfrom)
+
+
+class CheckCardinalityHookBeforeDeleteRelation(IntegrityHook):
+ """check cardinalities are satisfied"""
+ __regid__ = 'checkcard_before_delete_relation'
+ events = ('before_delete_relation',)
+
+ def __call__(self):
+ rtype = self.rtype
+ if rtype in DONT_CHECK_RTYPES_ON_DEL:
+ return
+ cnx = self._cw
+ eidfrom, eidto = self.eidfrom, self.eidto
+ rdef = cnx.rtype_eids_rdef(rtype, eidfrom, eidto)
+ if (rdef.subject, rtype, rdef.object) in cnx.transaction_data.get('pendingrdefs', ()):
+ return
+ card = rdef.cardinality
+ if card[0] in '1+' and not cnx.deleted_in_transaction(eidfrom):
+ _CheckSRelationOp.get_instance(cnx).add_data((eidfrom, rtype))
+ if card[1] in '1+' and not cnx.deleted_in_transaction(eidto):
+ _CheckORelationOp.get_instance(cnx).add_data((eidto, rtype))
+
+
+class CheckCardinalityHookAfterAddEntity(IntegrityHook):
+ """check cardinalities are satisfied"""
+ __regid__ = 'checkcard_after_add_entity'
+ events = ('after_add_entity',)
+
+ def __call__(self):
+ eid = self.entity.eid
+ eschema = self.entity.e_schema
+ for rschema, targetschemas, role in eschema.relation_definitions():
+ # skip automatically handled relations
+ if rschema.type in DONT_CHECK_RTYPES_ON_ADD:
+ continue
+ rdef = rschema.role_rdef(eschema, targetschemas[0], role)
+ if rdef.role_cardinality(role) in '1+':
+ if role == 'subject':
+ op = _CheckSRelationOp.get_instance(self._cw)
+ else:
+ op = _CheckORelationOp.get_instance(self._cw)
+ op.add_data((eid, rschema.type))
+
+
+class _CheckConstraintsOp(hook.DataOperationMixIn, hook.LateOperation):
+ """ check a new relation satisfy its constraints """
+ containercls = list
+ def precommit_event(self):
+ cnx = self.cnx
+ for values in self.get_data():
+ eidfrom, rtype, eidto, constraints = values
+ # first check related entities have not been deleted in the same
+ # transaction
+ if cnx.deleted_in_transaction(eidfrom):
+ continue
+ if cnx.deleted_in_transaction(eidto):
+ continue
+ for constraint in constraints:
+ # XXX
+ # * lock RQLConstraint as well?
+ # * use a constraint id to use per constraint lock and avoid
+ # unnecessary commit serialization ?
+ if isinstance(constraint, RQLUniqueConstraint):
+ _acquire_unique_cstr_lock(cnx)
+ try:
+ constraint.repo_check(cnx, eidfrom, rtype, eidto)
+ except NotImplementedError:
+ self.critical('can\'t check constraint %s, not supported',
+ constraint)
+
+
+class CheckConstraintHook(IntegrityHook):
+ """check the relation satisfy its constraints
+
+ this is delayed to a precommit time operation since other relation which
+ will make constraint satisfied (or unsatisfied) may be added later.
+ """
+ __regid__ = 'checkconstraint'
+ events = ('after_add_relation',)
+
+ def __call__(self):
+ # XXX get only RQL[Unique]Constraints?
+ rdef = self._cw.rtype_eids_rdef(self.rtype, self.eidfrom, self.eidto)
+ constraints = rdef.constraints
+ if constraints:
+ _CheckConstraintsOp.get_instance(self._cw).add_data(
+ (self.eidfrom, self.rtype, self.eidto, constraints))
+
+
+class CheckAttributeConstraintHook(IntegrityHook):
+ """check the attribute relation satisfy its constraints
+
+ this is delayed to a precommit time operation since other relation which
+ will make constraint satisfied (or unsatisfied) may be added later.
+ """
+ __regid__ = 'checkattrconstraint'
+ events = ('after_add_entity', 'after_update_entity')
+
+ def __call__(self):
+ eschema = self.entity.e_schema
+ for attr in self.entity.cw_edited:
+ if eschema.subjrels[attr].final:
+ constraints = [c for c in eschema.rdef(attr).constraints
+ if isinstance(c, (RQLUniqueConstraint, RQLConstraint))]
+ if constraints:
+ _CheckConstraintsOp.get_instance(self._cw).add_data(
+ (self.entity.eid, attr, None, constraints))
+
+
+class DontRemoveOwnersGroupHook(IntegrityHook):
+ """delete the composed of a composite relation when this relation is deleted
+ """
+ __regid__ = 'checkownersgroup'
+ __select__ = IntegrityHook.__select__ & is_instance('CWGroup')
+ events = ('before_delete_entity', 'before_update_entity')
+
+ def __call__(self):
+ entity = self.entity
+ if self.event == 'before_delete_entity' and entity.name == 'owners':
+ raise validation_error(entity, {None: _("can't be deleted")})
+ elif self.event == 'before_update_entity' \
+ and 'name' in entity.cw_edited:
+ oldname, newname = entity.cw_edited.oldnewvalue('name')
+ if oldname == 'owners' and newname != oldname:
+ raise validation_error(entity, {('name', 'subject'): _("can't be changed")})
+
+
+class TidyHtmlFields(IntegrityHook):
+ """tidy HTML in rich text strings"""
+ __regid__ = 'htmltidy'
+ events = ('before_add_entity', 'before_update_entity')
+
+ def __call__(self):
+ entity = self.entity
+ metaattrs = entity.e_schema.meta_attributes()
+ edited = entity.cw_edited
+ for metaattr, (metadata, attr) in metaattrs.items():
+ if metadata == 'format' and attr in edited:
+ try:
+ value = edited[attr]
+ except KeyError:
+ continue # no text to tidy
+ if isinstance(value, text_type): # filter out None and Binary
+ if getattr(entity, str(metaattr)) == 'text/html':
+ edited[attr] = soup2xhtml(value, self._cw.encoding)
+
+
+class StripCWUserLoginHook(IntegrityHook):
+ """ensure user logins are stripped"""
+ __regid__ = 'stripuserlogin'
+ __select__ = IntegrityHook.__select__ & is_instance('CWUser')
+ events = ('before_add_entity', 'before_update_entity',)
+
+ def __call__(self):
+ login = self.entity.cw_edited.get('login')
+ if login:
+ self.entity.cw_edited['login'] = login.strip()
+
+
+class DeleteCompositeOrphanHook(hook.Hook):
+ """Delete the composed of a composite relation when the composite is
+ deleted (this is similar to the cascading ON DELETE CASCADE
+ semantics of sql).
+ """
+ __regid__ = 'deletecomposite'
+ __select__ = hook.Hook.__select__ & composite_etype()
+ events = ('before_delete_entity',)
+ category = 'activeintegrity'
+ # give the application's before_delete_entity hooks a chance to run before we cascade
+ order = 99
+
+ def __call__(self):
+ eid = self.entity.eid
+ for rdef, role in self.entity.e_schema.composite_rdef_roles:
+ rtype = rdef.rtype.type
+ target = getattr(rdef, neg_role(role))
+ expr = ('C %s X' % rtype) if role == 'subject' else ('X %s C' % rtype)
+ self._cw.execute('DELETE %s X WHERE C eid %%(c)s, %s' % (target, expr),
+ {'c': eid})
+
+
+def registration_callback(vreg):
+ vreg.register_all(globals().values(), __name__)
+ symmetric_rtypes = [rschema.type for rschema in vreg.schema.relations()
+ if rschema.symmetric]
+ class EnsureSymmetricRelationsAdd(_EnsureSymmetricRelationsAdd):
+ __select__ = _EnsureSymmetricRelationsAdd.__select__ & hook.match_rtype(*symmetric_rtypes)
+ vreg.register(EnsureSymmetricRelationsAdd)
+ class EnsureSymmetricRelationsDelete(_EnsureSymmetricRelationsDelete):
+ __select__ = _EnsureSymmetricRelationsDelete.__select__ & hook.match_rtype(*symmetric_rtypes)
+ vreg.register(EnsureSymmetricRelationsDelete)
diff -r e1caf133b81c -r b23d58050076 cubicweb/hooks/logstats.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/logstats.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,59 @@
+# copyright 2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+
+"""looping task for dumping instance's stats in a file
+"""
+
+__docformat__ = "restructuredtext en"
+
+from datetime import datetime
+import json
+
+from cubicweb.server import hook
+
+class LogStatsStartHook(hook.Hook):
+ """register task to regularly dump instance's stats in a file
+
+ data are stored as one json entry per row
+ """
+ __regid__ = 'cubicweb.hook.logstats.start'
+ events = ('server_startup',)
+
+ def __call__(self):
+ interval = self.repo.config.get('logstat-interval', 0)
+ if interval <= 0:
+ return
+
+ def dump_stats(repo):
+ statsfile = repo.config.get('logstat-file')
+ with repo.internal_cnx() as cnx:
+ stats = cnx.call_service('repo_stats')
+ gcstats = cnx.call_service('repo_gc_stats', nmax=5)
+
+ allstats = {'resources': stats,
+ 'memory': gcstats,
+ 'timestamp': datetime.utcnow().isoformat(),
+ }
+ try:
+ with open(statsfile, 'ab') as ofile:
+ json.dump(allstats, ofile)
+ ofile.write('\n')
+ except IOError:
+ repo.warning('Cannot open stats file for writing: %s', statsfile)
+
+ self.repo.looping_task(interval, dump_stats, self.repo)
diff -r e1caf133b81c -r b23d58050076 cubicweb/hooks/metadata.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/metadata.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,219 @@
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""Core hooks: set generic metadata"""
+
+__docformat__ = "restructuredtext en"
+
+from datetime import datetime
+from base64 import b64encode
+
+from pytz import utc
+
+from cubicweb.predicates import is_instance
+from cubicweb.server import hook
+from cubicweb.server.edition import EditedEntity
+
+
+class MetaDataHook(hook.Hook):
+    """abstract base class for hooks maintaining generic entity metadata"""
+    __abstract__ = True
+    category = 'metadata'
+
+
+class InitMetaAttrsHook(MetaDataHook):
+ """before create a new entity -> set creation and modification date
+
+ this is a conveniency hook, you shouldn't have to disable it
+ """
+ __regid__ = 'metaattrsinit'
+ events = ('before_add_entity',)
+
+ def __call__(self):
+ timestamp = datetime.now(utc)
+ edited = self.entity.cw_edited
+ if not edited.get('creation_date'):
+ edited['creation_date'] = timestamp
+ if not edited.get('modification_date'):
+ edited['modification_date'] = timestamp
+ if not self._cw.transaction_data.get('do-not-insert-cwuri'):
+ cwuri = u'%s%s' % (self._cw.base_url(), self.entity.eid)
+ edited.setdefault('cwuri', cwuri)
+
+
+class UpdateMetaAttrsHook(MetaDataHook):
+ """update an entity -> set modification date"""
+ __regid__ = 'metaattrsupdate'
+ events = ('before_update_entity',)
+
+ def __call__(self):
+ # repairing is true during c-c upgrade/shell and similar commands. We
+ # usually don't want to update modification date in such cases.
+ #
+ # XXX to be really clean, we should turn off modification_date update
+ # explicitly on each command where we do not want that behaviour.
+ if not self._cw.vreg.config.repairing:
+ self.entity.cw_edited.setdefault('modification_date', datetime.now(utc))
+
+
+class SetCreatorOp(hook.DataOperationMixIn, hook.Operation):
+
+ def precommit_event(self):
+ cnx = self.cnx
+ relations = [(eid, cnx.user.eid) for eid in self.get_data()
+ # don't consider entities that have been created and deleted in
+ # the same transaction, nor ones where created_by has been
+ # explicitly set
+ if not cnx.deleted_in_transaction(eid) and \
+ not cnx.entity_from_eid(eid).created_by]
+ cnx.add_relations([('created_by', relations)])
+
+
+class SetOwnershipHook(MetaDataHook):
+ """create a new entity -> set owner and creator metadata"""
+ __regid__ = 'setowner'
+ events = ('after_add_entity',)
+
+ def __call__(self):
+ if not self._cw.is_internal_session:
+ self._cw.add_relation(self.entity.eid, 'owned_by', self._cw.user.eid)
+ SetCreatorOp.get_instance(self._cw).add_data(self.entity.eid)
+
+
+class SyncOwnersOp(hook.DataOperationMixIn, hook.Operation):
+ def precommit_event(self):
+ for compositeeid, composedeid in self.get_data():
+ if self.cnx.deleted_in_transaction(compositeeid):
+ continue
+ if self.cnx.deleted_in_transaction(composedeid):
+ continue
+ self.cnx.execute('SET X owned_by U WHERE C owned_by U, C eid %(c)s,'
+ 'NOT EXISTS(X owned_by U, X eid %(x)s)',
+ {'c': compositeeid, 'x': composedeid})
+
+
+class SyncCompositeOwner(MetaDataHook):
+ """when adding composite relation, the composed should have the same owners
+ has the composite
+ """
+ __regid__ = 'synccompositeowner'
+ events = ('after_add_relation',)
+
+ def __call__(self):
+ if self.rtype == 'wf_info_for':
+ # skip this special composite relation # XXX (syt) why?
+ return
+ eidfrom, eidto = self.eidfrom, self.eidto
+ composite = self._cw.rtype_eids_rdef(self.rtype, eidfrom, eidto).composite
+ if composite == 'subject':
+ SyncOwnersOp.get_instance(self._cw).add_data( (eidfrom, eidto) )
+ elif composite == 'object':
+ SyncOwnersOp.get_instance(self._cw).add_data( (eidto, eidfrom) )
+
+
+class FixUserOwnershipHook(MetaDataHook):
+ """when a user has been created, add owned_by relation on itself"""
+ __regid__ = 'fixuserowner'
+ __select__ = MetaDataHook.__select__ & is_instance('CWUser')
+ events = ('after_add_entity',)
+
+ def __call__(self):
+ self._cw.add_relation(self.entity.eid, 'owned_by', self.entity.eid)
+
+
+class UpdateFTIHook(MetaDataHook):
+ """sync fulltext index text index container when a relation with
+ fulltext_container set is added / removed
+ """
+ __regid__ = 'updateftirel'
+ events = ('after_add_relation', 'after_delete_relation')
+
+ def __call__(self):
+ rtype = self.rtype
+ cnx = self._cw
+ ftcontainer = cnx.vreg.schema.rschema(rtype).fulltext_container
+ if ftcontainer == 'subject':
+ cnx.repo.system_source.index_entity(
+ cnx, cnx.entity_from_eid(self.eidfrom))
+ elif ftcontainer == 'object':
+ cnx.repo.system_source.index_entity(
+ cnx, cnx.entity_from_eid(self.eidto))
+
+
+
+# entity source handling #######################################################
+
+class ChangeEntitySourceUpdateCaches(hook.Operation):
+    """operation synchronizing repository caches after an entity has been
+    moved from an external source to the system source
+    """
+    oldsource = newsource = entity = None # make pylint happy
+
+    def postcommit_event(self):
+        self.oldsource.reset_caches()
+        repo = self.cnx.repo
+        entity = self.entity
+        extid = entity.cw_metainformation()['extid']
+        repo._type_source_cache[entity.eid] = (
+            entity.cw_etype, None, self.newsource.uri)
+        # a negative eid in the extid cache flags "known but moved, don't
+        # reimport" for the external source
+        repo._extid_cache[extid] = -entity.eid
+
+
+class ChangeEntitySourceDeleteHook(MetaDataHook):
+ """support for moving an entity from an external source by watching 'Any
+ cw_source CWSource' relation
+ """
+
+ __regid__ = 'cw.metadata.source-change'
+ __select__ = MetaDataHook.__select__ & hook.match_rtype('cw_source')
+ events = ('before_delete_relation',)
+
+ def __call__(self):
+ if (self._cw.deleted_in_transaction(self.eidfrom)
+ or self._cw.deleted_in_transaction(self.eidto)):
+ return
+ schange = self._cw.transaction_data.setdefault('cw_source_change', {})
+ schange[self.eidfrom] = self.eidto
+
+
+class ChangeEntitySourceAddHook(MetaDataHook):
+    """second half of entity source-change support: when the new cw_source
+    relation is added for an entity recorded by
+    :class:`ChangeEntitySourceDeleteHook`, move the entity's record to the
+    system source
+    """
+    __regid__ = 'cw.metadata.source-change'
+    __select__ = MetaDataHook.__select__ & hook.match_rtype('cw_source')
+    events = ('before_add_relation',)
+
+    def __call__(self):
+        schange = self._cw.transaction_data.get('cw_source_change')
+        if schange is not None and self.eidfrom in schange:
+            newsource = self._cw.entity_from_eid(self.eidto)
+            if newsource.name != 'system':
+                raise Exception('changing source to something else than the '
+                                'system source is unsupported')
+            syssource = newsource.repo_source
+            oldsource = self._cw.entity_from_eid(schange[self.eidfrom])
+            entity = self._cw.entity_from_eid(self.eidfrom)
+            # we don't want the moved entity to be reimported later. To
+            # distinguish this state, move the record from the 'entities' table
+            # to 'moved_entities'. External source will then have to consider
+            # case where `extid2eid` returns a negative eid as 'this entity was
+            # known but has been moved, ignore it'.
+            extid = self._cw.entity_metas(entity.eid)['extid']
+            assert extid is not None
+            attrs = {'eid': entity.eid, 'extid': b64encode(extid).decode('ascii')}
+            self._cw.system_sql(syssource.sqlgen.insert('moved_entities', attrs), attrs)
+            attrs = {'type': entity.cw_etype, 'eid': entity.eid, 'extid': None,
+                     'asource': 'system'}
+            self._cw.system_sql(syssource.sqlgen.update('entities', attrs, ['eid']), attrs)
+            # register an operation to update repository/sources caches
+            ChangeEntitySourceUpdateCaches(self._cw, entity=entity,
+                                           oldsource=oldsource.repo_source,
+                                           newsource=syssource)
diff -r e1caf133b81c -r b23d58050076 cubicweb/hooks/notification.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/notification.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,244 @@
+# copyright 2003-2015 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""some hooks to handle notification on entity's changes"""
+
+__docformat__ = "restructuredtext en"
+
+from logilab.common.textutils import normalize_text
+from logilab.common.deprecation import deprecated
+
+from cubicweb import RegistryNotFound
+from cubicweb.predicates import is_instance
+from cubicweb.server import hook
+from cubicweb.sobjects.supervising import SupervisionMailOp
+
+
+@deprecated('[3.17] use notify_on_commit instead')
+def RenderAndSendNotificationView(cnx, view, viewargs=None):
+    """deprecated backward-compatibility wrapper for :func:`notify_on_commit`"""
+    notify_on_commit(cnx, view, viewargs)
+
+
+def notify_on_commit(cnx, view, viewargs=None):
+ """register a notification view (see
+ :class:`~cubicweb.sobjects.notification.NotificationView`) to be sent at
+ post-commit time, ie only if the transaction has succeeded.
+
+ `viewargs` is an optional dictionary containing extra argument to be given
+ to :meth:`~cubicweb.sobjects.notification.NotificationView.render_and_send`
+ """
+ if viewargs is None:
+ viewargs = {}
+ notif_op = _RenderAndSendNotificationOp.get_instance(cnx)
+ notif_op.add_data((view, viewargs))
+
+
+class _RenderAndSendNotificationOp(hook.DataOperationMixIn, hook.Operation):
+    """End of the notification chain. Do render and send views after commit
+
+    All others Operations end up adding data to this Operation.
+    The notification are done on ``postcommit_event`` to make sure to prevent
+    sending notification about rolled back data.
+    """
+
+    # data is a list of (view, viewargs) pairs, kept in registration order
+    containercls = list
+
+    def postcommit_event(self):
+        deleted = self.cnx.deleted_in_transaction
+        for view, viewargs in self.get_data():
+            if view.cw_rset is not None:
+                if not view.cw_rset:
+                    # entity added and deleted in the same transaction
+                    # (cache effect)
+                    continue
+                elif deleted(view.cw_rset[view.cw_row or 0][view.cw_col or 0]):
+                    # entity added and deleted in the same transaction
+                    continue
+            try:
+                view.render_and_send(**viewargs)
+            except Exception:
+                # error in post commit are not propagated
+                # We keep this logic here to prevent a small notification error
+                # to prevent them all.
+                self.exception('Notification failed')
+
+
+class NotificationHook(hook.Hook):
+ __abstract__ = True
+ category = 'notification'
+
+ def select_view(self, vid, rset, row=0, col=0):
+ try:
+ return self._cw.vreg['views'].select_or_none(vid, self._cw, rset=rset,
+ row=row, col=col)
+ except RegistryNotFound: # can happen in some config
+ # (e.g. repo only config with no
+ # notification views registered by
+ # the instance's cubes)
+ return None
+
+
+class StatusChangeHook(NotificationHook):
+ """notify when a workflowable entity has its state modified"""
+ __regid__ = 'notifystatuschange'
+ __select__ = NotificationHook.__select__ & is_instance('TrInfo')
+ events = ('after_add_entity',)
+
+ def __call__(self):
+ entity = self.entity
+ if not entity.from_state: # not a transition
+ return
+ rset = entity.related('wf_info_for')
+ view = self.select_view('notif_status_change', rset=rset, row=0)
+ if view is None:
+ return
+ comment = entity.printable_value('comment', format='text/plain')
+ # XXX don't try to wrap rest until we've a proper transformation (see
+ # #103822)
+ if comment and entity.comment_format != 'text/rest':
+ comment = normalize_text(comment, 80)
+ viewargs = {'comment': comment,
+ 'previous_state': entity.previous_state.name,
+ 'current_state': entity.new_state.name}
+ notify_on_commit(self._cw, view, viewargs=viewargs)
+
+class RelationChangeHook(NotificationHook):
+ __regid__ = 'notifyrelationchange'
+ events = ('before_add_relation', 'after_add_relation',
+ 'before_delete_relation', 'after_delete_relation')
+
+ def __call__(self):
+ """if a notification view is defined for the event, send notification
+ email defined by the view
+ """
+ rset = self._cw.eid_rset(self.eidfrom)
+ view = self.select_view('notif_%s_%s' % (self.event, self.rtype),
+ rset=rset, row=0)
+ if view is None:
+ return
+ notify_on_commit(self._cw, view)
+
+
+class EntityChangeHook(NotificationHook):
+ """if a notification view is defined for the event, send notification
+ email defined by the view
+ """
+ __regid__ = 'notifyentitychange'
+ events = ('after_add_entity', 'after_update_entity')
+
+ def __call__(self):
+ rset = self.entity.as_rset()
+ view = self.select_view('notif_%s' % self.event, rset=rset, row=0)
+ if view is None:
+ return
+ notify_on_commit(self._cw, view)
+
+
+class EntityUpdatedNotificationOp(hook.SingleLastOperation):
+ """scrap all changed entity to prepare a Notification Operation for them"""
+
+ def precommit_event(self):
+ # precommit event that creates postcommit operation
+ cnx = self.cnx
+ for eid in cnx.transaction_data['changes']:
+ view = cnx.vreg['views'].select('notif_entity_updated', cnx,
+ rset=cnx.eid_rset(eid),
+ row=0)
+ notify_on_commit(self.cnx, view,
+ viewargs={'changes': cnx.transaction_data['changes'][eid]})
+
+
+class EntityUpdateHook(NotificationHook):
+ __regid__ = 'notifentityupdated'
+ __abstract__ = True # do not register by default
+ __select__ = NotificationHook.__select__ & hook.issued_from_user_query()
+ events = ('before_update_entity',)
+ skip_attrs = set(['modification_date'])
+
+ def __call__(self):
+ cnx = self._cw
+ if cnx.added_in_transaction(self.entity.eid):
+ return # entity is being created
+ # then compute changes
+ attrs = [k for k in self.entity.cw_edited
+ if not k in self.skip_attrs]
+ if not attrs:
+ return
+ changes = cnx.transaction_data.setdefault('changes', {})
+ thisentitychanges = changes.setdefault(self.entity.eid, set())
+ rqlsel, rqlrestr = [], ['X eid %(x)s']
+ for i, attr in enumerate(attrs):
+ var = chr(65+i)
+ rqlsel.append(var)
+ rqlrestr.append('X %s %s' % (attr, var))
+ rql = 'Any %s WHERE %s' % (','.join(rqlsel), ','.join(rqlrestr))
+ rset = cnx.execute(rql, {'x': self.entity.eid})
+ for i, attr in enumerate(attrs):
+ oldvalue = rset[0][i]
+ newvalue = self.entity.cw_edited[attr]
+ if oldvalue != newvalue:
+ thisentitychanges.add((attr, oldvalue, newvalue))
+ if thisentitychanges:
+ EntityUpdatedNotificationOp(cnx)
+
+
+# supervising ##################################################################
+
+class SomethingChangedHook(NotificationHook):
+ __regid__ = 'supervising'
+ __select__ = NotificationHook.__select__ & hook.issued_from_user_query()
+ events = ('before_add_relation', 'before_delete_relation',
+ 'after_add_entity', 'before_update_entity')
+
+ def __call__(self):
+ dest = self._cw.vreg.config['supervising-addrs']
+ if not dest: # no supervisors, don't do this for nothing...
+ return
+ if self._call():
+ SupervisionMailOp(self._cw)
+
+ def _call(self):
+ event = self.event.split('_', 1)[1]
+ if event == 'update_entity':
+ if self._cw.added_in_transaction(self.entity.eid):
+ return False
+ if self.entity.e_schema == 'CWUser':
+ if not (frozenset(self.entity.cw_edited)
+ - frozenset(('eid', 'modification_date',
+ 'last_login_time'))):
+ # don't record last_login_time update which are done
+ # automatically at login time
+ return False
+ self._cw.transaction_data.setdefault('pendingchanges', []).append(
+ (event, self))
+ return True
+
+
+class EntityDeleteHook(SomethingChangedHook):
+ __regid__ = 'supervisingentitydel'
+ events = ('before_delete_entity',)
+
+ def _call(self):
+ try:
+ title = self.entity.dc_title()
+ except Exception:
+ # may raise an error during deletion process, for instance due to
+ # missing required relation
+ title = '#%s' % self.entity.eid
+ self._cw.transaction_data.setdefault('pendingchanges', []).append(
+ ('delete_entity', (self.entity.eid, self.entity.cw_etype, title)))
+ return True
diff -r e1caf133b81c -r b23d58050076 cubicweb/hooks/security.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/security.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,209 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""Security hooks: check permissions to add/delete/update entities according to
+the connected user
+"""
+
+__docformat__ = "restructuredtext en"
+from warnings import warn
+
+from logilab.common.registry import objectify_predicate
+
+from yams import buildobjs
+
+from cubicweb import Unauthorized
+from cubicweb.server import BEFORE_ADD_RELATIONS, ON_COMMIT_ADD_RELATIONS, hook
+
+
+
+def check_entity_attributes(cnx, entity, action, editedattrs=None):
+ eid = entity.eid
+ eschema = entity.e_schema
+ if action == 'delete':
+ eschema.check_perm(session, action, eid=eid)
+ return
+ # ._cw_skip_security_attributes is there to bypass security for attributes
+ # set by hooks by modifying the entity's dictionary
+ if editedattrs is None:
+ editedattrs = entity.cw_edited
+ dontcheck = editedattrs.skip_security
+ etypechecked = False
+ for attr in editedattrs:
+ if attr in dontcheck:
+ continue
+ rdef = eschema.rdef(attr, takefirst=True)
+ if rdef.final: # non final relation are checked by standard hooks
+ perms = rdef.permissions.get(action)
+ # comparison below works because the default update perm is:
+ #
+ # ('managers', ERQLExpression(Any X WHERE U has_update_permission X,
+ # X eid %(x)s, U eid %(u)s))
+ #
+ # is deserialized in this order (groups first), and ERQLExpression
+ # implements comparison by rql expression.
+ if perms == buildobjs.DEFAULT_ATTRPERMS[action]:
+ # The default rule is to delegate to the entity
+ # rule. This needs to be checked only once.
+ if not etypechecked:
+ entity.cw_check_perm(action)
+ etypechecked = True
+ continue
+ if perms == ():
+ # That means an immutable attribute; as an optimization, avoid
+ # going through check_perm.
+ raise Unauthorized(action, str(rdef))
+ rdef.check_perm(cnx, action, eid=eid)
+
+ if action == 'add' and not etypechecked:
+ # think about cnx.create_entity('Foo')
+ # the standard metadata were inserted by a hook
+ # with a bypass ... we conceptually need to check
+ # the eid attribute at *creation* time
+ entity.cw_check_perm(action)
+
+
+class CheckEntityPermissionOp(hook.DataOperationMixIn, hook.LateOperation):
+ def precommit_event(self):
+ cnx = self.cnx
+ for eid, action, edited in self.get_data():
+ entity = cnx.entity_from_eid(eid)
+ check_entity_attributes(cnx, entity, action, edited)
+
+
+class CheckRelationPermissionOp(hook.DataOperationMixIn, hook.LateOperation):
+ def precommit_event(self):
+ cnx = self.cnx
+ for action, rschema, eidfrom, eidto in self.get_data():
+ rdef = rschema.rdef(cnx.entity_metas(eidfrom)['type'],
+ cnx.entity_metas(eidto)['type'])
+ rdef.check_perm(cnx, action, fromeid=eidfrom, toeid=eidto)
+
+
+@objectify_predicate
+def write_security_enabled(cls, req, **kwargs):
+ if req is None or not req.write_security:
+ return 0
+ return 1
+
+class SecurityHook(hook.Hook):
+    """abstract base class for security hooks, selectable only when write
+    security is enabled on the request
+    """
+    __abstract__ = True
+    category = 'security'
+    __select__ = hook.Hook.__select__ & write_security_enabled()
+
+
+class AfterAddEntitySecurityHook(SecurityHook):
+ __regid__ = 'securityafteraddentity'
+ events = ('after_add_entity',)
+
+ def __call__(self):
+ CheckEntityPermissionOp.get_instance(self._cw).add_data(
+ (self.entity.eid, 'add', self.entity.cw_edited) )
+
+
+class AfterUpdateEntitySecurityHook(SecurityHook):
+ __regid__ = 'securityafterupdateentity'
+ events = ('after_update_entity',)
+
+ def __call__(self):
+ # save back editedattrs in case the entity is reedited later in the
+ # same transaction, which will lead to cw_edited being
+ # overwritten
+ action = 'add' if self._cw.added_in_transaction(self.entity.eid) else 'update'
+ CheckEntityPermissionOp.get_instance(self._cw).add_data(
+ (self.entity.eid, action, self.entity.cw_edited) )
+
+
+class BeforeDelEntitySecurityHook(SecurityHook):
+ __regid__ = 'securitybeforedelentity'
+ events = ('before_delete_entity',)
+
+ def __call__(self):
+ self.entity.cw_check_perm('delete')
+
+
+def skip_inlined_relation_security(cnx, rschema, eid):
+ """return True if security for the given inlined relation should be skipped,
+ in case where the relation has been set through modification of
+ `entity.cw_edited` in a hook
+ """
+ assert rschema.inlined
+ try:
+ entity = cnx.entity_cache(eid)
+ except KeyError:
+ return False
+ edited = getattr(entity, 'cw_edited', None)
+ if edited is None:
+ return False
+ return rschema.type in edited.skip_security
+
+
+class BeforeAddRelationSecurityHook(SecurityHook):
+ __regid__ = 'securitybeforeaddrelation'
+ events = ('before_add_relation',)
+
+ def __call__(self):
+ if self.rtype in BEFORE_ADD_RELATIONS:
+ nocheck = self._cw.transaction_data.get('skip-security', ())
+ if (self.eidfrom, self.rtype, self.eidto) in nocheck:
+ return
+ rschema = self._cw.repo.schema[self.rtype]
+ if rschema.inlined and skip_inlined_relation_security(
+ self._cw, rschema, self.eidfrom):
+ return
+ rdef = rschema.rdef(self._cw.entity_metas(self.eidfrom)['type'],
+ self._cw.entity_metas(self.eidto)['type'])
+ rdef.check_perm(self._cw, 'add', fromeid=self.eidfrom, toeid=self.eidto)
+
+
+class AfterAddRelationSecurityHook(SecurityHook):
+ __regid__ = 'securityafteraddrelation'
+ events = ('after_add_relation',)
+
+ def __call__(self):
+ if self.rtype not in BEFORE_ADD_RELATIONS:
+ nocheck = self._cw.transaction_data.get('skip-security', ())
+ if (self.eidfrom, self.rtype, self.eidto) in nocheck:
+ return
+ rschema = self._cw.repo.schema[self.rtype]
+ if rschema.inlined and skip_inlined_relation_security(
+ self._cw, rschema, self.eidfrom):
+ return
+ if self.rtype in ON_COMMIT_ADD_RELATIONS:
+ CheckRelationPermissionOp.get_instance(self._cw).add_data(
+ ('add', rschema, self.eidfrom, self.eidto) )
+ else:
+ rdef = rschema.rdef(self._cw.entity_metas(self.eidfrom)['type'],
+ self._cw.entity_metas(self.eidto)['type'])
+ rdef.check_perm(self._cw, 'add', fromeid=self.eidfrom, toeid=self.eidto)
+
+
+class BeforeDeleteRelationSecurityHook(SecurityHook):
+ __regid__ = 'securitybeforedelrelation'
+ events = ('before_delete_relation',)
+
+ def __call__(self):
+ nocheck = self._cw.transaction_data.get('skip-security', ())
+ if (self.eidfrom, self.rtype, self.eidto) in nocheck:
+ return
+ rschema = self._cw.repo.schema[self.rtype]
+ if rschema.inlined and skip_inlined_relation_security(
+ self._cw, rschema, self.eidfrom):
+ return
+ rdef = rschema.rdef(self._cw.entity_metas(self.eidfrom)['type'],
+ self._cw.entity_metas(self.eidto)['type'])
+ rdef.check_perm(self._cw, 'delete', fromeid=self.eidfrom, toeid=self.eidto)
diff -r e1caf133b81c -r b23d58050076 cubicweb/hooks/synccomputed.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/synccomputed.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,227 @@
+# copyright 2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""Hooks for synchronizing computed attributes"""
+
+__docformat__ = "restructuredtext en"
+from cubicweb import _
+
+from collections import defaultdict
+
+from rql import nodes
+
+from cubicweb.server import hook
+
+
+class RecomputeAttributeOperation(hook.DataOperationMixIn, hook.Operation):
+    """Operation to recompute caches of computed attribute at commit time,
+    depending on what's have been modified in the transaction and avoiding to
+    recompute twice the same attribute
+    """
+    # maps computed attribute rdef -> set of eids to recompute; None in the
+    # set means "recompute for every entity of the rdef's subject type"
+    containercls = dict
+    def add_data(self, computed_attribute, eid=None):
+        try:
+            self._container[computed_attribute].add(eid)
+        except KeyError:
+            self._container[computed_attribute] = set((eid,))
+
+    def precommit_event(self):
+        for computed_attribute_rdef, eids in self.get_data().items():
+            attr = computed_attribute_rdef.rtype
+            formula = computed_attribute_rdef.formula
+            # reuse the formula's parsed RQL tree: select X and group by it
+            # so the formula's value is computed per entity
+            select = self.cnx.repo.vreg.rqlhelper.parse(formula).children[0]
+            xvar = select.get_variable('X')
+            select.add_selected(xvar, index=0)
+            select.add_group_var(xvar, index=0)
+            if None in eids:
+                # full recomputation: restrict by entity type only
+                select.add_type_restriction(xvar, computed_attribute_rdef.subject)
+            else:
+                select.add_eid_restriction(xvar, eids)
+            update_rql = 'SET X %s %%(value)s WHERE X eid %%(x)s' % attr
+            for eid, value in self.cnx.execute(select.as_string()):
+                self.cnx.execute(update_rql, {'value': value, 'x': eid})
+
+
+class EntityWithCACreatedHook(hook.Hook):
+ """When creating an entity that has some computed attribute, those
+ attributes have to be computed.
+
+ Concret class of this hook are generated at registration time by
+ introspecting the schema.
+ """
+ __abstract__ = True
+ events = ('after_add_entity',)
+ # list of computed attribute rdefs that have to be recomputed
+ computed_attributes = None
+
+ def __call__(self):
+ for rdef in self.computed_attributes:
+ RecomputeAttributeOperation.get_instance(self._cw).add_data(
+ rdef, self.entity.eid)
+
+
+class RelationInvolvedInCAModifiedHook(hook.Hook):
+ """When some relation used in a computed attribute is updated, those
+ attributes have to be recomputed.
+
+ Concret class of this hook are generated at registration time by
+ introspecting the schema.
+ """
+ __abstract__ = True
+ events = ('after_add_relation', 'before_delete_relation')
+ # list of (computed attribute rdef, optimize_on) that have to be recomputed
+ optimized_computed_attributes = None
+
+ def __call__(self):
+ for rdef, optimize_on in self.optimized_computed_attributes:
+ if optimize_on is None:
+ eid = None
+ else:
+ eid = getattr(self, optimize_on)
+ RecomputeAttributeOperation.get_instance(self._cw).add_data(rdef, eid)
+
+
+class AttributeInvolvedInCAModifiedHook(hook.Hook):
+ """When some attribute used in a computed attribute is updated, those
+ attributes have to be recomputed.
+
+ Concret class of this hook are generated at registration time by
+ introspecting the schema.
+ """
+ __abstract__ = True
+ events = ('after_update_entity',)
+ # list of (computed attribute rdef, attributes of this entity type involved)
+ # that may have to be recomputed
+ attributes_computed_attributes = None
+
+ def __call__(self):
+ edited_attributes = frozenset(self.entity.cw_edited)
+ for rdef, used_attributes in self.attributes_computed_attributes.items():
+ if edited_attributes.intersection(used_attributes):
+ # XXX optimize if the modified attributes belong to the same
+ # entity as the computed attribute
+ RecomputeAttributeOperation.get_instance(self._cw).add_data(rdef)
+
+
+# code generation at registration time #########################################
+
+def _optimize_on(formula_select, rtype):
+ """Given a formula and some rtype, tells whether on update of the given
+ relation, formula may be recomputed only for rhe relation's subject
+ ('eidfrom' returned), object ('eidto' returned) or None.
+
+ Optimizing is only possible when X is used as direct subject/object of this
+ relation, else we may miss some necessary update.
+ """
+ for rel in formula_select.get_nodes(nodes.Relation):
+ if rel.r_type == rtype:
+ sub = rel.get_variable_parts()[0]
+ obj = rel.get_variable_parts()[1]
+ if sub.name == 'X':
+ return 'eidfrom'
+ elif obj.name == 'X':
+ return 'eidto'
+ else:
+ return None
+
+
+class _FormulaDependenciesMatrix(object):
+    """This class computes and represents the dependencies of computed attributes
+    towards relations and attributes
+    """
+
+    def __init__(self, schema):
+        """Analyzes the schema to compute the dependencies"""
+        # entity types holding some computed attribute {etype: [computed rdefs]}
+        self.computed_attribute_by_etype = defaultdict(list)
+        # depending entity types {dep. etype: {computed rdef: dep. etype attributes}}
+        self.computed_attribute_by_etype_attrs = defaultdict(lambda: defaultdict(set))
+        # depending relations def {dep. rdef: [computed rdefs]
+        self.computed_attribute_by_relation = defaultdict(list) # by rdef
+        # Walk through all attributes definitions
+        for rdef in schema.iter_computed_attributes():
+            self.computed_attribute_by_etype[rdef.subject.type].append(rdef)
+            # extract the relations it depends upon - `rdef.formula_select` is
+            # expected to have been set by finalize_computed_attributes
+            select = rdef.formula_select
+            for rel_node in select.get_nodes(nodes.Relation):
+                if rel_node.is_types_restriction():
+                    # 'X is Y' nodes carry no data dependency
+                    continue
+                rschema = schema.rschema(rel_node.r_type)
+                lhs, rhs = rel_node.get_variable_parts()
+                for sol in select.solutions:
+                    subject_etype = sol[lhs.name]
+                    if isinstance(rhs, nodes.VariableRef):
+                        object_etypes = set(sol[rhs.name] for sol in select.solutions)
+                    else:
+                        # rhs is a constant: fall back to the schema to get
+                        # the possible object types
+                        object_etypes = rschema.objects(subject_etype)
+                    for object_etype in object_etypes:
+                        if rschema.final:
+                            # attribute dependency
+                            attr_for_computations = self.computed_attribute_by_etype_attrs[subject_etype]
+                            attr_for_computations[rdef].add(rschema.type)
+                        else:
+                            # relation dependency
+                            depend_on_rdef = rschema.rdefs[subject_etype, object_etype]
+                            self.computed_attribute_by_relation[depend_on_rdef].append(rdef)
+
+    def generate_entity_creation_hooks(self):
+        """yield a hook class per entity type holding computed attributes"""
+        for etype, computed_attributes in self.computed_attribute_by_etype.items():
+            regid = 'computed_attribute.%s_created' % etype
+            selector = hook.is_instance(etype)
+            yield type('%sCreatedHook' % etype,
+                       (EntityWithCACreatedHook,),
+                       {'__regid__': regid,
+                        '__select__': hook.Hook.__select__ & selector,
+                        'computed_attributes': computed_attributes})
+
+    def generate_relation_change_hooks(self):
+        """yield a hook class per relation definition involved in a formula"""
+        for rdef, computed_attributes in self.computed_attribute_by_relation.items():
+            regid = 'computed_attribute.%s_modified' % rdef.rtype
+            selector = hook.match_rtype(rdef.rtype.type,
+                                        frometypes=(rdef.subject.type,),
+                                        toetypes=(rdef.object.type,))
+            optimized_computed_attributes = []
+            for computed_rdef in computed_attributes:
+                optimized_computed_attributes.append(
+                    (computed_rdef,
+                     _optimize_on(computed_rdef.formula_select, rdef.rtype))
+                    )
+            yield type('%sModifiedHook' % rdef.rtype,
+                       (RelationInvolvedInCAModifiedHook,),
+                       {'__regid__': regid,
+                        '__select__': hook.Hook.__select__ & selector,
+                        'optimized_computed_attributes': optimized_computed_attributes})
+
+    def generate_entity_update_hooks(self):
+        """yield a hook class per entity type whose attributes feed a formula"""
+        for etype, attributes_computed_attributes in self.computed_attribute_by_etype_attrs.items():
+            regid = 'computed_attribute.%s_updated' % etype
+            selector = hook.is_instance(etype)
+            yield type('%sModifiedHook' % etype,
+                       (AttributeInvolvedInCAModifiedHook,),
+                       {'__regid__': regid,
+                        '__select__': hook.Hook.__select__ & selector,
+                        'attributes_computed_attributes': attributes_computed_attributes})
+
+
+def registration_callback(vreg):
+ vreg.register_all(globals().values(), __name__)
+ dependencies = _FormulaDependenciesMatrix(vreg.schema)
+ for hook_class in dependencies.generate_entity_creation_hooks():
+ vreg.register(hook_class)
+ for hook_class in dependencies.generate_relation_change_hooks():
+ vreg.register(hook_class)
+ for hook_class in dependencies.generate_entity_update_hooks():
+ vreg.register(hook_class)
diff -r e1caf133b81c -r b23d58050076 cubicweb/hooks/syncschema.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/syncschema.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,1430 @@
+# copyright 2003-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see .
+"""schema hooks:
+
+- synchronize the living schema object with the persistent schema
+- perform physical update on the source when necessary
+
+checking for schema consistency is done in hooks.py
+"""
+
+__docformat__ = "restructuredtext en"
+from cubicweb import _
+
+import json
+from copy import copy
+
+from yams.schema import BASE_TYPES, BadSchemaDefinition, RelationDefinitionSchema
+from yams.constraints import UniqueConstraint
+from yams import buildobjs as ybo, convert_default_value
+
+from logilab.common.decorators import clear_cache
+
+from cubicweb import validation_error
+from cubicweb.predicates import is_instance
+from cubicweb.schema import (SCHEMA_TYPES, META_RTYPES, VIRTUAL_RTYPES,
+ CONSTRAINTS, UNIQUE_CONSTRAINTS, ETYPE_NAME_MAP)
+from cubicweb.server import hook, schemaserial as ss, schema2sql as y2sql
+from cubicweb.server.sqlutils import SQL_PREFIX
+from cubicweb.hooks.synccomputed import RecomputeAttributeOperation
+
+# core entity and relation types which can't be removed
+CORE_TYPES = BASE_TYPES | SCHEMA_TYPES | META_RTYPES | set(
+ ('CWUser', 'CWGroup', 'login', 'upassword', 'name', 'in_group'))
+
+
+def get_constraints(cnx, entity):
+ constraints = []
+ for cstreid in cnx.transaction_data.get(entity.eid, ()):
+ cstrent = cnx.entity_from_eid(cstreid)
+ cstr = CONSTRAINTS[cstrent.type].deserialize(cstrent.value)
+ cstr.eid = cstreid
+ constraints.append(cstr)
+ return constraints
+
+
+def group_mapping(cw):
+ try:
+ return cw.transaction_data['groupmap']
+ except KeyError:
+ cw.transaction_data['groupmap'] = gmap = ss.group_mapping(cw)
+ return gmap
+
+
+def add_inline_relation_column(cnx, etype, rtype):
+ """add necessary column and index for an inlined relation"""
+ attrkey = '%s.%s' % (etype, rtype)
+ createdattrs = cnx.transaction_data.setdefault('createdattrs', set())
+ if attrkey in createdattrs:
+ return
+ createdattrs.add(attrkey)
+ table = SQL_PREFIX + etype
+ column = SQL_PREFIX + rtype
+ try:
+ cnx.system_sql(str('ALTER TABLE %s ADD %s integer REFERENCES entities (eid)'
+ % (table, column)),
+ rollback_on_failure=False)
+ cnx.info('added column %s to table %s', column, table)
+ except Exception:
+ # silent exception here, if this error has not been raised because the
+ # column already exists, index creation will fail anyway
+ cnx.exception('error while adding column %s to table %s',
+ table, column)
+ # create index before alter table which may expectedly fail during test
+ # (sqlite) while index creation should never fail (test for index existence
+ # is done by the dbhelper)
+ cnx.repo.system_source.create_index(cnx, table, column)
+ cnx.info('added index on %s(%s)', table, column)
+
+
+def insert_rdef_on_subclasses(cnx, eschema, rschema, rdefdef, props):
+ # XXX 'infered': True/False, not clear actually
+ props.update({'constraints': rdefdef.constraints,
+ 'description': rdefdef.description,
+ 'cardinality': rdefdef.cardinality,
+ 'permissions': rdefdef.get_permissions(),
+ 'order': rdefdef.order,
+ 'infered': False, 'eid': None
+ })
+ cstrtypemap = ss.cstrtype_mapping(cnx)
+ groupmap = group_mapping(cnx)
+ object = rschema.schema.eschema(rdefdef.object)
+ for specialization in eschema.specialized_by(False):
+ if (specialization, rdefdef.object) in rschema.rdefs:
+ continue
+ sperdef = RelationDefinitionSchema(specialization, rschema,
+ object, None, values=props)
+ ss.execschemarql(cnx.execute, sperdef,
+ ss.rdef2rql(sperdef, cstrtypemap, groupmap))
+
+
+def check_valid_changes(cnx, entity, ro_attrs=('name', 'final')):
+ errors = {}
+ # don't use getattr(entity, attr), we would get the modified value if any
+ for attr in entity.cw_edited:
+ if attr in ro_attrs:
+ origval, newval = entity.cw_edited.oldnewvalue(attr)
+ if newval != origval:
+ errors[attr] = _("can't change this attribute")
+ if errors:
+ raise validation_error(entity, errors)
+
+
+class _MockEntity(object): # XXX use a named tuple with python 2.6
+ def __init__(self, eid):
+ self.eid = eid
+
+
+class SyncSchemaHook(hook.Hook):
+ """abstract class for schema synchronization hooks (in the `syncschema`
+ category)
+ """
+ __abstract__ = True
+ category = 'syncschema'
+
+
+# operations for low-level database alteration ################################
+
+class DropTable(hook.Operation):
+ """actually remove a database from the instance's schema"""
+ table = None # make pylint happy
+
+ def precommit_event(self):
+ dropped = self.cnx.transaction_data.setdefault('droppedtables', set())
+ if self.table in dropped:
+ return # already processed
+ dropped.add(self.table)
+ self.cnx.system_sql('DROP TABLE %s' % self.table)
+ self.info('dropped table %s', self.table)
+
+ # XXX revertprecommit_event
+
+
+class DropRelationTable(DropTable):
+ def __init__(self, cnx, rtype):
+ super(DropRelationTable, self).__init__(
+ cnx, table='%s_relation' % rtype)
+ cnx.transaction_data.setdefault('pendingrtypes', set()).add(rtype)
+
+
+class DropColumn(hook.DataOperationMixIn, hook.Operation):
+ """actually remove the attribute's column from entity table in the system
+ database
+ """
+ def precommit_event(self):
+ cnx = self.cnx
+ for etype, attr in self.get_data():
+ table = SQL_PREFIX + etype
+ column = SQL_PREFIX + attr
+ source = cnx.repo.system_source
+ # drop index if any
+ source.drop_index(cnx, table, column)
+ if source.dbhelper.alter_column_support:
+ cnx.system_sql('ALTER TABLE %s DROP COLUMN %s' % (table, column),
+ rollback_on_failure=False)
+ self.info('dropped column %s from table %s', column, table)
+ else:
+ # not supported by sqlite for instance
+ self.error('dropping column not supported by the backend, handle '
+ 'it yourself (%s.%s)', table, column)
+
+ # XXX revertprecommit_event
+
+
+# base operations for in-memory schema synchronization ########################
+
+class MemSchemaNotifyChanges(hook.SingleLastOperation):
+ """the update schema operation:
+
+ special operation which should be called once and after all other schema
+ operations. It will trigger internal structures rebuilding to consider
+ schema changes.
+ """
+
+ def __init__(self, cnx):
+ hook.SingleLastOperation.__init__(self, cnx)
+
+ def precommit_event(self):
+ for eschema in self.cnx.repo.schema.entities():
+ if not eschema.final:
+ clear_cache(eschema, 'ordered_relations')
+
+ def postcommit_event(self):
+ repo = self.cnx.repo
+ # commit event should not raise error, while set_schema has chances to
+ # do so because it triggers full vreg reloading
+ try:
+ repo.schema.rebuild_infered_relations()
+ # trigger vreg reload
+ repo.set_schema(repo.schema)
+ # CWUser class might have changed, update current session users
+ cwuser_cls = self.cnx.vreg['etypes'].etype_class('CWUser')
+ for session in repo._sessions.values():
+ session.user.__class__ = cwuser_cls
+ except Exception:
+ self.critical('error while setting schema', exc_info=True)
+
+ def rollback_event(self):
+ self.precommit_event()
+
+
+class MemSchemaOperation(hook.Operation):
+ """base class for schema operations"""
+ def __init__(self, cnx, **kwargs):
+ hook.Operation.__init__(self, cnx, **kwargs)
+ # every schema operation is triggering a schema update
+ MemSchemaNotifyChanges(cnx)
+
+
+# operations for high-level source database alteration ########################
+
+class CWETypeAddOp(MemSchemaOperation):
+ """after adding a CWEType entity:
+ * add it to the instance's schema
+ * create the necessary table
+ * set creation_date and modification_date by creating the necessary
+ CWAttribute entities
+ * add relation by creating the necessary CWRelation entity
+ """
+ entity = None # make pylint happy
+
+ def precommit_event(self):
+ cnx = self.cnx
+ entity = self.entity
+ schema = cnx.vreg.schema
+ etype = ybo.EntityType(eid=entity.eid, name=entity.name,
+ description=entity.description)
+ eschema = schema.add_entity_type(etype)
+ # create the necessary table
+ for sql in y2sql.eschema2sql(cnx.repo.system_source.dbhelper,
+ eschema, prefix=SQL_PREFIX):
+ cnx.system_sql(sql)
+ # add meta relations
+ gmap = group_mapping(cnx)
+ cmap = ss.cstrtype_mapping(cnx)
+ for rtype in (META_RTYPES - VIRTUAL_RTYPES):
+ try:
+ rschema = schema[rtype]
+ except KeyError:
+ self.critical('rtype %s was not handled at cwetype creation time', rtype)
+ continue
+ if not rschema.rdefs:
+ self.warning('rtype %s has no relation definition yet', rtype)
+ continue
+ sampletype = rschema.subjects()[0]
+ desttype = rschema.objects()[0]
+ try:
+ rdef = copy(rschema.rdef(sampletype, desttype))
+ except KeyError:
+ # this combo does not exist because this is not a universal META_RTYPE
+ continue
+ rdef.subject = _MockEntity(eid=entity.eid)
+ mock = _MockEntity(eid=None)
+ ss.execschemarql(cnx.execute, mock, ss.rdef2rql(rdef, cmap, gmap))
+
+ def revertprecommit_event(self):
+ # revert changes on in memory schema
+ self.cnx.vreg.schema.del_entity_type(self.entity.name)
+ # revert changes on database
+ self.cnx.system_sql('DROP TABLE %s%s' % (SQL_PREFIX, self.entity.name))
+
+
+class CWETypeRenameOp(MemSchemaOperation):
+ """this operation updates physical storage accordingly"""
+
+ oldname = newname = None # make pylint happy
+
+ def rename(self, oldname, newname):
+ cnx = self.cnx
+ source = cnx.repo.system_source
+ dbhelper = source.dbhelper
+ # we need sql to operate physical changes on the system database
+ sqlexec = cnx.system_sql
+ cnx.vreg.schema.rename_entity_type(oldname, newname)
+ old_table = SQL_PREFIX + oldname
+ new_table = SQL_PREFIX + newname
+ eschema = cnx.vreg.schema.eschema(newname)
+ # drop old indexes before the renaming
+ for rschema in eschema.subject_relations():
+ if rschema.inlined or (rschema.final and eschema.rdef(rschema.type).indexed):
+ source.drop_index(cnx, old_table, SQL_PREFIX + rschema.type)
+ if rschema.final and any(isinstance(cstr, UniqueConstraint)
+ for cstr in eschema.rdef(rschema.type).constraints):
+ source.drop_index(cnx, old_table, SQL_PREFIX + rschema.type, unique=True)
+ sql = dbhelper.sql_rename_table(old_table, new_table)
+ sqlexec(sql)
+ self.info('renamed table %s to %s', oldname, newname)
+ sqlexec('UPDATE entities SET type=%(newname)s WHERE type=%(oldname)s',
+ {'newname': newname, 'oldname': oldname})
+ for eid, (etype, extid, auri) in cnx.repo._type_source_cache.items():
+ if etype == oldname:
+ cnx.repo._type_source_cache[eid] = (newname, extid, auri)
+ # recreate the indexes
+ for rschema in eschema.subject_relations():
+ if rschema.inlined or (rschema.final and eschema.rdef(rschema.type).indexed):
+ source.create_index(cnx, new_table, SQL_PREFIX + rschema.type)
+ if rschema.final and any(isinstance(cstr, UniqueConstraint)
+ for cstr in eschema.rdef(rschema.type).constraints):
+ source.create_index(cnx, new_table, SQL_PREFIX + rschema.type, unique=True)
+ for attrs in eschema._unique_together or ():
+ columns = ['%s%s' % (SQL_PREFIX, attr) for attr in attrs]
+ old_index_name = y2sql.unique_index_name(oldname, columns)
+ for sql in dbhelper.sqls_drop_multicol_unique_index(
+ new_table, columns, old_index_name):
+ sqlexec(sql)
+ new_index_name = y2sql.unique_index_name(newname, columns)
+ for sql in dbhelper.sqls_create_multicol_unique_index(
+ new_table, columns, new_index_name):
+ sqlexec(sql)
+ # XXX transaction records
+
+ def precommit_event(self):
+ self.rename(self.oldname, self.newname)
+
+ def revertprecommit_event(self):
+ self.rename(self.newname, self.oldname)
+
+
+class CWRTypeUpdateOp(MemSchemaOperation):
+ """actually update some properties of a relation definition"""
+
+ rschema = entity = values = None # make pylint happy
+ oldvalues = None
+
+ def precommit_event(self):
+ rschema = self.rschema
+ if rschema.final:
+ return # watched changes to final relation type are unexpected
+ cnx = self.cnx
+ if 'fulltext_container' in self.values:
+ op = UpdateFTIndexOp.get_instance(cnx)
+ for subjtype, objtype in rschema.rdefs:
+ if self.values['fulltext_container'] == 'subject':
+ op.add_data(subjtype)
+ op.add_data(objtype)
+ else:
+ op.add_data(objtype)
+ op.add_data(subjtype)
+ # update the in-memory schema first
+ self.oldvalues = dict((attr, getattr(rschema, attr)) for attr in self.values)
+ self.rschema.__dict__.update(self.values)
+ # then make necessary changes to the system source database
+ if 'inlined' not in self.values:
+ return # nothing to do
+ inlined = self.values['inlined']
+ # check in-lining is possible when inlined
+ if inlined:
+ self.entity.check_inlined_allowed()
+ # inlined changed, make necessary physical changes!
+ sqlexec = self.cnx.system_sql
+ rtype = rschema.type
+ eidcolumn = SQL_PREFIX + 'eid'
+ if not inlined:
+ # need to create the relation if it has not been already done by
+ # another event of the same transaction
+ if rschema.type not in cnx.transaction_data.get('createdtables', ()):
+ # create the necessary table
+ for sql in y2sql.rschema2sql(rschema):
+ sqlexec(sql)
+ cnx.transaction_data.setdefault('createdtables', []).append(
+ rschema.type)
+ # copy existing data
+ column = SQL_PREFIX + rtype
+ for etype in rschema.subjects():
+ table = SQL_PREFIX + str(etype)
+ sqlexec('INSERT INTO %s_relation SELECT %s, %s FROM %s WHERE NOT %s IS NULL'
+ % (rtype, eidcolumn, column, table, column))
+ # drop existing columns
+ for etype in rschema.subjects():
+ DropColumn.get_instance(cnx).add_data((str(etype), rtype))
+ else:
+ for etype in rschema.subjects():
+ try:
+ add_inline_relation_column(cnx, str(etype), rtype)
+ except Exception as ex:
+ # the column probably already exists. this occurs when the
+ # entity's type has just been added or if the column has not
+ # been previously dropped (eg sqlite)
+ self.error('error while altering table %s: %s', etype, ex)
+ # copy existing data.
+ # XXX don't use, it's not supported by sqlite (at least when I tried it)
+ #sqlexec('UPDATE %(etype)s SET %(rtype)s=eid_to '
+ # 'FROM %(rtype)s_relation '
+ # 'WHERE %(etype)s.eid=%(rtype)s_relation.eid_from'
+ # % locals())
+ table = SQL_PREFIX + str(etype)
+ cursor = sqlexec('SELECT eid_from, eid_to FROM %(table)s, '
+ '%(rtype)s_relation WHERE %(table)s.%(eidcolumn)s='
+ '%(rtype)s_relation.eid_from' % locals())
+ args = [{'val': eid_to, 'x': eid} for eid, eid_to in cursor.fetchall()]
+ if args:
+ column = SQL_PREFIX + rtype
+ cursor.executemany('UPDATE %s SET %s=%%(val)s WHERE %s=%%(x)s'
+ % (table, column, eidcolumn), args)
+ # drop existing table
+ DropRelationTable(cnx, rtype)
+
+ def revertprecommit_event(self):
+ # revert changes on in memory schema
+ self.rschema.__dict__.update(self.oldvalues)
+ # XXX revert changes on database
+
+
+class CWComputedRTypeUpdateOp(MemSchemaOperation):
+ """actually update some properties of a computed relation definition"""
+ rschema = entity = rule = None # make pylint happy
+ old_rule = None
+
+ def precommit_event(self):
+ # update the in-memory schema first
+ self.old_rule = self.rschema.rule
+ self.rschema.rule = self.rule
+
+ def revertprecommit_event(self):
+ # revert changes on in memory schema
+ self.rschema.rule = self.old_rule
+
+
+class CWAttributeAddOp(MemSchemaOperation):
+ """an attribute relation (CWAttribute) has been added:
+ * add the necessary column
+ * set default on this column if any and possible
+ * register an operation to add the relation definition to the
+ instance's schema on commit
+
+ constraints are handled by specific hooks
+ """
+ entity = None # make pylint happy
+
+ def init_rdef(self, **kwargs):
+ entity = self.entity
+ fromentity = entity.stype
+ rdefdef = self.rdefdef = ybo.RelationDefinition(
+ str(fromentity.name), entity.rtype.name, str(entity.otype.name),
+ description=entity.description, cardinality=entity.cardinality,
+ constraints=get_constraints(self.cnx, entity),
+ order=entity.ordernum, eid=entity.eid, **kwargs)
+ try:
+ self.cnx.vreg.schema.add_relation_def(rdefdef)
+ except BadSchemaDefinition:
+ # rdef has been inferred then explicitly added (current consensus is
+ # not clear at all versus inferred relation handling, and much
+ # probably buggy)
+ rdef = self.cnx.vreg.schema.rschema(rdefdef.name).rdefs[rdefdef.subject, rdefdef.object]
+ assert rdef.infered
+ else:
+ rdef = self.cnx.vreg.schema.rschema(rdefdef.name).rdefs[rdefdef.subject, rdefdef.object]
+
+ self.cnx.execute('SET X ordernum Y+1 '
+ 'WHERE X from_entity SE, SE eid %(se)s, X ordernum Y, '
+ 'X ordernum >= %(order)s, NOT X eid %(x)s',
+ {'x': entity.eid, 'se': fromentity.eid,
+ 'order': entity.ordernum or 0})
+ return rdefdef, rdef
+
+ def precommit_event(self):
+ cnx = self.cnx
+ entity = self.entity
+ # entity.defaultval is a Binary or None, but we need a correctly typed
+ # value
+ default = entity.defaultval
+ if default is not None:
+ default = default.unzpickle()
+ props = {'default': default,
+ 'indexed': entity.indexed,
+ 'fulltextindexed': entity.fulltextindexed,
+ 'internationalizable': entity.internationalizable}
+ if entity.extra_props:
+ props.update(json.loads(entity.extra_props.getvalue().decode('ascii')))
+ # entity.formula may not exist yet if we're migrating to 3.20
+ if hasattr(entity, 'formula'):
+ props['formula'] = entity.formula
+ # update the in-memory schema first
+ rdefdef, rdef = self.init_rdef(**props)
+ # then make necessary changes to the system source database
+ syssource = cnx.repo.system_source
+ attrtype = y2sql.type_from_rdef(syssource.dbhelper, rdef)
+ # added some str() wrapping query since some backend (eg psycopg) don't
+ # allow unicode queries
+ table = SQL_PREFIX + rdefdef.subject
+ column = SQL_PREFIX + rdefdef.name
+ try:
+ cnx.system_sql(str('ALTER TABLE %s ADD %s %s'
+ % (table, column, attrtype)),
+ rollback_on_failure=False)
+ self.info('added column %s to table %s', column, table)
+ except Exception as ex:
+ # the column probably already exists. this occurs when
+ # the entity's type has just been added or if the column
+ # has not been previously dropped
+ self.error('error while altering table %s: %s', table, ex)
+ if entity.indexed:
+ try:
+ syssource.create_index(cnx, table, column, unique=False)
+ except Exception as ex:
+ self.error('error while creating index for %s.%s: %s',
+ table, column, ex)
+ # final relations are not inferred, propagate
+ schema = cnx.vreg.schema
+ try:
+ eschema = schema.eschema(rdefdef.subject)
+ except KeyError:
+ return # entity type currently being added
+ # propagate attribute to children classes
+ rschema = schema.rschema(rdefdef.name)
+ # if relation type has been inserted in the same transaction, its final
+ # attribute is still set to False, so we've to ensure it's False
+ rschema.final = True
+ insert_rdef_on_subclasses(cnx, eschema, rschema, rdefdef, props)
+ # update existing entities with the default value of newly added attribute
+ if default is not None:
+ default = convert_default_value(self.rdefdef, default)
+ cnx.system_sql('UPDATE %s SET %s=%%(default)s' % (table, column),
+ {'default': default})
+ # if attribute is computed, compute it
+ if getattr(entity, 'formula', None):
+ # add rtype attribute for RelationDefinitionSchema api compat, this
+ # is what RecomputeAttributeOperation expect
+ rdefdef.rtype = rdefdef.name
+ RecomputeAttributeOperation.get_instance(cnx).add_data(rdefdef)
+
+ def revertprecommit_event(self):
+ # revert changes on in memory schema
+ if getattr(self, 'rdefdef', None) is None:
+ return
+ self.cnx.vreg.schema.del_relation_def(
+ self.rdefdef.subject, self.rdefdef.name, self.rdefdef.object)
+ # XXX revert changes on database
+
+
+class CWRelationAddOp(CWAttributeAddOp):
+ """an actual relation has been added:
+
+ * add the relation definition to the instance's schema
+
+ * if this is an inlined relation, add the necessary column else if it's the
+ first instance of this relation type, add the necessary table and set
+ default permissions
+
+ constraints are handled by specific hooks
+ """
+ entity = None # make pylint happy
+
+ def precommit_event(self):
+ cnx = self.cnx
+ entity = self.entity
+ # update the in-memory schema first
+ rdefdef, rdef = self.init_rdef(composite=entity.composite)
+ # then make necessary changes to the system source database
+ schema = cnx.vreg.schema
+ rtype = rdefdef.name
+ rschema = schema.rschema(rtype)
+ # this have to be done before permissions setting
+ if rschema.inlined:
+ # need to add a column if the relation is inlined and if this is the
+ # first occurrence of "Subject relation Something" whatever Something
+ if len(rschema.objects(rdefdef.subject)) == 1:
+ add_inline_relation_column(cnx, rdefdef.subject, rtype)
+ eschema = schema[rdefdef.subject]
+ insert_rdef_on_subclasses(cnx, eschema, rschema, rdefdef,
+ {'composite': entity.composite})
+ else:
+ if rschema.symmetric:
+ # for symmetric relations, rdefs will store relation definitions
+ # in both ways (i.e. (subj -> obj) and (obj -> subj))
+ relation_already_defined = len(rschema.rdefs) > 2
+ else:
+ relation_already_defined = len(rschema.rdefs) > 1
+ # need to create the relation if no relation definition in the
+ # schema and if it has not been added during other event of the same
+ # transaction
+ if not (relation_already_defined or
+ rtype in cnx.transaction_data.get('createdtables', ())):
+ rschema = schema.rschema(rtype)
+ # create the necessary table
+ for sql in y2sql.rschema2sql(rschema):
+ cnx.system_sql(sql)
+ cnx.transaction_data.setdefault('createdtables', []).append(
+ rtype)
+
+ # XXX revertprecommit_event
+
+
+class RDefDelOp(MemSchemaOperation):
+ """an actual relation has been removed"""
+ rdef = None # make pylint happy
+
+ def precommit_event(self):
+ cnx = self.cnx
+ rdef = self.rdef
+ rschema = rdef.rtype
+ # make necessary changes to the system source database first
+ rdeftype = rschema.final and 'CWAttribute' or 'CWRelation'
+ execute = cnx.execute
+ rset = execute('Any COUNT(X) WHERE X is %s, X relation_type R,'
+ 'R eid %%(x)s' % rdeftype, {'x': rschema.eid})
+ lastrel = rset[0][0] == 0
+ # we have to update physical schema systematically for final and inlined
+ # relations, but only if it's the last instance for this relation type
+ # for other relations
+ if (rschema.final or rschema.inlined):
+ if not cnx.deleted_in_transaction(rdef.subject.eid):
+ rset = execute('Any COUNT(X) WHERE X is %s, X relation_type R, '
+ 'R eid %%(r)s, X from_entity E, E eid %%(e)s'
+ % rdeftype,
+ {'r': rschema.eid, 'e': rdef.subject.eid})
+ if rset[0][0] == 0:
+ ptypes = cnx.transaction_data.setdefault('pendingrtypes', set())
+ ptypes.add(rschema.type)
+ DropColumn.get_instance(cnx).add_data((str(rdef.subject), str(rschema)))
+ elif rschema.inlined:
+ cnx.system_sql('UPDATE %s%s SET %s%s=NULL WHERE '
+ 'EXISTS(SELECT 1 FROM entities '
+ ' WHERE eid=%s%s AND type=%%(to_etype)s)'
+ % (SQL_PREFIX, rdef.subject, SQL_PREFIX, rdef.rtype,
+ SQL_PREFIX, rdef.rtype),
+ {'to_etype': rdef.object.type})
+ elif lastrel:
+ DropRelationTable(cnx, str(rschema))
+ else:
+ cnx.system_sql('DELETE FROM %s_relation WHERE '
+ 'EXISTS(SELECT 1 FROM entities '
+ ' WHERE eid=eid_from AND type=%%(from_etype)s)'
+ ' AND EXISTS(SELECT 1 FROM entities '
+ ' WHERE eid=eid_to AND type=%%(to_etype)s)'
+ % rschema,
+ {'from_etype': rdef.subject.type, 'to_etype': rdef.object.type})
+ # then update the in-memory schema
+ if rdef.subject not in ETYPE_NAME_MAP and rdef.object not in ETYPE_NAME_MAP:
+ rschema.del_relation_def(rdef.subject, rdef.object)
+ # if this is the last relation definition of this type, drop associated
+ # relation type
+ if lastrel and not cnx.deleted_in_transaction(rschema.eid):
+ execute('DELETE CWRType X WHERE X eid %(x)s', {'x': rschema.eid})
+
+ def revertprecommit_event(self):
+ # revert changes on in memory schema
+ #
+ # Note: add_relation_def takes a RelationDefinition, not a
+ # RelationDefinitionSchema, needs to fake it
+ rdef = self.rdef
+ rdef.name = str(rdef.rtype)
+ if rdef.subject not in ETYPE_NAME_MAP and rdef.object not in ETYPE_NAME_MAP:
+ self.cnx.vreg.schema.add_relation_def(rdef)
+
+
+class RDefUpdateOp(MemSchemaOperation):
+ """actually update some properties of a relation definition"""
+ rschema = rdefkey = values = None # make pylint happy
+ rdef = oldvalues = None
+ indexed_changed = null_allowed_changed = False
+
+ def precommit_event(self):
+ cnx = self.cnx
+ rdef = self.rdef = self.rschema.rdefs[self.rdefkey]
+ # update the in-memory schema first
+ self.oldvalues = dict((attr, getattr(rdef, attr)) for attr in self.values)
+ rdef.update(self.values)
+ # then make necessary changes to the system source database
+ syssource = cnx.repo.system_source
+ if 'indexed' in self.values:
+ syssource.update_rdef_indexed(cnx, rdef)
+ self.indexed_changed = True
+ if ('cardinality' in self.values and rdef.rtype.final
+ and self.values['cardinality'][0] != self.oldvalues['cardinality'][0]):
+ syssource.update_rdef_null_allowed(self.cnx, rdef)
+ self.null_allowed_changed = True
+ if 'fulltextindexed' in self.values:
+ UpdateFTIndexOp.get_instance(cnx).add_data(rdef.subject)
+ if 'formula' in self.values:
+ RecomputeAttributeOperation.get_instance(cnx).add_data(rdef)
+
+ def revertprecommit_event(self):
+ if self.rdef is None:
+ return
+ # revert changes on in memory schema
+ self.rdef.update(self.oldvalues)
+ # revert changes on database
+ syssource = self.cnx.repo.system_source
+ if self.indexed_changed:
+ syssource.update_rdef_indexed(self.cnx, self.rdef)
+ if self.null_allowed_changed:
+ syssource.update_rdef_null_allowed(self.cnx, self.rdef)
+
+
+def _set_modifiable_constraints(rdef):
+ # for proper in-place modification of in-memory schema: if rdef.constraints
+ # is already a list, reuse it (we're updating multiple constraints of the
+ # same rdef in the same transaction)
+ if not isinstance(rdef.constraints, list):
+ rdef.constraints = list(rdef.constraints)
+
+
+class CWConstraintDelOp(MemSchemaOperation):
+ """actually remove a constraint of a relation definition"""
+ rdef = oldcstr = newcstr = None # make pylint happy
+ size_cstr_changed = unique_changed = False
+
+ def precommit_event(self):
+ cnx = self.cnx
+ rdef = self.rdef
+ # in-place modification of in-memory schema first
+ _set_modifiable_constraints(rdef)
+ if self.oldcstr in rdef.constraints:
+ rdef.constraints.remove(self.oldcstr)
+ else:
+ self.critical('constraint %s for rdef %s was missing or already removed',
+ self.oldcstr, rdef)
+ if cnx.deleted_in_transaction(rdef.eid):
+ # don't try to alter a table that's going away (or is already gone)
+ return
+ # then update database: alter the physical schema on size/unique
+ # constraint changes
+ syssource = cnx.repo.system_source
+ cstrtype = self.oldcstr.type()
+ if cstrtype == 'SizeConstraint':
+ # if the size constraint is being replaced with a new max size, we'll
+ # call update_rdef_column in CWConstraintAddOp, skip it here
+ for cstr in cnx.transaction_data.get('newsizecstr', ()):
+ rdefentity = cstr.reverse_constrained_by[0]
+ cstrrdef = cnx.vreg.schema.schema_by_eid(rdefentity.eid)
+ if cstrrdef == rdef:
+ return
+
+ # we found that the size constraint for this rdef is really gone,
+ # not just replaced by another
+ syssource.update_rdef_column(cnx, rdef)
+ self.size_cstr_changed = True
+ elif cstrtype == 'UniqueConstraint':
+ syssource.update_rdef_unique(cnx, rdef)
+ self.unique_changed = True
+ elif cstrtype in ('BoundaryConstraint',
+ 'IntervalBoundConstraint',
+ 'StaticVocabularyConstraint'):
+ cnx.system_sql('ALTER TABLE %s%s DROP CONSTRAINT %s'
+ % (SQL_PREFIX, rdef.subject, self.oldcstr.name_for(rdef)))
+
+ def revertprecommit_event(self):
+ # revert changes on in memory schema
+ if self.newcstr is not None:
+ self.rdef.constraints.remove(self.newcstr)
+ if self.oldcstr is not None:
+ self.rdef.constraints.append(self.oldcstr)
+ # revert changes on database
+ syssource = self.cnx.repo.system_source
+ if self.size_cstr_changed:
+ syssource.update_rdef_column(self.cnx, self.rdef)
+ if self.unique_changed:
+ syssource.update_rdef_unique(self.cnx, self.rdef)
+
+
+class CWConstraintAddOp(CWConstraintDelOp):
+ """actually update constraint of a relation definition"""
+ entity = None # make pylint happy
+
+ def precommit_event(self):
+ cnx = self.cnx
+ rdefentity = self.entity.reverse_constrained_by[0]
+ rdef = self.rdef = cnx.vreg.schema.schema_by_eid(rdefentity.eid)
+ cstrtype = self.entity.type
+ if cstrtype in UNIQUE_CONSTRAINTS:
+ oldcstr = self.oldcstr = rdef.constraint_by_type(cstrtype)
+ else:
+ oldcstr = None
+ newcstr = self.newcstr = CONSTRAINTS[cstrtype].deserialize(self.entity.value)
+ # in-place modification of in-memory schema first
+ _set_modifiable_constraints(rdef)
+ newcstr.eid = self.entity.eid
+ if oldcstr is not None:
+ rdef.constraints.remove(oldcstr)
+ rdef.constraints.append(newcstr)
+ # then update database: alter the physical schema on size/unique
+ # constraint changes
+ syssource = cnx.repo.system_source
+ if cstrtype == 'SizeConstraint' and (oldcstr is None or
+ oldcstr.max != newcstr.max):
+ syssource.update_rdef_column(cnx, rdef)
+ self.size_cstr_changed = True
+ elif cstrtype == 'UniqueConstraint' and oldcstr is None:
+ syssource.update_rdef_unique(cnx, rdef)
+ self.unique_changed = True
+ if cstrtype in ('BoundaryConstraint',
+ 'IntervalBoundConstraint',
+ 'StaticVocabularyConstraint'):
+ cstrname, check = y2sql.check_constraint(rdef, newcstr, syssource.dbhelper,
+ prefix=SQL_PREFIX)
+ # oldcstr is the new constraint when the attribute is being added in the same
+ # transaction or when constraint value is updated. So we've to take care...
+ if oldcstr is not None:
+ oldcstrname = self.oldcstr.name_for(rdef)
+ if oldcstrname != cstrname:
+ cnx.system_sql('ALTER TABLE %s%s DROP CONSTRAINT %s'
+ % (SQL_PREFIX, rdef.subject, oldcstrname))
+ cnx.system_sql('ALTER TABLE %s%s ADD CONSTRAINT %s CHECK(%s)' %
+ (SQL_PREFIX, rdef.subject, cstrname, check))
+
+
+class CWUniqueTogetherConstraintAddOp(MemSchemaOperation):
+ entity = None # make pylint happy
+
+ def precommit_event(self):
+ cnx = self.cnx
+ prefix = SQL_PREFIX
+ entity = self.entity
+ table = '%s%s' % (prefix, entity.constraint_of[0].name)
+ cols = ['%s%s' % (prefix, r.name) for r in entity.relations]
+ dbhelper = cnx.repo.system_source.dbhelper
+ sqls = dbhelper.sqls_create_multicol_unique_index(table, cols, entity.name)
+ for sql in sqls:
+ cnx.system_sql(sql)
+
+ def postcommit_event(self):
+ entity = self.entity
+ eschema = self.cnx.vreg.schema.schema_by_eid(entity.constraint_of[0].eid)
+ attrs = [r.name for r in entity.relations]
+ eschema._unique_together.append(attrs)
+
+
+class CWUniqueTogetherConstraintDelOp(MemSchemaOperation):
+ entity = cstrname = None # make pylint happy
+ cols = () # make pylint happy
+
+ def insert_index(self):
+ # We need to run before CWConstraintDelOp: if a size constraint is
+ # removed and the column is part of a unique_together constraint, we
+ # remove the unique_together index before changing the column's type.
+ # SQL Server does not support unique indices on unlimited text columns.
+ return 0
+
+ def precommit_event(self):
+ cnx = self.cnx
+ prefix = SQL_PREFIX
+ table = '%s%s' % (prefix, self.entity.type)
+ dbhelper = cnx.repo.system_source.dbhelper
+ cols = ['%s%s' % (prefix, c) for c in self.cols]
+ sqls = dbhelper.sqls_drop_multicol_unique_index(table, cols, self.cstrname)
+ for sql in sqls:
+ cnx.system_sql(sql)
+
+ def postcommit_event(self):
+ eschema = self.cnx.vreg.schema.schema_by_eid(self.entity.eid)
+ cols = set(self.cols)
+ unique_together = [ut for ut in eschema._unique_together
+ if set(ut) != cols]
+ eschema._unique_together = unique_together
+
+
+# operations for in-memory schema synchronization #############################
+
+class MemSchemaCWETypeDel(MemSchemaOperation):
+ """actually remove the entity type from the instance's schema"""
+ etype = None # make pylint happy
+
+ def postcommit_event(self):
+ # del_entity_type also removes entity's relations
+ self.cnx.vreg.schema.del_entity_type(self.etype)
+
+
+class MemSchemaCWRTypeAdd(MemSchemaOperation):
+ """actually add the relation type to the instance's schema"""
+ rtypedef = None # make pylint happy
+
+ def precommit_event(self):
+ self.cnx.vreg.schema.add_relation_type(self.rtypedef)
+
+ def revertprecommit_event(self):
+ self.cnx.vreg.schema.del_relation_type(self.rtypedef.name)
+
+
+class MemSchemaCWRTypeDel(MemSchemaOperation):
+ """actually remove the relation type from the instance's schema"""
+ rtype = None # make pylint happy
+
+ def postcommit_event(self):
+ try:
+ self.cnx.vreg.schema.del_relation_type(self.rtype)
+ except KeyError:
+ # subject/object entity type has already been deleted
+ pass
+
+
+class MemSchemaPermissionAdd(MemSchemaOperation):
+ """synchronize schema when a *_permission relation has been added on a group
+ """
+ eid = action = group_eid = expr = None # make pylint happy
+
+ def precommit_event(self):
+ """the observed connections.cnxset has been commited"""
+ try:
+ erschema = self.cnx.vreg.schema.schema_by_eid(self.eid)
+ except KeyError:
+ # duh, schema not found, log error and skip operation
+ self.warning('no schema for %s', self.eid)
+ return
+ perms = list(erschema.action_permissions(self.action))
+ if self.group_eid is not None:
+ perm = self.cnx.entity_from_eid(self.group_eid).name
+ else:
+ perm = erschema.rql_expression(self.expr)
+ try:
+ perms.index(perm)
+ self.warning('%s already in permissions for %s on %s',
+ perm, self.action, erschema)
+ except ValueError:
+ perms.append(perm)
+ erschema.set_action_permissions(self.action, perms)
+
+ # XXX revertprecommit_event
+
+
+class MemSchemaPermissionDel(MemSchemaPermissionAdd):
+ """synchronize schema when a *_permission relation has been deleted from a
+ group
+ """
+
+ def precommit_event(self):
+ """the observed connections set has been commited"""
+ try:
+ erschema = self.cnx.vreg.schema.schema_by_eid(self.eid)
+ except KeyError:
+ # duh, schema not found, log error and skip operation
+ self.warning('no schema for %s', self.eid)
+ return
+ perms = list(erschema.action_permissions(self.action))
+ if self.group_eid is not None:
+ perm = self.cnx.entity_from_eid(self.group_eid).name
+ else:
+ perm = erschema.rql_expression(self.expr)
+ try:
+ perms.remove(perm)
+ erschema.set_action_permissions(self.action, perms)
+ except ValueError:
+ self.error('can\'t remove permission %s for %s on %s',
+ perm, self.action, erschema)
+
+ # XXX revertprecommit_event
+
+
+class MemSchemaSpecializesAdd(MemSchemaOperation):
+ etypeeid = parentetypeeid = None # make pylint happy
+
+ def precommit_event(self):
+ eschema = self.cnx.vreg.schema.schema_by_eid(self.etypeeid)
+ parenteschema = self.cnx.vreg.schema.schema_by_eid(self.parentetypeeid)
+ eschema._specialized_type = parenteschema.type
+ parenteschema._specialized_by.append(eschema.type)
+
+ # XXX revertprecommit_event
+
+
+class MemSchemaSpecializesDel(MemSchemaOperation):
+ etypeeid = parentetypeeid = None # make pylint happy
+
+ def precommit_event(self):
+ try:
+ eschema = self.cnx.vreg.schema.schema_by_eid(self.etypeeid)
+ parenteschema = self.cnx.vreg.schema.schema_by_eid(self.parentetypeeid)
+ except KeyError:
+ # etype removed, nothing to do
+ return
+ eschema._specialized_type = None
+ parenteschema._specialized_by.remove(eschema.type)
+
+ # XXX revertprecommit_event
+
+
+# CWEType hooks ################################################################
+
+class DelCWETypeHook(SyncSchemaHook):
+ """before deleting a CWEType entity:
+ * check that we don't remove a core entity type
+ * cascade to delete related CWAttribute and CWRelation entities
+ * instantiate an operation to delete the entity type on commit
+ """
+ __regid__ = 'syncdelcwetype'
+ __select__ = SyncSchemaHook.__select__ & is_instance('CWEType')
+ events = ('before_delete_entity',)
+
+ def __call__(self):
+ # final entities can't be deleted, don't care about that
+ name = self.entity.name
+ if name in CORE_TYPES:
+ raise validation_error(self.entity, {None: _("can't be deleted")})
+ # delete every entities of this type
+ if name not in ETYPE_NAME_MAP:
+ MemSchemaCWETypeDel(self._cw, etype=name)
+ if not self.entity.final:
+ DropTable(self._cw, table=SQL_PREFIX + name)
+
+
+class AfterDelCWETypeHook(DelCWETypeHook):
+ __regid__ = 'wfcleanup'
+ events = ('after_delete_entity',)
+
+ def __call__(self):
+ # workflow cleanup
+ self._cw.execute('DELETE Workflow X WHERE NOT X workflow_of Y')
+
+
+class AfterAddCWETypeHook(DelCWETypeHook):
+ """after adding a CWEType entity:
+ * create the necessary table
+ * set creation_date and modification_date by creating the necessary
+ CWAttribute entities
+ * add owned_by relation by creating the necessary CWRelation entity
+ * register an operation to add the entity type to the instance's
+ schema on commit
+ """
+ __regid__ = 'syncaddcwetype'
+ events = ('after_add_entity',)
+
+ def __call__(self):
+ entity = self.entity
+ if entity.cw_edited.get('final'):
+ # final entity types don't need a table in the database and are
+ # systematically added by yams at schema initialization time so
+ # there is no need to do further processing. Simply assign its eid.
+ self._cw.vreg.schema[entity.name].eid = entity.eid
+ return
+ CWETypeAddOp(self._cw, entity=entity)
+
+
+class BeforeUpdateCWETypeHook(DelCWETypeHook):
+ """check name change, handle final"""
+ __regid__ = 'syncupdatecwetype'
+ events = ('before_update_entity',)
+
+ def __call__(self):
+ entity = self.entity
+ check_valid_changes(self._cw, entity, ro_attrs=('final',))
+ # don't use getattr(entity, attr), we would get the modified value if any
+ if 'name' in entity.cw_edited:
+ oldname, newname = entity.cw_edited.oldnewvalue('name')
+ if newname.lower() != oldname.lower():
+ CWETypeRenameOp(self._cw, oldname=oldname, newname=newname)
+
+
+# CWRType hooks ################################################################
+
+class DelCWRTypeHook(SyncSchemaHook):
+ """before deleting a CWRType entity:
+ * check that we don't remove a core relation type
+ * cascade to delete related CWAttribute and CWRelation entities
+ * instantiate an operation to delete the relation type on commit
+ """
+ __regid__ = 'syncdelcwrtype'
+ __select__ = SyncSchemaHook.__select__ & is_instance('CWRType')
+ events = ('before_delete_entity',)
+
+ def __call__(self):
+ name = self.entity.name
+ if name in CORE_TYPES:
+ raise validation_error(self.entity, {None: _("can't be deleted")})
+ # delete relation definitions using this relation type
+ self._cw.execute('DELETE CWAttribute X WHERE X relation_type Y, Y eid %(x)s',
+ {'x': self.entity.eid})
+ self._cw.execute('DELETE CWRelation X WHERE X relation_type Y, Y eid %(x)s',
+ {'x': self.entity.eid})
+ MemSchemaCWRTypeDel(self._cw, rtype=name)
+
+
+class AfterAddCWComputedRTypeHook(SyncSchemaHook):
+ """after a CWComputedRType entity has been added:
+ * register an operation to add the relation type to the instance's
+ schema on commit
+
+ We don't know yet at this point whether a table is necessary
+ """
+ __regid__ = 'syncaddcwcomputedrtype'
+ __select__ = SyncSchemaHook.__select__ & is_instance('CWComputedRType')
+ events = ('after_add_entity',)
+
+ def __call__(self):
+ entity = self.entity
+ rtypedef = ybo.ComputedRelation(name=entity.name,
+ eid=entity.eid,
+ rule=entity.rule)
+ MemSchemaCWRTypeAdd(self._cw, rtypedef=rtypedef)
+
+
+class AfterAddCWRTypeHook(SyncSchemaHook):
+ """after a CWRType entity has been added:
+ * register an operation to add the relation type to the instance's
+ schema on commit
+
+ We don't know yet at this point whether a table is necessary
+ """
+ __regid__ = 'syncaddcwrtype'
+ __select__ = SyncSchemaHook.__select__ & is_instance('CWRType')
+ events = ('after_add_entity',)
+
+ def __call__(self):
+ entity = self.entity
+ rtypedef = ybo.RelationType(name=entity.name,
+ description=entity.description,
+ inlined=entity.cw_edited.get('inlined', False),
+ symmetric=entity.cw_edited.get('symmetric', False),
+ eid=entity.eid)
+ MemSchemaCWRTypeAdd(self._cw, rtypedef=rtypedef)
+
+
+class BeforeUpdateCWRTypeHook(SyncSchemaHook):
+ """check name change, handle final"""
+ __regid__ = 'syncupdatecwrtype'
+ __select__ = SyncSchemaHook.__select__ & is_instance('CWRType')
+ events = ('before_update_entity',)
+
+ def __call__(self):
+ entity = self.entity
+ check_valid_changes(self._cw, entity)
+ newvalues = {}
+ for prop in ('symmetric', 'inlined', 'fulltext_container'):
+ if prop in entity.cw_edited:
+ old, new = entity.cw_edited.oldnewvalue(prop)
+ if old != new:
+ newvalues[prop] = new
+ if newvalues:
+ rschema = self._cw.vreg.schema.rschema(entity.name)
+ CWRTypeUpdateOp(self._cw, rschema=rschema, entity=entity,
+ values=newvalues)
+
+
+class BeforeUpdateCWComputedRTypeHook(SyncSchemaHook):
+ """check name change, handle final"""
+ __regid__ = 'syncupdatecwcomputedrtype'
+ __select__ = SyncSchemaHook.__select__ & is_instance('CWComputedRType')
+ events = ('before_update_entity',)
+
+ def __call__(self):
+ entity = self.entity
+ check_valid_changes(self._cw, entity)
+ if 'rule' in entity.cw_edited:
+ old, new = entity.cw_edited.oldnewvalue('rule')
+ if old != new:
+ rschema = self._cw.vreg.schema.rschema(entity.name)
+ CWComputedRTypeUpdateOp(self._cw, rschema=rschema,
+ entity=entity, rule=new)
+
+
+class AfterDelRelationTypeHook(SyncSchemaHook):
+ """before deleting a CWAttribute or CWRelation entity:
+ * if this is a final or inlined relation definition, instantiate an
+ operation to drop necessary column, else if this is the last instance
+ of a non final relation, instantiate an operation to drop necessary
+ table
+ * instantiate an operation to delete the relation definition on commit
+ * delete the associated relation type when necessary
+ """
+ __regid__ = 'syncdelrelationtype'
+ __select__ = SyncSchemaHook.__select__ & hook.match_rtype('relation_type')
+ events = ('after_delete_relation',)
+
+ def __call__(self):
+ cnx = self._cw
+ try:
+ rdef = cnx.vreg.schema.schema_by_eid(self.eidfrom)
+ except KeyError:
+ self.critical('cant get schema rdef associated to %s', self.eidfrom)
+ return
+ subjschema, rschema, objschema = rdef.as_triple()
+ pendingrdefs = cnx.transaction_data.setdefault('pendingrdefs', set())
+ # first delete existing relation if necessary
+ if rschema.final:
+ pendingrdefs.add((subjschema, rschema))
+ else:
+ pendingrdefs.add((subjschema, rschema, objschema))
+ RDefDelOp(cnx, rdef=rdef)
+
+
+# CWComputedRType hooks #######################################################
+
+class DelCWComputedRTypeHook(SyncSchemaHook):
+ """before deleting a CWComputedRType entity:
+ * check that we don't remove a core relation type
+ * instantiate an operation to delete the relation type on commit
+ """
+ __regid__ = 'syncdelcwcomputedrtype'
+ __select__ = SyncSchemaHook.__select__ & is_instance('CWComputedRType')
+ events = ('before_delete_entity',)
+
+ def __call__(self):
+ name = self.entity.name
+ if name in CORE_TYPES:
+ raise validation_error(self.entity, {None: _("can't be deleted")})
+ MemSchemaCWRTypeDel(self._cw, rtype=name)
+
+
+# CWAttribute / CWRelation hooks ###############################################
+
+class AfterAddCWAttributeHook(SyncSchemaHook):
+ __regid__ = 'syncaddcwattribute'
+ __select__ = SyncSchemaHook.__select__ & is_instance('CWAttribute')
+ events = ('after_add_entity',)
+
+ def __call__(self):
+ CWAttributeAddOp(self._cw, entity=self.entity)
+
+
+class AfterAddCWRelationHook(AfterAddCWAttributeHook):
+ __regid__ = 'syncaddcwrelation'
+ __select__ = SyncSchemaHook.__select__ & is_instance('CWRelation')
+
+ def __call__(self):
+ CWRelationAddOp(self._cw, entity=self.entity)
+
+
+class AfterUpdateCWRDefHook(SyncSchemaHook):
+ __regid__ = 'syncaddcwattribute'
+ __select__ = SyncSchemaHook.__select__ & is_instance('CWAttribute',
+ 'CWRelation')
+ events = ('before_update_entity',)
+
+ def __call__(self):
+ entity = self.entity
+ if self._cw.deleted_in_transaction(entity.eid):
+ return
+ subjtype = entity.stype.name
+ objtype = entity.otype.name
+ if subjtype in ETYPE_NAME_MAP or objtype in ETYPE_NAME_MAP:
+ return
+ rschema = self._cw.vreg.schema[entity.rtype.name]
+ # note: do not access schema rdef here, it may be added later by an
+ # operation
+ newvalues = {}
+ for prop in RelationDefinitionSchema.rproperty_defs(objtype):
+ if prop == 'constraints':
+ continue
+ if prop == 'order':
+ attr = 'ordernum'
+ else:
+ attr = prop
+ if attr in entity.cw_edited:
+ old, new = entity.cw_edited.oldnewvalue(attr)
+ if old != new:
+ newvalues[prop] = new
+ if newvalues:
+ RDefUpdateOp(self._cw, rschema=rschema, rdefkey=(subjtype, objtype),
+ values=newvalues)
+
+
+# constraints synchronization hooks ############################################
+
+class AfterAddCWConstraintHook(SyncSchemaHook):
+ __regid__ = 'syncaddcwconstraint'
+ __select__ = SyncSchemaHook.__select__ & is_instance('CWConstraint')
+ events = ('after_add_entity', 'after_update_entity')
+
+ def __call__(self):
+ if self.entity.cstrtype[0].name == 'SizeConstraint':
+ txdata = self._cw.transaction_data
+ if 'newsizecstr' not in txdata:
+ txdata['newsizecstr'] = set()
+ txdata['newsizecstr'].add(self.entity)
+ CWConstraintAddOp(self._cw, entity=self.entity)
+
+
+class AfterAddConstrainedByHook(SyncSchemaHook):
+ __regid__ = 'syncaddconstrainedby'
+ __select__ = SyncSchemaHook.__select__ & hook.match_rtype('constrained_by')
+ events = ('after_add_relation',)
+
+ def __call__(self):
+ if self._cw.added_in_transaction(self.eidfrom):
+ # used by get_constraints() which is called in CWAttributeAddOp
+ self._cw.transaction_data.setdefault(self.eidfrom, []).append(self.eidto)
+
+
+class BeforeDeleteCWConstraintHook(SyncSchemaHook):
+ __regid__ = 'syncdelcwconstraint'
+ __select__ = SyncSchemaHook.__select__ & is_instance('CWConstraint')
+ events = ('before_delete_entity',)
+
+ def __call__(self):
+ entity = self.entity
+ schema = self._cw.vreg.schema
+ try:
+ # KeyError, e.g. composite chain deletion
+ rdef = schema.schema_by_eid(entity.reverse_constrained_by[0].eid)
+ # IndexError
+ cstr = rdef.constraint_by_eid(entity.eid)
+ except (KeyError, IndexError):
+ self._cw.critical('constraint type no more accessible')
+ else:
+ CWConstraintDelOp(self._cw, rdef=rdef, oldcstr=cstr)
+
+
+# unique_together constraints
+# XXX: use setoperations and before_add_relation here (on constraint_of and relations)
+class AfterAddCWUniqueTogetherConstraintHook(SyncSchemaHook):
+ __regid__ = 'syncadd_cwuniquetogether_constraint'
+ __select__ = SyncSchemaHook.__select__ & is_instance('CWUniqueTogetherConstraint')
+ events = ('after_add_entity',)
+
+ def __call__(self):
+ CWUniqueTogetherConstraintAddOp(self._cw, entity=self.entity)
+
+
+class BeforeDeleteConstraintOfHook(SyncSchemaHook):
+ __regid__ = 'syncdelconstraintof'
+ __select__ = SyncSchemaHook.__select__ & hook.match_rtype('constraint_of')
+ events = ('before_delete_relation',)
+
+ def __call__(self):
+ if self._cw.deleted_in_transaction(self.eidto):
+ return
+ schema = self._cw.vreg.schema
+ cstr = self._cw.entity_from_eid(self.eidfrom)
+ entity = schema.schema_by_eid(self.eidto)
+ cols = tuple(r.name for r in cstr.relations)
+ CWUniqueTogetherConstraintDelOp(self._cw, entity=entity,
+ cstrname=cstr.name, cols=cols)
+
+
+# permissions synchronization hooks ############################################
+
+class AfterAddPermissionHook(SyncSchemaHook):
+ """added entity/relation *_permission, need to update schema"""
+ __regid__ = 'syncaddperm'
+ __select__ = SyncSchemaHook.__select__ & hook.match_rtype(
+ 'read_permission', 'add_permission', 'delete_permission',
+ 'update_permission')
+ events = ('after_add_relation',)
+
+ def __call__(self):
+ action = self.rtype.split('_', 1)[0]
+ if self._cw.entity_metas(self.eidto)['type'] == 'CWGroup':
+ MemSchemaPermissionAdd(self._cw, action=action, eid=self.eidfrom,
+ group_eid=self.eidto)
+ else: # RQLExpression
+ expr = self._cw.entity_from_eid(self.eidto).expression
+ MemSchemaPermissionAdd(self._cw, action=action, eid=self.eidfrom,
+ expr=expr)
+
+
+class BeforeDelPermissionHook(AfterAddPermissionHook):
+ """delete entity/relation *_permission, need to update schema
+
+ skip the operation if the related type is being deleted
+ """
+ __regid__ = 'syncdelperm'
+ events = ('before_delete_relation',)
+
+ def __call__(self):
+ if self._cw.deleted_in_transaction(self.eidfrom):
+ return
+ action = self.rtype.split('_', 1)[0]
+ if self._cw.entity_metas(self.eidto)['type'] == 'CWGroup':
+ MemSchemaPermissionDel(self._cw, action=action, eid=self.eidfrom,
+ group_eid=self.eidto)
+ else: # RQLExpression
+ expr = self._cw.entity_from_eid(self.eidto).expression
+ MemSchemaPermissionDel(self._cw, action=action, eid=self.eidfrom,
+ expr=expr)
+
+
+class UpdateFTIndexOp(hook.DataOperationMixIn, hook.SingleLastOperation):
+ """operation to update full text indexation of entity whose schema change
+
+ We wait until after the commit, as the schema in memory is only updated after
+ the commit.
+ """
+ containercls = list
+
+ def postcommit_event(self):
+ cnx = self.cnx
+ source = cnx.repo.system_source
+ schema = cnx.repo.vreg.schema
+ to_reindex = self.get_data()
+ self.info('%i etypes need full text indexed reindexation',
+ len(to_reindex))
+ for etype in to_reindex:
+ rset = cnx.execute('Any X WHERE X is %s' % etype)
+ self.info('Reindexing full text index for %i entity of type %s',
+ len(rset), etype)
+ still_fti = list(schema[etype].indexable_attributes())
+ for entity in rset.entities():
+ source.fti_unindex_entities(cnx, [entity])
+ for container in entity.cw_adapt_to('IFTIndexable').fti_containers():
+ if still_fti or container is not entity:
+ source.fti_unindex_entities(cnx, [container])
+ source.fti_index_entities(cnx, [container])
+ if to_reindex:
+ # Transaction has already been committed
+ cnx.cnxset.commit()
+
+
+# specializes synchronization hooks ############################################
+
+class AfterAddSpecializesHook(SyncSchemaHook):
+ __regid__ = 'syncaddspecializes'
+ __select__ = SyncSchemaHook.__select__ & hook.match_rtype('specializes')
+ events = ('after_add_relation',)
+
+ def __call__(self):
+ MemSchemaSpecializesAdd(self._cw, etypeeid=self.eidfrom,
+ parentetypeeid=self.eidto)
+
+
+class AfterDelSpecializesHook(SyncSchemaHook):
+ __regid__ = 'syncdelspecializes'
+ __select__ = SyncSchemaHook.__select__ & hook.match_rtype('specializes')
+ events = ('after_delete_relation',)
+
+ def __call__(self):
+ MemSchemaSpecializesDel(self._cw, etypeeid=self.eidfrom,
+ parentetypeeid=self.eidto)
diff -r e1caf133b81c -r b23d58050076 cubicweb/hooks/syncsession.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/syncsession.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,254 @@
+# copyright 2003-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""Core hooks: synchronize living session on persistent data changes"""
+
+__docformat__ = "restructuredtext en"
+
+from cubicweb import _
+from cubicweb import UnknownProperty, BadConnectionId, validation_error
+from cubicweb.predicates import is_instance
+from cubicweb.server import hook
+
+
+def get_user_sessions(repo, ueid):
+ for session in repo._sessions.values():
+ if ueid == session.user.eid:
+ yield session
+
+
+class SyncSessionHook(hook.Hook):
+ __abstract__ = True
+ category = 'syncsession'
+
+
+# user/groups synchronisation #################################################
+
+class _GroupOperation(hook.Operation):
+ """base class for group operation"""
+ cnxuser = None # make pylint happy
+
+ def __init__(self, cnx, *args, **kwargs):
+ """override to get the group name before actual groups manipulation:
+
+ we may temporarily lose right access during a commit event, so
+ no query should be emitted while committing
+ """
+ rql = 'Any N WHERE G eid %(x)s, G name N'
+ result = cnx.execute(rql, {'x': kwargs['geid']}, build_descr=False)
+ hook.Operation.__init__(self, cnx, *args, **kwargs)
+ self.group = result[0][0]
+
+
+class _DeleteGroupOp(_GroupOperation):
+ """Synchronize user when a in_group relation has been deleted"""
+
+ def postcommit_event(self):
+ """the observed connections set has been commited"""
+ groups = self.cnxuser.groups
+ try:
+ groups.remove(self.group)
+ except KeyError:
+ self.error('user %s not in group %s', self.cnxuser, self.group)
+
+
+class _AddGroupOp(_GroupOperation):
+ """Synchronize user when a in_group relation has been added"""
+
+ def postcommit_event(self):
+ """the observed connections set has been commited"""
+ groups = self.cnxuser.groups
+ if self.group in groups:
+ self.warning('user %s already in group %s', self.cnxuser,
+ self.group)
+ else:
+ groups.add(self.group)
+
+
+class SyncInGroupHook(SyncSessionHook):
+ """Watch addition/removal of in_group relation to synchronize living sessions accordingly"""
+ __regid__ = 'syncingroup'
+ __select__ = SyncSessionHook.__select__ & hook.match_rtype('in_group')
+ events = ('after_delete_relation', 'after_add_relation')
+
+ def __call__(self):
+ if self.event == 'after_delete_relation':
+ opcls = _DeleteGroupOp
+ else:
+ opcls = _AddGroupOp
+ for session in get_user_sessions(self._cw.repo, self.eidfrom):
+ opcls(self._cw, cnxuser=session.user, geid=self.eidto)
+
+
+class _DelUserOp(hook.Operation):
+ """close associated user's session when it is deleted"""
+ def __init__(self, cnx, sessionid):
+ self.sessionid = sessionid
+ hook.Operation.__init__(self, cnx)
+
+ def postcommit_event(self):
+ try:
+ self.cnx.repo.close(self.sessionid)
+ except BadConnectionId:
+ pass # already closed
+
+
+class CloseDeletedUserSessionsHook(SyncSessionHook):
+ __regid__ = 'closession'
+ __select__ = SyncSessionHook.__select__ & is_instance('CWUser')
+ events = ('after_delete_entity',)
+
+ def __call__(self):
+ for session in get_user_sessions(self._cw.repo, self.entity.eid):
+ _DelUserOp(self._cw, session.sessionid)
+
+
+# CWProperty hooks #############################################################
+
+class _DelCWPropertyOp(hook.Operation):
+ """a user's custom property has been deleted"""
+ cwpropdict = key = None # make pylint happy
+
+ def postcommit_event(self):
+ """the observed connections set has been commited"""
+ try:
+ del self.cwpropdict[self.key]
+ except KeyError:
+ self.error('%s has no associated value', self.key)
+
+
+class _ChangeCWPropertyOp(hook.Operation):
+ """a user's custom property has been added/changed"""
+ cwpropdict = key = value = None # make pylint happy
+
+ def postcommit_event(self):
+ """the observed connections set has been commited"""
+ self.cwpropdict[self.key] = self.value
+
+
+class _AddCWPropertyOp(hook.Operation):
+ """a user's custom property has been added/changed"""
+ cwprop = None # make pylint happy
+
+ def postcommit_event(self):
+ """the observed connections set has been commited"""
+ cwprop = self.cwprop
+ if not cwprop.for_user:
+ self.cnx.vreg['propertyvalues'][cwprop.pkey] = \
+ self.cnx.vreg.typed_value(cwprop.pkey, cwprop.value)
+ # if for_user is set, update is handled by a ChangeCWPropertyOp operation
+
+
+class AddCWPropertyHook(SyncSessionHook):
+ __regid__ = 'addcwprop'
+ __select__ = SyncSessionHook.__select__ & is_instance('CWProperty')
+ events = ('after_add_entity',)
+
+ def __call__(self):
+ key, value = self.entity.pkey, self.entity.value
+ if key.startswith('sources.'):
+ return
+ cnx = self._cw
+ try:
+ value = cnx.vreg.typed_value(key, value)
+ except UnknownProperty:
+ msg = _('unknown property key %s')
+ raise validation_error(self.entity, {('pkey', 'subject'): msg}, (key,))
+ except ValueError as ex:
+ raise validation_error(self.entity,
+ {('value', 'subject'): str(ex)})
+ if not cnx.user.matching_groups('managers'):
+ cnx.add_relation(self.entity.eid, 'for_user', cnx.user.eid)
+ else:
+ _AddCWPropertyOp(cnx, cwprop=self.entity)
+
+
+class UpdateCWPropertyHook(AddCWPropertyHook):
+ __regid__ = 'updatecwprop'
+ events = ('after_update_entity',)
+
+ def __call__(self):
+ entity = self.entity
+ if not ('pkey' in entity.cw_edited or
+ 'value' in entity.cw_edited):
+ return
+ key, value = entity.pkey, entity.value
+ if key.startswith('sources.'):
+ return
+ cnx = self._cw
+ try:
+ value = cnx.vreg.typed_value(key, value)
+ except UnknownProperty:
+ return
+ except ValueError as ex:
+ raise validation_error(entity, {('value', 'subject'): str(ex)})
+ if entity.for_user:
+ for session in get_user_sessions(cnx.repo, entity.for_user[0].eid):
+ _ChangeCWPropertyOp(cnx, cwpropdict=session.user.properties,
+ key=key, value=value)
+ else:
+ # site wide properties
+ _ChangeCWPropertyOp(cnx, cwpropdict=cnx.vreg['propertyvalues'],
+ key=key, value=value)
+
+
+class DeleteCWPropertyHook(AddCWPropertyHook):
+ __regid__ = 'delcwprop'
+ events = ('before_delete_entity',)
+
+ def __call__(self):
+ cnx = self._cw
+ for eidfrom, rtype, eidto in cnx.transaction_data.get('pendingrelations', ()):
+ if rtype == 'for_user' and eidfrom == self.entity.eid:
+ # if for_user was set, delete already handled by hook on for_user deletion
+ break
+ else:
+ _DelCWPropertyOp(cnx, cwpropdict=cnx.vreg['propertyvalues'],
+ key=self.entity.pkey)
+
+
+class AddForUserRelationHook(SyncSessionHook):
+ __regid__ = 'addcwpropforuser'
+ __select__ = SyncSessionHook.__select__ & hook.match_rtype('for_user')
+ events = ('after_add_relation',)
+
+ def __call__(self):
+ cnx = self._cw
+ eidfrom = self.eidfrom
+ if not cnx.entity_metas(eidfrom)['type'] == 'CWProperty':
+ return
+ key, value = cnx.execute('Any K,V WHERE P eid %(x)s,P pkey K,P value V',
+ {'x': eidfrom})[0]
+ if cnx.vreg.property_info(key)['sitewide']:
+ msg = _("site-wide property can't be set for user")
+ raise validation_error(eidfrom, {('for_user', 'subject'): msg})
+ for session in get_user_sessions(cnx.repo, self.eidto):
+ _ChangeCWPropertyOp(cnx, cwpropdict=session.user.properties,
+ key=key, value=value)
+
+
+class DelForUserRelationHook(AddForUserRelationHook):
+ __regid__ = 'delcwpropforuser'
+ events = ('after_delete_relation',)
+
+ def __call__(self):
+ cnx = self._cw
+ key = cnx.execute('Any K WHERE P eid %(x)s, P pkey K', {'x': self.eidfrom})[0][0]
+ cnx.transaction_data.setdefault('pendingrelations', []).append(
+ (self.eidfrom, self.rtype, self.eidto))
+ for session in get_user_sessions(cnx.repo, self.eidto):
+ _DelCWPropertyOp(cnx, cwpropdict=session.user.properties, key=key)
diff -r e1caf133b81c -r b23d58050076 cubicweb/hooks/syncsources.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/syncsources.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,208 @@
+# copyright 2010-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""hooks for repository sources synchronization"""
+
+from cubicweb import _
+
+from socket import gethostname
+
+from logilab.common.decorators import clear_cache
+
+from cubicweb import validation_error
+from cubicweb.predicates import is_instance
+from cubicweb.server import SOURCE_TYPES, hook
+
+class SourceHook(hook.Hook):
+ __abstract__ = True
+ category = 'cw.sources'
+
+
+# repo sources synchronization #################################################
+
+class SourceAddedOp(hook.Operation):
+ entity = None # make pylint happy
+ def postcommit_event(self):
+ self.cnx.repo.add_source(self.entity)
+
+class SourceAddedHook(SourceHook):
+ __regid__ = 'cw.sources.added'
+ __select__ = SourceHook.__select__ & is_instance('CWSource')
+ events = ('after_add_entity',)
+ def __call__(self):
+ try:
+ sourcecls = SOURCE_TYPES[self.entity.type]
+ except KeyError:
+ msg = _('Unknown source type')
+ raise validation_error(self.entity, {('type', 'subject'): msg})
+ # ignore creation of the system source done during database
+ # initialisation, as config for this source is in a file and handling
+# is done separately (no need for the operation either)
+ if self.entity.name != 'system':
+ sourcecls.check_conf_dict(self.entity.eid, self.entity.host_config,
+ fail_if_unknown=not self._cw.vreg.config.repairing)
+ SourceAddedOp(self._cw, entity=self.entity)
+
+
+class SourceRemovedOp(hook.Operation):
+ uri = None # make pylint happy
+ def postcommit_event(self):
+ self.cnx.repo.remove_source(self.uri)
+
+class SourceRemovedHook(SourceHook):
+ __regid__ = 'cw.sources.removed'
+ __select__ = SourceHook.__select__ & is_instance('CWSource')
+ events = ('before_delete_entity',)
+ def __call__(self):
+ if self.entity.name == 'system':
+ msg = _("You cannot remove the system source")
+ raise validation_error(self.entity, {None: msg})
+ SourceRemovedOp(self._cw, uri=self.entity.name)
+
+
+class SourceConfigUpdatedOp(hook.DataOperationMixIn, hook.Operation):
+
+ def precommit_event(self):
+ self.__processed = []
+ for source in self.get_data():
+ if not self.cnx.deleted_in_transaction(source.eid):
+ conf = source.repo_source.check_config(source)
+ self.__processed.append( (source, conf) )
+
+ def postcommit_event(self):
+ for source, conf in self.__processed:
+ source.repo_source.update_config(source, conf)
+
+
+class SourceRenamedOp(hook.LateOperation):
+ oldname = newname = None # make pylint happy
+
+ def precommit_event(self):
+ source = self.cnx.repo.sources_by_uri[self.oldname]
+ sql = 'UPDATE entities SET asource=%(newname)s WHERE asource=%(oldname)s'
+ self.cnx.system_sql(sql, {'oldname': self.oldname,
+ 'newname': self.newname})
+
+ def postcommit_event(self):
+ repo = self.cnx.repo
+ # XXX race condition
+ source = repo.sources_by_uri.pop(self.oldname)
+ source.uri = self.newname
+ source.public_config['uri'] = self.newname
+ repo.sources_by_uri[self.newname] = source
+ repo._type_source_cache.clear()
+ clear_cache(repo, 'source_defs')
+
+
+class SourceUpdatedHook(SourceHook):
+ __regid__ = 'cw.sources.configupdate'
+ __select__ = SourceHook.__select__ & is_instance('CWSource')
+ events = ('before_update_entity',)
+ def __call__(self):
+ if 'name' in self.entity.cw_edited:
+ oldname, newname = self.entity.cw_edited.oldnewvalue('name')
+ if oldname == 'system':
+ msg = _("You cannot rename the system source")
+ raise validation_error(self.entity, {('name', 'subject'): msg})
+ SourceRenamedOp(self._cw, oldname=oldname, newname=newname)
+ if 'config' in self.entity.cw_edited or 'url' in self.entity.cw_edited:
+ if self.entity.name == 'system' and self.entity.config:
+ msg = _("Configuration of the system source goes to "
+ "the 'sources' file, not in the database")
+ raise validation_error(self.entity, {('config', 'subject'): msg})
+ SourceConfigUpdatedOp.get_instance(self._cw).add_data(self.entity)
+
+
+class SourceHostConfigUpdatedHook(SourceHook):
+ __regid__ = 'cw.sources.hostconfigupdate'
+ __select__ = SourceHook.__select__ & is_instance('CWSourceHostConfig')
+ events = ('after_add_entity', 'after_update_entity', 'before_delete_entity',)
+ def __call__(self):
+ if self.entity.match(gethostname()):
+ if self.event == 'after_update_entity' and \
+ not 'config' in self.entity.cw_edited:
+ return
+ try:
+ SourceConfigUpdatedOp.get_instance(self._cw).add_data(self.entity.cwsource)
+ except IndexError:
+ # XXX no source linked to the host config yet
+ pass
+
+
+# source mapping synchronization ###############################################
+#
+# Expect cw_for_source/cw_schema are immutable relations (i.e. can't change from
+# a source or schema to another).
+
+class SourceMappingImmutableHook(SourceHook):
+ """check cw_for_source and cw_schema are immutable relations
+
+ XXX empty delete perms would be enough?
+ """
+ __regid__ = 'cw.sources.mapping.immutable'
+ __select__ = SourceHook.__select__ & hook.match_rtype('cw_for_source', 'cw_schema')
+ events = ('before_add_relation',)
+ def __call__(self):
+ if not self._cw.added_in_transaction(self.eidfrom):
+ msg = _("You can't change this relation")
+ raise validation_error(self.eidfrom, {self.rtype: msg})
+
+
+class SourceMappingChangedOp(hook.DataOperationMixIn, hook.Operation):
+ def check_or_update(self, checkonly):
+ cnx = self.cnx
+ # take care, can't call get_data() twice
+ try:
+ data = self.__data
+ except AttributeError:
+ data = self.__data = self.get_data()
+ for schemacfg, source in data:
+ if source is None:
+ source = schemacfg.cwsource.repo_source
+ if cnx.added_in_transaction(schemacfg.eid):
+ if not cnx.deleted_in_transaction(schemacfg.eid):
+ source.add_schema_config(schemacfg, checkonly=checkonly)
+ elif cnx.deleted_in_transaction(schemacfg.eid):
+ source.del_schema_config(schemacfg, checkonly=checkonly)
+ else:
+ source.update_schema_config(schemacfg, checkonly=checkonly)
+
+ def precommit_event(self):
+ self.check_or_update(True)
+
+ def postcommit_event(self):
+ self.check_or_update(False)
+
+
+class SourceMappingChangedHook(SourceHook):
+ __regid__ = 'cw.sources.schemaconfig'
+ __select__ = SourceHook.__select__ & is_instance('CWSourceSchemaConfig')
+ events = ('after_add_entity', 'after_update_entity')
+ def __call__(self):
+ if self.event == 'after_add_entity' or (
+ self.event == 'after_update_entity' and 'options' in self.entity.cw_edited):
+ SourceMappingChangedOp.get_instance(self._cw).add_data(
+ (self.entity, None) )
+
+class SourceMappingDeleteHook(SourceHook):
+ __regid__ = 'cw.sources.delschemaconfig'
+ __select__ = SourceHook.__select__ & hook.match_rtype('cw_for_source')
+ events = ('before_delete_relation',)
+ def __call__(self):
+ SourceMappingChangedOp.get_instance(self._cw).add_data(
+ (self._cw.entity_from_eid(self.eidfrom),
+ self._cw.entity_from_eid(self.eidto).repo_source) )
diff -r e1caf133b81c -r b23d58050076 cubicweb/hooks/test/data-computed/schema.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/test/data-computed/schema.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,46 @@
+# copyright 2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+from yams.buildobjs import EntityType, String, Int, SubjectRelation, RelationDefinition
+
+THISYEAR = 2014
+
+class Person(EntityType):
+ name = String()
+ salaire = Int()
+ birth_year = Int(required=True)
+ travaille = SubjectRelation('Societe')
+ age = Int(formula='Any %d - D WHERE X birth_year D' % THISYEAR)
+
+class Societe(EntityType):
+ nom = String()
+ salaire_total = Int(formula='Any SUM(SA) GROUPBY X WHERE P travaille X, P salaire SA')
+
+
+class Agent(EntityType):
+ asalae_id = String(formula='Any E WHERE M mirror_of X, M extid E')
+
+class MirrorEntity(EntityType):
+ extid = String(required=True, unique=True,
+ description=_('external identifier of the object'))
+
+
+class mirror_of(RelationDefinition):
+ subject = 'MirrorEntity'
+ object = ('Agent', 'Societe')
+ cardinality = '?*'
+ inlined = True
diff -r e1caf133b81c -r b23d58050076 cubicweb/hooks/test/data/hooks.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/test/data/hooks.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,8 @@
+from cubicweb.predicates import is_instance
+from cubicweb.hooks import notification
+
+
+class FolderUpdateHook(notification.EntityUpdateHook):
+ __select__ = (notification.EntityUpdateHook.__select__ &
+ is_instance('Folder'))
+ order = 100 # late trigger so that metadata hooks come before.
diff -r e1caf133b81c -r b23d58050076 cubicweb/hooks/test/data/schema.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/test/data/schema.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,85 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+
+from yams.buildobjs import (RelationDefinition, RelationType, EntityType,
+ String, Datetime, Int)
+from yams.reader import context
+
+from cubicweb.schema import ERQLExpression
+
+from cubicweb import _
+
+class friend(RelationDefinition):
+ subject = ('CWUser', 'CWGroup')
+ object = ('CWUser', 'CWGroup')
+ symmetric = True
+
+class Folder(EntityType):
+ name = String()
+
+class parent(RelationDefinition):
+ subject = 'Folder'
+ object = 'Folder'
+ composite = 'object'
+ cardinality = '?*'
+
+class children(RelationDefinition):
+ subject = 'Folder'
+ object = 'Folder'
+ composite = 'subject'
+
+
+class Email(EntityType):
+ """electronic mail"""
+ subject = String(fulltextindexed=True)
+ date = Datetime(description=_('UTC time on which the mail was sent'))
+ messageid = String(required=True, indexed=True)
+ headers = String(description=_('raw headers'))
+
+
+
+class EmailPart(EntityType):
+ """an email attachment"""
+ __permissions__ = {
+ 'read': ('managers', 'users', 'guests',), # XXX if E parts X, U has_read_permission E
+ 'add': ('managers', ERQLExpression('E parts X, U has_update_permission E'),),
+ 'delete': ('managers', ERQLExpression('E parts X, U has_update_permission E')),
+ 'update': ('managers', 'owners',),
+ }
+
+ content = String(fulltextindexed=True)
+ content_format = String(required=True, maxsize=50)
+ ordernum = Int(required=True)
+
+
+class parts(RelationType):
+ subject = 'Email'
+ object = 'EmailPart'
+ cardinality = '*1'
+ composite = 'subject'
+ fulltext_container = 'subject'
+
+class sender(RelationDefinition):
+ subject = 'Email'
+ object = 'EmailAddress'
+ cardinality = '?*'
+ inlined = True
+
+class recipients(RelationDefinition):
+ subject = 'Email'
+ object = 'EmailAddress'
diff -r e1caf133b81c -r b23d58050076 cubicweb/hooks/test/unittest_bookmarks.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/test/unittest_bookmarks.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,38 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+from logilab.common.testlib import unittest_main
+from cubicweb.devtools.testlib import CubicWebTC
+
+class BookmarkHooksTC(CubicWebTC):
+
+
+ def test_auto_delete_bookmarks(self):
+ with self.admin_access.repo_cnx() as cnx:
+ beid = cnx.execute('INSERT Bookmark X: X title "hop", X path "view", X bookmarked_by U '
+ 'WHERE U login "admin"')[0][0]
+ cnx.execute('SET X bookmarked_by U WHERE U login "anon"')
+ cnx.commit()
+ cnx.execute('DELETE X bookmarked_by U WHERE U login "admin"')
+ cnx.commit()
+ self.assertTrue(cnx.execute('Any X WHERE X eid %(x)s', {'x': beid}))
+ cnx.execute('DELETE X bookmarked_by U WHERE U login "anon"')
+ cnx.commit()
+ self.assertFalse(cnx.execute('Any X WHERE X eid %(x)s', {'x': beid}))
+
+if __name__ == '__main__':
+ unittest_main()
diff -r e1caf133b81c -r b23d58050076 cubicweb/hooks/test/unittest_hooks.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/test/unittest_hooks.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,222 @@
+# -*- coding: utf-8 -*-
+# copyright 2003-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""functional tests for core hooks
+
+Note:
+ syncschema.py hooks are mostly tested in server/test/unittest_migrations.py
+"""
+
+from datetime import datetime
+
+from six import text_type
+
+from pytz import utc
+
+from cubicweb import ValidationError
+from cubicweb.devtools.testlib import CubicWebTC
+
+
+class CoreHooksTC(CubicWebTC):
+
+ def test_inlined(self):
+ with self.admin_access.repo_cnx() as cnx:
+ self.assertEqual(self.repo.schema['sender'].inlined, True)
+ cnx.execute('INSERT EmailAddress X: X address "toto@logilab.fr", X alias "hop"')
+ cnx.execute('INSERT EmailPart X: X content_format "text/plain", X ordernum 1, '
+ 'X content "this is a test"')
+ eeid = cnx.execute('INSERT Email X: X messageid "<1234>", X subject "test", '
+ 'X sender Y, X recipients Y, X parts P '
+ 'WHERE Y is EmailAddress, P is EmailPart')[0][0]
+ cnx.execute('SET X sender Y WHERE X is Email, Y is EmailAddress')
+ rset = cnx.execute('Any S WHERE X sender S, X eid %s' % eeid)
+ self.assertEqual(len(rset), 1)
+
+ def test_symmetric(self):
+ with self.admin_access.repo_cnx() as cnx:
+ u1 = self.create_user(cnx, u'1')
+ u2 = self.create_user(cnx, u'2')
+ u3 = self.create_user(cnx, u'3')
+ ga = cnx.create_entity('CWGroup', name=u'A')
+ gb = cnx.create_entity('CWGroup', name=u'B')
+ u1.cw_set(friend=u2)
+ u2.cw_set(friend=u3)
+ ga.cw_set(friend=gb)
+ ga.cw_set(friend=u1)
+ cnx.commit()
+ for l1, l2 in ((u'1', u'2'),
+ (u'2', u'3')):
+ self.assertTrue(cnx.execute('Any U1,U2 WHERE U1 friend U2, U1 login %(l1)s, U2 login %(l2)s',
+ {'l1': l1, 'l2': l2}))
+ self.assertTrue(cnx.execute('Any U1,U2 WHERE U2 friend U1, U1 login %(l1)s, U2 login %(l2)s',
+ {'l1': l1, 'l2': l2}))
+ self.assertTrue(cnx.execute('Any GA,GB WHERE GA friend GB, GA name "A", GB name "B"'))
+ self.assertTrue(cnx.execute('Any GA,GB WHERE GB friend GA, GA name "A", GB name "B"'))
+ self.assertTrue(cnx.execute('Any GA,U1 WHERE GA friend U1, GA name "A", U1 login "1"'))
+ self.assertTrue(cnx.execute('Any GA,U1 WHERE U1 friend GA, GA name "A", U1 login "1"'))
+ self.assertFalse(cnx.execute('Any GA,U WHERE GA friend U, GA name "A", U login "2"'))
+ for l1, l2 in ((u'1', u'3'),
+ (u'3', u'1')):
+ self.assertFalse(cnx.execute('Any U1,U2 WHERE U1 friend U2, U1 login %(l1)s, U2 login %(l2)s',
+ {'l1': l1, 'l2': l2}))
+ self.assertFalse(cnx.execute('Any U1,U2 WHERE U2 friend U1, U1 login %(l1)s, U2 login %(l2)s',
+ {'l1': l1, 'l2': l2}))
+
+ def test_html_tidy_hook(self):
+ with self.admin_access.client_cnx() as cnx:
+ entity = cnx.create_entity('Workflow', name=u'wf1',
+ description_format=u'text/html',
+ description=u'yo')
+ self.assertEqual(u'yo', entity.description)
+ entity = cnx.create_entity('Workflow', name=u'wf2',
+ description_format=u'text/html',
+ description=u'<b>yo')
+ self.assertEqual(u'<b>yo</b>', entity.description)
+ entity = cnx.create_entity('Workflow', name=u'wf3',
+ description_format=u'text/html',
+ description=u'<b>yo</b>')
+ self.assertEqual(u'<b>yo</b>', entity.description)
+ entity = cnx.create_entity('Workflow', name=u'wf4',
+ description_format=u'text/html',
+ description=u'<b>R&D</b>')
+ self.assertEqual(u'<b>R&amp;D</b>', entity.description, )
+ entity = cnx.create_entity('Workflow', name=u'wf5',
+ description_format=u'text/html',
+ description=u"<div>R&D<br/>")
+ self.assertEqual(u"<div>R&amp;D<br/></div>", entity.description)
+
+ def test_metadata_cwuri(self):
+ with self.admin_access.repo_cnx() as cnx:
+ entity = cnx.create_entity('Workflow', name=u'wf1')
+ self.assertEqual(entity.cwuri, self.repo.config['base-url'] + str(entity.eid))
+
+ def test_metadata_creation_modification_date(self):
+ with self.admin_access.repo_cnx() as cnx:
+ _now = datetime.now(utc)
+ entity = cnx.create_entity('Workflow', name=u'wf1')
+ self.assertEqual((entity.creation_date - _now).seconds, 0)
+ self.assertEqual((entity.modification_date - _now).seconds, 0)
+
+ def test_metadata_created_by(self):
+ with self.admin_access.repo_cnx() as cnx:
+ entity = cnx.create_entity('Bookmark', title=u'wf1', path=u'/view')
+ cnx.commit() # fire operations
+ self.assertEqual(len(entity.created_by), 1) # make sure we have only one creator
+ self.assertEqual(entity.created_by[0].eid, cnx.user.eid)
+
+ def test_metadata_owned_by(self):
+ with self.admin_access.repo_cnx() as cnx:
+ entity = cnx.create_entity('Bookmark', title=u'wf1', path=u'/view')
+ cnx.commit() # fire operations
+ self.assertEqual(len(entity.owned_by), 1) # make sure we have only one owner
+ self.assertEqual(entity.owned_by[0].eid, cnx.user.eid)
+
+ def test_user_login_stripped(self):
+ with self.admin_access.repo_cnx() as cnx:
+ u = self.create_user(cnx, ' joe ')
+ tname = cnx.execute('Any L WHERE E login L, E eid %(e)s',
+ {'e': u.eid})[0][0]
+ self.assertEqual(tname, 'joe')
+ cnx.execute('SET X login " jijoe " WHERE X eid %(x)s', {'x': u.eid})
+ tname = cnx.execute('Any L WHERE E login L, E eid %(e)s',
+ {'e': u.eid})[0][0]
+ self.assertEqual(tname, 'jijoe')
+
+
+class UserGroupHooksTC(CubicWebTC):
+
+ def test_user_group_synchronization(self):
+ with self.admin_access.repo_cnx() as cnx:
+ user = cnx.user
+ self.assertEqual(user.groups, set(('managers',)))
+ cnx.execute('SET X in_group G WHERE X eid %s, G name "guests"' % user.eid)
+ self.assertEqual(user.groups, set(('managers',)))
+ cnx.commit()
+ self.assertEqual(user.groups, set(('managers', 'guests')))
+ cnx.execute('DELETE X in_group G WHERE X eid %s, G name "guests"' % user.eid)
+ self.assertEqual(user.groups, set(('managers', 'guests')))
+ cnx.commit()
+ self.assertEqual(user.groups, set(('managers',)))
+
+ def test_user_composite_owner(self):
+ with self.admin_access.repo_cnx() as cnx:
+ self.create_user(cnx, 'toto').eid
+ # composite of euser should be owned by the euser regardless of who created it
+ cnx.execute('INSERT EmailAddress X: X address "toto@logilab.fr", U use_email X '
+ 'WHERE U login "toto"')
+ cnx.commit()
+ self.assertEqual(cnx.execute('Any A WHERE X owned_by U, U use_email X,'
+ 'U login "toto", X address A')[0][0],
+ 'toto@logilab.fr')
+
+ def test_user_composite_no_owner_on_deleted_entity(self):
+ with self.admin_access.repo_cnx() as cnx:
+ u = self.create_user(cnx, 'toto').eid
+ cnx.commit()
+ e = cnx.create_entity('EmailAddress', address=u'toto@logilab.fr', reverse_use_email=u)
+ e.cw_delete()
+ cnx.commit()
+ self.assertFalse(cnx.system_sql(
+ 'SELECT * FROM owned_by_relation '
+ 'WHERE eid_from NOT IN (SELECT eid FROM entities)').fetchall())
+
+ def test_no_created_by_on_deleted_entity(self):
+ with self.admin_access.repo_cnx() as cnx:
+ eid = cnx.execute('INSERT EmailAddress X: X address "toto@logilab.fr"')[0][0]
+ cnx.execute('DELETE EmailAddress X WHERE X eid %s' % eid)
+ cnx.commit()
+ self.assertFalse(cnx.execute('Any X WHERE X created_by Y, X eid >= %(x)s', {'x': eid}))
+
+
+class SchemaHooksTC(CubicWebTC):
+
+ def test_duplicate_etype_error(self):
+ with self.admin_access.repo_cnx() as cnx:
+ # check we can't add a CWEType or CWRType entity if it already exists one
+ # with the same name
+ self.assertRaises(ValidationError,
+ cnx.execute, 'INSERT CWEType X: X name "CWUser"')
+ cnx.rollback()
+ self.assertRaises(ValidationError,
+ cnx.execute, 'INSERT CWRType X: X name "in_group"')
+
+ def test_validation_unique_constraint(self):
+ with self.admin_access.repo_cnx() as cnx:
+ with self.assertRaises(ValidationError) as cm:
+ cnx.execute('INSERT CWUser X: X login "admin", X upassword "admin"')
+ ex = cm.exception
+ ex.translate(text_type)
+ self.assertIsInstance(ex.entity, int)
+ self.assertEqual(ex.errors,
+ {'': u'some relations violate a unicity constraint',
+ 'login': u'login is part of violated unicity constraint'})
+
+
+if __name__ == '__main__':
+ import unittest
+ unittest.main()
diff -r e1caf133b81c -r b23d58050076 cubicweb/hooks/test/unittest_integrity.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/test/unittest_integrity.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,162 @@
+# -*- coding: utf-8 -*-
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""functional tests for integrity hooks"""
+
+from cubicweb import ValidationError
+from cubicweb.devtools.testlib import CubicWebTC
+
+class CoreHooksTC(CubicWebTC):
+
+ def test_delete_internal_entities(self):
+ with self.admin_access.repo_cnx() as cnx:
+ self.assertRaises(ValidationError, cnx.execute,
+ 'DELETE CWEType X WHERE X name "CWEType"')
+ cnx.rollback()
+ self.assertRaises(ValidationError, cnx.execute,
+ 'DELETE CWRType X WHERE X name "relation_type"')
+ cnx.rollback()
+ self.assertRaises(ValidationError, cnx.execute,
+ 'DELETE CWGroup X WHERE X name "owners"')
+
+ def test_delete_required_relations_subject(self):
+ with self.admin_access.repo_cnx() as cnx:
+ cnx.execute('INSERT CWUser X: X login "toto", X upassword "hop", X in_group Y '
+ 'WHERE Y name "users"')
+ cnx.commit()
+ cnx.execute('DELETE X in_group Y WHERE X login "toto", Y name "users"')
+ self.assertRaises(ValidationError, cnx.commit)
+ cnx.rollback()
+ cnx.execute('DELETE X in_group Y WHERE X login "toto"')
+ cnx.execute('SET X in_group Y WHERE X login "toto", Y name "guests"')
+ cnx.commit()
+
+ def test_static_vocabulary_check(self):
+ with self.admin_access.repo_cnx() as cnx:
+ self.assertRaises(ValidationError,
+ cnx.execute,
+ 'SET X composite "whatever" WHERE X from_entity FE, FE name "CWUser", '
+ 'X relation_type RT, RT name "in_group"')
+
+ def test_missing_required_relations_subject_inline(self):
+ with self.admin_access.repo_cnx() as cnx:
+ # missing in_group relation
+ cnx.execute('INSERT CWUser X: X login "toto", X upassword "hop"')
+ self.assertRaises(ValidationError, cnx.commit)
+
+ def test_composite_1(self):
+ with self.admin_access.repo_cnx() as cnx:
+ cnx.execute('INSERT EmailAddress X: X address "toto@logilab.fr", X alias "hop"')
+ cnx.execute('INSERT EmailPart X: X content_format "text/plain", X ordernum 1, '
+ 'X content "this is a test"')
+ cnx.execute('INSERT Email X: X messageid "<1234>", X subject "test", X sender Y, '
+ 'X recipients Y, X parts P '
+ 'WHERE Y is EmailAddress, P is EmailPart')
+ self.assertTrue(cnx.execute('Email X WHERE X sender Y'))
+ cnx.commit()
+ cnx.execute('DELETE Email X')
+ rset = cnx.execute('Any X WHERE X is EmailPart')
+ self.assertEqual(len(rset), 0)
+ cnx.commit()
+ rset = cnx.execute('Any X WHERE X is EmailPart')
+ self.assertEqual(len(rset), 0)
+
+ def test_composite_2(self):
+ with self.admin_access.repo_cnx() as cnx:
+ cnx.execute('INSERT EmailAddress X: X address "toto@logilab.fr", X alias "hop"')
+ cnx.execute('INSERT EmailPart X: X content_format "text/plain", X ordernum 1, '
+ 'X content "this is a test"')
+ cnx.execute('INSERT Email X: X messageid "<1234>", X subject "test", X sender Y, '
+ 'X recipients Y, X parts P '
+ 'WHERE Y is EmailAddress, P is EmailPart')
+ cnx.commit()
+ cnx.execute('DELETE Email X')
+ cnx.execute('DELETE EmailPart X')
+ cnx.commit()
+ rset = cnx.execute('Any X WHERE X is EmailPart')
+ self.assertEqual(len(rset), 0)
+
+ def test_composite_redirection(self):
+ with self.admin_access.repo_cnx() as cnx:
+ cnx.execute('INSERT EmailAddress X: X address "toto@logilab.fr", X alias "hop"')
+ cnx.execute('INSERT EmailPart X: X content_format "text/plain", X ordernum 1, '
+ 'X content "this is a test"')
+ cnx.execute('INSERT Email X: X messageid "<1234>", X subject "test", X sender Y, '
+ 'X recipients Y, X parts P '
+ 'WHERE Y is EmailAddress, P is EmailPart')
+ cnx.execute('INSERT Email X: X messageid "<2345>", X subject "test2", X sender Y, '
+ 'X recipients Y '
+ 'WHERE Y is EmailAddress')
+ cnx.commit()
+ cnx.execute('DELETE X parts Y WHERE X messageid "<1234>"')
+ cnx.execute('SET X parts Y WHERE X messageid "<2345>"')
+ cnx.commit()
+ rset = cnx.execute('Any X WHERE X is EmailPart')
+ self.assertEqual(len(rset), 1)
+ self.assertEqual(rset.get_entity(0, 0).reverse_parts[0].messageid, '<2345>')
+
+ def test_composite_object_relation_deletion(self):
+ with self.admin_access.repo_cnx() as cnx:
+ root = cnx.create_entity('Folder', name=u'root')
+ a = cnx.create_entity('Folder', name=u'a', parent=root)
+ cnx.create_entity('Folder', name=u'b', parent=a)
+ cnx.create_entity('Folder', name=u'c', parent=root)
+ cnx.commit()
+ cnx.execute('DELETE Folder F WHERE F name "a"')
+ cnx.execute('DELETE F parent R WHERE R name "root"')
+ cnx.commit()
+ self.assertEqual([['root'], ['c']],
+ cnx.execute('Any NF WHERE F is Folder, F name NF').rows)
+ self.assertEqual([], cnx.execute('Any NF,NP WHERE F parent P, F name NF, P name NP').rows)
+
+ def test_composite_subject_relation_deletion(self):
+ with self.admin_access.repo_cnx() as cnx:
+ root = cnx.create_entity('Folder', name=u'root')
+ a = cnx.create_entity('Folder', name=u'a')
+ b = cnx.create_entity('Folder', name=u'b')
+ c = cnx.create_entity('Folder', name=u'c')
+ root.cw_set(children=(a, c))
+ a.cw_set(children=b)
+ cnx.commit()
+ cnx.execute('DELETE Folder F WHERE F name "a"')
+ cnx.execute('DELETE R children F WHERE R name "root"')
+ cnx.commit()
+ self.assertEqual([['root'], ['c']],
+ cnx.execute('Any NF WHERE F is Folder, F name NF').rows)
+ self.assertEqual([], cnx.execute('Any NF,NP WHERE F parent P, F name NF, P name NP').rows)
+
+ def test_unsatisfied_constraints(self):
+ with self.admin_access.repo_cnx() as cnx:
+ cnx.execute('SET U in_group G WHERE G name "owners", U login "admin"')[0][0]
+ with self.assertRaises(ValidationError) as cm:
+ cnx.commit()
+ self.assertEqual(cm.exception.errors,
+ {'in_group-object': u'RQLConstraint NOT O name "owners" failed'})
+
+ def test_unique_constraint(self):
+ with self.admin_access.repo_cnx() as cnx:
+ entity = cnx.create_entity('CWGroup', name=u'trout')
+ cnx.commit()
+ self.assertRaises(ValidationError, cnx.create_entity, 'CWGroup', name=u'trout')
+ cnx.rollback()
+ cnx.execute('SET X name "trout" WHERE X eid %(x)s', {'x': entity.eid})
+ cnx.commit()
+
+if __name__ == '__main__':
+ from logilab.common.testlib import unittest_main
+ unittest_main()
diff -r e1caf133b81c -r b23d58050076 cubicweb/hooks/test/unittest_notificationhooks.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/test/unittest_notificationhooks.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,39 @@
+# copyright 2015 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""tests for notification hooks"""
+
+from cubicweb.devtools.testlib import CubicWebTC
+
+
+class NotificationHooksTC(CubicWebTC):
+
+ def test_entity_update(self):
+ """Check transaction_data['changes'] filled by "notifentityupdated" hook.
+ """
+ with self.admin_access.repo_cnx() as cnx:
+ root = cnx.create_entity('Folder', name=u'a')
+ cnx.commit()
+ root.cw_set(name=u'b')
+ self.assertIn('changes', cnx.transaction_data)
+ self.assertEqual(cnx.transaction_data['changes'],
+ {root.eid: set([('name', u'a', u'b')])})
+
+
+if __name__ == '__main__':
+ from logilab.common.testlib import unittest_main
+ unittest_main()
diff -r e1caf133b81c -r b23d58050076 cubicweb/hooks/test/unittest_security.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/test/unittest_security.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,56 @@
+# copyright 2015 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+
+from cubicweb.devtools.testlib import CubicWebTC
+from cubicweb.server import hook
+from cubicweb.predicates import is_instance
+
+
+class SecurityHooksTC(CubicWebTC):
+ def setup_database(self):
+ with self.admin_access.repo_cnx() as cnx:
+ self.add_eid = cnx.create_entity('EmailAddress',
+ address=u'hop@perdu.com',
+ reverse_use_email=cnx.user.eid).eid
+ cnx.commit()
+
+ def test_inlined_cw_edited_relation(self):
+ """modification of cw_edited to add an inlined relation shouldn't trigger a security error.
+
+ Test for https://www.cubicweb.org/ticket/5477315
+ """
+ sender = self.repo.schema['Email'].rdef('sender')
+ with self.temporary_permissions((sender, {'add': ()})):
+
+ class MyHook(hook.Hook):
+ __regid__ = 'test.pouet'
+ __select__ = hook.Hook.__select__ & is_instance('Email')
+ events = ('before_add_entity',)
+
+ def __call__(self):
+ self.entity.cw_edited['sender'] = self._cw.user.primary_email[0].eid
+
+ with self.temporary_appobjects(MyHook):
+ with self.admin_access.repo_cnx() as cnx:
+ email = cnx.create_entity('Email', messageid=u'1234')
+ cnx.commit()
+ self.assertEqual(email.sender[0].eid, self.add_eid)
+
+if __name__ == '__main__':
+ from logilab.common.testlib import unittest_main
+ unittest_main()
diff -r e1caf133b81c -r b23d58050076 cubicweb/hooks/test/unittest_synccomputed.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/test/unittest_synccomputed.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,146 @@
+# copyright 2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""unit tests for computed attributes/relations hooks"""
+
+from unittest import TestCase
+
+from yams.buildobjs import EntityType, String, Int, SubjectRelation
+
+from cubicweb.devtools.testlib import CubicWebTC
+from cubicweb.schema import build_schema_from_namespace
+
+
+class FormulaDependenciesMatrixTC(TestCase):
+
+ def simple_schema(self):
+ THISYEAR = 2014
+
+ class Person(EntityType):
+ name = String()
+ salary = Int()
+ birth_year = Int(required=True)
+ works_for = SubjectRelation('Company')
+ age = Int(formula='Any %d - D WHERE X birth_year D' % THISYEAR)
+
+ class Company(EntityType):
+ name = String()
+ total_salary = Int(formula='Any SUM(SA) GROUPBY X WHERE P works_for X, P salary SA')
+
+ schema = build_schema_from_namespace(vars().items())
+ return schema
+
+ def setUp(self):
+ from cubicweb.hooks.synccomputed import _FormulaDependenciesMatrix
+ self.schema = self.simple_schema()
+ self.dependencies = _FormulaDependenciesMatrix(self.schema)
+
+ def test_computed_attributes_by_etype(self):
+ comp_by_etype = self.dependencies.computed_attribute_by_etype
+ self.assertEqual(len(comp_by_etype), 2)
+ values = comp_by_etype['Person']
+ self.assertEqual(len(values), 1)
+ self.assertEqual(values[0].rtype, 'age')
+ values = comp_by_etype['Company']
+ self.assertEqual(len(values), 1)
+ self.assertEqual(values[0].rtype, 'total_salary')
+
+ def test_computed_attribute_by_relation(self):
+ comp_by_rdef = self.dependencies.computed_attribute_by_relation
+ self.assertEqual(len(comp_by_rdef), 1)
+ key, values = next(iter(comp_by_rdef.items()))
+ self.assertEqual(key.rtype, 'works_for')
+ self.assertEqual(len(values), 1)
+ self.assertEqual(values[0].rtype, 'total_salary')
+
+ def test_computed_attribute_by_etype_attrs(self):
+ comp_by_attr = self.dependencies.computed_attribute_by_etype_attrs
+ self.assertEqual(len(comp_by_attr), 1)
+ values = comp_by_attr['Person']
+ self.assertEqual(len(values), 2)
+ values = set((rdef.formula, tuple(v))
+ for rdef, v in values.items())
+ self.assertEqual(values,
+ set((('Any 2014 - D WHERE X birth_year D', tuple(('birth_year',))),
+ ('Any SUM(SA) GROUPBY X WHERE P works_for X, P salary SA', tuple(('salary',)))))
+ )
+
+
+class ComputedAttributeTC(CubicWebTC):
+ appid = 'data-computed'
+
+ def setup_entities(self, req):
+ self.societe = req.create_entity('Societe', nom=u'Foo')
+ req.create_entity('Person', name=u'Titi', salaire=1000,
+ travaille=self.societe, birth_year=2001)
+ self.tata = req.create_entity('Person', name=u'Tata', salaire=2000,
+ travaille=self.societe, birth_year=1990)
+
+
+ def test_update_on_add_remove_relation(self):
+ """check the rewriting of a computed attribute"""
+ with self.admin_access.web_request() as req:
+ self.setup_entities(req)
+ req.cnx.commit()
+ rset = req.execute('Any S WHERE X salaire_total S, X nom "Foo"')
+ self.assertEqual(rset[0][0], 3000)
+ # Add relation.
+ toto = req.create_entity('Person', name=u'Toto', salaire=1500,
+ travaille=self.societe, birth_year=1988)
+ req.cnx.commit()
+ rset = req.execute('Any S WHERE X salaire_total S, X nom "Foo"')
+ self.assertEqual(rset[0][0], 4500)
+ # Delete relation.
+ toto.cw_set(travaille=None)
+ req.cnx.commit()
+ rset = req.execute('Any S WHERE X salaire_total S, X nom "Foo"')
+ self.assertEqual(rset[0][0], 3000)
+
+ def test_recompute_on_attribute_update(self):
+ """check the modification of an attribute triggers the update of the
+ computed attributes that depend on it"""
+ with self.admin_access.web_request() as req:
+ self.setup_entities(req)
+ req.cnx.commit()
+ rset = req.execute('Any S WHERE X salaire_total S, X nom "Foo"')
+ self.assertEqual(rset[0][0], 3000)
+ # Update attribute.
+ self.tata.cw_set(salaire=1000)
+ req.cnx.commit()
+ rset = req.execute('Any S WHERE X salaire_total S, X nom "Foo"')
+ self.assertEqual(rset[0][0], 2000)
+
+ def test_init_on_entity_creation(self):
+ """check the computed attribute is initialized on entity creation"""
+ with self.admin_access.web_request() as req:
+ p = req.create_entity('Person', name=u'Tata', salaire=2000,
+ birth_year=1990)
+ req.cnx.commit()
+ rset = req.execute('Any A, X WHERE X age A, X name "Tata"')
+ self.assertEqual(rset[0][0], 2014 - 1990)
+
+
+ def test_recompute_on_ambiguous_relation(self):
+ # check we don't end up with TypeResolverException as in #4901163
+ with self.admin_access.client_cnx() as cnx:
+ societe = cnx.create_entity('Societe', nom=u'Foo')
+ cnx.create_entity('MirrorEntity', mirror_of=societe, extid=u'1')
+ cnx.commit()
+
+if __name__ == '__main__':
+ from logilab.common.testlib import unittest_main
+ unittest_main()
diff -r e1caf133b81c -r b23d58050076 cubicweb/hooks/test/unittest_syncschema.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/test/unittest_syncschema.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,406 @@
+# copyright 2003-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""cubicweb.server.hooks.syncschema unit and functional tests"""
+
+from yams.constraints import BoundaryConstraint
+
+from cubicweb import ValidationError, Binary
+from cubicweb.schema import META_RTYPES
+from cubicweb.devtools import startpgcluster, stoppgcluster, PostgresApptestConfiguration
+from cubicweb.devtools.testlib import CubicWebTC
+from cubicweb.server.sqlutils import SQL_PREFIX
+from cubicweb.devtools.repotest import schema_eids_idx
+
+
+def setUpModule():
+    # spin up a dedicated PostgreSQL cluster for this test module
+    startpgcluster(__file__)
+
+
+def tearDownModule(*args):
+    stoppgcluster(__file__)
+    # drop the class-level cache built in SchemaModificationHooksTC.setUp
+    # NOTE(review): raises AttributeError if no test ever ran (setUp never
+    # set the attribute) — TODO confirm that cannot happen in practice
+    del SchemaModificationHooksTC.schema_eids
+
+
+class SchemaModificationHooksTC(CubicWebTC):
+ configcls = PostgresApptestConfiguration
+
+    def setUp(self):
+        super(SchemaModificationHooksTC, self).setUp()
+        # reload a freshly deserialized schema so the schema-modification
+        # hooks under test start from a known database-backed state
+        self.repo.set_schema(self.repo.deserialize_schema(), resetvreg=False)
+        # cache schema entity eids on the class; removed in tearDownModule
+        self.__class__.schema_eids = schema_eids_idx(self.repo.schema)
+
+    def index_exists(self, cnx, etype, attr, unique=False):
+        """return True if the database has an index on `etype`'s attribute
+        `attr` (a unique index when `unique` is True)"""
+        dbhelper = self.repo.system_source.dbhelper
+        sqlcursor = cnx.cnxset.cu
+        return dbhelper.index_exists(sqlcursor,
+                                     SQL_PREFIX + etype,
+                                     SQL_PREFIX + attr,
+                                     unique=unique)
+
+    def _set_perms(self, cnx, eid):
+        # give the schema entity `eid` read permission to every group, add
+        # to managers and delete to owners, so it is usable after commit
+        cnx.execute('SET X read_permission G WHERE X eid %(x)s, G is CWGroup',
+                    {'x': eid})
+        cnx.execute('SET X add_permission G WHERE X eid %(x)s, G is CWGroup, '
+                    'G name "managers"', {'x': eid})
+        cnx.execute('SET X delete_permission G WHERE X eid %(x)s, G is CWGroup, '
+                    'G name "owners"', {'x': eid})
+
+    def _set_attr_perms(self, cnx, eid):
+        # attribute counterpart of _set_perms: attributes take an update
+        # permission instead of add/delete
+        cnx.execute('SET X read_permission G WHERE X eid %(x)s, G is CWGroup',
+                    {'x': eid})
+        cnx.execute('SET X update_permission G WHERE X eid %(x)s, G is CWGroup, G name "managers"',
+                    {'x': eid})
+
+ def test_base(self):
+ with self.admin_access.repo_cnx() as cnx:
+ schema = self.repo.schema
+ self.assertFalse(schema.has_entity('Societe2'))
+ self.assertFalse(schema.has_entity('concerne2'))
+ # schema should be update on insertion (after commit)
+ eeid = cnx.execute('INSERT CWEType X: X name "Societe2", '
+ 'X description "", X final FALSE')[0][0]
+ self._set_perms(cnx, eeid)
+ cnx.execute('INSERT CWRType X: X name "concerne2", X description "", '
+ 'X final FALSE, X symmetric FALSE')
+ self.assertFalse(schema.has_entity('Societe2'))
+ self.assertFalse(schema.has_entity('concerne2'))
+ # have to commit before adding definition relations
+ cnx.commit()
+ self.assertTrue(schema.has_entity('Societe2'))
+ self.assertTrue(schema.has_relation('concerne2'))
+ attreid = cnx.execute('INSERT CWAttribute X: X cardinality "11", '
+ 'X defaultval %(default)s, X indexed TRUE, '
+ 'X relation_type RT, X from_entity E, X to_entity F '
+ 'WHERE RT name "name", E name "Societe2", '
+ 'F name "String"',
+ {'default': Binary.zpickle('noname')})[0][0]
+ self._set_attr_perms(cnx, attreid)
+ concerne2_rdef_eid = cnx.execute(
+ 'INSERT CWRelation X: X cardinality "**", X relation_type RT, '
+ 'X from_entity E, X to_entity E '
+ 'WHERE RT name "concerne2", E name "Societe2"')[0][0]
+ self._set_perms(cnx, concerne2_rdef_eid)
+ self.assertNotIn('name', schema['Societe2'].subject_relations())
+ self.assertNotIn('concerne2', schema['Societe2'].subject_relations())
+ self.assertFalse(self.index_exists(cnx, 'Societe2', 'name'))
+ cnx.commit()
+ self.assertIn('name', schema['Societe2'].subject_relations())
+ self.assertIn('concerne2', schema['Societe2'].subject_relations())
+ self.assertTrue(self.index_exists(cnx, 'Societe2', 'name'))
+ # now we should be able to insert and query Societe2
+ s2eid = cnx.execute('INSERT Societe2 X: X name "logilab"')[0][0]
+ cnx.execute('Societe2 X WHERE X name "logilab"')
+ cnx.execute('SET X concerne2 X WHERE X name "logilab"')
+ rset = cnx.execute('Any X WHERE X concerne2 Y')
+ self.assertEqual(rset.rows, [[s2eid]])
+ # check that when a relation definition is deleted, existing relations are deleted
+ rdefeid = cnx.execute('INSERT CWRelation X: X cardinality "**", X relation_type RT, '
+ ' X from_entity E, X to_entity E '
+ 'WHERE RT name "concerne2", E name "CWUser"')[0][0]
+ self._set_perms(cnx, rdefeid)
+ cnx.commit()
+ cnx.execute('DELETE CWRelation X WHERE X eid %(x)s', {'x': concerne2_rdef_eid})
+ cnx.commit()
+ self.assertIn('concerne2', schema['CWUser'].subject_relations())
+ self.assertNotIn('concerne2', schema['Societe2'].subject_relations())
+ self.assertFalse(cnx.execute('Any X WHERE X concerne2 Y'))
+ # schema should be cleaned on delete (after commit)
+ cnx.execute('DELETE CWEType X WHERE X name "Societe2"')
+ cnx.execute('DELETE CWRType X WHERE X name "concerne2"')
+ self.assertTrue(self.index_exists(cnx, 'Societe2', 'name'))
+ self.assertTrue(schema.has_entity('Societe2'))
+ self.assertTrue(schema.has_relation('concerne2'))
+ cnx.commit()
+ self.assertFalse(self.index_exists(cnx, 'Societe2', 'name'))
+ self.assertFalse(schema.has_entity('Societe2'))
+ self.assertFalse(schema.has_entity('concerne2'))
+ self.assertNotIn('concerne2', schema['CWUser'].subject_relations())
+
+ def test_metartype_with_nordefs(self):
+ with self.admin_access.repo_cnx() as cnx:
+ META_RTYPES.add('custom_meta')
+ cnx.execute('INSERT CWRType X: X name "custom_meta", X description "", '
+ 'X final FALSE, X symmetric FALSE')
+ cnx.commit()
+ eeid = cnx.execute('INSERT CWEType X: X name "NEWEtype", '
+ 'X description "", X final FALSE')[0][0]
+ self._set_perms(cnx, eeid)
+ cnx.commit()
+ META_RTYPES.remove('custom_meta')
+
+ def test_metartype_with_somerdefs(self):
+ with self.admin_access.repo_cnx() as cnx:
+ META_RTYPES.add('custom_meta')
+ cnx.execute('INSERT CWRType X: X name "custom_meta", X description "", '
+ 'X final FALSE, X symmetric FALSE')
+ cnx.commit()
+ rdefeid = cnx.execute('INSERT CWRelation X: X cardinality "**", X relation_type RT, '
+ ' X from_entity E, X to_entity E '
+ 'WHERE RT name "custom_meta", E name "CWUser"')[0][0]
+ self._set_perms(cnx, rdefeid)
+ cnx.commit()
+ eeid = cnx.execute('INSERT CWEType X: X name "NEWEtype", '
+ 'X description "", X final FALSE')[0][0]
+ self._set_perms(cnx, eeid)
+ cnx.commit()
+ META_RTYPES.remove('custom_meta')
+
+ def test_is_instance_of_insertions(self):
+ with self.admin_access.repo_cnx() as cnx:
+ seid = cnx.execute('INSERT Transition T: T name "subdiv"')[0][0]
+ is_etypes = [etype for etype, in cnx.execute('Any ETN WHERE X eid %s, '
+ 'X is ET, ET name ETN' % seid)]
+ self.assertEqual(is_etypes, ['Transition'])
+ instanceof_etypes = [etype
+ for etype, in cnx.execute('Any ETN WHERE X eid %s, '
+ 'X is_instance_of ET, ET name ETN'
+ % seid)]
+ self.assertEqual(sorted(instanceof_etypes), ['BaseTransition', 'Transition'])
+ snames = [name for name, in cnx.execute('Any N WHERE S is BaseTransition, S name N')]
+ self.assertNotIn('subdiv', snames)
+ snames = [name for name, in cnx.execute('Any N WHERE S is_instance_of BaseTransition, '
+ 'S name N')]
+ self.assertIn('subdiv', snames)
+
+ def test_perms_synchronization_1(self):
+ with self.admin_access.repo_cnx() as cnx:
+ schema = self.repo.schema
+ self.assertEqual(schema['CWUser'].get_groups('read'), set(('managers', 'users')))
+ self.assertTrue(cnx.execute('Any X, Y WHERE X is CWEType, X name "CWUser", '
+ 'Y is CWGroup, Y name "users"')[0])
+ cnx.execute('DELETE X read_permission Y '
+ 'WHERE X is CWEType, X name "CWUser", Y name "users"')
+ self.assertEqual(schema['CWUser'].get_groups('read'), set(('managers', 'users', )))
+ cnx.commit()
+ self.assertEqual(schema['CWUser'].get_groups('read'), set(('managers',)))
+ cnx.execute('SET X read_permission Y WHERE X is CWEType, '
+ 'X name "CWUser", Y name "users"')
+ cnx.commit()
+ self.assertEqual(schema['CWUser'].get_groups('read'),
+ set(('managers', 'users',)))
+
+ def test_perms_synchronization_2(self):
+ with self.admin_access.repo_cnx() as cnx:
+ schema = self.repo.schema['in_group'].rdefs[('CWUser', 'CWGroup')]
+ self.assertEqual(schema.get_groups('read'),
+ set(('managers', 'users', 'guests')))
+ cnx.execute('DELETE X read_permission Y WHERE X relation_type RT, '
+ 'RT name "in_group", Y name "guests"')
+ self.assertEqual(schema.get_groups('read'),
+ set(('managers', 'users', 'guests')))
+ cnx.commit()
+ self.assertEqual(schema.get_groups('read'),
+ set(('managers', 'users')))
+ cnx.execute('SET X read_permission Y WHERE X relation_type RT, '
+ 'RT name "in_group", Y name "guests"')
+ self.assertEqual(schema.get_groups('read'),
+ set(('managers', 'users')))
+ cnx.commit()
+ self.assertEqual(schema.get_groups('read'),
+ set(('managers', 'users', 'guests')))
+
+ def test_nonregr_user_edit_itself(self):
+ with self.admin_access.repo_cnx() as cnx:
+ ueid = cnx.user.eid
+ groupeids = [eid for eid, in cnx.execute('CWGroup G WHERE G name '
+ 'in ("managers", "users")')]
+ cnx.execute('DELETE X in_group Y WHERE X eid %s' % ueid)
+ cnx.execute('SET X surname "toto" WHERE X eid %s' % ueid)
+ cnx.execute('SET X in_group Y WHERE X eid %s, Y name "managers"' % ueid)
+ cnx.commit()
+ eeid = cnx.execute('Any X WHERE X is CWEType, X name "CWEType"')[0][0]
+ cnx.execute('DELETE X read_permission Y WHERE X eid %s' % eeid)
+ cnx.execute('SET X final FALSE WHERE X eid %s' % eeid)
+ cnx.execute('SET X read_permission Y WHERE X eid %s, Y eid in (%s, %s)'
+ % (eeid, groupeids[0], groupeids[1]))
+ cnx.commit()
+ cnx.execute('Any X WHERE X is CWEType, X name "CWEType"')
+
+ # schema modification hooks tests #########################################
+
+ def test_uninline_relation(self):
+ with self.admin_access.repo_cnx() as cnx:
+ try:
+ self.assertTrue(self.schema['state_of'].inlined)
+ cnx.execute('SET X inlined FALSE WHERE X name "state_of"')
+ self.assertTrue(self.schema['state_of'].inlined)
+ cnx.commit()
+ self.assertFalse(self.schema['state_of'].inlined)
+ self.assertFalse(self.index_exists(cnx, 'State', 'state_of'))
+ rset = cnx.execute('Any X, Y WHERE X state_of Y')
+ self.assertEqual(len(rset), 2) # user states
+ finally:
+ cnx.execute('SET X inlined TRUE WHERE X name "state_of"')
+ self.assertFalse(self.schema['state_of'].inlined)
+ cnx.commit()
+ self.assertTrue(self.schema['state_of'].inlined)
+ self.assertTrue(self.index_exists(cnx, 'State', 'state_of'))
+ rset = cnx.execute('Any X, Y WHERE X state_of Y')
+ self.assertEqual(len(rset), 2)
+
+ def test_indexed_change(self):
+ with self.admin_access.repo_cnx() as cnx:
+ try:
+ cnx.execute('SET X indexed FALSE WHERE X relation_type R, R name "name"')
+ self.assertTrue(self.schema['name'].rdef('Workflow', 'String').indexed)
+ self.assertTrue(self.index_exists(cnx, 'Workflow', 'name'))
+ cnx.commit()
+ self.assertFalse(self.schema['name'].rdef('Workflow', 'String').indexed)
+ self.assertFalse(self.index_exists(cnx, 'Workflow', 'name'))
+ finally:
+ cnx.execute('SET X indexed TRUE WHERE X relation_type R, R name "name"')
+ self.assertFalse(self.schema['name'].rdef('Workflow', 'String').indexed)
+ self.assertFalse(self.index_exists(cnx, 'Workflow', 'name'))
+ cnx.commit()
+ self.assertTrue(self.schema['name'].rdef('Workflow', 'String').indexed)
+ self.assertTrue(self.index_exists(cnx, 'Workflow', 'name'))
+
+    def test_unique_change(self):
+        """adding/removing a UniqueConstraint on Workflow.name must create
+        and drop the corresponding unique database index at commit time"""
+        with self.admin_access.repo_cnx() as cnx:
+            try:
+                # NOTE(review): if this execute raises, `eid` is never bound
+                # and the finally clause fails with NameError, masking the
+                # original error — consider binding eid before the try
+                eid = cnx.execute('INSERT CWConstraint X: X cstrtype CT, X value "{}", '
+                                  ' DEF constrained_by X '
+                                  'WHERE CT name "UniqueConstraint", DEF relation_type RT, '
+                                  'DEF from_entity E, RT name "name", '
+                                  'E name "Workflow"').rows[0][0]
+                # the constraint only takes effect at commit
+                self.assertFalse(self.schema['Workflow'].has_unique_values('name'))
+                self.assertFalse(self.index_exists(cnx, 'Workflow', 'name', unique=True))
+                cnx.commit()
+                self.assertTrue(self.schema['Workflow'].has_unique_values('name'))
+                self.assertTrue(self.index_exists(cnx, 'Workflow', 'name', unique=True))
+            finally:
+                cnx.execute('DELETE CWConstraint C WHERE C eid %(eid)s', {'eid': eid})
+                cnx.commit()
+                self.assertFalse(self.schema['Workflow'].has_unique_values('name'))
+                self.assertFalse(self.index_exists(cnx, 'Workflow', 'name', unique=True))
+
+ def test_required_change_1(self):
+ with self.admin_access.repo_cnx() as cnx:
+ cnx.execute('SET DEF cardinality "?1" '
+ 'WHERE DEF relation_type RT, DEF from_entity E,'
+ 'RT name "title", E name "Bookmark"')
+ cnx.commit()
+ # should now be able to add bookmark without title
+ cnx.execute('INSERT Bookmark X: X path "/view"')
+ cnx.commit()
+
+ def test_required_change_2(self):
+ with self.admin_access.repo_cnx() as cnx:
+ cnx.execute('SET DEF cardinality "11" '
+ 'WHERE DEF relation_type RT, DEF from_entity E,'
+ 'RT name "surname", E name "CWUser"')
+ cnx.execute('SET U surname "Doe" WHERE U surname NULL')
+ cnx.commit()
+ # should not be able anymore to add cwuser without surname
+ self.assertRaises(ValidationError, self.create_user, cnx, "toto")
+ cnx.rollback()
+ cnx.execute('SET DEF cardinality "?1" '
+ 'WHERE DEF relation_type RT, DEF from_entity E,'
+ 'RT name "surname", E name "CWUser"')
+ cnx.commit()
+
+ def test_add_attribute_to_base_class(self):
+ with self.admin_access.repo_cnx() as cnx:
+ attreid = cnx.execute(
+ 'INSERT CWAttribute X: X cardinality "11", X defaultval %(default)s, '
+ 'X indexed TRUE, X relation_type RT, X from_entity E, X to_entity F '
+ 'WHERE RT name "messageid", E name "BaseTransition", F name "String"',
+ {'default': Binary.zpickle('noname')})[0][0]
+ assert cnx.execute('SET X read_permission Y WHERE X eid %(x)s, Y name "managers"',
+ {'x': attreid})
+ cnx.commit()
+ self.schema.rebuild_infered_relations()
+ self.assertIn('Transition', self.schema['messageid'].subjects())
+ self.assertIn('WorkflowTransition', self.schema['messageid'].subjects())
+ cnx.execute('Any X WHERE X is_instance_of BaseTransition, X messageid "hop"')
+
+ def test_change_fulltextindexed(self):
+ with self.admin_access.repo_cnx() as cnx:
+ target = cnx.create_entity(u'Email', messageid=u'1234',
+ subject=u'rick.roll@dance.com')
+ cnx.commit()
+ rset = cnx.execute('Any X WHERE X has_text "rick.roll"')
+ self.assertIn(target.eid, [item[0] for item in rset])
+ assert cnx.execute('SET A fulltextindexed FALSE '
+ 'WHERE E is CWEType, E name "Email", A is CWAttribute,'
+ 'A from_entity E, A relation_type R, R name "subject"')
+ cnx.commit()
+ rset = cnx.execute('Any X WHERE X has_text "rick.roll"')
+ self.assertFalse(rset)
+ assert cnx.execute('SET A fulltextindexed TRUE '
+ 'WHERE A from_entity E, A relation_type R, '
+ 'E name "Email", R name "subject"')
+ cnx.commit()
+ rset = cnx.execute('Any X WHERE X has_text "rick.roll"')
+ self.assertIn(target.eid, [item[0] for item in rset])
+
+ def test_change_fulltext_container(self):
+ with self.admin_access.repo_cnx() as cnx:
+ target = cnx.create_entity(u'EmailAddress', address=u'rick.roll@dance.com')
+ target.cw_set(reverse_use_email=cnx.user)
+ cnx.commit()
+ rset = cnx.execute('Any X WHERE X has_text "rick.roll"')
+ self.assertIn(cnx.user.eid, [item[0] for item in rset])
+ assert cnx.execute('SET R fulltext_container NULL '
+ 'WHERE R name "use_email"')
+ cnx.commit()
+ rset = cnx.execute('Any X WHERE X has_text "rick.roll"')
+ self.assertIn(target.eid, [item[0] for item in rset])
+ assert cnx.execute('SET R fulltext_container "subject" '
+ 'WHERE R name "use_email"')
+ cnx.commit()
+ rset = cnx.execute('Any X WHERE X has_text "rick.roll"')
+ self.assertIn(cnx.user.eid, [item[0] for item in rset])
+
+ def test_update_constraint(self):
+ with self.admin_access.repo_cnx() as cnx:
+ rdef = self.schema['Transition'].rdef('type')
+ cstr = rdef.constraint_by_type('StaticVocabularyConstraint')
+ cnx.execute('SET X value %(v)s WHERE X eid %(x)s',
+ {'x': cstr.eid, 'v': u"u'normal', u'auto', u'new'"})
+ cnx.execute('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, '
+ 'EDEF constrained_by X WHERE CT name %(ct)s, EDEF eid %(x)s',
+ {'ct': 'SizeConstraint', 'value': u'max=10', 'x': rdef.eid})
+ cnx.commit()
+ cstr = rdef.constraint_by_type('StaticVocabularyConstraint')
+ self.assertEqual(cstr.values, (u'normal', u'auto', u'new'))
+ cnx.execute('INSERT Transition T: T name "hop", T type "new"')
+
+ def test_add_constraint(self):
+ with self.admin_access.repo_cnx() as cnx:
+ rdef = self.schema['EmailPart'].rdef('ordernum')
+ cstr = BoundaryConstraint('>=', 0)
+ cnx.execute('INSERT CWConstraint X: X value %(v)s, X cstrtype CT, '
+ 'EDEF constrained_by X WHERE CT name %(ct)s, EDEF eid %(x)s',
+ {'ct': cstr.__class__.__name__, 'v': cstr.serialize(), 'x': rdef.eid})
+ cnx.commit()
+ cstr2 = rdef.constraint_by_type('BoundaryConstraint')
+ self.assertEqual(cstr, cstr2)
+ cstr3 = BoundaryConstraint('<=', 1000)
+ cnx.execute('INSERT CWConstraint X: X value %(v)s, X cstrtype CT, '
+ 'EDEF constrained_by X WHERE CT name %(ct)s, EDEF eid %(x)s',
+ {'ct': cstr3.__class__.__name__, 'v': cstr3.serialize(), 'x': rdef.eid})
+ cnx.commit()
+ # Do not use assertCountEqual as it does "strange" equality
+ # comparison on Python 2.
+ self.assertEqual(set(rdef.constraints), set([cstr, cstr3]))
+
+
+if __name__ == '__main__':
+ import unittest
+ unittest.main()
diff -r e1caf133b81c -r b23d58050076 cubicweb/hooks/test/unittest_syncsession.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/test/unittest_syncsession.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,135 @@
+# -*- coding: utf-8 -*-
+# copyright 2003-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""functional tests for core hooks
+
+Note:
+ syncschema.py hooks are mostly tested in server/test/unittest_migrations.py
+"""
+
+from six import text_type
+
+from cubicweb import ValidationError
+from cubicweb.devtools.testlib import CubicWebTC
+
+
+class CWPropertyHooksTC(CubicWebTC):
+
+ def test_unexistant_cwproperty(self):
+ with self.admin_access.web_request() as req:
+ with self.assertRaises(ValidationError) as cm:
+ req.execute('INSERT CWProperty X: X pkey "bla.bla", '
+ 'X value "hop", X for_user U')
+ cm.exception.translate(text_type)
+ self.assertEqual(cm.exception.errors,
+ {'pkey-subject': 'unknown property key bla.bla'})
+
+ with self.assertRaises(ValidationError) as cm:
+ req.execute('INSERT CWProperty X: X pkey "bla.bla", X value "hop"')
+ cm.exception.translate(text_type)
+ self.assertEqual(cm.exception.errors,
+ {'pkey-subject': 'unknown property key bla.bla'})
+
+ def test_site_wide_cwproperty(self):
+ with self.admin_access.web_request() as req:
+ with self.assertRaises(ValidationError) as cm:
+ req.execute('INSERT CWProperty X: X pkey "ui.site-title", '
+ 'X value "hop", X for_user U')
+ self.assertEqual(cm.exception.errors,
+ {'for_user-subject': "site-wide property can't be set for user"})
+
+ def test_system_cwproperty(self):
+ with self.admin_access.web_request() as req:
+ with self.assertRaises(ValidationError) as cm:
+ req.execute('INSERT CWProperty X: X pkey "system.version.cubicweb", '
+ 'X value "hop", X for_user U')
+ self.assertEqual(cm.exception.errors,
+ {'for_user-subject': "site-wide property can't be set for user"})
+
+ def test_bad_type_cwproperty(self):
+ with self.admin_access.web_request() as req:
+ with self.assertRaises(ValidationError) as cm:
+ req.execute('INSERT CWProperty X: X pkey "ui.language", '
+ 'X value "hop", X for_user U')
+ self.assertEqual(cm.exception.errors,
+ {'value-subject': u'unauthorized value'})
+ with self.assertRaises(ValidationError) as cm:
+ req.execute('INSERT CWProperty X: X pkey "ui.language", X value "hop"')
+ self.assertEqual(cm.exception.errors, {'value-subject': u'unauthorized value'})
+
+    def test_vreg_propertyvalues_update(self):
+        """setting a sitewide CWProperty must update the vreg property
+        cache once committed"""
+        self.vreg.register_property(
+            'test.int', type='Int', help='', sitewide=True)
+        with self.admin_access.repo_cnx() as cnx:
+            cnx.execute('INSERT CWProperty X: X pkey "test.int", X value "42"')
+            cnx.commit()
+        # the string value "42" is coerced to the declared Int type
+        self.assertEqual(self.vreg.property_value('test.int'), 42)
+
+ def test_sync_user_props(self):
+ with self.admin_access.client_cnx() as cnx:
+ self.assertNotIn('ui.language', cnx.user.properties)
+ cnx.user.set_property(u'ui.language', u'fr')
+ self.assertNotIn('ui.language', cnx.user.properties)
+ cnx.commit()
+ self.assertEqual(cnx.user.properties['ui.language'], 'fr')
+ cnx.user.set_property(u'ui.language', u'en')
+ self.assertEqual(cnx.user.properties['ui.language'], 'fr')
+ cnx.commit()
+ self.assertEqual(cnx.user.properties['ui.language'], 'en')
+ cnx.execute('DELETE CWProperty X WHERE X for_user U, U eid %(u)s',
+ {'u': cnx.user.eid})
+ self.assertEqual(cnx.user.properties['ui.language'], 'en')
+ cnx.commit()
+ self.assertNotIn('ui.language', cnx.user.properties)
+
+ def test_sync_sitewide_props(self):
+ with self.admin_access.client_cnx() as cnx:
+ self.assertNotIn('ui.language', cnx.vreg['propertyvalues'])
+ cwprop = cnx.create_entity('CWProperty', pkey=u'ui.language', value=u'fr')
+ self.assertNotIn('ui.language', cnx.vreg['propertyvalues'])
+ cnx.commit()
+ self.assertEqual(cnx.vreg['propertyvalues']['ui.language'], 'fr')
+ cwprop.cw_set(value=u'en')
+ self.assertEqual(cnx.vreg['propertyvalues']['ui.language'], 'fr')
+ cnx.commit()
+ self.assertEqual(cnx.vreg['propertyvalues']['ui.language'], 'en')
+ cwprop.cw_delete()
+ self.assertEqual(cnx.vreg['propertyvalues']['ui.language'], 'en')
+ cnx.commit()
+ self.assertNotIn('ui.language', cnx.vreg['propertyvalues'])
+
+
+class UserGroupsSyncTC(CubicWebTC):
+    """check in_group changes are only reflected on the live user object
+    at commit time"""
+
+    def test_sync_groups(self):
+        with self.admin_access.client_cnx() as cnx:
+            cnx.execute('SET U in_group G WHERE G name "users", U eid %(u)s',
+                        {'u': cnx.user.eid})
+            # not committed yet: groups unchanged
+            self.assertEqual(cnx.user.groups, set(['managers']))
+            cnx.commit()
+            self.assertEqual(cnx.user.groups, set(['managers', 'users']))
+            cnx.execute('DELETE U in_group G WHERE G name "users", U eid %(u)s',
+                        {'u': cnx.user.eid})
+            # deletion likewise only synced at commit
+            self.assertEqual(cnx.user.groups, set(['managers', 'users']))
+            cnx.commit()
+            self.assertEqual(cnx.user.groups, set(['managers']))
+
+
+if __name__ == '__main__':
+ import unittest
+ unittest.main()
diff -r e1caf133b81c -r b23d58050076 cubicweb/hooks/workflow.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/workflow.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,357 @@
+# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""Core hooks: workflow related hooks"""
+
+__docformat__ = "restructuredtext en"
+from cubicweb import _
+
+from datetime import datetime
+
+
+from cubicweb import RepositoryError, validation_error
+from cubicweb.predicates import is_instance, adaptable
+from cubicweb.server import hook
+
+
+def _change_state(cnx, x, oldstate, newstate):
+    """move entity `x` from `oldstate` to `newstate` (all given as eids),
+    bypassing security checks on the in_state relation"""
+    # register both tuples so security hooks skip these relation changes
+    nocheck = cnx.transaction_data.setdefault('skip-security', set())
+    nocheck.add((x, 'in_state', oldstate))
+    nocheck.add((x, 'in_state', newstate))
+    # delete previous state first
+    cnx.delete_relation(x, 'in_state', oldstate)
+    cnx.add_relation(x, 'in_state', newstate)
+
+
+# operations ###################################################################
+
+class _SetInitialStateOp(hook.Operation):
+    """make initial state be a default state"""
+    eid = None  # eid of the newly created workflowable entity (make pylint happy)
+
+    def precommit_event(self):
+        cnx = self.cnx
+        entity = cnx.entity_from_eid(self.eid)
+        iworkflowable = entity.cw_adapt_to('IWorkflowable')
+        # if there is an initial state and the entity's state is not set,
+        # use the initial state as a default state
+        if not (cnx.deleted_in_transaction(entity.eid) or entity.in_state) \
+               and iworkflowable.current_workflow:
+            state = iworkflowable.current_workflow.initial
+            if state:
+                cnx.add_relation(self.eid, 'in_state', state.eid)
+                # a state change may enable an automatic transition
+                _FireAutotransitionOp(cnx, eid=self.eid)
+
+class _FireAutotransitionOp(hook.Operation):
+    """try to fire auto transition after state changes"""
+    eid = None  # eid of the workflowable entity (make pylint happy)
+
+    def precommit_event(self):
+        entity = self.cnx.entity_from_eid(self.eid)
+        iworkflowable = entity.cw_adapt_to('IWorkflowable')
+        autotrs = list(iworkflowable.possible_transitions('auto'))
+        if autotrs:
+            # at most one automatic transition may be fireable at a time
+            assert len(autotrs) == 1
+            iworkflowable.fire_transition(autotrs[0])
+
+
+class _WorkflowChangedOp(hook.Operation):
+    """fix entity current state when changing its workflow"""
+    eid = wfeid = None  # entity eid / new workflow eid (make pylint happy)
+
+    def precommit_event(self):
+        # notice that enforcement that new workflow apply to the entity's type is
+        # done by schema rule, no need to check it here
+        cnx = self.cnx
+        pendingeids = cnx.transaction_data.get('pendingeids', ())
+        if self.eid in pendingeids:
+            # entity deleted in the same transaction, nothing to fix
+            return
+        entity = cnx.entity_from_eid(self.eid)
+        iworkflowable = entity.cw_adapt_to('IWorkflowable')
+        # check custom workflow has not been rechanged to another one in the same
+        # transaction
+        mainwf = iworkflowable.main_workflow
+        if mainwf.eid == self.wfeid:
+            deststate = mainwf.initial
+            if not deststate:
+                msg = _('workflow has no initial state')
+                raise validation_error(entity, {('custom_workflow', 'subject'): msg})
+            if mainwf.state_by_eid(iworkflowable.current_state.eid):
+                # nothing to do
+                return
+            # if there are no history, simply go to new workflow's initial state
+            if not iworkflowable.workflow_history:
+                if iworkflowable.current_state.eid != deststate.eid:
+                    _change_state(cnx, entity.eid,
+                                  iworkflowable.current_state.eid, deststate.eid)
+                    _FireAutotransitionOp(cnx, eid=entity.eid)
+                return
+            # there is a history: record an explicit state change instead
+            msg = cnx._('workflow changed to "%s"')
+            msg %= cnx._(mainwf.name)
+            cnx.transaction_data[(entity.eid, 'customwf')] = self.wfeid
+            iworkflowable.change_state(deststate, msg, u'text/plain')
+
+
+class _CheckTrExitPoint(hook.Operation):
+    """validate a transition's subworkflow exit points: at most one exit
+    per subworkflow state"""
+    treid = None  # transition eid (make pylint happy)
+
+    def precommit_event(self):
+        tr = self.cnx.entity_from_eid(self.treid)
+        outputs = set()
+        for ep in tr.subworkflow_exit:
+            if ep.subwf_state.eid in outputs:
+                msg = _("can't have multiple exits on the same state")
+                raise validation_error(self.treid, {('subworkflow_exit', 'subject'): msg})
+            outputs.add(ep.subwf_state.eid)
+
+
+class _SubWorkflowExitOp(hook.Operation):
+    """after a state change inside a subworkflow, exit back to the parent
+    workflow when an exit point is defined for the new state"""
+    foreid = trinfo = None  # workflowable entity eid / TrInfo entity (make pylint happy)
+
+    def precommit_event(self):
+        cnx = self.cnx
+        forentity = cnx.entity_from_eid(self.foreid)
+        iworkflowable = forentity.cw_adapt_to('IWorkflowable')
+        trinfo = self.trinfo
+        # we're in a subworkflow, check if we've reached an exit point
+        wftr = iworkflowable.subworkflow_input_transition()
+        if wftr is None:
+            # inconsistency detected
+            msg = _("state doesn't belong to entity's current workflow")
+            raise validation_error(self.trinfo, {('to_state', 'subject'): msg})
+        tostate = wftr.get_exit_point(forentity, trinfo.cw_attr_cache['to_state'])
+        if tostate is not None:
+            # reached an exit point
+            msg = _('exiting from subworkflow %s')
+            msg %= cnx._(iworkflowable.current_workflow.name)
+            # flag the transaction so FireTransitionHook skips transition checks
+            cnx.transaction_data[(forentity.eid, 'subwfentrytr')] = True
+            iworkflowable.change_state(tostate, msg, u'text/plain', tr=wftr)
+
+
+# hooks ########################################################################
+
+class WorkflowHook(hook.Hook):
+    """abstract base class for workflow related hooks"""
+    __abstract__ = True
+    category = 'metadata'
+
+
+class SetInitialStateHook(WorkflowHook):
+    """schedule initialization of in_state for newly created workflowable
+    entities (see _SetInitialStateOp)"""
+    __regid__ = 'wfsetinitial'
+    __select__ = WorkflowHook.__select__ & adaptable('IWorkflowable')
+    events = ('after_add_entity',)
+
+    def __call__(self):
+        _SetInitialStateOp(self._cw, eid=self.entity.eid)
+
+
+class FireTransitionHook(WorkflowHook):
+ """check the transition is allowed and add missing information into the
+ TrInfo entity.
+
+ Expect that:
+ * wf_info_for inlined relation is set
+ * by_transition or to_state (managers only) inlined relation is set
+
+ Check for automatic transition to be fired at the end
+ """
+ __regid__ = 'wffiretransition'
+ __select__ = WorkflowHook.__select__ & is_instance('TrInfo')
+ events = ('before_add_entity',)
+
+ def __call__(self):
+ cnx = self._cw
+ entity = self.entity
+ # first retreive entity to which the state change apply
+ try:
+ foreid = entity.cw_attr_cache['wf_info_for']
+ except KeyError:
+ msg = _('mandatory relation')
+ raise validation_error(entity, {('wf_info_for', 'subject'): msg})
+ forentity = cnx.entity_from_eid(foreid)
+ # see comment in the TrInfo entity definition
+ entity.cw_edited['tr_count']=len(forentity.reverse_wf_info_for)
+ iworkflowable = forentity.cw_adapt_to('IWorkflowable')
+ # then check it has a workflow set, unless we're in the process of changing
+ # entity's workflow
+ if cnx.transaction_data.get((forentity.eid, 'customwf')):
+ wfeid = cnx.transaction_data[(forentity.eid, 'customwf')]
+ wf = cnx.entity_from_eid(wfeid)
+ else:
+ wf = iworkflowable.current_workflow
+ if wf is None:
+ msg = _('related entity has no workflow set')
+ raise validation_error(entity, {None: msg})
+ # then check it has a state set
+ fromstate = iworkflowable.current_state
+ if fromstate is None:
+ msg = _('related entity has no state')
+ raise validation_error(entity, {None: msg})
+ # True if we are coming back from subworkflow
+ swtr = cnx.transaction_data.pop((forentity.eid, 'subwfentrytr'), None)
+ cowpowers = (cnx.user.is_in_group('managers')
+ or not cnx.write_security)
+ # no investigate the requested state change...
+ try:
+ treid = entity.cw_attr_cache['by_transition']
+ except KeyError:
+ # no transition set, check user is a manager and destination state
+ # is specified (and valid)
+ if not cowpowers:
+ msg = _('mandatory relation')
+ raise validation_error(entity, {('by_transition', 'subject'): msg})
+ deststateeid = entity.cw_attr_cache.get('to_state')
+ if not deststateeid:
+ msg = _('mandatory relation')
+ raise validation_error(entity, {('by_transition', 'subject'): msg})
+ deststate = wf.state_by_eid(deststateeid)
+ if deststate is None:
+ msg = _("state doesn't belong to entity's workflow")
+ raise validation_error(entity, {('to_state', 'subject'): msg})
+ else:
+ # check transition is valid and allowed, unless we're coming back
+ # from subworkflow
+ tr = cnx.entity_from_eid(treid)
+ if swtr is None:
+ qname = ('by_transition', 'subject')
+ if tr is None:
+ msg = _("transition doesn't belong to entity's workflow")
+ raise validation_error(entity, {qname: msg})
+ if not tr.has_input_state(fromstate):
+ msg = _("transition %(tr)s isn't allowed from %(st)s")
+ raise validation_error(entity, {qname: msg}, {
+ 'tr': tr.name, 'st': fromstate.name}, ['tr', 'st'])
+ if not tr.may_be_fired(foreid):
+ msg = _("transition may not be fired")
+ raise validation_error(entity, {qname: msg})
+ deststateeid = entity.cw_attr_cache.get('to_state')
+ if deststateeid is not None:
+ if not cowpowers and deststateeid != tr.destination(forentity).eid:
+ msg = _("transition isn't allowed")
+ raise validation_error(entity, {('by_transition', 'subject'): msg})
+ if swtr is None:
+ deststate = cnx.entity_from_eid(deststateeid)
+ if not cowpowers and deststate is None:
+ msg = _("state doesn't belong to entity's workflow")
+ raise validation_error(entity, {('to_state', 'subject'): msg})
+ else:
+ deststateeid = tr.destination(forentity).eid
+ # everything is ok, add missing information on the trinfo entity
+ entity.cw_edited['from_state'] = fromstate.eid
+ entity.cw_edited['to_state'] = deststateeid
+ nocheck = cnx.transaction_data.setdefault('skip-security', set())
+ nocheck.add((entity.eid, 'from_state', fromstate.eid))
+ nocheck.add((entity.eid, 'to_state', deststateeid))
+ _FireAutotransitionOp(cnx, eid=forentity.eid)
+
+
+class FiredTransitionHook(WorkflowHook):
+ """change related entity state and handle exit of subworkflow"""
+ __regid__ = 'wffiretransition'
+ __select__ = WorkflowHook.__select__ & is_instance('TrInfo')
+ events = ('after_add_entity',)
+
+ def __call__(self):
+ trinfo = self.entity
+ rcache = trinfo.cw_attr_cache
+ _change_state(self._cw, rcache['wf_info_for'], rcache['from_state'],
+ rcache['to_state'])
+ forentity = self._cw.entity_from_eid(rcache['wf_info_for'])
+ iworkflowable = forentity.cw_adapt_to('IWorkflowable')
+ assert iworkflowable.current_state.eid == rcache['to_state']
+ if iworkflowable.main_workflow.eid != iworkflowable.current_workflow.eid:
+ _SubWorkflowExitOp(self._cw, foreid=forentity.eid, trinfo=trinfo)
+
+
+class CheckInStateChangeAllowed(WorkflowHook):
+ """check state apply, in case of direct in_state change using unsafe execute
+ """
+ __regid__ = 'wfcheckinstate'
+ __select__ = WorkflowHook.__select__ & hook.match_rtype('in_state')
+ events = ('before_add_relation',)
+ category = 'integrity'
+
+ def __call__(self):
+ cnx = self._cw
+ nocheck = cnx.transaction_data.get('skip-security', ())
+ if (self.eidfrom, 'in_state', self.eidto) in nocheck:
+ # state changed through TrInfo insertion, so we already know it's ok
+ return
+ entity = cnx.entity_from_eid(self.eidfrom)
+ iworkflowable = entity.cw_adapt_to('IWorkflowable')
+ mainwf = iworkflowable.main_workflow
+ if mainwf is None:
+ msg = _('entity has no workflow set')
+ raise validation_error(entity, {None: msg})
+ for wf in mainwf.iter_workflows():
+ if wf.state_by_eid(self.eidto):
+ break
+ else:
+ msg = _("state doesn't belong to entity's workflow. You may "
+ "want to set a custom workflow for this entity first.")
+ raise validation_error(self.eidfrom, {('in_state', 'subject'): msg})
+ if iworkflowable.current_workflow and wf.eid != iworkflowable.current_workflow.eid:
+ msg = _("state doesn't belong to entity's current workflow")
+ raise validation_error(self.eidfrom, {('in_state', 'subject'): msg})
+
+
+class SetModificationDateOnStateChange(WorkflowHook):
+ """update entity's modification date after changing its state"""
+ __regid__ = 'wfsyncmdate'
+ __select__ = WorkflowHook.__select__ & hook.match_rtype('in_state')
+ events = ('after_add_relation',)
+
+ def __call__(self):
+ if self._cw.added_in_transaction(self.eidfrom):
+ # new entity, not needed
+ return
+ entity = self._cw.entity_from_eid(self.eidfrom)
+ try:
+ entity.cw_set(modification_date=datetime.utcnow())
+ except RepositoryError as ex:
+ # usually occurs if entity is coming from a read-only source
+ # (eg ldap user)
+ self.warning('cant change modification date for %s: %s', entity, ex)
+
+
+class CheckWorkflowTransitionExitPoint(WorkflowHook):
+ """check that there is no multiple exits from the same state"""
+ __regid__ = 'wfcheckwftrexit'
+ __select__ = WorkflowHook.__select__ & hook.match_rtype('subworkflow_exit')
+ events = ('after_add_relation',)
+
+ def __call__(self):
+ _CheckTrExitPoint(self._cw, treid=self.eidfrom)
+
+
+class SetCustomWorkflow(WorkflowHook):
+ __regid__ = 'wfsetcustom'
+ __select__ = WorkflowHook.__select__ & hook.match_rtype('custom_workflow')
+ events = ('after_add_relation',)
+
+ def __call__(self):
+ _WorkflowChangedOp(self._cw, eid=self.eidfrom, wfeid=self.eidto)
+
+
+class DelCustomWorkflow(SetCustomWorkflow):
+ __regid__ = 'wfdelcustom'
+ events = ('after_delete_relation',)
+
+ def __call__(self):
+ entity = self._cw.entity_from_eid(self.eidfrom)
+ typewf = entity.cw_adapt_to('IWorkflowable').cwetype_workflow()
+ if typewf is not None:
+ _WorkflowChangedOp(self._cw, eid=self.eidfrom, wfeid=typewf.eid)
diff -r e1caf133b81c -r b23d58050076 cubicweb/hooks/zmq.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/zmq.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# copyright 2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+
+from cubicweb.server import hook
+
+class ZMQStopHook(hook.Hook):
+ __regid__ = 'zmqstop'
+ events = ('server_shutdown',)
+
+ def __call__(self):
+ self.repo.app_instances_bus.stop()
+
+class ZMQStartHook(hook.Hook):
+ __regid__ = 'zmqstart'
+ events = ('server_startup',)
+ order = -1
+
+ def __call__(self):
+ config = self.repo.config
+ address_pub = config.get('zmq-address-pub')
+ address_sub = config.get('zmq-address-sub')
+ if not address_pub and not address_sub:
+ return
+ from cubicweb.server import cwzmq
+ self.repo.app_instances_bus = cwzmq.ZMQComm()
+ if address_pub:
+ self.repo.app_instances_bus.add_publisher(address_pub)
+ def clear_cache_callback(msg):
+ self.debug('clear_caches: %s', ' '.join(msg))
+ self.repo.clear_caches(msg[1:])
+ self.repo.app_instances_bus.add_subscription('delete', clear_cache_callback)
+ for address in address_sub:
+ self.repo.app_instances_bus.add_subscriber(address)
+ self.repo.app_instances_bus.start()
diff -r e1caf133b81c -r b23d58050076 cubicweb/i18n.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/i18n.py Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,117 @@
+# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""Some i18n/gettext utilities."""
+from __future__ import print_function
+
+__docformat__ = "restructuredtext en"
+
+import re
+import os
+from os.path import join, basename, splitext, exists
+from glob import glob
+
+from six import PY2
+
+from cubicweb.toolsutils import create_dir
+
+def extract_from_tal(files, output_file):
+ """extract i18n strings from tal and write them into the given output file
+ using standard python gettext marker (_)
+ """
+ output = open(output_file, 'w')
+ for filepath in files:
+ for match in re.finditer('i18n:(content|replace)="([^"]+)"', open(filepath).read()):
+ output.write('_("%s")' % match.group(2))
+ output.close()
+
+
+def add_msg(w, msgid, msgctx=None):
+ """write an empty pot msgid definition"""
+ if PY2 and isinstance(msgid, unicode):
+ msgid = msgid.encode('utf-8')
+ if msgctx:
+ if PY2 and isinstance(msgctx, unicode):
+ msgctx = msgctx.encode('utf-8')
+ w('msgctxt "%s"\n' % msgctx)
+ msgid = msgid.replace('"', r'\"').splitlines()
+ if len(msgid) > 1:
+ w('msgid ""\n')
+ for line in msgid:
+ w('"%s"' % line.replace('"', r'\"'))
+ else:
+ w('msgid "%s"\n' % msgid[0])
+ w('msgstr ""\n\n')
+
+def execute2(args):
+ # XXX replace this with check_output in Python 2.7
+ from subprocess import Popen, PIPE, CalledProcessError
+ p = Popen(args, stdout=PIPE, stderr=PIPE)
+ out, err = p.communicate()
+ if p.returncode != 0:
+ exc = CalledProcessError(p.returncode, args[0])
+ exc.cmd = args
+ exc.data = (out, err)
+ raise exc
+
+def available_catalogs(i18ndir=None):
+ if i18ndir is None:
+ wildcard = '*.po'
+ else:
+ wildcard = join(i18ndir, '*.po')
+ for popath in glob(wildcard):
+ lang = splitext(basename(popath))[0]
+ yield lang, popath
+
+
+def compile_i18n_catalogs(sourcedirs, destdir, langs):
+ """generate .mo files for a set of languages into the `destdir` i18n directory
+ """
+ from subprocess import CalledProcessError
+ from logilab.common.fileutils import ensure_fs_mode
+ print('-> compiling message catalogs to %s' % destdir)
+ errors = []
+ for lang in langs:
+ langdir = join(destdir, lang, 'LC_MESSAGES')
+ if not exists(langdir):
+ create_dir(langdir)
+ pofiles = [join(path, '%s.po' % lang) for path in sourcedirs]
+ pofiles = [pof for pof in pofiles if exists(pof)]
+ mergedpo = join(destdir, '%s_merged.po' % lang)
+ try:
+ # merge instance/cubes messages catalogs with the stdlib's one
+ cmd = ['msgcat', '--use-first', '--sort-output', '--strict',
+ '-o', mergedpo] + pofiles
+ execute2(cmd)
+ # make sure the .mo file is writeable and compiles with *msgfmt*
+ applmo = join(destdir, lang, 'LC_MESSAGES', 'cubicweb.mo')
+ try:
+ ensure_fs_mode(applmo)
+ except OSError:
+ pass # suppose not exists
+ execute2(['msgfmt', mergedpo, '-o', applmo])
+ except CalledProcessError as exc:
+ errors.append(u'while handling language %s:\ncmd:\n%s\nstdout:\n%s\nstderr:\n%s\n' %
+ (lang, exc.cmd, repr(exc.data[0]), repr(exc.data[1])))
+ except Exception as exc:
+ errors.append(u'while handling language %s: %s' % (lang, exc))
+ try:
+ # clean everything
+ os.unlink(mergedpo)
+ except Exception:
+ continue
+ return errors
diff -r e1caf133b81c -r b23d58050076 cubicweb/i18n/de.po
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/i18n/de.po Mon Sep 26 16:45:30 2016 +0200
@@ -0,0 +1,4708 @@
+# cubicweb i18n catalog
+# Copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# Logilab
+msgid ""
+msgstr ""
+"Project-Id-Version: 2.0\n"
+"POT-Creation-Date: 2006-01-12 17:35+CET\n"
+"PO-Revision-Date: 2010-09-15 14:55+0200\n"
+"Last-Translator: Dr. Leo \n"
+"Language-Team: English \n"
+"Language: de\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: pygettext.py 1.5\n"
+"Plural-Forms: nplurals=2; plural=(n > 1);\n"
+
+#, python-format
+msgid ""
+"\n"
+"%(user)s changed status from <%(previous_state)s> to <%(current_state)s> for "
+"entity\n"
+"'%(title)s'\n"
+"\n"
+"%(comment)s\n"
+"\n"
+"url: %(url)s\n"
+msgstr ""
+"\n"
+"%(user)s hat den Zustand geändert von <%(previous_state)s> in <"
+"%(current_state)s> für die Entität\n"
+"'%(title)s'\n"
+"\n"
+"%(comment)s\n"
+"\n"
+"url: %(url)s\n"
+
+#, python-format
+msgid " from state %(fromstate)s to state %(tostate)s\n"
+msgstr " aus dem Zustand %(fromstate)s in den Zustand %(tostate)s\n"
+
+msgid " :"
+msgstr " :"
+
+#, python-format
+msgid "\"action\" must be specified in options; allowed values are %s"
+msgstr ""
+
+msgid "\"role=subject\" or \"role=object\" must be specified in options"
+msgstr ""
+
+#, python-format
+msgid "%(KEY-cstr)s constraint failed for value %(KEY-value)r"
+msgstr ""
+
+#, python-format
+msgid "%(KEY-rtype)s is part of violated unicity constraint"
+msgstr ""
+
+#, python-format
+msgid "%(KEY-value)r doesn't match the %(KEY-regexp)r regular expression"
+msgstr ""
+
+#, python-format
+msgid "%(attr)s set to %(newvalue)s"
+msgstr "%(attr)s geändert in %(newvalue)s"
+
+#, python-format
+msgid "%(attr)s updated from %(oldvalue)s to %(newvalue)s"
+msgstr "%(attr)s geändert von %(oldvalue)s in %(newvalue)s"
+
+#, python-format
+msgid "%(etype)s by %(author)s"
+msgstr ""
+
+#, python-format
+msgid "%(firstname)s %(surname)s"
+msgstr "%(firstname)s %(surname)s"
+
+#, python-format
+msgid "%(subject)s %(etype)s #%(eid)s (%(login)s)"
+msgstr "%(subject)s %(etype)s #%(eid)s (%(login)s)"
+
+#, python-format
+msgid "%d days"
+msgstr "%d Tage"
+
+#, python-format
+msgid "%d hours"
+msgstr "%d Stunden"
+
+#, python-format
+msgid "%d minutes"
+msgstr "%d Minuten"
+
+#, python-format
+msgid "%d months"
+msgstr "%d Monate"
+
+#, python-format
+msgid "%d seconds"
+msgstr "%d Sekunden"
+
+#, python-format
+msgid "%d weeks"
+msgstr "%d Wochen"
+
+#, python-format
+msgid "%d years"
+msgstr "%d Jahre"
+
+#, python-format
+msgid "%s could be supported"
+msgstr ""
+
+#, python-format
+msgid "%s error report"
+msgstr "%s Fehlerbericht"
+
+#, python-format
+msgid "%s software version of the database"
+msgstr "Software-Version der Datenbank %s"
+
+#, python-format
+msgid "%s updated"
+msgstr "%s aktualisiert"
+
+#, python-format
+msgid "'%s' action doesn't take any options"
+msgstr ""
+
+#, python-format
+msgid ""
+"'%s' action for in_state relation should at least have 'linkattr=name' option"
+msgstr ""
+
+#, python-format
+msgid "'%s' action requires 'linkattr' option"
+msgstr ""
+
+msgid "(UNEXISTANT EID)"
+msgstr "(EID nicht gefunden)"
+
+#, python-format
+msgid "(suppressed) entity #%d"
+msgstr ""
+
+msgid "**"
+msgstr "0..n 0..n"
+
+msgid "*+"
+msgstr "0..n 1..n"
+
+msgid "*1"
+msgstr "0..n 1"
+
+msgid "*?"
+msgstr "0..n 0..1"
+
+msgid "+*"
+msgstr "1..n 0..n"
+
+msgid "++"
+msgstr "1..n 1..n"
+
+msgid "+1"
+msgstr "1..n 1"
+
+msgid "+?"
+msgstr "1..n 0..1"
+
+msgid "1*"
+msgstr "1 0..n"
+
+msgid "1+"
+msgstr "1 1..n"
+
+msgid "11"
+msgstr "1 1"
+
+msgid "1?"
+msgstr "1 0..1"
+
+#, python-format
+msgid "<%s not specified>"
+msgstr "<%s nicht spezifiziert>"
+
+#, python-format
+msgid ""
+"