# HG changeset patch
# User Denis Laxalde
# Date 1452948531 -3600
# Node ID 0b59724cb3f21e2414b86fc37f759dd41da83330
# Parent 058bb3dc685f4692882a0c47646184713ecd3be4
Reorganize source tree to have a "cubicweb" top-level package
Basically:
mkdir cubicweb
hg mv *.py -X setup.py cubicweb
hg mv dataimport devtools entities etwist ext hooks i18n misc schemas server skeleton sobjects test web wsgi cubicweb
Other changes:
* adjust path to cubicweb-ctl in devtools tests
* update setup.py to avoid importing __pkginfo__ (exec it instead),
replace os.path.walk by os.walk and prepend `modname` here and there
* update tox.ini to account for new test locations
* update doc/conf.py so that it still finds __pkginfo__.py and CWDIR in
doc/Makefile
diff -r 058bb3dc685f -r 0b59724cb3f2 MANIFEST.in
--- a/MANIFEST.in Mon Jan 04 18:40:30 2016 +0100
+++ b/MANIFEST.in Sat Jan 16 13:48:51 2016 +0100
@@ -19,33 +19,33 @@
recursive-include doc/images *.png *.svg
include doc/conf.py
-recursive-include misc *.py *.png *.display
+recursive-include cubicweb/misc *.py *.png *.display
-include web/views/*.pt
-recursive-include web/data external_resources *.js *.css *.py *.png *.gif *.ico *.ttf *.svg *.woff *.eot
-recursive-include web/wdoc *.rst *.png *.xml ChangeLog*
-recursive-include devtools/data *.js *.css *.sh
+include cubicweb/web/views/*.pt
+recursive-include cubicweb/web/data external_resources *.js *.css *.py *.png *.gif *.ico *.ttf *.svg *.woff *.eot
+recursive-include cubicweb/web/wdoc *.rst *.png *.xml ChangeLog*
+recursive-include cubicweb/devtools/data *.js *.css *.sh
-recursive-include i18n *.pot *.po
-recursive-include schemas *.py *.sql
+recursive-include cubicweb/i18n *.pot *.po
+recursive-include cubicweb/schemas *.py *.sql
-recursive-include test/data bootstrap_cubes *.py *.sql
-recursive-include entities/test/data bootstrap_cubes *.py
-recursive-include sobjects/test/data bootstrap_cubes *.py
-recursive-include hooks/test/data bootstrap_cubes *.py
-recursive-include server/test/data bootstrap_cubes *.py source* *.conf.in *.ldif
-recursive-include devtools/test/data bootstrap_cubes *.py *.txt *.js *.po.ref
-recursive-include web/test/data bootstrap_cubes pouet.css *.py
-recursive-include etwist/test/data *.py
+recursive-include cubicweb/test/data bootstrap_cubes *.py *.sql
+recursive-include cubicweb/entities/test/data bootstrap_cubes *.py
+recursive-include cubicweb/sobjects/test/data bootstrap_cubes *.py
+recursive-include cubicweb/hooks/test/data bootstrap_cubes *.py
+recursive-include cubicweb/server/test/data bootstrap_cubes *.py source* *.conf.in *.ldif
+recursive-include cubicweb/devtools/test/data bootstrap_cubes *.py *.txt *.js *.po.ref
+recursive-include cubicweb/web/test/data bootstrap_cubes pouet.css *.py
+recursive-include cubicweb/etwist/test/data *.py
-recursive-include web/test/jstests *.js *.html *.css *.json
-recursive-include web/test/windmill *.py
+recursive-include cubicweb/web/test/jstests *.js *.html *.css *.json
+recursive-include cubicweb/web/test/windmill *.py
-recursive-include skeleton *.py *.css *.js *.po compat *.in *.tmpl rules
+recursive-include cubicweb/skeleton *.py *.css *.js *.po compat *.in *.tmpl rules
prune doc/book/en/.static
prune doc/book/fr/.static
prune doc/html/_sources/
-prune misc/cwfs
+prune cubicweb/misc/cwfs
prune doc/js_api
global-exclude *.pyc
diff -r 058bb3dc685f -r 0b59724cb3f2 __init__.py
--- a/__init__.py Mon Jan 04 18:40:30 2016 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,265 +0,0 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
-# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
-#
-# This file is part of CubicWeb.
-#
-# CubicWeb is free software: you can redistribute it and/or modify it under the
-# terms of the GNU Lesser General Public License as published by the Free
-# Software Foundation, either version 2.1 of the License, or (at your option)
-# any later version.
-#
-# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
-# details.
-#
-# You should have received a copy of the GNU Lesser General Public License along
-# with CubicWeb. If not, see .
-"""CubicWeb is a generic framework to quickly build applications which describes
-relations between entitites.
-"""
-__docformat__ = "restructuredtext en"
-
-# ignore the pygments UserWarnings
-import warnings
-import zlib
-warnings.filterwarnings('ignore', category=UserWarning,
- message='.*was already imported',
- module='.*pygments')
-
-
-from six import PY2, binary_type, text_type
-from six.moves import builtins
-
-CW_SOFTWARE_ROOT = __path__[0]
-
-import sys, os, logging
-from io import BytesIO
-
-from six.moves import cPickle as pickle
-
-from logilab.common.deprecation import deprecated
-from logilab.common.logging_ext import set_log_methods
-from yams.constraints import BASE_CONVERTERS, BASE_CHECKERS
-
-# pre python 2.7.2 safety
-logging.basicConfig()
-
-from cubicweb.__pkginfo__ import version as __version__
-
-
-set_log_methods(sys.modules[__name__], logging.getLogger('cubicweb'))
-
-# make all exceptions accessible from the package
-from cubicweb._exceptions import *
-from logilab.common.registry import ObjectNotFound, NoSelectableObject, RegistryNotFound
-
-
-# '_' is available to mark internationalized string but should not be used to
-# do the actual translation
-_ = text_type
-if not hasattr(builtins, '_'):
- builtins._ = deprecated("[3.22] Use 'from cubicweb import _'")(_)
-
-
-# convert eid to the right type, raise ValueError if it's not a valid eid
-@deprecated('[3.17] typed_eid() was removed. replace it with int() when needed.')
-def typed_eid(eid):
- return int(eid)
-
-#def log_thread(f, w, a):
-# print f.f_code.co_filename, f.f_code.co_name
-#import threading
-#threading.settrace(log_thread)
-
-class Binary(BytesIO):
- """class to hold binary data. Use BytesIO to prevent use of unicode data"""
- _allowed_types = (binary_type, bytearray, buffer if PY2 else memoryview)
-
- def __init__(self, buf=b''):
- assert isinstance(buf, self._allowed_types), \
- "Binary objects must use bytes/buffer objects, not %s" % buf.__class__
- super(Binary, self).__init__(buf)
-
- def write(self, data):
- assert isinstance(data, self._allowed_types), \
- "Binary objects must use bytes/buffer objects, not %s" % data.__class__
- super(Binary, self).write(data)
-
- def to_file(self, fobj):
- """write a binary to disk
-
- the writing is performed in a safe way for files stored on
- Windows SMB shares
- """
- pos = self.tell()
- self.seek(0)
- if sys.platform == 'win32':
- while True:
- # the 16kB chunksize comes from the shutil module
- # in stdlib
- chunk = self.read(16*1024)
- if not chunk:
- break
- fobj.write(chunk)
- else:
- fobj.write(self.read())
- self.seek(pos)
-
- @staticmethod
- def from_file(filename):
- """read a file and returns its contents in a Binary
-
- the reading is performed in a safe way for files stored on
- Windows SMB shares
- """
- binary = Binary()
- with open(filename, 'rb') as fobj:
- if sys.platform == 'win32':
- while True:
- # the 16kB chunksize comes from the shutil module
- # in stdlib
- chunk = fobj.read(16*1024)
- if not chunk:
- break
- binary.write(chunk)
- else:
- binary.write(fobj.read())
- binary.seek(0)
- return binary
-
- def __eq__(self, other):
- if not isinstance(other, Binary):
- return False
- return self.getvalue() == other.getvalue()
-
-
- # Binary helpers to store/fetch python objects
-
- @classmethod
- def zpickle(cls, obj):
- """ return a Binary containing a gzipped pickle of obj """
- retval = cls()
- retval.write(zlib.compress(pickle.dumps(obj, protocol=2)))
- return retval
-
- def unzpickle(self):
- """ decompress and loads the stream before returning it """
- return pickle.loads(zlib.decompress(self.getvalue()))
-
-
-def check_password(eschema, value):
- return isinstance(value, (binary_type, Binary))
-BASE_CHECKERS['Password'] = check_password
-
-def str_or_binary(value):
- if isinstance(value, Binary):
- return value
- return binary_type(value)
-BASE_CONVERTERS['Password'] = str_or_binary
-
-
-# use this dictionary to rename entity types while keeping bw compat
-ETYPE_NAME_MAP = {}
-
-# XXX cubic web cube migration map. See if it's worth keeping this mecanism
-# to help in cube renaming
-CW_MIGRATION_MAP = {}
-
-def neg_role(role):
- if role == 'subject':
- return 'object'
- return 'subject'
-
-def role(obj):
- try:
- return obj.role
- except AttributeError:
- return neg_role(obj.target)
-
-def target(obj):
- try:
- return obj.target
- except AttributeError:
- return neg_role(obj.role)
-
-
-class CubicWebEventManager(object):
- """simple event / callback manager.
-
- Typical usage to register a callback::
-
- >>> from cubicweb import CW_EVENT_MANAGER
- >>> CW_EVENT_MANAGER.bind('after-registry-reload', mycallback)
-
- Typical usage to emit an event::
-
- >>> from cubicweb import CW_EVENT_MANAGER
- >>> CW_EVENT_MANAGER.emit('after-registry-reload')
-
- emit() accepts an additional context parameter that will be passed
- to the callback if specified (and only in that case)
- """
- def __init__(self):
- self.callbacks = {}
-
- def bind(self, event, callback, *args, **kwargs):
- self.callbacks.setdefault(event, []).append( (callback, args, kwargs) )
-
- def emit(self, event, context=None):
- for callback, args, kwargs in self.callbacks.get(event, ()):
- if context is None:
- callback(*args, **kwargs)
- else:
- callback(context, *args, **kwargs)
-
-CW_EVENT_MANAGER = CubicWebEventManager()
-
-def onevent(event, *args, **kwargs):
- """decorator to ease event / callback binding
-
- >>> from cubicweb import onevent
- >>> @onevent('before-registry-reload')
- ... def mycallback():
- ... print 'hello'
- ...
- >>>
- """
- def _decorator(func):
- CW_EVENT_MANAGER.bind(event, func, *args, **kwargs)
- return func
- return _decorator
-
-
-from yams.schema import role_name as rname
-
-def validation_error(entity, errors, substitutions=None, i18nvalues=None):
- """easy way to retrieve a :class:`cubicweb.ValidationError` for an entity or eid.
-
- You may also have 2-tuple as error keys, :func:`yams.role_name` will be
- called automatically for them.
-
- Messages in errors **should not be translated yet**, though marked for
- internationalization. You may give an additional substition dictionary that
- will be used for interpolation after the translation.
- """
- if substitutions is None:
- # set empty dict else translation won't be done for backward
- # compatibility reason (see ValidationError.translate method)
- substitutions = {}
- for key in list(errors):
- if isinstance(key, tuple):
- errors[rname(*key)] = errors.pop(key)
- return ValidationError(getattr(entity, 'eid', entity), errors,
- substitutions, i18nvalues)
-
-
-# exceptions ##################################################################
-
-class ProgrammingError(Exception): #DatabaseError):
- """Exception raised for errors that are related to the database's operation
- and not necessarily under the control of the programmer, e.g. an unexpected
- disconnect occurs, the data source name is not found, a transaction could
- not be processed, a memory allocation error occurred during processing,
- etc.
- """
diff -r 058bb3dc685f -r 0b59724cb3f2 __pkginfo__.py
--- a/__pkginfo__.py Mon Jan 04 18:40:30 2016 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,138 +0,0 @@
-# pylint: disable=W0622,C0103
-# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
-# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
-#
-# This file is part of CubicWeb.
-#
-# CubicWeb is free software: you can redistribute it and/or modify it under the
-# terms of the GNU Lesser General Public License as published by the Free
-# Software Foundation, either version 2.1 of the License, or (at your option)
-# any later version.
-#
-# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
-# details.
-#
-# You should have received a copy of the GNU Lesser General Public License along
-# with CubicWeb. If not, see .
-"""cubicweb global packaging information for the cubicweb knowledge management
-software
-"""
-
-modname = distname = "cubicweb"
-
-numversion = (3, 22, 0)
-version = '.'.join(str(num) for num in numversion)
-
-description = "a repository of entities / relations for knowledge management"
-author = "Logilab"
-author_email = "contact@logilab.fr"
-web = 'http://www.cubicweb.org'
-license = 'LGPL'
-
-classifiers = [
- 'Environment :: Web Environment',
- 'Framework :: CubicWeb',
- 'Programming Language :: Python',
- 'Programming Language :: JavaScript',
-]
-
-__depends__ = {
- 'six': '>= 1.4.0',
- 'logilab-common': '>= 0.63.1',
- 'logilab-mtconverter': '>= 0.8.0',
- 'rql': '>= 0.34.0',
- 'yams': '>= 0.42.0',
- #gettext # for xgettext, msgcat, etc...
- # web dependencies
- 'lxml': '',
- # XXX graphviz
- # server dependencies
- 'logilab-database': '>= 1.15.0',
- 'passlib': '',
- 'pytz': '',
- 'Markdown': ''
- }
-
-__recommends__ = {
- 'docutils': '>= 0.6',
- 'Pillow': '', # for captcha
- 'pycrypto': '', # for crypto extensions
- 'fyzz': '>= 0.1.0', # for sparql
- 'vobject': '>= 0.6.0', # for ical view
- 'rdflib': None, #
- 'pyzmq': None,
- 'Twisted': '',
- #'Products.FCKeditor':'',
- #'SimpleTAL':'>= 4.1.6',
- }
-
-import sys
-from os import listdir, environ
-from os.path import join, isdir
-import glob
-
-scripts = [s for s in glob.glob(join('bin', 'cubicweb-*'))
- if not s.endswith('.bat')]
-include_dirs = [join('test', 'data'),
- join('server', 'test', 'data'),
- join('hooks', 'test', 'data'),
- join('web', 'test', 'data'),
- join('devtools', 'data'),
- join('devtools', 'test', 'data'),
- 'schemas', 'skeleton']
-
-
-_server_migration_dir = join('misc', 'migration')
-_data_dir = join('web', 'data')
-_wdoc_dir = join('web', 'wdoc')
-_wdocimages_dir = join(_wdoc_dir, 'images')
-_views_dir = join('web', 'views')
-_i18n_dir = 'i18n'
-
-_pyversion = '.'.join(str(num) for num in sys.version_info[0:2])
-if '--home' in sys.argv:
- # --home install
- pydir = 'python' + _pyversion
-else:
- pydir = join('python' + _pyversion, 'site-packages')
-
-# data files that shall be copied into the main package directory
-package_data = {
- 'cubicweb.web.views':['*.pt'],
- }
-
-try:
- # data files that shall be copied outside the main package directory
- data_files = [
- # server data
- [join('share', 'cubicweb', 'schemas'),
- glob.glob(join('schemas', '*.sql'))],
- [join('share', 'cubicweb', 'migration'),
- [join(_server_migration_dir, filename)
- for filename in listdir(_server_migration_dir)]],
- # web data
- [join('share', 'cubicweb', 'cubes', 'shared', 'data'),
- [join(_data_dir, fname) for fname in listdir(_data_dir)
- if not isdir(join(_data_dir, fname))]],
- [join('share', 'cubicweb', 'cubes', 'shared', 'data', 'images'),
- [join(_data_dir, 'images', fname) for fname in listdir(join(_data_dir, 'images'))]],
- [join('share', 'cubicweb', 'cubes', 'shared', 'data', 'jquery-treeview'),
- [join(_data_dir, 'jquery-treeview', fname) for fname in listdir(join(_data_dir, 'jquery-treeview'))
- if not isdir(join(_data_dir, 'jquery-treeview', fname))]],
- [join('share', 'cubicweb', 'cubes', 'shared', 'data', 'jquery-treeview', 'images'),
- [join(_data_dir, 'jquery-treeview', 'images', fname)
- for fname in listdir(join(_data_dir, 'jquery-treeview', 'images'))]],
- [join('share', 'cubicweb', 'cubes', 'shared', 'wdoc'),
- [join(_wdoc_dir, fname) for fname in listdir(_wdoc_dir)
- if not isdir(join(_wdoc_dir, fname))]],
- [join('share', 'cubicweb', 'cubes', 'shared', 'wdoc', 'images'),
- [join(_wdocimages_dir, fname) for fname in listdir(_wdocimages_dir)]],
- [join('share', 'cubicweb', 'cubes', 'shared', 'i18n'),
- glob.glob(join(_i18n_dir, '*.po'))],
- # skeleton
- ]
-except OSError:
- # we are in an installed directory, don't care about this
- pass
diff -r 058bb3dc685f -r 0b59724cb3f2 _exceptions.py
--- a/_exceptions.py Mon Jan 04 18:40:30 2016 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,209 +0,0 @@
-# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
-# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
-#
-# This file is part of CubicWeb.
-#
-# CubicWeb is free software: you can redistribute it and/or modify it under the
-# terms of the GNU Lesser General Public License as published by the Free
-# Software Foundation, either version 2.1 of the License, or (at your option)
-# any later version.
-#
-# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
-# details.
-#
-# You should have received a copy of the GNU Lesser General Public License along
-# with CubicWeb. If not, see .
-"""Exceptions shared by different cubicweb packages."""
-
-__docformat__ = "restructuredtext en"
-
-from warnings import warn
-
-from six import PY3, text_type
-
-from logilab.common.decorators import cachedproperty
-
-from yams import ValidationError
-
-# abstract exceptions #########################################################
-
-class CubicWebException(Exception):
- """base class for cubicweb server exception"""
- msg = ""
- def __unicode__(self):
- if self.msg:
- if self.args:
- return self.msg % tuple(self.args)
- else:
- return self.msg
- else:
- return u' '.join(text_type(arg) for arg in self.args)
- __str__ = __unicode__ if PY3 else lambda self: self.__unicode__().encode('utf-8')
-
-class ConfigurationError(CubicWebException):
- """a misconfiguration error"""
-
-class InternalError(CubicWebException):
- """base class for exceptions which should not occur"""
-
-class SecurityError(CubicWebException):
- """base class for cubicweb server security exceptions"""
-
-class RepositoryError(CubicWebException):
- """base class for repository exceptions"""
-
-class SourceException(CubicWebException):
- """base class for source exceptions"""
-
-class CubicWebRuntimeError(CubicWebException):
- """base class for runtime exceptions"""
-
-# repository exceptions #######################################################
-
-class ConnectionError(RepositoryError):
- """raised when a bad connection id is given or when an attempt to establish
- a connection failed
- """
-
-class AuthenticationError(ConnectionError):
- """raised when an attempt to establish a connection failed due to wrong
- connection information (login / password or other authentication token)
- """
-
-class BadConnectionId(ConnectionError):
- """raised when a bad connection id is given"""
-
-class UnknownEid(RepositoryError):
- """the eid is not defined in the system tables"""
- msg = 'No entity with eid %s in the repository'
-
-class UniqueTogetherError(RepositoryError):
- """raised when a unique_together constraint caused an IntegrityError"""
- def __init__(self, session, **kwargs):
- self.session = session
- assert 'rtypes' in kwargs or 'cstrname' in kwargs
- self.kwargs = kwargs
- # fill cache while the session is open
- self.rtypes
-
- @cachedproperty
- def rtypes(self):
- if 'rtypes' in self.kwargs:
- return self.kwargs['rtypes']
- cstrname = unicode(self.kwargs['cstrname'])
- cstr = self.session.find('CWUniqueTogetherConstraint', name=cstrname).one()
- return sorted(rtype.name for rtype in cstr.relations)
-
- @cachedproperty
- def args(self):
- warn('[3.18] UniqueTogetherError.args is deprecated, just use '
- 'the .rtypes accessor.',
- DeprecationWarning)
- # the first argument, etype, is never used and was never garanteed anyway
- return None, self.rtypes
-
-
-class ViolatedConstraint(RepositoryError):
- def __init__(self, cnx, cstrname):
- self.cnx = cnx
- self.cstrname = cstrname
-
-
-# security exceptions #########################################################
-
-class Unauthorized(SecurityError):
- """raised when a user tries to perform an action without sufficient
- credentials
- """
- msg = 'You are not allowed to perform this operation'
- msg1 = 'You are not allowed to perform %s operation on %s'
- var = None
-
- def __str__(self):
- try:
- if self.args and len(self.args) == 2:
- return self.msg1 % self.args
- if self.args:
- return ' '.join(self.args)
- return self.msg
- except Exception as ex:
- return str(ex)
-
-class Forbidden(SecurityError):
- """raised when a user tries to perform a forbidden action
- """
-
-# source exceptions ###########################################################
-
-class EidNotInSource(SourceException):
- """trying to access an object with a particular eid from a particular
- source has failed
- """
- msg = 'No entity with eid %s in %s'
-
-
-# registry exceptions #########################################################
-
-# pre 3.15 bw compat
-from logilab.common.registry import RegistryException, ObjectNotFound, NoSelectableObject
-
-class UnknownProperty(RegistryException):
- """property found in database but unknown in registry"""
-
-# query exception #############################################################
-
-class QueryError(CubicWebRuntimeError):
- """a query try to do something it shouldn't"""
-
-class NotAnEntity(CubicWebRuntimeError):
- """raised when get_entity is called for a column which doesn't contain
- a non final entity
- """
-
-class MultipleResultsError(CubicWebRuntimeError):
- """raised when ResultSet.one() is called on a resultset with multiple rows
- of multiple columns.
- """
-
-class NoResultError(CubicWebRuntimeError):
- """raised when no result is found but at least one is expected.
- """
-
-class UndoTransactionException(QueryError):
- """Raised when undoing a transaction could not be performed completely.
-
- Note that :
- 1) the partial undo operation might be acceptable
- depending upon the final application
-
- 2) the undo operation can also fail with a `ValidationError` in
- cases where the undoing breaks integrity constraints checked
- immediately.
-
- 3) It might be that neither of those exception is raised but a
- subsequent `commit` might raise a `ValidationError` in cases
- where the undoing breaks integrity constraints checked at
- commit time.
-
- :type txuuix: int
- :param txuuid: Unique identifier of the partially undone transaction
-
- :type errors: list
- :param errors: List of errors occurred during undoing
- """
- msg = u"The following error(s) occurred while undoing transaction #%d : %s"
-
- def __init__(self, txuuid, errors):
- super(UndoTransactionException, self).__init__(txuuid, errors)
- self.txuuid = txuuid
- self.errors = errors
-
-# tools exceptions ############################################################
-
-class ExecutionError(Exception):
- """server execution control error (already started, not running...)"""
-
-# pylint: disable=W0611
-from logilab.common.clcommands import BadCommandUsage
diff -r 058bb3dc685f -r 0b59724cb3f2 _gcdebug.py
--- a/_gcdebug.py Mon Jan 04 18:40:30 2016 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,112 +0,0 @@
-# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
-# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
-#
-# This file is part of CubicWeb.
-#
-# CubicWeb is free software: you can redistribute it and/or modify it under the
-# terms of the GNU Lesser General Public License as published by the Free
-# Software Foundation, either version 2.1 of the License, or (at your option)
-# any later version.
-#
-# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
-# details.
-#
-# You should have received a copy of the GNU Lesser General Public License along
-# with CubicWeb. If not, see .
-from __future__ import print_function
-
-import gc, types, weakref
-
-from cubicweb.schema import CubicWebRelationSchema, CubicWebEntitySchema
-try:
- from cubicweb.web.request import _NeedAuthAccessMock
-except ImportError:
- _NeedAuthAccessMock = None
-
-listiterator = type(iter([]))
-
-IGNORE_CLASSES = (
- type, tuple, dict, list, set, frozenset, type(len),
- weakref.ref, weakref.WeakKeyDictionary,
- listiterator,
- property, classmethod,
- types.ModuleType, types.FunctionType, types.MethodType,
- types.MemberDescriptorType, types.GetSetDescriptorType,
- )
-if _NeedAuthAccessMock is not None:
- IGNORE_CLASSES = IGNORE_CLASSES + (_NeedAuthAccessMock,)
-
-def _get_counted_class(obj, classes):
- for cls in classes:
- if isinstance(obj, cls):
- return cls
- raise AssertionError()
-
-def gc_info(countclasses,
- ignoreclasses=IGNORE_CLASSES,
- viewreferrersclasses=(), showobjs=False, maxlevel=1):
- gc.collect()
- gc.collect()
- counters = {}
- ocounters = {}
- for obj in gc.get_objects():
- if isinstance(obj, countclasses):
- cls = _get_counted_class(obj, countclasses)
- try:
- counters[cls.__name__] += 1
- except KeyError:
- counters[cls.__name__] = 1
- elif not isinstance(obj, ignoreclasses):
- try:
- key = '%s.%s' % (obj.__class__.__module__,
- obj.__class__.__name__)
- except AttributeError:
- key = str(obj)
- try:
- ocounters[key] += 1
- except KeyError:
- ocounters[key] = 1
- if isinstance(obj, viewreferrersclasses):
- print(' ', obj, referrers(obj, showobjs, maxlevel))
- garbage = [repr(obj) for obj in gc.garbage]
- return counters, ocounters, garbage
-
-
-def referrers(obj, showobj=False, maxlevel=1):
- objreferrers = _referrers(obj, maxlevel)
- try:
- return sorted(set((type(x), showobj and x or getattr(x, '__name__', '%#x' % id(x)))
- for x in objreferrers))
- except TypeError:
- s = set()
- unhashable = []
- for x in objreferrers:
- try:
- s.add(x)
- except TypeError:
- unhashable.append(x)
- return sorted(s) + unhashable
-
-def _referrers(obj, maxlevel, _seen=None, _level=0):
- interesting = []
- if _seen is None:
- _seen = set()
- for x in gc.get_referrers(obj):
- if id(x) in _seen:
- continue
- _seen.add(id(x))
- if isinstance(x, types.FrameType):
- continue
- if isinstance(x, (CubicWebRelationSchema, CubicWebEntitySchema)):
- continue
- if isinstance(x, (list, tuple, set, dict, listiterator)):
- if _level >= maxlevel:
- pass
- #interesting.append(x)
- else:
- interesting += _referrers(x, maxlevel, _seen, _level+1)
- else:
- interesting.append(x)
- return interesting
diff -r 058bb3dc685f -r 0b59724cb3f2 appobject.py
--- a/appobject.py Mon Jan 04 18:40:30 2016 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,161 +0,0 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
-# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
-#
-# This file is part of CubicWeb.
-#
-# CubicWeb is free software: you can redistribute it and/or modify it under the
-# terms of the GNU Lesser General Public License as published by the Free
-# Software Foundation, either version 2.1 of the License, or (at your option)
-# any later version.
-#
-# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
-# details.
-#
-# You should have received a copy of the GNU Lesser General Public License along
-# with CubicWeb. If not, see .
-"""
-
-The `AppObject` class
----------------------
-
-The AppObject class is the base class for all dynamically loaded objects
-(application objects) accessible through the vregistry.
-
-We can find a certain number of attributes and methods defined in this class and
-common to all the application objects.
-
-"""
-__docformat__ = "restructuredtext en"
-
-from logging import getLogger
-
-from logilab.common.deprecation import deprecated, class_renamed
-from logilab.common.logging_ext import set_log_methods
-
-# first line imports for bw compat
-from logilab.common.registry import (objectify_predicate, traced_selection, Predicate,
- RegistrableObject, yes)
-
-
-objectify_selector = deprecated('[3.15] objectify_selector has been '
- 'renamed to objectify_predicates in '
- 'logilab.common.registry')(objectify_predicate)
-traced_selection = deprecated('[3.15] traced_selection has been '
- 'moved to logilab.common.registry')(traced_selection)
-Selector = class_renamed('Selector', Predicate,
- '[3.15] Selector has been renamed to Predicate '
- 'in logilab.common.registry')
-
-@deprecated('[3.15] lltrace decorator can now be removed')
-def lltrace(func):
- return func
-
-# the base class for all appobjects ############################################
-
-class AppObject(RegistrableObject):
- """This is the base class for CubicWeb application objects which are
- selected in a request context.
-
- The following attributes should be set on concrete appobject classes:
-
- At selection time, the following attributes are set on the instance:
-
- :attr:`_cw`
- current request
- :attr:`cw_extra_kwargs`
- other received arguments
-
- And also the following, only if `rset` is found in arguments (in which case
- rset/row/col will be removed from `cwextra_kwargs`):
-
- :attr:`cw_rset`
- context result set or None
-
- :attr:`cw_row`
- if a result set is set and the context is about a particular cell in the
- result set, and not the result set as a whole, specify the row number we
- are interested in, else None
-
- :attr:`cw_col`
- if a result set is set and the context is about a particular cell in the
- result set, and not the result set as a whole, specify the col number we
- are interested in, else None
-
-
- .. Note::
-
- * do not inherit directly from this class but from a more specific class
- such as `AnyEntity`, `EntityView`, `AnyRsetView`, `Action`...
-
- """
- __select__ = yes()
-
- @classmethod
- def __registered__(cls, registry):
- """called by the registry when the appobject has been registered.
-
- It must return the object that will be actually registered (this may be
- the right hook to create an instance for example). By default the
- appobject is returned without any transformation.
- """
- pdefs = getattr(cls, 'cw_property_defs', {})
- for propid, pdef in pdefs.items():
- pdef = pdef.copy() # may be shared
- pdef['default'] = getattr(cls, propid, pdef['default'])
- pdef['sitewide'] = getattr(cls, 'site_wide', pdef.get('sitewide'))
- registry.vreg.register_property(cls._cwpropkey(propid), **pdef)
- assert callable(cls.__select__), cls
- return cls
-
- def __init__(self, req, **extra):
- super(AppObject, self).__init__()
- self._cw = req
- try:
- self.cw_rset = extra.pop('rset')
- self.cw_row = extra.pop('row', None)
- self.cw_col = extra.pop('col', None)
- except KeyError:
- pass
- self.cw_extra_kwargs = extra
-
- # persistent class properties ##############################################
- #
- # optional `cw_property_defs` dict on a class defines available persistent
- # properties for this class:
- #
- # * key: id of the property (the actual CWProperty key is build using
- # ..
- # * value: tuple (property type, vocabfunc, default value, property description)
- # possible types are those used by `logilab.common.configuration`
- #
- # notice that when it exists multiple objects with the same id (adaptation,
- # overriding) only the first encountered definition is considered, so those
- # objects can't try to have different default values for instance.
- #
- # you can then access to a property value using self.cw_propval, where self
- # is an instance of class
-
- @classmethod
- def _cwpropkey(cls, propid):
- """return cw property key for the property of the given id for this
- class
- """
- return '%s.%s.%s' % (cls.__registry__, cls.__regid__, propid)
-
- def cw_propval(self, propid):
- """return cw property value associated to key
-
- ..
- """
- return self._cw.property_value(self._cwpropkey(propid))
-
- # these are overridden by set_log_methods below
- # only defining here to prevent pylint from complaining
- info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
-
-set_log_methods(AppObject, getLogger('cubicweb.appobject'))
-
-# defined here to avoid warning on usage on the AppObject class
-yes = deprecated('[3.15] yes has been moved to logilab.common.registry')(yes)
diff -r 058bb3dc685f -r 0b59724cb3f2 crypto.py
--- a/crypto.py Mon Jan 04 18:40:30 2016 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,47 +0,0 @@
-# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
-# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
-#
-# This file is part of CubicWeb.
-#
-# CubicWeb is free software: you can redistribute it and/or modify it under the
-# terms of the GNU Lesser General Public License as published by the Free
-# Software Foundation, either version 2.1 of the License, or (at your option)
-# any later version.
-#
-# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
-# details.
-#
-# You should have received a copy of the GNU Lesser General Public License along
-# with CubicWeb. If not, see .
-"""Simple cryptographic routines, based on python-crypto."""
-__docformat__ = "restructuredtext en"
-
-from base64 import b64encode, b64decode
-
-from six.moves import cPickle as pickle
-
-from Crypto.Cipher import Blowfish
-
-
-_CYPHERERS = {}
-def _cypherer(seed):
- try:
- return _CYPHERERS[seed]
- except KeyError:
- _CYPHERERS[seed] = Blowfish.new(seed, Blowfish.MODE_ECB)
- return _CYPHERERS[seed]
-
-
-def encrypt(data, seed):
- string = pickle.dumps(data)
- string = string + '*' * (8 - len(string) % 8)
- string = b64encode(_cypherer(seed).encrypt(string))
- return unicode(string)
-
-
-def decrypt(string, seed):
- # pickle ignores trailing characters so we do not need to strip them off
- string = _cypherer(seed).decrypt(b64decode(string))
- return pickle.loads(string)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/__init__.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/__init__.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,265 @@
+# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see .
+"""CubicWeb is a generic framework to quickly build applications which describes
+relations between entitites.
+"""
+__docformat__ = "restructuredtext en"
+
+# ignore the pygments UserWarnings
+import warnings
+import zlib
+warnings.filterwarnings('ignore', category=UserWarning,
+ message='.*was already imported',
+ module='.*pygments')
+
+
+from six import PY2, binary_type, text_type
+from six.moves import builtins
+
+CW_SOFTWARE_ROOT = __path__[0]
+
+import sys, os, logging
+from io import BytesIO
+
+from six.moves import cPickle as pickle
+
+from logilab.common.deprecation import deprecated
+from logilab.common.logging_ext import set_log_methods
+from yams.constraints import BASE_CONVERTERS, BASE_CHECKERS
+
+# pre python 2.7.2 safety
+logging.basicConfig()
+
+from cubicweb.__pkginfo__ import version as __version__
+
+
+set_log_methods(sys.modules[__name__], logging.getLogger('cubicweb'))
+
+# make all exceptions accessible from the package
+from cubicweb._exceptions import *
+from logilab.common.registry import ObjectNotFound, NoSelectableObject, RegistryNotFound
+
+
+# '_' is available to mark internationalized string but should not be used to
+# do the actual translation
+_ = text_type
+if not hasattr(builtins, '_'):
+ builtins._ = deprecated("[3.22] Use 'from cubicweb import _'")(_)
+
+
+# convert eid to the right type, raise ValueError if it's not a valid eid
+@deprecated('[3.17] typed_eid() was removed. replace it with int() when needed.')
+def typed_eid(eid):
+ return int(eid)
+
+#def log_thread(f, w, a):
+# print f.f_code.co_filename, f.f_code.co_name
+#import threading
+#threading.settrace(log_thread)
+
+class Binary(BytesIO):
+ """class to hold binary data. Use BytesIO to prevent use of unicode data"""
+ _allowed_types = (binary_type, bytearray, buffer if PY2 else memoryview)
+
+ def __init__(self, buf=b''):
+ assert isinstance(buf, self._allowed_types), \
+ "Binary objects must use bytes/buffer objects, not %s" % buf.__class__
+ super(Binary, self).__init__(buf)
+
+ def write(self, data):
+ assert isinstance(data, self._allowed_types), \
+ "Binary objects must use bytes/buffer objects, not %s" % data.__class__
+ super(Binary, self).write(data)
+
+ def to_file(self, fobj):
+ """write a binary to disk
+
+ the writing is performed in a safe way for files stored on
+ Windows SMB shares
+ """
+ pos = self.tell()
+ self.seek(0)
+ if sys.platform == 'win32':
+ while True:
+ # the 16kB chunksize comes from the shutil module
+ # in stdlib
+ chunk = self.read(16*1024)
+ if not chunk:
+ break
+ fobj.write(chunk)
+ else:
+ fobj.write(self.read())
+ self.seek(pos)
+
+ @staticmethod
+ def from_file(filename):
+ """read a file and returns its contents in a Binary
+
+ the reading is performed in a safe way for files stored on
+ Windows SMB shares
+ """
+ binary = Binary()
+ with open(filename, 'rb') as fobj:
+ if sys.platform == 'win32':
+ while True:
+ # the 16kB chunksize comes from the shutil module
+ # in stdlib
+ chunk = fobj.read(16*1024)
+ if not chunk:
+ break
+ binary.write(chunk)
+ else:
+ binary.write(fobj.read())
+ binary.seek(0)
+ return binary
+
+ def __eq__(self, other):
+ if not isinstance(other, Binary):
+ return False
+ return self.getvalue() == other.getvalue()
+
+
+ # Binary helpers to store/fetch python objects
+
+ @classmethod
+ def zpickle(cls, obj):
+ """ return a Binary containing a gzipped pickle of obj """
+ retval = cls()
+ retval.write(zlib.compress(pickle.dumps(obj, protocol=2)))
+ return retval
+
+ def unzpickle(self):
+ """ decompress and loads the stream before returning it """
+ return pickle.loads(zlib.decompress(self.getvalue()))
+
+
+def check_password(eschema, value):
+ return isinstance(value, (binary_type, Binary))
+BASE_CHECKERS['Password'] = check_password
+
+def str_or_binary(value):
+ if isinstance(value, Binary):
+ return value
+ return binary_type(value)
+BASE_CONVERTERS['Password'] = str_or_binary
+
+
+# use this dictionary to rename entity types while keeping bw compat
+ETYPE_NAME_MAP = {}
+
+# XXX cubic web cube migration map. See if it's worth keeping this mecanism
+# to help in cube renaming
+CW_MIGRATION_MAP = {}
+
+def neg_role(role):
+ if role == 'subject':
+ return 'object'
+ return 'subject'
+
+def role(obj):
+ try:
+ return obj.role
+ except AttributeError:
+ return neg_role(obj.target)
+
+def target(obj):
+ try:
+ return obj.target
+ except AttributeError:
+ return neg_role(obj.role)
+
+
+class CubicWebEventManager(object):
+ """simple event / callback manager.
+
+ Typical usage to register a callback::
+
+ >>> from cubicweb import CW_EVENT_MANAGER
+ >>> CW_EVENT_MANAGER.bind('after-registry-reload', mycallback)
+
+ Typical usage to emit an event::
+
+ >>> from cubicweb import CW_EVENT_MANAGER
+ >>> CW_EVENT_MANAGER.emit('after-registry-reload')
+
+ emit() accepts an additional context parameter that will be passed
+ to the callback if specified (and only in that case)
+ """
+ def __init__(self):
+ self.callbacks = {}
+
+ def bind(self, event, callback, *args, **kwargs):
+ self.callbacks.setdefault(event, []).append( (callback, args, kwargs) )
+
+ def emit(self, event, context=None):
+ for callback, args, kwargs in self.callbacks.get(event, ()):
+ if context is None:
+ callback(*args, **kwargs)
+ else:
+ callback(context, *args, **kwargs)
+
+CW_EVENT_MANAGER = CubicWebEventManager()
+
+def onevent(event, *args, **kwargs):
+ """decorator to ease event / callback binding
+
+ >>> from cubicweb import onevent
+ >>> @onevent('before-registry-reload')
+ ... def mycallback():
+ ... print 'hello'
+ ...
+ >>>
+ """
+ def _decorator(func):
+ CW_EVENT_MANAGER.bind(event, func, *args, **kwargs)
+ return func
+ return _decorator
+
+
+from yams.schema import role_name as rname
+
+def validation_error(entity, errors, substitutions=None, i18nvalues=None):
+ """easy way to retrieve a :class:`cubicweb.ValidationError` for an entity or eid.
+
+ You may also have 2-tuple as error keys, :func:`yams.role_name` will be
+ called automatically for them.
+
+ Messages in errors **should not be translated yet**, though marked for
+ internationalization. You may give an additional substition dictionary that
+ will be used for interpolation after the translation.
+ """
+ if substitutions is None:
+ # set empty dict else translation won't be done for backward
+ # compatibility reason (see ValidationError.translate method)
+ substitutions = {}
+ for key in list(errors):
+ if isinstance(key, tuple):
+ errors[rname(*key)] = errors.pop(key)
+ return ValidationError(getattr(entity, 'eid', entity), errors,
+ substitutions, i18nvalues)
+
+
+# exceptions ##################################################################
+
+class ProgrammingError(Exception): #DatabaseError):
+ """Exception raised for errors that are related to the database's operation
+ and not necessarily under the control of the programmer, e.g. an unexpected
+ disconnect occurs, the data source name is not found, a transaction could
+ not be processed, a memory allocation error occurred during processing,
+ etc.
+ """
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/__pkginfo__.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/__pkginfo__.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,138 @@
+# pylint: disable=W0622,C0103
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see .
+"""cubicweb global packaging information for the cubicweb knowledge management
+software
+"""
+
+modname = distname = "cubicweb"
+
+numversion = (3, 22, 0)
+version = '.'.join(str(num) for num in numversion)
+
+description = "a repository of entities / relations for knowledge management"
+author = "Logilab"
+author_email = "contact@logilab.fr"
+web = 'http://www.cubicweb.org'
+license = 'LGPL'
+
+classifiers = [
+ 'Environment :: Web Environment',
+ 'Framework :: CubicWeb',
+ 'Programming Language :: Python',
+ 'Programming Language :: JavaScript',
+]
+
+__depends__ = {
+ 'six': '>= 1.4.0',
+ 'logilab-common': '>= 0.63.1',
+ 'logilab-mtconverter': '>= 0.8.0',
+ 'rql': '>= 0.34.0',
+ 'yams': '>= 0.42.0',
+ #gettext # for xgettext, msgcat, etc...
+ # web dependencies
+ 'lxml': '',
+ # XXX graphviz
+ # server dependencies
+ 'logilab-database': '>= 1.15.0',
+ 'passlib': '',
+ 'pytz': '',
+ 'Markdown': ''
+ }
+
+__recommends__ = {
+ 'docutils': '>= 0.6',
+ 'Pillow': '', # for captcha
+ 'pycrypto': '', # for crypto extensions
+ 'fyzz': '>= 0.1.0', # for sparql
+ 'vobject': '>= 0.6.0', # for ical view
+ 'rdflib': None, #
+ 'pyzmq': None,
+ 'Twisted': '',
+ #'Products.FCKeditor':'',
+ #'SimpleTAL':'>= 4.1.6',
+ }
+
+import sys
+from os import listdir, environ
+from os.path import join, isdir
+import glob
+
+scripts = [s for s in glob.glob(join('bin', 'cubicweb-*'))
+ if not s.endswith('.bat')]
+include_dirs = [join('test', 'data'),
+ join('server', 'test', 'data'),
+ join('hooks', 'test', 'data'),
+ join('web', 'test', 'data'),
+ join('devtools', 'data'),
+ join('devtools', 'test', 'data'),
+ 'schemas', 'skeleton']
+
+
+_server_migration_dir = join(modname, 'misc', 'migration')
+_data_dir = join(modname, 'web', 'data')
+_wdoc_dir = join(modname, 'web', 'wdoc')
+_wdocimages_dir = join(_wdoc_dir, 'images')
+_views_dir = join(modname, 'web', 'views')
+_i18n_dir = join(modname, 'i18n')
+
+_pyversion = '.'.join(str(num) for num in sys.version_info[0:2])
+if '--home' in sys.argv:
+ # --home install
+ pydir = 'python' + _pyversion
+else:
+ pydir = join('python' + _pyversion, 'site-packages')
+
+# data files that shall be copied into the main package directory
+package_data = {
+ 'cubicweb.web.views':['*.pt'],
+ }
+
+try:
+ # data files that shall be copied outside the main package directory
+ data_files = [
+ # server data
+ [join('share', 'cubicweb', 'schemas'),
+ glob.glob(join(modname, 'schemas', '*.sql'))],
+ [join('share', 'cubicweb', 'migration'),
+ [join(_server_migration_dir, filename)
+ for filename in listdir(_server_migration_dir)]],
+ # web data
+ [join('share', 'cubicweb', 'cubes', 'shared', 'data'),
+ [join(_data_dir, fname) for fname in listdir(_data_dir)
+ if not isdir(join(_data_dir, fname))]],
+ [join('share', 'cubicweb', 'cubes', 'shared', 'data', 'images'),
+ [join(_data_dir, 'images', fname) for fname in listdir(join(_data_dir, 'images'))]],
+ [join('share', 'cubicweb', 'cubes', 'shared', 'data', 'jquery-treeview'),
+ [join(_data_dir, 'jquery-treeview', fname) for fname in listdir(join(_data_dir, 'jquery-treeview'))
+ if not isdir(join(_data_dir, 'jquery-treeview', fname))]],
+ [join('share', 'cubicweb', 'cubes', 'shared', 'data', 'jquery-treeview', 'images'),
+ [join(_data_dir, 'jquery-treeview', 'images', fname)
+ for fname in listdir(join(_data_dir, 'jquery-treeview', 'images'))]],
+ [join('share', 'cubicweb', 'cubes', 'shared', 'wdoc'),
+ [join(_wdoc_dir, fname) for fname in listdir(_wdoc_dir)
+ if not isdir(join(_wdoc_dir, fname))]],
+ [join('share', 'cubicweb', 'cubes', 'shared', 'wdoc', 'images'),
+ [join(_wdocimages_dir, fname) for fname in listdir(_wdocimages_dir)]],
+ [join('share', 'cubicweb', 'cubes', 'shared', 'i18n'),
+ glob.glob(join(_i18n_dir, '*.po'))],
+ # skeleton
+ ]
+except OSError:
+ # we are in an installed directory, don't care about this
+ pass
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/_exceptions.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/_exceptions.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,209 @@
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see .
+"""Exceptions shared by different cubicweb packages."""
+
+__docformat__ = "restructuredtext en"
+
+from warnings import warn
+
+from six import PY3, text_type
+
+from logilab.common.decorators import cachedproperty
+
+from yams import ValidationError
+
+# abstract exceptions #########################################################
+
+class CubicWebException(Exception):
+ """base class for cubicweb server exception"""
+ msg = ""
+ def __unicode__(self):
+ if self.msg:
+ if self.args:
+ return self.msg % tuple(self.args)
+ else:
+ return self.msg
+ else:
+ return u' '.join(text_type(arg) for arg in self.args)
+ __str__ = __unicode__ if PY3 else lambda self: self.__unicode__().encode('utf-8')
+
+class ConfigurationError(CubicWebException):
+ """a misconfiguration error"""
+
+class InternalError(CubicWebException):
+ """base class for exceptions which should not occur"""
+
+class SecurityError(CubicWebException):
+ """base class for cubicweb server security exceptions"""
+
+class RepositoryError(CubicWebException):
+ """base class for repository exceptions"""
+
+class SourceException(CubicWebException):
+ """base class for source exceptions"""
+
+class CubicWebRuntimeError(CubicWebException):
+ """base class for runtime exceptions"""
+
+# repository exceptions #######################################################
+
+class ConnectionError(RepositoryError):
+ """raised when a bad connection id is given or when an attempt to establish
+ a connection failed
+ """
+
+class AuthenticationError(ConnectionError):
+ """raised when an attempt to establish a connection failed due to wrong
+ connection information (login / password or other authentication token)
+ """
+
+class BadConnectionId(ConnectionError):
+ """raised when a bad connection id is given"""
+
+class UnknownEid(RepositoryError):
+ """the eid is not defined in the system tables"""
+ msg = 'No entity with eid %s in the repository'
+
+class UniqueTogetherError(RepositoryError):
+ """raised when a unique_together constraint caused an IntegrityError"""
+ def __init__(self, session, **kwargs):
+ self.session = session
+ assert 'rtypes' in kwargs or 'cstrname' in kwargs
+ self.kwargs = kwargs
+ # fill cache while the session is open
+ self.rtypes
+
+ @cachedproperty
+ def rtypes(self):
+ if 'rtypes' in self.kwargs:
+ return self.kwargs['rtypes']
+ cstrname = unicode(self.kwargs['cstrname'])
+ cstr = self.session.find('CWUniqueTogetherConstraint', name=cstrname).one()
+ return sorted(rtype.name for rtype in cstr.relations)
+
+ @cachedproperty
+ def args(self):
+ warn('[3.18] UniqueTogetherError.args is deprecated, just use '
+ 'the .rtypes accessor.',
+ DeprecationWarning)
+ # the first argument, etype, is never used and was never garanteed anyway
+ return None, self.rtypes
+
+
+class ViolatedConstraint(RepositoryError):
+ def __init__(self, cnx, cstrname):
+ self.cnx = cnx
+ self.cstrname = cstrname
+
+
+# security exceptions #########################################################
+
+class Unauthorized(SecurityError):
+ """raised when a user tries to perform an action without sufficient
+ credentials
+ """
+ msg = 'You are not allowed to perform this operation'
+ msg1 = 'You are not allowed to perform %s operation on %s'
+ var = None
+
+ def __str__(self):
+ try:
+ if self.args and len(self.args) == 2:
+ return self.msg1 % self.args
+ if self.args:
+ return ' '.join(self.args)
+ return self.msg
+ except Exception as ex:
+ return str(ex)
+
+class Forbidden(SecurityError):
+ """raised when a user tries to perform a forbidden action
+ """
+
+# source exceptions ###########################################################
+
+class EidNotInSource(SourceException):
+ """trying to access an object with a particular eid from a particular
+ source has failed
+ """
+ msg = 'No entity with eid %s in %s'
+
+
+# registry exceptions #########################################################
+
+# pre 3.15 bw compat
+from logilab.common.registry import RegistryException, ObjectNotFound, NoSelectableObject
+
+class UnknownProperty(RegistryException):
+ """property found in database but unknown in registry"""
+
+# query exception #############################################################
+
+class QueryError(CubicWebRuntimeError):
+ """a query try to do something it shouldn't"""
+
+class NotAnEntity(CubicWebRuntimeError):
+ """raised when get_entity is called for a column which doesn't contain
+ a non final entity
+ """
+
+class MultipleResultsError(CubicWebRuntimeError):
+ """raised when ResultSet.one() is called on a resultset with multiple rows
+ of multiple columns.
+ """
+
+class NoResultError(CubicWebRuntimeError):
+ """raised when no result is found but at least one is expected.
+ """
+
+class UndoTransactionException(QueryError):
+ """Raised when undoing a transaction could not be performed completely.
+
+ Note that :
+ 1) the partial undo operation might be acceptable
+ depending upon the final application
+
+ 2) the undo operation can also fail with a `ValidationError` in
+ cases where the undoing breaks integrity constraints checked
+ immediately.
+
+ 3) It might be that neither of those exception is raised but a
+ subsequent `commit` might raise a `ValidationError` in cases
+ where the undoing breaks integrity constraints checked at
+ commit time.
+
+ :type txuuix: int
+ :param txuuid: Unique identifier of the partially undone transaction
+
+ :type errors: list
+ :param errors: List of errors occurred during undoing
+ """
+ msg = u"The following error(s) occurred while undoing transaction #%d : %s"
+
+ def __init__(self, txuuid, errors):
+ super(UndoTransactionException, self).__init__(txuuid, errors)
+ self.txuuid = txuuid
+ self.errors = errors
+
+# tools exceptions ############################################################
+
+class ExecutionError(Exception):
+ """server execution control error (already started, not running...)"""
+
+# pylint: disable=W0611
+from logilab.common.clcommands import BadCommandUsage
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/_gcdebug.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/_gcdebug.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,112 @@
+# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see .
+from __future__ import print_function
+
+import gc, types, weakref
+
+from cubicweb.schema import CubicWebRelationSchema, CubicWebEntitySchema
+try:
+ from cubicweb.web.request import _NeedAuthAccessMock
+except ImportError:
+ _NeedAuthAccessMock = None
+
+listiterator = type(iter([]))
+
+IGNORE_CLASSES = (
+ type, tuple, dict, list, set, frozenset, type(len),
+ weakref.ref, weakref.WeakKeyDictionary,
+ listiterator,
+ property, classmethod,
+ types.ModuleType, types.FunctionType, types.MethodType,
+ types.MemberDescriptorType, types.GetSetDescriptorType,
+ )
+if _NeedAuthAccessMock is not None:
+ IGNORE_CLASSES = IGNORE_CLASSES + (_NeedAuthAccessMock,)
+
+def _get_counted_class(obj, classes):
+ for cls in classes:
+ if isinstance(obj, cls):
+ return cls
+ raise AssertionError()
+
+def gc_info(countclasses,
+ ignoreclasses=IGNORE_CLASSES,
+ viewreferrersclasses=(), showobjs=False, maxlevel=1):
+ gc.collect()
+ gc.collect()
+ counters = {}
+ ocounters = {}
+ for obj in gc.get_objects():
+ if isinstance(obj, countclasses):
+ cls = _get_counted_class(obj, countclasses)
+ try:
+ counters[cls.__name__] += 1
+ except KeyError:
+ counters[cls.__name__] = 1
+ elif not isinstance(obj, ignoreclasses):
+ try:
+ key = '%s.%s' % (obj.__class__.__module__,
+ obj.__class__.__name__)
+ except AttributeError:
+ key = str(obj)
+ try:
+ ocounters[key] += 1
+ except KeyError:
+ ocounters[key] = 1
+ if isinstance(obj, viewreferrersclasses):
+ print(' ', obj, referrers(obj, showobjs, maxlevel))
+ garbage = [repr(obj) for obj in gc.garbage]
+ return counters, ocounters, garbage
+
+
+def referrers(obj, showobj=False, maxlevel=1):
+ objreferrers = _referrers(obj, maxlevel)
+ try:
+ return sorted(set((type(x), showobj and x or getattr(x, '__name__', '%#x' % id(x)))
+ for x in objreferrers))
+ except TypeError:
+ s = set()
+ unhashable = []
+ for x in objreferrers:
+ try:
+ s.add(x)
+ except TypeError:
+ unhashable.append(x)
+ return sorted(s) + unhashable
+
+def _referrers(obj, maxlevel, _seen=None, _level=0):
+ interesting = []
+ if _seen is None:
+ _seen = set()
+ for x in gc.get_referrers(obj):
+ if id(x) in _seen:
+ continue
+ _seen.add(id(x))
+ if isinstance(x, types.FrameType):
+ continue
+ if isinstance(x, (CubicWebRelationSchema, CubicWebEntitySchema)):
+ continue
+ if isinstance(x, (list, tuple, set, dict, listiterator)):
+ if _level >= maxlevel:
+ pass
+ #interesting.append(x)
+ else:
+ interesting += _referrers(x, maxlevel, _seen, _level+1)
+ else:
+ interesting.append(x)
+ return interesting
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/appobject.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/appobject.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,161 @@
+# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see .
+"""
+
+The `AppObject` class
+---------------------
+
+The AppObject class is the base class for all dynamically loaded objects
+(application objects) accessible through the vregistry.
+
+We can find a certain number of attributes and methods defined in this class and
+common to all the application objects.
+
+"""
+__docformat__ = "restructuredtext en"
+
+from logging import getLogger
+
+from logilab.common.deprecation import deprecated, class_renamed
+from logilab.common.logging_ext import set_log_methods
+
+# first line imports for bw compat
+from logilab.common.registry import (objectify_predicate, traced_selection, Predicate,
+ RegistrableObject, yes)
+
+
+objectify_selector = deprecated('[3.15] objectify_selector has been '
+ 'renamed to objectify_predicates in '
+ 'logilab.common.registry')(objectify_predicate)
+traced_selection = deprecated('[3.15] traced_selection has been '
+ 'moved to logilab.common.registry')(traced_selection)
+Selector = class_renamed('Selector', Predicate,
+ '[3.15] Selector has been renamed to Predicate '
+ 'in logilab.common.registry')
+
+@deprecated('[3.15] lltrace decorator can now be removed')
+def lltrace(func):
+ return func
+
+# the base class for all appobjects ############################################
+
+class AppObject(RegistrableObject):
+ """This is the base class for CubicWeb application objects which are
+ selected in a request context.
+
+ The following attributes should be set on concrete appobject classes:
+
+ At selection time, the following attributes are set on the instance:
+
+ :attr:`_cw`
+ current request
+ :attr:`cw_extra_kwargs`
+ other received arguments
+
+ And also the following, only if `rset` is found in arguments (in which case
+ rset/row/col will be removed from `cwextra_kwargs`):
+
+ :attr:`cw_rset`
+ context result set or None
+
+ :attr:`cw_row`
+ if a result set is set and the context is about a particular cell in the
+ result set, and not the result set as a whole, specify the row number we
+ are interested in, else None
+
+ :attr:`cw_col`
+ if a result set is set and the context is about a particular cell in the
+ result set, and not the result set as a whole, specify the col number we
+ are interested in, else None
+
+
+ .. Note::
+
+ * do not inherit directly from this class but from a more specific class
+ such as `AnyEntity`, `EntityView`, `AnyRsetView`, `Action`...
+
+ """
+ __select__ = yes()
+
+ @classmethod
+ def __registered__(cls, registry):
+ """called by the registry when the appobject has been registered.
+
+ It must return the object that will be actually registered (this may be
+ the right hook to create an instance for example). By default the
+ appobject is returned without any transformation.
+ """
+ pdefs = getattr(cls, 'cw_property_defs', {})
+ for propid, pdef in pdefs.items():
+ pdef = pdef.copy() # may be shared
+ pdef['default'] = getattr(cls, propid, pdef['default'])
+ pdef['sitewide'] = getattr(cls, 'site_wide', pdef.get('sitewide'))
+ registry.vreg.register_property(cls._cwpropkey(propid), **pdef)
+ assert callable(cls.__select__), cls
+ return cls
+
+ def __init__(self, req, **extra):
+ super(AppObject, self).__init__()
+ self._cw = req
+ try:
+ self.cw_rset = extra.pop('rset')
+ self.cw_row = extra.pop('row', None)
+ self.cw_col = extra.pop('col', None)
+ except KeyError:
+ pass
+ self.cw_extra_kwargs = extra
+
+ # persistent class properties ##############################################
+ #
+ # optional `cw_property_defs` dict on a class defines available persistent
+ # properties for this class:
+ #
+ # * key: id of the property (the actual CWProperty key is build using
+ # ..
+ # * value: tuple (property type, vocabfunc, default value, property description)
+ # possible types are those used by `logilab.common.configuration`
+ #
+ # notice that when it exists multiple objects with the same id (adaptation,
+ # overriding) only the first encountered definition is considered, so those
+ # objects can't try to have different default values for instance.
+ #
+ # you can then access to a property value using self.cw_propval, where self
+ # is an instance of class
+
+ @classmethod
+ def _cwpropkey(cls, propid):
+ """return cw property key for the property of the given id for this
+ class
+ """
+ return '%s.%s.%s' % (cls.__registry__, cls.__regid__, propid)
+
+ def cw_propval(self, propid):
+ """return cw property value associated to key
+
+ ..
+ """
+ return self._cw.property_value(self._cwpropkey(propid))
+
+ # these are overridden by set_log_methods below
+ # only defining here to prevent pylint from complaining
+ info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
+
+set_log_methods(AppObject, getLogger('cubicweb.appobject'))
+
+# defined here to avoid warning on usage on the AppObject class
+yes = deprecated('[3.15] yes has been moved to logilab.common.registry')(yes)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/crypto.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/crypto.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,47 @@
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see .
+"""Simple cryptographic routines, based on python-crypto."""
+__docformat__ = "restructuredtext en"
+
+from base64 import b64encode, b64decode
+
+from six.moves import cPickle as pickle
+
+from Crypto.Cipher import Blowfish
+
+
+_CYPHERERS = {}
+def _cypherer(seed):
+ try:
+ return _CYPHERERS[seed]
+ except KeyError:
+ _CYPHERERS[seed] = Blowfish.new(seed, Blowfish.MODE_ECB)
+ return _CYPHERERS[seed]
+
+
+def encrypt(data, seed):
+ string = pickle.dumps(data)
+ string = string + '*' * (8 - len(string) % 8)
+ string = b64encode(_cypherer(seed).encrypt(string))
+ return unicode(string)
+
+
+def decrypt(string, seed):
+ # pickle ignores trailing characters so we do not need to strip them off
+ string = _cypherer(seed).decrypt(b64decode(string))
+ return pickle.loads(string)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/cwconfig.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/cwconfig.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1346 @@
+# -*- coding: utf-8 -*-
+# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""
+.. _ResourceMode:
+
+Resource mode
+-------------
+
+Standard resource mode
+```````````````````````````
+
+A resource *mode* is a predefined set of settings for various resources
+directories, such as cubes, instances, etc. to ease development with the
+framework. There are two running modes with *CubicWeb*:
+
+* **system**: resources are searched / created in the system directories (eg
+ usually requiring root access):
+
+ - instances are stored in :file:`<INSTALL_PREFIX>/etc/cubicweb.d`
+ - temporary files (such as pid file) in :file:`<INSTALL_PREFIX>/var/run/cubicweb`
+
+ where `<INSTALL_PREFIX>` is the detected installation prefix ('/usr/local' for
+ instance).
+
+* **user**: resources are searched / created in the user home directory:
+
+ - instances are stored in :file:`~/etc/cubicweb.d`
+ - temporary files (such as pid file) in :file:`/tmp`
+
+
+
+
+.. _CubicwebWithinVirtualEnv:
+
+Within virtual environment
+```````````````````````````
+
+If you are not the administrator of your machine or if you need to play with some
+specific version of |cubicweb| you can use `virtualenv`_ a tool to create
+isolated Python environments.
+
+- instances are stored in :file:`<VIRTUAL_ENV>/etc/cubicweb.d`
+- temporary files (such as pid file) in :file:`<VIRTUAL_ENV>/var/run/cubicweb`
+
+.. _`virtualenv`: http://pypi.python.org/pypi/virtualenv
+
+Custom resource location
+````````````````````````````````
+
+Notice that each resource path may be explicitly set using an environment
+variable if the default doesn't suit your needs. Here are the default resource
+directories that are affected according to mode:
+
+* **system**: ::
+
+ CW_INSTANCES_DIR = /etc/cubicweb.d/
+ CW_INSTANCES_DATA_DIR = /var/lib/cubicweb/instances/
+ CW_RUNTIME_DIR = /var/run/cubicweb/
+
+* **user**: ::
+
+ CW_INSTANCES_DIR = ~/etc/cubicweb.d/
+ CW_INSTANCES_DATA_DIR = ~/etc/cubicweb.d/
+ CW_RUNTIME_DIR = /tmp
+
+Cubes search path is also affected, see the :ref:`Cube` section.
+
+Setting Cubicweb Mode
+`````````````````````
+
+By default, the mode is set to 'system' for standard installation. The mode is
+set to 'user' if `cubicweb is used from a mercurial repository`_. You can force
+this by setting the :envvar:`CW_MODE` environment variable to either 'user' or
+'system' so you can easily:
+
+* use system wide installation but user specific instances and all, without root
+ privileges on the system (`export CW_MODE=user`)
+
+* use local checkout of cubicweb on system wide instances (requires root
+ privileges on the system) (`export CW_MODE=system`)
+
+If you've a doubt about the mode you're currently running, check the first line
+output by the :command:`cubicweb-ctl list` command.
+
+.. _`cubicweb is used from a mercurial repository`: CubicwebDevelopmentMod_
+
+.. _CubicwebDevelopmentMod:
+
+Development Mode
+`````````````````````
+If a :file:`.hg` directory is found in the cubicweb package, there are specific resource rules.
+
+`<CW_SOFTWARE_ROOT>` is the source checkout's ``cubicweb`` directory:
+
+* main cubes directory is `<CW_SOFTWARE_ROOT>/../../cubes`. You can specify
+ another one with :envvar:`CW_CUBES_DIR` environment variable or simply
+ add some other directories by using :envvar:`CW_CUBES_PATH`
+
+* cubicweb migration files are searched in `<CW_SOFTWARE_ROOT>/misc/migration`
+ instead of `<INSTALL_PREFIX>/share/cubicweb/migration/`.
+
+
+.. _ConfigurationEnv:
+
+Environment configuration
+-------------------------
+
+Python
+``````
+
+If you installed *CubicWeb* by cloning the Mercurial shell repository or from source
+distribution, then you will need to update the environment variable PYTHONPATH by
+adding the path to `cubicweb`:
+
+Add the following lines to either :file:`.bashrc` or :file:`.bash_profile` to
+configure your development environment ::
+
+ export PYTHONPATH=/full/path/to/grshell-cubicweb
+
+If you installed *CubicWeb* with packages, no configuration is required and your
+new cubes will be placed in `/usr/share/cubicweb/cubes` and your instances will
+be placed in `/etc/cubicweb.d`.
+
+
+CubicWeb
+````````
+
+Here are all environment variables that may be used to configure *CubicWeb*:
+
+.. envvar:: CW_MODE
+
+ Resource mode: user or system, as explained in :ref:`ResourceMode`.
+
+.. envvar:: CW_CUBES_PATH
+
+ Augments the default search path for cubes. You may specify several
+ directories using ':' as separator (';' under windows environment).
+
+.. envvar:: CW_INSTANCES_DIR
+
+ Directory where cubicweb instances will be found.
+
+.. envvar:: CW_INSTANCES_DATA_DIR
+
+ Directory where cubicweb instances data will be written (backup file...)
+
+.. envvar:: CW_RUNTIME_DIR
+
+ Directory where pid files will be written
+"""
+from __future__ import print_function
+
+__docformat__ = "restructuredtext en"
+
+import sys
+import os
+import stat
+import logging
+import logging.config
+from smtplib import SMTP
+from threading import Lock
+from os.path import (exists, join, expanduser, abspath, normpath,
+ basename, isdir, dirname, splitext)
+from warnings import warn, filterwarnings
+
+from six import text_type
+
+from logilab.common.decorators import cached, classproperty
+from logilab.common.deprecation import deprecated
+from logilab.common.logging_ext import set_log_methods, init_log
+from logilab.common.configuration import (Configuration, Method,
+ ConfigurationMixIn, merge_options)
+
+from cubicweb import (CW_SOFTWARE_ROOT, CW_MIGRATION_MAP,
+ ConfigurationError, Binary, _)
+from cubicweb.toolsutils import create_dir
+
+CONFIGURATIONS = []
+
+SMTP_LOCK = Lock()
+
+
+def configuration_cls(name):
+ """return the configuration class registered with the given name"""
+ try:
+ return [c for c in CONFIGURATIONS if c.name == name][0]
+ except IndexError:
+ raise ConfigurationError('no such config %r (check it exists with "cubicweb-ctl list")' % name)
+
+def possible_configurations(directory):
+ """return a list of installed configurations in a directory
+ according to \*-ctl files
+ """
+ return [name for name in ('repository', 'all-in-one')
+ if exists(join(directory, '%s.conf' % name))]
+
+def guess_configuration(directory):
+ """try to guess the configuration to use for a directory. If multiple
+ configurations are found, ConfigurationError is raised
+ """
+ modes = possible_configurations(directory)
+ if len(modes) != 1:
+ raise ConfigurationError('unable to guess configuration from %r %s'
+ % (directory, modes))
+ return modes[0]
+
+def _find_prefix(start_path=CW_SOFTWARE_ROOT):
+ """Runs along the parent directories of *start_path* (default to cubicweb source directory)
+ looking for one containing a 'share/cubicweb' directory.
+ The first matching directory is assumed as the prefix installation of cubicweb
+
+ Returns the matching prefix or None.
+ """
+ prefix = start_path
+ old_prefix = None
+ if not isdir(start_path):
+ prefix = dirname(start_path)
+ while (not isdir(join(prefix, 'share', 'cubicweb'))
+ or prefix.endswith('.egg')) and prefix != old_prefix:
+ old_prefix = prefix
+ prefix = dirname(prefix)
+ if isdir(join(prefix, 'share', 'cubicweb')):
+ return prefix
+ return sys.prefix
+
+# persistent options definition
+PERSISTENT_OPTIONS = (
+ ('encoding',
+ {'type' : 'string',
+ 'default': 'UTF-8',
+ 'help': _('user interface encoding'),
+ 'group': 'ui', 'sitewide': True,
+ }),
+ ('language',
+ {'type' : 'string',
+ 'default': 'en',
+ 'vocabulary': Method('available_languages'),
+ 'help': _('language of the user interface'),
+ 'group': 'ui',
+ }),
+ ('date-format',
+ {'type' : 'string',
+ 'default': '%Y/%m/%d',
+ 'help': _('how to format date in the ui (see this page for format description)'),
+ 'group': 'ui',
+ }),
+ ('datetime-format',
+ {'type' : 'string',
+ 'default': '%Y/%m/%d %H:%M',
+ 'help': _('how to format date and time in the ui (see this page for format description)'),
+ 'group': 'ui',
+ }),
+ ('time-format',
+ {'type' : 'string',
+ 'default': '%H:%M',
+ 'help': _('how to format time in the ui (see this page for format description)'),
+ 'group': 'ui',
+ }),
+ ('float-format',
+ {'type' : 'string',
+ 'default': '%.3f',
+ 'help': _('how to format float numbers in the ui'),
+ 'group': 'ui',
+ }),
+ ('default-text-format',
+ {'type' : 'choice',
+ 'choices': ('text/plain', 'text/rest', 'text/html', 'text/markdown'),
+ 'default': 'text/plain',
+ 'help': _('default text format for rich text fields.'),
+ 'group': 'ui',
+ }),
+ ('short-line-size',
+ {'type' : 'int',
+ 'default': 80,
+ 'help': _('maximum number of characters in short description'),
+ 'group': 'navigation',
+ }),
+ )
+
+def register_persistent_options(options):
+ global PERSISTENT_OPTIONS
+ PERSISTENT_OPTIONS = merge_options(PERSISTENT_OPTIONS + options)
+
+CFGTYPE2ETYPE_MAP = {
+ 'string': 'String',
+ 'choice': 'String',
+ 'yn': 'Boolean',
+ 'int': 'Int',
+ 'float' : 'Float',
+ }
+
+_forced_mode = os.environ.get('CW_MODE')
+assert _forced_mode in (None, 'system', 'user')
+
+# CWDEV tells whether directories such as i18n/, web/data/, etc. (ie containing
+# some other resources than python libraries) are located with the python code
+# or as a 'shared' cube
+CWDEV = exists(join(CW_SOFTWARE_ROOT, 'i18n'))
+
+try:
+ _INSTALL_PREFIX = os.environ['CW_INSTALL_PREFIX']
+except KeyError:
+ _INSTALL_PREFIX = _find_prefix()
+_USR_INSTALL = _INSTALL_PREFIX == '/usr'
+
+class CubicWebNoAppConfiguration(ConfigurationMixIn):
+ """base class for cubicweb configuration without a specific instance directory
+ """
+ # to set in concrete configuration
+ name = None
+ # log messages format (see logging module documentation for available keys)
+ log_format = '%(asctime)s - (%(name)s) %(levelname)s: %(message)s'
+ # the format below can be useful to debug multi thread issues:
+ # log_format = '%(asctime)s - [%(threadName)s] (%(name)s) %(levelname)s: %(message)s'
+ # nor remove appobjects based on unused interface [???]
+ cleanup_unused_appobjects = True
+
+ quick_start = False
+
+ if (CWDEV and _forced_mode != 'system'):
+ mode = 'user'
+ _CUBES_DIR = join(CW_SOFTWARE_ROOT, '../../cubes')
+ else:
+ mode = _forced_mode or 'system'
+ _CUBES_DIR = join(_INSTALL_PREFIX, 'share', 'cubicweb', 'cubes')
+
+ CUBES_DIR = abspath(os.environ.get('CW_CUBES_DIR', _CUBES_DIR))
+ CUBES_PATH = os.environ.get('CW_CUBES_PATH', '').split(os.pathsep)
+
+ options = (
+ ('log-threshold',
+ {'type' : 'string', # XXX use a dedicated type?
+ 'default': 'WARNING',
+ 'help': 'server\'s log level',
+ 'group': 'main', 'level': 1,
+ }),
+ ('umask',
+ {'type' : 'int',
+ 'default': 0o077,
+ 'help': 'permission umask for files created by the server',
+ 'group': 'main', 'level': 2,
+ }),
+ # common configuration options which are potentially required as soon as
+ # you're using "base" application objects (ie to really server/web
+ # specific)
+ ('base-url',
+ {'type' : 'string',
+ 'default': None,
+ 'help': 'web server root url',
+ 'group': 'main', 'level': 1,
+ }),
+ ('allow-email-login',
+ {'type' : 'yn',
+ 'default': False,
+ 'help': 'allow users to login with their primary email if set',
+ 'group': 'main', 'level': 2,
+ }),
+ ('mangle-emails',
+ {'type' : 'yn',
+ 'default': False,
+ 'help': "don't display actual email addresses but mangle them if \
+this option is set to yes",
+ 'group': 'email', 'level': 3,
+ }),
+ )
+ # static and class methods used to get instance independant resources ##
+ @staticmethod
+ def cubicweb_version():
+ """return installed cubicweb version"""
+ from logilab.common.changelog import Version
+ from cubicweb import __pkginfo__
+ version = __pkginfo__.numversion
+ assert len(version) == 3, version
+ return Version(version)
+
+ @staticmethod
+ def persistent_options_configuration():
+ return Configuration(options=PERSISTENT_OPTIONS)
+
+ @classmethod
+ def shared_dir(cls):
+ """return the shared data directory (i.e. directory where standard
+ library views and data may be found)
+ """
+ if CWDEV:
+ return join(CW_SOFTWARE_ROOT, 'web')
+ return cls.cube_dir('shared')
+
+ @classmethod
+ def i18n_lib_dir(cls):
+ """return instance's i18n directory"""
+ if CWDEV:
+ return join(CW_SOFTWARE_ROOT, 'i18n')
+ return join(cls.shared_dir(), 'i18n')
+
+ @classmethod
+ def cw_languages(cls):
+ for fname in os.listdir(join(cls.i18n_lib_dir())):
+ if fname.endswith('.po'):
+ yield splitext(fname)[0]
+
+
+ @classmethod
+ def available_cubes(cls):
+ import re
+ cubes = set()
+ for directory in cls.cubes_search_path():
+ if not exists(directory):
+ cls.error('unexistant directory in cubes search path: %s'
+ % directory)
+ continue
+ for cube in os.listdir(directory):
+ if cube == 'shared':
+ continue
+ if not re.match('[_A-Za-z][_A-Za-z0-9]*$', cube):
+ continue # skip invalid python package name
+ cubedir = join(directory, cube)
+ if isdir(cubedir) and exists(join(cubedir, '__init__.py')):
+ cubes.add(cube)
+ return sorted(cubes)
+
+ @classmethod
+ def cubes_search_path(cls):
+ """return the path of directories where cubes should be searched"""
+ path = [abspath(normpath(directory)) for directory in cls.CUBES_PATH
+ if directory.strip() and exists(directory.strip())]
+ if not cls.CUBES_DIR in path and exists(cls.CUBES_DIR):
+ path.append(cls.CUBES_DIR)
+ return path
+
+ @classproperty
+ def extrapath(cls):
+ extrapath = {}
+ for cubesdir in cls.cubes_search_path():
+ if cubesdir != cls.CUBES_DIR:
+ extrapath[cubesdir] = 'cubes'
+ return extrapath
+
+ @classmethod
+ def cube_dir(cls, cube):
+ """return the cube directory for the given cube id, raise
+ `ConfigurationError` if it doesn't exist
+ """
+ for directory in cls.cubes_search_path():
+ cubedir = join(directory, cube)
+ if exists(cubedir):
+ return cubedir
+ raise ConfigurationError('no cube %r in %s' % (
+ cube, cls.cubes_search_path()))
+
+ @classmethod
+ def cube_migration_scripts_dir(cls, cube):
+ """cube migration scripts directory"""
+ return join(cls.cube_dir(cube), 'migration')
+
+ @classmethod
+ def cube_pkginfo(cls, cube):
+ """return the information module for the given cube"""
+ cube = CW_MIGRATION_MAP.get(cube, cube)
+ try:
+ parent = __import__('cubes.%s.__pkginfo__' % cube)
+ return getattr(parent, cube).__pkginfo__
+ except Exception as ex:
+ raise ConfigurationError(
+ 'unable to find packaging information for cube %s (%s: %s)'
+ % (cube, ex.__class__.__name__, ex))
+
+ @classmethod
+ def cube_version(cls, cube):
+ """return the version of the cube located in the given directory
+ """
+ from logilab.common.changelog import Version
+ version = cls.cube_pkginfo(cube).numversion
+ assert len(version) == 3, version
+ return Version(version)
+
+ @classmethod
+ def _cube_deps(cls, cube, key, oldkey):
+ """return cubicweb cubes used by the given cube"""
+ pkginfo = cls.cube_pkginfo(cube)
+ try:
+ # explicit __xxx_cubes__ attribute
+ deps = getattr(pkginfo, key)
+ except AttributeError:
+ # deduce cubes from generic __xxx__ attribute
+ try:
+ gendeps = getattr(pkginfo, key.replace('_cubes', ''))
+ except AttributeError:
+ deps = {}
+ else:
+ deps = dict( (x[len('cubicweb-'):], v)
+ for x, v in gendeps.items()
+ if x.startswith('cubicweb-'))
+ for depcube in deps:
+ try:
+ newname = CW_MIGRATION_MAP[depcube]
+ except KeyError:
+ pass
+ else:
+ deps[newname] = deps.pop(depcube)
+ return deps
+
+ @classmethod
+ def cube_depends_cubicweb_version(cls, cube):
+ # XXX no backward compat (see _cube_deps above)
+ try:
+ pkginfo = cls.cube_pkginfo(cube)
+ deps = getattr(pkginfo, '__depends__')
+ return deps.get('cubicweb')
+ except AttributeError:
+ return None
+
+ @classmethod
+ def cube_dependencies(cls, cube):
+ """return cubicweb cubes used by the given cube"""
+ return cls._cube_deps(cube, '__depends_cubes__', '__use__')
+
+ @classmethod
+ def cube_recommends(cls, cube):
+ """return cubicweb cubes recommended by the given cube"""
+ return cls._cube_deps(cube, '__recommends_cubes__', '__recommend__')
+
+ @classmethod
+ def expand_cubes(cls, cubes, with_recommends=False):
+ """expand the given list of top level cubes used by adding recursivly
+ each cube dependencies
+ """
+ cubes = list(cubes)
+ todo = cubes[:]
+ if with_recommends:
+ available = set(cls.available_cubes())
+ while todo:
+ cube = todo.pop(0)
+ for depcube in cls.cube_dependencies(cube):
+ if depcube not in cubes:
+ cubes.append(depcube)
+ todo.append(depcube)
+ if with_recommends:
+ for depcube in cls.cube_recommends(cube):
+ if depcube not in cubes and depcube in available:
+ cubes.append(depcube)
+ todo.append(depcube)
+ return cubes
+
+ @classmethod
+ def reorder_cubes(cls, cubes):
+ """reorder cubes from the top level cubes to inner dependencies
+ cubes
+ """
+ from logilab.common.graph import ordered_nodes, UnorderableGraph
+ graph = {}
+ for cube in cubes:
+ cube = CW_MIGRATION_MAP.get(cube, cube)
+ graph[cube] = set(dep for dep in cls.cube_dependencies(cube)
+ if dep in cubes)
+ graph[cube] |= set(dep for dep in cls.cube_recommends(cube)
+ if dep in cubes)
+ try:
+ return ordered_nodes(graph)
+ except UnorderableGraph as ex:
+ raise ConfigurationError(ex)
+
+ @classmethod
+ def cls_adjust_sys_path(cls):
+ """update python path if necessary"""
+ cubes_parent_dir = normpath(join(cls.CUBES_DIR, '..'))
+ if not cubes_parent_dir in sys.path:
+ sys.path.insert(0, cubes_parent_dir)
+ try:
+ import cubes
+ cubes.__path__ = cls.cubes_search_path()
+ except ImportError:
+ return # cubes dir doesn't exists
+
+ @classmethod
+ def load_available_configs(cls):
+ from logilab.common.modutils import load_module_from_file
+ for conffile in ('web/webconfig.py', 'etwist/twconfig.py',
+ 'server/serverconfig.py',):
+ if exists(join(CW_SOFTWARE_ROOT, conffile)):
+ load_module_from_file(join(CW_SOFTWARE_ROOT, conffile))
+
+ @classmethod
+ def load_cwctl_plugins(cls):
+ from logilab.common.modutils import load_module_from_file
+ cls.cls_adjust_sys_path()
+ for ctlfile in ('web/webctl.py', 'etwist/twctl.py',
+ 'server/serverctl.py',
+ 'devtools/devctl.py', 'goa/goactl.py'):
+ if exists(join(CW_SOFTWARE_ROOT, ctlfile)):
+ try:
+ load_module_from_file(join(CW_SOFTWARE_ROOT, ctlfile))
+ except ImportError as err:
+ cls.error('could not import the command provider %s: %s',
+ ctlfile, err)
+ cls.info('loaded cubicweb-ctl plugin %s', ctlfile)
+ for cube in cls.available_cubes():
+ pluginfile = join(cls.cube_dir(cube), 'ccplugin.py')
+ initfile = join(cls.cube_dir(cube), '__init__.py')
+ if exists(pluginfile):
+ try:
+ __import__('cubes.%s.ccplugin' % cube)
+ cls.info('loaded cubicweb-ctl plugin from %s', cube)
+ except Exception:
+ cls.exception('while loading plugin %s', pluginfile)
+ elif exists(initfile):
+ try:
+ __import__('cubes.%s' % cube)
+ except Exception:
+ cls.exception('while loading cube %s', cube)
+ else:
+ cls.warning('no __init__ file in cube %s', cube)
+
+ @classmethod
+ def init_available_cubes(cls):
+ """cubes may register some sources (svnfile for instance) in their
+ __init__ file, so they should be loaded early in the startup process
+ """
+ for cube in cls.available_cubes():
+ try:
+ __import__('cubes.%s' % cube)
+ except Exception as ex:
+ cls.warning("can't init cube %s: %s", cube, ex)
+
+ cubicweb_appobject_path = set(['entities'])
+ cube_appobject_path = set(['entities'])
+
+ def __init__(self, debugmode=False):
+ if debugmode:
+ # in python 2.7, DeprecationWarning are not shown anymore by default
+ filterwarnings('default', category=DeprecationWarning)
+ register_stored_procedures()
+ self._cubes = None
+ super(CubicWebNoAppConfiguration, self).__init__()
+ self.debugmode = debugmode
+ self.adjust_sys_path()
+ self.load_defaults()
+ # will be properly initialized later by _gettext_init
+ self.translations = {'en': (text_type, lambda ctx, msgid: text_type(msgid) )}
+ self._site_loaded = set()
+ # don't register ReStructured Text directives by simple import, avoid pb
+ # with eg sphinx.
+ # XXX should be done properly with a function from cw.uicfg
+ try:
+ from cubicweb.ext.rest import cw_rest_init
+ except ImportError:
+ pass
+ else:
+ cw_rest_init()
+
+ def adjust_sys_path(self):
+ # overriden in CubicWebConfiguration
+ self.cls_adjust_sys_path()
+
+ def init_log(self, logthreshold=None, logfile=None, syslog=False):
+ """init the log service"""
+ if logthreshold is None:
+ if self.debugmode:
+ logthreshold = 'DEBUG'
+ else:
+ logthreshold = self['log-threshold']
+ if sys.platform == 'win32':
+ # no logrotate on win32, so use logging rotation facilities
+ # for now, hard code weekly rotation every sunday, and 52 weeks kept
+ # idea: make this configurable?
+ init_log(self.debugmode, syslog, logthreshold, logfile, self.log_format,
+ rotation_parameters={'when': 'W6', # every sunday
+ 'interval': 1,
+ 'backupCount': 52})
+ else:
+ init_log(self.debugmode, syslog, logthreshold, logfile, self.log_format)
+ # configure simpleTal logger
+ logging.getLogger('simpleTAL').setLevel(logging.ERROR)
+
+ def appobjects_path(self):
+ """return a list of files or directories where the registry will look
+ for application objects. By default return nothing in NoApp config.
+ """
+ return []
+
+ def build_appobjects_path(self, templpath, evobjpath=None, tvobjpath=None):
+ """given a list of directories, return a list of sub files and
+ directories that should be loaded by the instance objects registry.
+
+ :param evobjpath:
+ optional list of sub-directories (or files without the .py ext) of
+ the cubicweb library that should be tested and added to the output list
+ if they exists. If not give, default to `cubicweb_appobject_path` class
+ attribute.
+ :param tvobjpath:
+ optional list of sub-directories (or files without the .py ext) of
+ directories given in `templpath` that should be tested and added to
+ the output list if they exists. If not give, default to
+ `cube_appobject_path` class attribute.
+ """
+ vregpath = self.build_appobjects_cubicweb_path(evobjpath)
+ vregpath += self.build_appobjects_cube_path(templpath, tvobjpath)
+ return vregpath
+
+ def build_appobjects_cubicweb_path(self, evobjpath=None):
+ vregpath = []
+ if evobjpath is None:
+ evobjpath = self.cubicweb_appobject_path
+ # NOTE: for the order, see http://www.cubicweb.org/ticket/2330799
+ # it is clearly a workaround
+ for subdir in sorted(evobjpath, key=lambda x:x != 'entities'):
+ path = join(CW_SOFTWARE_ROOT, subdir)
+ if exists(path):
+ vregpath.append(path)
+ return vregpath
+
+ def build_appobjects_cube_path(self, templpath, tvobjpath=None):
+ vregpath = []
+ if tvobjpath is None:
+ tvobjpath = self.cube_appobject_path
+ for directory in templpath:
+ # NOTE: for the order, see http://www.cubicweb.org/ticket/2330799
+ for subdir in sorted(tvobjpath, key=lambda x:x != 'entities'):
+ path = join(directory, subdir)
+ if exists(path):
+ vregpath.append(path)
+ elif exists(path + '.py'):
+ vregpath.append(path + '.py')
+ return vregpath
+
+ apphome = None
+
+ def load_site_cubicweb(self, paths=None):
+ """load instance's specific site_cubicweb file"""
+ if paths is None:
+ paths = self.cubes_path()
+ if self.apphome is not None:
+ paths = [self.apphome] + paths
+ for path in reversed(paths):
+ sitefile = join(path, 'site_cubicweb.py')
+ if exists(sitefile) and not sitefile in self._site_loaded:
+ self._load_site_cubicweb(sitefile)
+ self._site_loaded.add(sitefile)
+
+ def _load_site_cubicweb(self, sitefile):
+ # XXX extrapath argument to load_module_from_file only in lgc > 0.50.2
+ from logilab.common.modutils import load_module_from_modpath, modpath_from_file
+ module = load_module_from_modpath(modpath_from_file(sitefile, self.extrapath))
+ self.debug('%s loaded', sitefile)
+ return module
+
+ def cwproperty_definitions(self):
+ cfg = self.persistent_options_configuration()
+ for section, options in cfg.options_by_section():
+ section = section.lower()
+ for optname, optdict, value in options:
+ key = '%s.%s' % (section, optname)
+ type, vocab = self.map_option(optdict)
+ default = cfg.option_default(optname, optdict)
+ pdef = {'type': type, 'vocabulary': vocab, 'default': default,
+ 'help': optdict['help'],
+ 'sitewide': optdict.get('sitewide', False)}
+ yield key, pdef
+
+ def map_option(self, optdict):
+ try:
+ vocab = optdict['choices']
+ except KeyError:
+ vocab = optdict.get('vocabulary')
+ if isinstance(vocab, Method):
+ vocab = getattr(self, vocab.method, ())
+ return CFGTYPE2ETYPE_MAP[optdict['type']], vocab
+
+ def default_instance_id(self):
+ """return the instance identifier, useful for option which need this
+ as default value
+ """
+ return None
+
+ _cubes = None
+
+ def init_cubes(self, cubes):
+ self._cubes = self.reorder_cubes(cubes)
+ # load cubes'__init__.py file first
+ for cube in cubes:
+ __import__('cubes.%s' % cube)
+ self.load_site_cubicweb()
+
+ def cubes(self):
+ """return the list of cubes used by this instance
+
+ result is ordered from the top level cubes to inner dependencies
+ cubes
+ """
+ assert self._cubes is not None, 'cubes not initialized'
+ return self._cubes
+
+ def cubes_path(self):
+ """return the list of path to cubes used by this instance, from outer
+ most to inner most cubes
+ """
+ return [self.cube_dir(p) for p in self.cubes()]
+
+ # these are overridden by set_log_methods below
+ # only defining here to prevent pylint from complaining
+ @classmethod
+ def debug(cls, msg, *a, **kw):
+ pass
+ info = warning = error = critical = exception = debug
+
+
+class CubicWebConfiguration(CubicWebNoAppConfiguration):
+ """base class for cubicweb server and web configurations"""
+
+ if CubicWebNoAppConfiguration.mode == 'user':
+ _INSTANCES_DIR = expanduser('~/etc/cubicweb.d/')
+ #mode == system'
+ elif _USR_INSTALL:
+ _INSTANCES_DIR = '/etc/cubicweb.d/'
+ else:
+ _INSTANCES_DIR = join(_INSTALL_PREFIX, 'etc', 'cubicweb.d')
+
+ # set to true during repair (shell, migration) to allow some things which
+ # wouldn't be possible otherwise
+ repairing = False
+
+ # set by upgrade command
+ verbosity = 0
+ cmdline_options = None
+ options = CubicWebNoAppConfiguration.options + (
+ ('log-file',
+ {'type' : 'string',
+ 'default': Method('default_log_file'),
+ 'help': 'file where output logs should be written',
+ 'group': 'main', 'level': 2,
+ }),
+ ('statsd-endpoint',
+ {'type' : 'string',
+ 'default': '',
+ 'help': 'UDP address of the statsd endpoint; it must be formatted'
+ 'like :; disabled is unset.',
+ 'group': 'main', 'level': 2,
+ }),
+ # email configuration
+ ('smtp-host',
+ {'type' : 'string',
+ 'default': 'mail',
+ 'help': 'hostname of the SMTP mail server',
+ 'group': 'email', 'level': 1,
+ }),
+ ('smtp-port',
+ {'type' : 'int',
+ 'default': 25,
+ 'help': 'listening port of the SMTP mail server',
+ 'group': 'email', 'level': 1,
+ }),
+ ('sender-name',
+ {'type' : 'string',
+ 'default': Method('default_instance_id'),
+ 'help': 'name used as HELO name for outgoing emails from the \
+repository.',
+ 'group': 'email', 'level': 2,
+ }),
+ ('sender-addr',
+ {'type' : 'string',
+ 'default': 'cubicweb@mydomain.com',
+ 'help': 'email address used as HELO address for outgoing emails from \
+the repository',
+ 'group': 'email', 'level': 1,
+ }),
+ ('logstat-interval',
+ {'type' : 'int',
+ 'default': 0,
+ 'help': 'interval (in seconds) at which stats are dumped in the logstat file; set 0 to disable',
+ 'group': 'main', 'level': 2,
+ }),
+ ('logstat-file',
+ {'type' : 'string',
+ 'default': Method('default_stats_file'),
+ 'help': 'file where stats for the instance should be written',
+ 'group': 'main', 'level': 2,
+ }),
+ )
+
+ @classmethod
+ def instances_dir(cls):
+ """return the control directory"""
+ return abspath(os.environ.get('CW_INSTANCES_DIR', cls._INSTANCES_DIR))
+
+ @classmethod
+ def migration_scripts_dir(cls):
+ """cubicweb migration scripts directory"""
+ if CWDEV:
+ return join(CW_SOFTWARE_ROOT, 'misc', 'migration')
+ mdir = join(_INSTALL_PREFIX, 'share', 'cubicweb', 'migration')
+ if not exists(mdir):
+ raise ConfigurationError('migration path %s doesn\'t exist' % mdir)
+ return mdir
+
+ @classmethod
+ def config_for(cls, appid, config=None, debugmode=False, creating=False):
+ """return a configuration instance for the given instance identifier
+ """
+ cls.load_available_configs()
+ config = config or guess_configuration(cls.instance_home(appid))
+ configcls = configuration_cls(config)
+ return configcls(appid, debugmode, creating)
+
+ @classmethod
+ def possible_configurations(cls, appid):
+ """return the name of possible configurations for the given
+ instance id
+ """
+ home = cls.instance_home(appid)
+ return possible_configurations(home)
+
+ @classmethod
+ def instance_home(cls, appid):
+ """return the home directory of the instance with the given
+ instance id
+ """
+ home = join(cls.instances_dir(), appid)
+ if not exists(home):
+ raise ConfigurationError('no such instance %s (check it exists with'
+ ' "cubicweb-ctl list")' % appid)
+ return home
+
+ MODES = ('common', 'repository', 'Any')
+ MCOMPAT = {'all-in-one': MODES,
+ 'repository': ('common', 'repository', 'Any')}
+ @classmethod
+ def accept_mode(cls, mode):
+ #assert mode in cls.MODES, mode
+ return mode in cls.MCOMPAT[cls.name]
+
+ # default configuration methods ###########################################
+
+ def default_instance_id(self):
+ """return the instance identifier, useful for option which need this
+ as default value
+ """
+ return self.appid
+
+ def default_log_file(self):
+ """return default path to the log file of the instance'server"""
+ if self.mode == 'user':
+ import tempfile
+ basepath = join(tempfile.gettempdir(), '%s-%s' % (
+ basename(self.appid), self.name))
+ path = basepath + '.log'
+ i = 1
+ while exists(path) and i < 100: # arbitrary limit to avoid infinite loop
+ try:
+ open(path, 'a')
+ break
+ except IOError:
+ path = '%s-%s.log' % (basepath, i)
+ i += 1
+ return path
+ if _USR_INSTALL:
+ return '/var/log/cubicweb/%s-%s.log' % (self.appid, self.name)
+ else:
+ log_path = os.path.join(_INSTALL_PREFIX, 'var', 'log', 'cubicweb', '%s-%s.log')
+ return log_path % (self.appid, self.name)
+
+ def default_stats_file(self):
+ """return default path to the stats file of the instance'server"""
+ logfile = self.default_log_file()
+ if logfile.endswith('.log'):
+ logfile = logfile[:-4]
+ return logfile + '.stats'
+
+ def default_pid_file(self):
+ """return default path to the pid file of the instance'server"""
+ if self.mode == 'system':
+ if _USR_INSTALL:
+ default = '/var/run/cubicweb/'
+ else:
+ default = os.path.join(_INSTALL_PREFIX, 'var', 'run', 'cubicweb')
+ else:
+ import tempfile
+ default = tempfile.gettempdir()
+ # runtime directory created on startup if necessary, don't check it
+ # exists
+ rtdir = abspath(os.environ.get('CW_RUNTIME_DIR', default))
+ return join(rtdir, '%s-%s.pid' % (self.appid, self.name))
+
+ # config -> repository
+
+ def repository(self, vreg=None):
+ from cubicweb.server.repository import Repository
+ from cubicweb.server.utils import TasksManager
+ return Repository(self, TasksManager(), vreg=vreg)
+
+ # instance methods used to get instance specific resources #############
+
+ def __init__(self, appid, debugmode=False, creating=False):
+ self.appid = appid
+ # set to true while creating an instance
+ self.creating = creating
+ super(CubicWebConfiguration, self).__init__(debugmode)
+ fake_gettext = (text_type, lambda ctx, msgid: text_type(msgid))
+ for lang in self.available_languages():
+ self.translations[lang] = fake_gettext
+ self._cubes = None
+ self.load_file_configuration(self.main_config_file())
+
def adjust_sys_path(self):
    """Extend the base class sys.path adjustment with the instance home.

    Adding apphome to the python path is not usually necessary in
    production environments, but it is necessary for tests.
    """
    super(CubicWebConfiguration, self).adjust_sys_path()
    home = self.apphome
    if home and home not in sys.path:
        sys.path.insert(0, home)
+
@property
def apphome(self):
    # home directory of the instance: <instances dir>/<appid>
    return join(self.instances_dir(), self.appid)
+
@property
def appdatahome(self):
    # data directory of the instance: under the system-wide var/lib tree
    # in 'system' mode, alongside the instance home otherwise; the
    # $CW_INSTANCES_DATA_DIR environment variable overrides the parent
    # directory in both cases
    if self.mode == 'system':
        if _USR_INSTALL:
            iddir = os.path.join('/var', 'lib', 'cubicweb', 'instances')
        else:
            iddir = os.path.join(_INSTALL_PREFIX, 'var', 'lib', 'cubicweb', 'instances')
    else:
        iddir = self.instances_dir()
    iddir = abspath(os.environ.get('CW_INSTANCES_DATA_DIR', iddir))
    return join(iddir, self.appid)
+
def init_cubes(self, cubes):
    """Set the list of used cubes, then re-read configuration so options
    contributed by the cubes themselves are taken into account."""
    super(CubicWebConfiguration, self).init_cubes(cubes)
    # reload config file in cases options are defined in cubes __init__
    # or site_cubicweb files
    self.load_file_configuration(self.main_config_file())
    # configuration initialization hook
    self.load_configuration(**(self.cmdline_options or {}))
+
def add_cubes(self, cubes):
    """add given cubes to the list of used cubes"""
    extra = list(cubes)
    self._cubes = self.reorder_cubes(list(self._cubes) + extra)
    cube_dirs = [self.cube_dir(cube) for cube in extra]
    self.load_site_cubicweb(cube_dirs)
+
def main_config_file(self):
    """return instance's control configuration file"""
    return join(self.apphome, self.name + '.conf')
+
def save(self):
    """Serialize the current configuration into the main config file."""
    with open(self.main_config_file(), 'w') as stream:
        self.generate_config(stream)
+
def check_writeable_uid_directory(self, path):
    """check given directory path exists, belongs to the user running the
    server process and is writeable.

    If not, try to fix this, letting exception propagate when not possible.
    """
    if not exists(path):
        self.info('creating %s directory', path)
        try:
            os.makedirs(path)
        except OSError as ex:
            # creation failed: nothing more we can check on this path
            self.warning('error while creating %s directory: %s', path, ex)
            return
    # resolve the expected owner: the configured 'uid' option (numeric id
    # or login name), falling back to the current process uid
    if self['uid']:
        try:
            uid = int(self['uid'])
        except ValueError:
            from pwd import getpwnam
            uid = getpwnam(self['uid']).pw_uid
    else:
        try:
            uid = os.getuid()
        except AttributeError:  # we are on windows
            return
    fstat = os.stat(path)
    if fstat.st_uid != uid:
        self.info('giving ownership of %s directory to %s', path, self['uid'])
        try:
            os.chown(path, uid, os.getgid())
        except OSError as ex:
            # ownership fix is best-effort: log and continue
            self.warning('error while giving ownership of %s directory to %s: %s',
                         path, self['uid'], ex)
    if not (fstat.st_mode & stat.S_IWUSR):
        self.info('forcing write permission on directory %s', path)
        try:
            os.chmod(path, fstat.st_mode | stat.S_IWUSR)
        except OSError as ex:
            # permission fix is best-effort: log and continue
            self.warning('error while forcing write permission on directory %s: %s',
                         path, ex)
    return
+
@cached
def instance_md5_version(self):
    """Return an md5 hex digest fingerprinting the cubicweb version and
    the version of every cube used by the instance (result is cached)."""
    from hashlib import md5  # pylint: disable=E0611
    infos = []
    for pkg in sorted(self.cubes()):
        version = self.cube_version(pkg)
        infos.append('%s-%s' % (pkg, version))
    infos.append('cubicweb-%s' % str(self.cubicweb_version()))
    return md5((';'.join(infos)).encode('ascii')).hexdigest()
+
def load_configuration(self, **kw):
    """load instance's configuration files"""
    super(CubicWebConfiguration, self).load_configuration(**kw)
    if self.apphome and not self.creating:
        # init gettext -- skipped while creating, presumably because
        # catalogs are not compiled yet (see i18ncompile); confirm
        self._gettext_init()
+
def _load_site_cubicweb(self, sitefile):
    # overridden to register cube specific options declared in the
    # module-level `options` attribute of a site_cubicweb file
    mod = super(CubicWebConfiguration, self)._load_site_cubicweb(sitefile)
    if getattr(mod, 'options', None):
        self.register_options(mod.options)
        self.load_defaults()
+
def init_log(self, logthreshold=None, force=False):
    """Initialize the log service (idempotent unless *force* is True).

    Applies the base configuration, then an instance-level logging.conf
    if present, then optionally wires a statsd endpoint.
    """
    if not force and hasattr(self, '_logging_initialized'):
        return
    self._logging_initialized = True
    super_self = super(CubicWebConfiguration, self)
    super_self.init_log(logthreshold, logfile=self.get('log-file'))
    # read a config file if it exists
    logconfig = join(self.apphome, 'logging.conf')
    if exists(logconfig):
        logging.config.fileConfig(logconfig)
    # set the statsd address, if any
    if self.get('statsd-endpoint'):
        try:
            address, port = self.get('statsd-endpoint').split(':')
            port = int(port)
        except ValueError:
            # was a bare ``except:`` which would also swallow
            # KeyboardInterrupt/SystemExit; only a malformed endpoint
            # (wrong number of ':' parts or non-integer port) is expected
            self.error('statsd-endpoint: invalid address format ({}); '
                       'it should be "ip:port"'.format(self.get('statsd-endpoint')))
        else:
            import statsd_logger
            statsd_logger.setup('cubicweb.%s' % self.appid, (address, port))
+
def available_languages(self, *args):
    """Yield the language codes for which this instance has a compiled
    translation catalog.

    'en' is always yielded first, even when no .mo file is found.
    Extra positional arguments are accepted (and ignored) so the method
    is usable as a vocabulary method.
    """
    from glob import glob
    yield 'en'  # ensure 'en' is yielded even if no .mo found
    pattern = join(self.apphome, 'i18n', '*', 'LC_MESSAGES')
    for catalog_dir in glob(pattern):
        code = catalog_dir.split(os.sep)[-2]
        if code != 'en':
            yield code
+
def _gettext_init(self):
    """set language for gettext"""
    from cubicweb.cwgettext import translation
    path = join(self.apphome, 'i18n')
    for language in self.available_languages():
        self.info("loading language %s", language)
        try:
            tr = translation('cubicweb', path, languages=[language])
            # store (ugettext, upgettext) pair, replacing the fake
            # pass-through translators installed at __init__ time
            self.translations[language] = (tr.ugettext, tr.upgettext)
        except (ImportError, AttributeError, IOError):
            if self.mode != 'test':
                # in test contexts, data/i18n does not exist, hence
                # logging will only pollute the logs
                self.exception('localisation support error for language %s',
                               language)
+
def appobjects_path(self):
    """return a list of files or directories where the registry will look
    for application objects
    """
    search_path = list(reversed(self.cubes_path()))
    if self.apphome:  # may be unset in tests
        search_path.append(self.apphome)
    return self.build_appobjects_path(search_path)
+
def set_sources_mode(self, sources):
    """Warn that explicit source selection is ignored by this
    configuration (it requires a repository configuration)."""
    if 'all' not in sources:
        print('warning: ignoring specified sources, requires a repository '
              'configuration')
+
def i18ncompile(self, langs=None):
    """Compile translation catalogs for *langs* (all available languages
    by default) into the instance's i18n directory; return compilation
    errors (callers treat the result as a list of error strings)."""
    from cubicweb import i18n
    if langs is None:
        langs = self.available_languages()
    i18ndir = join(self.apphome, 'i18n')
    if not exists(i18ndir):
        create_dir(i18ndir)
    # source catalogs come from every cube on the cubes path plus
    # cubicweb's own i18n library directory
    sourcedirs = [join(path, 'i18n') for path in self.cubes_path()]
    sourcedirs.append(self.i18n_lib_dir())
    return i18n.compile_i18n_catalogs(sourcedirs, i18ndir, langs)
+
def sendmails(self, msgs, fromaddr=None):
    """Send mail messages through the configured smtp server.

    msgs: list of 2-tuples (message object, recipients). Return False
    if connection to the smtp server failed, else True.  Individual
    send failures are logged but do not abort the batch.
    """
    server, port = self['smtp-host'], self['smtp-port']
    if fromaddr is None:
        fromaddr = '%s <%s>' % (self['sender-name'], self['sender-addr'])
    # a Lock is a context manager: equivalent to acquire()/finally release()
    with SMTP_LOCK:
        try:
            smtp = SMTP(server, port)
        except Exception as ex:
            self.exception("can't connect to smtp server %s:%s (%s)",
                           server, port, ex)
            return False
        for msg, recipients in msgs:
            try:
                smtp.sendmail(fromaddr, recipients, msg.as_string())
            except Exception as ex:
                self.exception("error sending mail to %s (%s)",
                               recipients, ex)
        smtp.close()
    return True
+
# wire logging methods (debug/info/warning/error/exception) onto the base
# configuration class; subclasses inherit them
set_log_methods(CubicWebNoAppConfiguration,
                logging.getLogger('cubicweb.configuration'))

# alias to get a configuration instance from an instance id
instance_configuration = CubicWebConfiguration.config_for
# backward compatible name ('application' is the historical term for
# 'instance'); emits a deprecation warning on use
application_configuration = deprecated('use instance_configuration')(instance_configuration)
+
+
# module-level guard making register_stored_procedures idempotent
_EXT_REGISTERED = False

def register_stored_procedures():
    """Register cubicweb-specific RQL functions (COMMA_JOIN, CONCAT_STRINGS,
    GROUP_CONCAT, LIMIT_SIZE, TEXT_LIMIT_SIZE, FTIRANK, FSPATH) with the
    rql / logilab.database function registries.

    Only the first call has an effect (see _EXT_REGISTERED).
    """
    from logilab.database import FunctionDescr
    from rql.utils import register_function, iter_funcnode_variables
    from rql.nodes import SortTerm, Constant, VariableRef

    global _EXT_REGISTERED
    if _EXT_REGISTERED:
        return
    _EXT_REGISTERED = True

    class COMMA_JOIN(FunctionDescr):
        # join the textual descriptions of the function's variable
        # arguments with ', '
        supported_backends = ('postgres', 'sqlite',)
        rtype = 'String'

        def st_description(self, funcnode, mainindex, tr):
            return ', '.join(sorted(term.get_description(mainindex, tr)
                                    for term in iter_funcnode_variables(funcnode)))

    register_function(COMMA_JOIN)  # XXX do not expose?

    class CONCAT_STRINGS(COMMA_JOIN):
        # aggregate variant of COMMA_JOIN
        aggregat = True

    register_function(CONCAT_STRINGS)  # XXX bw compat

    class GROUP_CONCAT(CONCAT_STRINGS):
        supported_backends = ('mysql', 'postgres', 'sqlite',)

    register_function(GROUP_CONCAT)

    class LIMIT_SIZE(FunctionDescr):
        # 3-argument truncation function; description is that of the
        # first argument
        supported_backends = ('postgres', 'sqlite',)
        minargs = maxargs = 3
        rtype = 'String'

        def st_description(self, funcnode, mainindex, tr):
            return funcnode.children[0].get_description(mainindex, tr)

    register_function(LIMIT_SIZE)

    class TEXT_LIMIT_SIZE(LIMIT_SIZE):
        # 2-argument variant
        supported_backends = ('mysql', 'postgres', 'sqlite',)
        minargs = maxargs = 2

    register_function(TEXT_LIMIT_SIZE)

    class FTIRANK(FunctionDescr):
        """return ranking of a variable that must be used as some has_text
        relation subject in the query's restriction. Usually used to sort result
        of full-text search by ranking.
        """
        supported_backends = ('postgres',)
        rtype = 'Float'

        def st_check_backend(self, backend, funcnode):
            """overridden so that on backend not supporting fti ranking, the
            function is removed when in an orderby clause, or replaced by a 1.0
            constant.
            """
            if not self.supports(backend):
                parent = funcnode.parent
                while parent is not None and not isinstance(parent, SortTerm):
                    parent = parent.parent
                if isinstance(parent, SortTerm):
                    parent.parent.remove(parent)
                else:
                    funcnode.parent.replace(funcnode, Constant(1.0, 'Float'))
                    parent = funcnode
                # unregister variable references from the removed or
                # replaced sub-tree
                for vref in parent.iget_nodes(VariableRef):
                    vref.unregister_reference()

    register_function(FTIRANK)

    class FSPATH(FunctionDescr):
        """return path of some bytes attribute stored using the Bytes
        File-System Storage (bfss)
        """
        rtype = 'Bytes'  # XXX return a String? potential pb with fs encoding

        def update_cb_stack(self, stack):
            assert len(stack) == 1
            stack[0] = self.source_execute

        def as_sql(self, backend, args):
            raise NotImplementedError(
                'This callback is only available for BytesFileSystemStorage '
                'managed attribute. Is FSPATH() argument BFSS managed?')

        def source_execute(self, source, session, value):
            fpath = source.binary_to_str(value)
            try:
                return Binary(fpath)
            except OSError as ex:
                source.critical("can't open %s: %s", fpath, ex)
                return None

    register_function(FSPATH)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/cwctl.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/cwctl.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1154 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""the cubicweb-ctl tool, based on logilab.common.clcommands to
+provide a pluggable commands system.
+"""
+from __future__ import print_function
+
+__docformat__ = "restructuredtext en"
+
+# *ctl module should limit the number of import to be imported as quickly as
+# possible (for cubicweb-ctl reactivity, necessary for instance for usable bash
+# completion). So import locally in command helpers.
+import sys
+from warnings import warn, filterwarnings
+from os import remove, listdir, system, pathsep
+from os.path import exists, join, isfile, isdir, dirname, abspath
+
+try:
+ from os import kill, getpgid
+except ImportError:
+ def kill(*args):
+ """win32 kill implementation"""
+ def getpgid():
+ """win32 getpgid implementation"""
+
+from six.moves.urllib.parse import urlparse
+
+from logilab.common.clcommands import CommandLine
+from logilab.common.shellutils import ASK
+from logilab.common.configuration import merge_options
+
+from cubicweb import ConfigurationError, ExecutionError, BadCommandUsage
+from cubicweb.utils import support_args
+from cubicweb.cwconfig import CubicWebConfiguration as cwcfg, CWDEV, CONFIGURATIONS
+from cubicweb.toolsutils import Command, rm, create_dir, underline_title
+from cubicweb.__pkginfo__ import version
+
+# don't check duplicated commands, it occurs when reloading site_cubicweb
+CWCTL = CommandLine('cubicweb-ctl', 'The CubicWeb swiss-knife.',
+ version=version, check_duplicated_command=False)
+
def wait_process_end(pid, maxtry=10, waittime=1):
    """wait for a process to actually die"""
    import signal
    from time import sleep
    for _ in range(maxtry):
        try:
            # signal 0 is not portable here; SIGUSR1 probes the process
            kill(pid, signal.SIGUSR1)
        except (OSError, AttributeError):  # XXX win32
            break
        sleep(waittime)
    else:
        raise ExecutionError('can\'t kill process %s' % pid)
+
def list_instances(regdir):
    """Return the sorted names of the instance sub-directories of *regdir*,
    or an empty list when *regdir* is not a directory."""
    if not isdir(regdir):
        return []
    return sorted(entry for entry in listdir(regdir)
                  if isdir(join(regdir, entry)))
+
def detect_available_modes(templdir):
    """Guess which modes ('repository', 'web ui') the cube located at
    *templdir* supports, based on the files it ships."""
    modes = []
    if any(exists(join(templdir, fname)) for fname in ('schema', 'schema.py')):
        modes.append('repository')
    if any(exists(join(templdir, fname)) for fname in ('data', 'views', 'views.py')):
        modes.append('web ui')
    return modes
+
+
class InstanceCommand(Command):
    """base class for command taking 0 to n instance id as arguments
    (0 meaning all registered instances)
    """
    arguments = '[...]'
    options = (
        ("force",
         {'short': 'f', 'action' : 'store_true',
          'default': False,
          'help': 'force command without asking confirmation',
          }
         ),
        )
    # past participle used in error messages ('started', 'stopped', ...);
    # set by concrete subclasses
    actionverb = None

    def ordered_instances(self):
        """return instances in the order in which they should be started,
        considering $REGISTRY_DIR/startorder file if it exists (useful when
        some instances depends on another as external source).

        Instance used by another one should appears first in the file (one
        instance per line)
        """
        regdir = cwcfg.instances_dir()
        _allinstances = list_instances(regdir)
        if isfile(join(regdir, 'startorder')):
            allinstances = []
            for line in open(join(regdir, 'startorder')):
                line = line.strip()
                if line and not line.startswith('#'):
                    try:
                        _allinstances.remove(line)
                        allinstances.append(line)
                    except ValueError:
                        print('ERROR: startorder file contains unexistant '
                              'instance %s' % line)
            # instances not mentioned in startorder come last
            allinstances += _allinstances
        else:
            allinstances = _allinstances
        return allinstances

    def run(self, args):
        """run the <command>_method on each argument (a list of instance
        identifiers)
        """
        if not args:
            args = self.ordered_instances()
            try:
                askconfirm = not self.config.force
            except AttributeError:
                # no force option
                askconfirm = False
        else:
            # instances given explicitly on the command line: no confirmation
            askconfirm = False
        self.run_args(args, askconfirm)

    def run_args(self, args, askconfirm):
        # run the command on each instance, keeping the worst exit status
        status = 0
        for appid in args:
            if askconfirm:
                print('*'*72)
                if not ASK.confirm('%s instance %r ?' % (self.name, appid)):
                    continue
            try:
                status = max(status, self.run_arg(appid))
            except (KeyboardInterrupt, SystemExit):
                sys.stderr.write('%s aborted\n' % self.name)
                return 2  # specific error code
        sys.exit(status)

    def run_arg(self, appid):
        # dispatch to the '<command name>_instance' method of the subclass
        cmdmeth = getattr(self, '%s_instance' % self.name)
        try:
            status = cmdmeth(appid)
        except (ExecutionError, ConfigurationError) as ex:
            # expected errors: report without a traceback
            sys.stderr.write('instance %s not %s: %s\n' % (
                appid, self.actionverb, ex))
            status = 4
        except Exception as ex:
            # unexpected errors: report with a traceback
            import traceback
            traceback.print_exc()
            sys.stderr.write('instance %s not %s: %s\n' % (
                appid, self.actionverb, ex))
            status = 8
        return status
+
class InstanceCommandFork(InstanceCommand):
    """Same as `InstanceCommand`, but command is forked in a new environment
    for each argument
    """

    def run_args(self, args, askconfirm):
        if len(args) > 1:
            # rebuild the command line without the instance arguments so it
            # can be re-run in a subprocess per instance
            forkcmd = ' '.join(w for w in sys.argv if not w in args)
        else:
            forkcmd = None
        for appid in args:
            if askconfirm:
                print('*'*72)
                if not ASK.confirm('%s instance %r ?' % (self.name, appid)):
                    continue
            if forkcmd:
                status = system('%s %s' % (forkcmd, appid))
                if status:
                    print('%s exited with status %s' % (forkcmd, status))
            else:
                # single instance: run in-process
                self.run_arg(appid)
+
+
+# base commands ###############################################################
+
class ListCommand(Command):
    """List configurations, cubes and instances.

    List available configurations, installed cubes, and registered instances.

    If given, the optional argument allows to restrict listing only a category of items.
    """
    name = 'list'
    arguments = '[all|cubes|configurations|instances]'
    options = (
        ('verbose',
         {'short': 'v', 'action' : 'store_true',
          'help': "display more information."}),
        )

    def run(self, args):
        """run the command with its specific arguments"""
        if not args:
            mode = 'all'
        elif len(args) == 1:
            mode = args[0]
        else:
            raise BadCommandUsage('Too many arguments')

        from cubicweb.migration import ConfigurationProblem

        if mode == 'all':
            print('CubicWeb %s (%s mode)' % (cwcfg.cubicweb_version(), cwcfg.mode))
            print()

        if mode in ('all', 'config', 'configurations'):
            print('Available configurations:')
            for config in CONFIGURATIONS:
                print('*', config.name)
                # display the configuration docstring, one stripped
                # non-empty line at a time
                for line in config.__doc__.splitlines():
                    line = line.strip()
                    if not line:
                        continue
                    print(' ', line)
            print()

        if mode in ('all', 'cubes'):
            cfgpb = ConfigurationProblem(cwcfg)
            try:
                cubesdir = pathsep.join(cwcfg.cubes_search_path())
                namesize = max(len(x) for x in cwcfg.available_cubes())
            except ConfigurationError as ex:
                print('No cubes available:', ex)
            except ValueError:
                # max() on an empty sequence: no cube found on the path
                print('No cubes available in %s' % cubesdir)
            else:
                print('Available cubes (%s):' % cubesdir)
                for cube in cwcfg.available_cubes():
                    try:
                        tinfo = cwcfg.cube_pkginfo(cube)
                        tversion = tinfo.version
                        cfgpb.add_cube(cube, tversion)
                    except (ConfigurationError, AttributeError) as ex:
                        tinfo = None
                        tversion = '[missing cube information: %s]' % ex
                    print('* %s %s' % (cube.ljust(namesize), tversion))
                    if self.config.verbose:
                        if tinfo:
                            descr = getattr(tinfo, 'description', '')
                            if not descr:
                                descr = tinfo.__doc__
                            if descr:
                                print(' '+ ' \n'.join(descr.splitlines()))
                        modes = detect_available_modes(cwcfg.cube_dir(cube))
                        print(' available modes: %s' % ', '.join(modes))
            print()

        if mode in ('all', 'instances'):
            try:
                regdir = cwcfg.instances_dir()
            except ConfigurationError as ex:
                print('No instance available:', ex)
                print()
                return
            instances = list_instances(regdir)
            if instances:
                print('Available instances (%s):' % regdir)
                for appid in instances:
                    modes = cwcfg.possible_configurations(appid)
                    if not modes:
                        print('* %s (BROKEN instance, no configuration found)' % appid)
                        continue
                    print('* %s (%s)' % (appid, ', '.join(modes)))
                    try:
                        # sanity check: ensure the configuration loads
                        config = cwcfg.config_for(appid, modes[0])
                    except Exception as exc:
                        print(' (BROKEN instance, %s)' % exc)
                        continue
            else:
                print('No instance available in %s' % regdir)
            print()

        if mode == 'all':
            # configuration management problem solving
            cfgpb.solve()
            if cfgpb.warnings:
                print('Warnings:\n', '\n'.join('* '+txt for txt in cfgpb.warnings))
            if cfgpb.errors:
                print('Errors:')
                for op, cube, version, src in cfgpb.errors:
                    if op == 'add':
                        print('* cube', cube, end=' ')
                        if version:
                            print(' version', version, end=' ')
                        print('is not installed, but required by %s' % src)
                    else:
                        print('* cube %s version %s is installed, but version %s is required by %s' % (
                            cube, cfgpb.cubes[cube], version, src))
+
def check_options_consistency(config):
    """Reject contradictory command line option combinations."""
    if not config.automatic:
        return
    if config.config_level > 0:
        raise BadCommandUsage('--automatic and --config-level should not be '
                              'used together')
+
class CreateInstanceCommand(Command):
    """Create an instance from a cube. This is a unified
    command which can handle web / server / all-in-one installation
    according to available parts of the software library and of the
    desired cube.

    <cube>
      the name of cube to use (list available cube names using
      the "list" command). You can use several cubes by separating
      them using comma (e.g. 'jpl,email')
    <instance>
      an identifier for the instance to create
    """
    name = 'create'
    arguments = '<cube> <instance>'
    min_args = max_args = 2
    options = (
        ('automatic',
         {'short': 'a', 'action' : 'store_true',
          'default': False,
          'help': 'automatic mode: never ask and use default answer to every '
          'question. this may require that your login match a database super '
          'user (allowed to create database & all).',
          }),
        ('config-level',
         {'short': 'l', 'type' : 'int', 'metavar': '<level>',
          'default': 0,
          'help': 'configuration level (0..2): 0 will ask for essential '
          'configuration parameters only while 2 will ask for all parameters',
          }),
        ('config',
         {'short': 'c', 'type' : 'choice', 'metavar': '<install type>',
          'choices': ('all-in-one', 'repository'),
          'default': 'all-in-one',
          'help': 'installation type, telling which part of an instance '
          'should be installed. You can list available configurations using the'
          ' "list" command. Default to "all-in-one", e.g. an installation '
          'embedding both the RQL repository and the web server.',
          }),
        ('no-db-create',
         {'short': 'S',
          'action': 'store_true',
          'default': False,
          'help': 'stop after creation and do not continue with db-create',
          }),
        )

    def run(self, args):
        """run the command with its specific arguments"""
        from logilab.common.textutils import splitstrip
        check_options_consistency(self.config)
        configname = self.config.config
        cubes, appid = args
        cubes = splitstrip(cubes)
        # get the configuration and helper
        config = cwcfg.config_for(appid, configname, creating=True)
        cubes = config.expand_cubes(cubes)
        config.init_cubes(cubes)
        helper = self.config_helper(config)
        # check the cube exists
        try:
            templdirs = [cwcfg.cube_dir(cube)
                         for cube in cubes]
        except ConfigurationError as ex:
            print(ex)
            print('\navailable cubes:', end=' ')
            print(', '.join(cwcfg.available_cubes()))
            return
        # create the registry directory for this instance
        print('\n'+underline_title('Creating the instance %s' % appid))
        create_dir(config.apphome)
        # cubicweb-ctl configuration
        if not self.config.automatic:
            print('\n'+underline_title('Configuring the instance (%s.conf)'
                                       % configname))
            config.input_config('main', self.config.config_level)
        # configuration'specific stuff
        print()
        helper.bootstrap(cubes, self.config.automatic, self.config.config_level)
        # input for cubes specific options
        if not self.config.automatic:
            sections = set(sect.lower() for sect, opt, odict in config.all_options()
                           if 'type' in odict
                           and odict.get('level') <= self.config.config_level)
            for section in sections:
                if section not in ('main', 'email', 'web'):
                    print('\n' + underline_title('%s options' % section))
                    config.input_config(section, self.config.config_level)
        # write down configuration
        config.save()
        self._handle_win32(config, appid)
        print('-> generated config %s' % config.main_config_file())
        # handle i18n files structure
        # in the first cube given
        from cubicweb import i18n
        langs = [lang for lang, _ in i18n.available_catalogs(join(templdirs[0], 'i18n'))]
        errors = config.i18ncompile(langs)
        if errors:
            print('\n'.join(errors))
            if self.config.automatic \
                   or not ASK.confirm('error while compiling message catalogs, '
                                      'continue anyway ?'):
                print('creation not completed')
                return
        # create the additional data directory for this instance
        if config.appdatahome != config.apphome:  # true in dev mode
            create_dir(config.appdatahome)
        create_dir(join(config.appdatahome, 'backup'))
        if config['uid']:
            from logilab.common.shellutils import chown
            # this directory should be owned by the uid of the server process
            print('set %s as owner of the data directory' % config['uid'])
            chown(config.appdatahome, config['uid'])
        print('\n-> creation done for %s\n' % repr(config.apphome)[1:-1])
        if not self.config.no_db_create:
            helper.postcreate(self.config.automatic, self.config.config_level)

    def _handle_win32(self, config, appid):
        """On win32 only, generate a service wrapper script in the instance
        home so the instance can be run as a Windows service."""
        if sys.platform != 'win32':
            return
        service_template = """
import sys
import win32serviceutil
sys.path.insert(0, r"%(CWPATH)s")

from cubicweb.etwist.service import CWService

classdict = {'_svc_name_': 'cubicweb-%(APPID)s',
             '_svc_display_name_': 'CubicWeb ' + '%(CNAME)s',
             'instance': '%(APPID)s'}
%(CNAME)sService = type('%(CNAME)sService', (CWService,), classdict)

if __name__ == '__main__':
    win32serviceutil.HandleCommandLine(%(CNAME)sService)
"""
        # open in text mode ('w', not 'wb'): the template is a str and
        # writing it to a binary file raises TypeError on Python 3; the
        # ``with`` block also guarantees the handle is closed (the
        # original leaked it)
        with open(join(config.apphome, 'win32svc.py'), 'w') as script:
            script.write(service_template % {
                'APPID': appid,
                'CNAME': appid.capitalize(),
                'CWPATH': abspath(join(dirname(__file__), '..'))})
+
+
class DeleteInstanceCommand(Command):
    """Delete an instance. Will remove instance's files and
    unregister it.
    """
    name = 'delete'
    arguments = ''
    min_args = max_args = 1
    options = ()

    def run(self, args):
        """run the command with its specific arguments"""
        appid = args[0]
        # gather every configuration flavour registered for this instance
        configs = [cwcfg.config_for(appid, configname)
                   for configname in cwcfg.possible_configurations(appid)]
        if not configs:
            raise ExecutionError('unable to guess configuration for %s' % appid)
        # let each configuration helper clean up its own resources first
        for config in configs:
            helper = self.config_helper(config, required=False)
            if helper:
                helper.cleanup()
        # remove home
        rm(config.apphome)
        # remove instance data directory
        try:
            rm(config.appdatahome)
        except OSError as ex:
            import errno
            # a missing data directory is fine; anything else is an error
            if ex.errno != errno.ENOENT:
                raise
        confignames = ', '.join([config.name for config in configs])
        print('-> instance %s (%s) deleted.' % (appid, confignames))
+
+
+# instance commands ########################################################
+
class StartInstanceCommand(InstanceCommandFork):
    """Start the given instances. If no instance is given, start them all.

    <instance>...
      identifiers of the instances to start. If no instance is
      given, start them all.
    """
    name = 'start'
    actionverb = 'started'
    options = (
        ("debug",
         {'short': 'D', 'action' : 'store_true',
          'help': 'start server in debug mode.'}),
        ("force",
         {'short': 'f', 'action' : 'store_true',
          'default': False,
          'help': 'start the instance even if it seems to be already \
running.'}),
        ('profile',
         {'short': 'P', 'type' : 'string', 'metavar': '',
          'default': None,
          'help': 'profile code and use the specified file to store stats',
          }),
        ('loglevel',
         {'short': 'l', 'type' : 'choice', 'metavar': '',
          'default': None, 'choices': ('debug', 'info', 'warning', 'error'),
          'help': 'debug if -D is set, error otherwise',
          }),
        ('param',
         {'short': 'p', 'type' : 'named', 'metavar' : 'key1:value1,key2:value2',
          'default': {},
          'help': 'override configuration file option with .',
          }),
        )

    def start_instance(self, appid):
        """start the instance's server"""
        try:
            import twisted  # noqa
        except ImportError:
            msg = (
                "Twisted is required by the 'start' command\n"
                "Either install it, or use one of the alternative commands:\n"
                "- '{ctl} wsgi {appid}'\n"
                "- '{ctl} pyramid {appid}' (requires the pyramid cube)\n")
            raise ExecutionError(msg.format(ctl='cubicweb-ctl', appid=appid))
        config = cwcfg.config_for(appid, debugmode=self['debug'])
        # override config file values with cmdline options
        config.cmdline_options = self.config.param
        init_cmdline_log_threshold(config, self['loglevel'])
        if self['profile']:
            config.global_set_option('profile', self.config.profile)
        helper = self.config_helper(config, cmdname='start')
        pidf = config['pid-file']
        # refuse to start over an existing pid file unless --force
        if exists(pidf) and not self['force']:
            msg = "%s seems to be running. Remove %s by hand if necessary or use \
the --force option."
            raise ExecutionError(msg % (appid, pidf))
        if helper.start_server(config) == 1:
            print('instance %s started' % appid)
+
+
def init_cmdline_log_threshold(config, loglevel):
    """Apply a log level given on the command line: record it in the
    configuration and (re)initialize logging accordingly.  A None level
    leaves the configuration untouched."""
    if loglevel is None:
        return
    config.global_set_option('log-threshold', loglevel.upper())
    config.init_log(config['log-threshold'], force=True)
+
+
class StopInstanceCommand(InstanceCommand):
    """Stop the given instances.

    <instance>...
      identifiers of the instances to stop. If no instance is
      given, stop them all.
    """
    name = 'stop'
    actionverb = 'stopped'

    def ordered_instances(self):
        # stop in reverse start order so dependent instances go down first
        instances = super(StopInstanceCommand, self).ordered_instances()
        instances.reverse()
        return instances

    def stop_instance(self, appid):
        """stop the instance's server"""
        config = cwcfg.config_for(appid)
        helper = self.config_helper(config, cmdname='stop')
        helper.poststop()  # do this anyway
        pidf = config['pid-file']
        if not exists(pidf):
            sys.stderr.write("%s doesn't exist.\n" % pidf)
            return
        import signal
        pid = int(open(pidf).read().strip())
        try:
            # graceful shutdown first
            kill(pid, signal.SIGTERM)
        except Exception:
            sys.stderr.write("process %s seems already dead.\n" % pid)
        else:
            try:
                wait_process_end(pid)
            except ExecutionError as ex:
                # process did not die in time: escalate to SIGKILL
                sys.stderr.write('%s\ntrying SIGKILL\n' % ex)
                try:
                    kill(pid, signal.SIGKILL)
                except Exception:
                    # probably dead now
                    pass
                wait_process_end(pid)
        try:
            remove(pidf)
        except OSError:
            # already removed by twistd
            pass
        print('instance %s stopped' % appid)
+
+
class RestartInstanceCommand(StartInstanceCommand):
    """Restart the given instances.

    <instance>...
      identifiers of the instances to restart. If no instance is
      given, restart them all.
    """
    name = 'restart'
    actionverb = 'restarted'

    def run_args(self, args, askconfirm):
        regdir = cwcfg.instances_dir()
        if not isfile(join(regdir, 'startorder')) or len(args) <= 1:
            # no specific startorder
            super(RestartInstanceCommand, self).run_args(args, askconfirm)
            return
        print ('some specific start order is specified, will first stop all '
               'instances then restart them.')
        # get instances in startorder
        for appid in args:
            if askconfirm:
                print('*'*72)
                if not ASK.confirm('%s instance %r ?' % (self.name, appid)):
                    continue
            StopInstanceCommand(self.logger).stop_instance(appid)
        # rebuild the command line as a 'start' command and re-run it per
        # instance, in reverse stop order
        forkcmd = [w for w in sys.argv if not w in args]
        forkcmd[1] = 'start'
        forkcmd = ' '.join(forkcmd)
        for appid in reversed(args):
            status = system('%s %s' % (forkcmd, appid))
            if status:
                sys.exit(status)

    def restart_instance(self, appid):
        # stop, then start through the inherited start_instance
        StopInstanceCommand(self.logger).stop_instance(appid)
        self.start_instance(appid)
+
+
class ReloadConfigurationCommand(RestartInstanceCommand):
    """Reload the given instances. This command is equivalent to a
    restart for now.

    <instance>...
      identifiers of the instances to reload. If no instance is
      given, reload them all.
    """
    name = 'reload'

    def reload_instance(self, appid):
        # simply delegate to restart (no hot reload implemented)
        self.restart_instance(appid)
+
+
class StatusCommand(InstanceCommand):
    """Display status information about the given instances.

    <instance>...
      identifiers of the instances to status. If no instance is
      given, get status information about all registered instances.
    """
    name = 'status'
    options = ()

    @staticmethod
    def status_instance(appid):
        """print running status information for an instance"""
        # non-zero status means at least one configuration is not running
        status = 0
        for mode in cwcfg.possible_configurations(appid):
            config = cwcfg.config_for(appid, mode)
            print('[%s-%s]' % (appid, mode), end=' ')
            try:
                pidf = config['pid-file']
            except KeyError:
                print('buggy instance, pid file not specified')
                continue
            if not exists(pidf):
                print("doesn't seem to be running")
                status = 1
                continue
            pid = int(open(pidf).read().strip())
            # trick to guess whether or not the process is running
            try:
                getpgid(pid)
            except OSError:
                print("should be running with pid %s but the process can not be found" % pid)
                status = 1
                continue
            print("running with pid %s" % (pid))
        return status
+
+class UpgradeInstanceCommand(InstanceCommandFork):
+ """Upgrade an instance after cubicweb and/or component(s) upgrade.
+
+ For repository update, you will be prompted for a login / password to use
+ to connect to the system database. For some upgrades, the given user
+ should have create or alter table permissions.
+
+ ...
+ identifiers of the instances to upgrade. If no instance is
+ given, upgrade them all.
+ """
+ name = 'upgrade'
+ actionverb = 'upgraded'
+ options = InstanceCommand.options + (
+ ('force-cube-version',
+ {'short': 't', 'type' : 'named', 'metavar': 'cube1:X.Y.Z,cube2:X.Y.Z',
+ 'default': None,
+ 'help': 'force migration from the indicated version for the specified cube(s).'}),
+
+ ('force-cubicweb-version',
+ {'short': 'e', 'type' : 'string', 'metavar': 'X.Y.Z',
+ 'default': None,
+ 'help': 'force migration from the indicated cubicweb version.'}),
+
+ ('fs-only',
+ {'short': 's', 'action' : 'store_true',
+ 'default': False,
+ 'help': 'only upgrade files on the file system, not the database.'}),
+
+ ('nostartstop',
+ {'short': 'n', 'action' : 'store_true',
+ 'default': False,
+ 'help': 'don\'t try to stop instance before migration and to restart it after.'}),
+
+ ('verbosity',
+ {'short': 'v', 'type' : 'int', 'metavar': '<0..2>',
+ 'default': 1,
+ 'help': "0: no confirmation, 1: only main commands confirmed, 2 ask \
+for everything."}),
+
+ ('backup-db',
+ {'short': 'b', 'type' : 'yn', 'metavar': '',
+ 'default': None,
+ 'help': "Backup the instance database before upgrade.\n"\
+ "If the option is ommitted, confirmation will be ask.",
+ }),
+
+ ('ext-sources',
+ {'short': 'E', 'type' : 'csv', 'metavar': '',
+ 'default': None,
+ 'help': "For multisources instances, specify to which sources the \
+repository should connect to for upgrading. When unspecified or 'migration' is \
+given, appropriate sources for migration will be automatically selected \
+(recommended). If 'all' is given, will connect to all defined sources.",
+ }),
+ )
+
+ def upgrade_instance(self, appid):
+ print('\n' + underline_title('Upgrading the instance %s' % appid))
+ from logilab.common.changelog import Version
+ config = cwcfg.config_for(appid)
+ instance_running = exists(config['pid-file'])
+ config.repairing = True # notice we're not starting the server
+ config.verbosity = self.config.verbosity
+ set_sources_mode = getattr(config, 'set_sources_mode', None)
+ if set_sources_mode is not None:
+ set_sources_mode(self.config.ext_sources or ('migration',))
+ # get instance and installed versions for the server and the componants
+ mih = config.migration_handler()
+ repo = mih.repo
+ vcconf = repo.get_versions()
+ helper = self.config_helper(config, required=False)
+ if self.config.force_cube_version:
+ for cube, version in self.config.force_cube_version.items():
+ vcconf[cube] = Version(version)
+ toupgrade = []
+ for cube in config.cubes():
+ installedversion = config.cube_version(cube)
+ try:
+ applversion = vcconf[cube]
+ except KeyError:
+ config.error('no version information for %s' % cube)
+ continue
+ if installedversion > applversion:
+ toupgrade.append( (cube, applversion, installedversion) )
+ cubicwebversion = config.cubicweb_version()
+ if self.config.force_cubicweb_version:
+ applcubicwebversion = Version(self.config.force_cubicweb_version)
+ vcconf['cubicweb'] = applcubicwebversion
+ else:
+ applcubicwebversion = vcconf.get('cubicweb')
+ if cubicwebversion > applcubicwebversion:
+ toupgrade.append(('cubicweb', applcubicwebversion, cubicwebversion))
+ # only stop once we're sure we have something to do
+ if instance_running and not (CWDEV or self.config.nostartstop):
+ StopInstanceCommand(self.logger).stop_instance(appid)
+ # run cubicweb/componants migration scripts
+ if self.config.fs_only or toupgrade:
+ for cube, fromversion, toversion in toupgrade:
+ print('-> migration needed from %s to %s for %s' % (fromversion, toversion, cube))
+ with mih.cnx:
+ with mih.cnx.security_enabled(False, False):
+ mih.migrate(vcconf, reversed(toupgrade), self.config)
+ else:
+ print('-> no data migration needed for instance %s.' % appid)
+ # rewrite main configuration file
+ mih.rewrite_configuration()
+ mih.shutdown()
+ # handle i18n upgrade
+ if not self.i18nupgrade(config):
+ return
+ print()
+ if helper:
+ helper.postupgrade(repo)
+ print('-> instance migrated.')
+ if instance_running and not (CWDEV or self.config.nostartstop):
+ # restart instance through fork to get a proper environment, avoid
+ # uicfg pb (and probably gettext catalogs, to check...)
+ forkcmd = '%s start %s' % (sys.argv[0], appid)
+ status = system(forkcmd)
+ if status:
+ print('%s exited with status %s' % (forkcmd, status))
+ print()
+
+ def i18nupgrade(self, config):
+ # handle i18n upgrade:
+ # * install new languages
+ # * recompile catalogs
+ # XXX search available language in the first cube given
+ from cubicweb import i18n
+ templdir = cwcfg.cube_dir(config.cubes()[0])
+ langs = [lang for lang, _ in i18n.available_catalogs(join(templdir, 'i18n'))]
+ errors = config.i18ncompile(langs)
+ if errors:
+ print('\n'.join(errors))
+ if not ASK.confirm('Error while compiling message catalogs, '
+ 'continue anyway?'):
+ print('-> migration not completed.')
+ return False
+ return True
+
+
+class ListVersionsInstanceCommand(InstanceCommand):
+    """List versions used by an instance.
+
+    <instance>...
+
+      identifiers of the instances to list versions for.
+    """
+    name = 'versions'
+
+    def versions_instance(self, appid):
+        """print one 'cube: X.Y.Z' line per cube used by the instance"""
+        config = cwcfg.config_for(appid)
+        # should not raise error if db versions don't match fs versions
+        config.repairing = True
+        # no need to load all appobjects and schema
+        config.quick_start = True
+        if hasattr(config, 'set_sources_mode'):
+            config.set_sources_mode(('migration',))
+        vcconf = config.repository().get_versions()
+        for key in sorted(vcconf):
+            print(key+': %s.%s.%s' % vcconf[key])
+
+class ShellCommand(Command):
+ """Run an interactive migration shell on an instance. This is a python shell
+ with enhanced migration commands predefined in the namespace. An additional
+ argument may be given corresponding to a file containing commands to execute
+ in batch mode.
+
+ By default it will connect to a local instance using an in memory
+ connection, unless a URL to a running instance is specified.
+
+ Arguments after bare "--" string will not be processed by the shell command
+ You can use it to pass extra arguments to your script and expect for
+ them in '__args__' afterwards.
+
+
+ the identifier of the instance to connect.
+ """
+ name = 'shell'
+ arguments = ' [batch command file(s)] [-- ", re.M|re.I|re.S)
+def _remove_script_tags(data):
+    """Remove the script (usually javascript) tags to help the lxml
+    XMLParser / HTMLParser do their job. Without that, they choke on
+    tags embedded in JS strings.
+    """
+    # Notice we may want to use lxml cleaner, but it's far too intrusive:
+    #
+    # cleaner = Cleaner(scripts=True,
+    #                   javascript=False,
+    #                   comments=False,
+    #                   style=False,
+    #                   links=False,
+    #                   meta=False,
+    #                   page_structure=False,
+    #                   processing_instructions=False,
+    #                   embedded=False,
+    #                   frames=False,
+    #                   forms=False,
+    #                   annoying_tags=False,
+    #                   remove_tags=(),
+    #                   remove_unknown_tags=False,
+    #                   safe_attrs_only=False,
+    #                   add_nofollow=False)
+    # NOTE(review): the example inputs/outputs below lost their markup
+    # during extraction (the literal tags were stripped) -- restore them
+    # from upstream before relying on these examples
+    # >>> cleaner.clean_html('')
+    # ''
+    # >>> cleaner.clean_html('')
+    # ''
+    # >>> cleaner.clean_html('')
+    # ''
+    # >>> cleaner.clean_html(' ')
+    # ' '
+    # >>> cleaner.clean_html(' ')
+    # ' '
+    #
+    # using that, we'll miss most actual validation error we want to
+    # catch. For now, use dumb regexp
+    return _REM_SCRIPT_RGX.sub(b'', data)
+
+
+class Validator(object):
+    """ base validator API """
+    # lxml parser instance, set by concrete subclasses
+    parser = None
+
+    def parse_string(self, source):
+        """preprocess then parse `source`, returning a :class:`PageInfo`
+        wrapping both the raw source and the parsed tree"""
+        # NOTE: the local name `etree` shadows the lxml `etree` module here
+        etree = self._parse(self.preprocess_data(source))
+        return PageInfo(source, etree)
+
+    def preprocess_data(self, data):
+        """hook for subclasses to transform data before parsing (no-op
+        by default)"""
+        return data
+
+    def _parse(self, pdata):
+        # turn lxml syntax errors into AssertionError so test frameworks
+        # report them as plain failures, preserving the error position
+        try:
+            return etree.fromstring(pdata, self.parser)
+        except etree.XMLSyntaxError as exc:
+            new_exc = AssertionError(u'invalid document: %s' % exc)
+            new_exc.position = exc.position
+            raise new_exc
+
+
+class DTDValidator(Validator):
+    """validator parsing documents with DTD validation enabled (except on
+    windows, see below)"""
+    def __init__(self):
+        Validator.__init__(self)
+        # XXX understand what's happening under windows
+        self.parser = etree.XMLParser(dtd_validation=sys.platform != 'win32')
+
+    def preprocess_data(self, data):
+        """used to fix potential blockquote mess generated by docutils"""
+        if STRICT_DOCTYPE not in data:
+            return data
+        # parse using transitional DTD
+        data = data.replace(STRICT_DOCTYPE, TRANSITIONAL_DOCTYPE)
+        tree = self._parse(data)
+        namespace = tree.nsmap.get(None)
+        # this is the list of authorized child tags for <blockquote> nodes
+        # NOTE(review): `expected` is computed (and namespaced below) but
+        # never used afterwards -- confirm whether it can be dropped
+        expected = 'p h1 h2 h3 h4 h5 h6 div ul ol dl pre hr blockquote address ' \
+                   'fieldset table form noscript ins del script'.split()
+        if namespace:
+            blockquotes = tree.findall('.//{%s}blockquote' % namespace)
+            expected = ['{%s}%s' % (namespace, tag) for tag in expected]
+        else:
+            blockquotes = tree.findall('.//blockquote')
+        # quick and dirty approach: remove all blockquotes
+        for blockquote in blockquotes:
+            parent = blockquote.getparent()
+            parent.remove(blockquote)
+        data = etree.tostring(tree)
+        # NOTE(review): this format string looks truncated (an XML prolog was
+        # probably lost in extraction) -- confirm against upstream
+        return '%s\n%s' % (
+            STRICT_DOCTYPE, data)
+
+
+class XMLValidator(Validator):
+    """XML validator, checks that XML is well-formed and used XMLNS are defined"""
+
+    def __init__(self):
+        Validator.__init__(self)
+        self.parser = etree.XMLParser()
+
+# backward-compatibility alias, deprecated since 3.17 (emits the warning below)
+SaxOnlyValidator = class_renamed('SaxOnlyValidator',
+                                 XMLValidator,
+                                 '[3.17] you should use the '
+                                 'XMLValidator class instead')
+
+
+class XMLSyntaxValidator(Validator):
+    """XML syntax validator, check XML is well-formed"""
+
+    class MySaxErrorHandler(sax.ErrorHandler):
+        """override default handler to avoid choking because of unknown entity"""
+        def fatalError(self, exception):
+            # XXX check entity in htmlentitydefs
+            if not str(exception).endswith('undefined entity'):
+                raise exception
+    # class-level SAX parser shared by all instances, used only for
+    # well-formedness checking in _parse() below
+    _parser = sax.make_parser()
+    _parser.setContentHandler(sax.handler.ContentHandler())
+    _parser.setErrorHandler(MySaxErrorHandler())
+
+    def __init__(self):
+        super(XMLSyntaxValidator, self).__init__()
+        # XMLParser() wants xml namespaces defined
+        # XMLParser(recover=True) will accept almost anything
+        #
+        # -> use the later but preprocess will check xml well-formness using a
+        #    dumb SAX parser
+        self.parser = etree.XMLParser(recover=True)
+
+    def preprocess_data(self, data):
+        """strip script tags which would otherwise confuse the parsers"""
+        return _remove_script_tags(data)
+
+    def _parse(self, data):
+        # first pass: strict well-formedness check with the SAX parser,
+        # reporting failures as AssertionError with a (line, column) position
+        inpsrc = sax.InputSource()
+        inpsrc.setByteStream(BytesIO(data))
+        try:
+            self._parser.parse(inpsrc)
+        except sax.SAXParseException as exc:
+            new_exc = AssertionError(u'invalid document: %s' % exc)
+            new_exc.position = (exc._linenum, exc._colnum)
+            raise new_exc
+        # second pass: build the tree with the recovering lxml parser
+        return super(XMLSyntaxValidator, self)._parse(data)
+
+
+class HTMLValidator(Validator):
+
+ def __init__(self):
+ Validator.__init__(self)
+ self.parser = etree.HTMLParser(recover=False)
+
+ def preprocess_data(self, data):
+ return _remove_script_tags(data)
+
+
+class PageInfo(object):
+    """holds various information on the view's output"""
+    def __init__(self, source, root):
+        self.source = source            # raw (unparsed) page source
+        self.etree = root               # parsed lxml tree
+        self.raw_text = u''.join(root.xpath('//text()'))
+        self.namespace = self.etree.nsmap
+        self.default_ns = self.namespace.get(None)
+        self.a_tags = self.find_tag('a')
+        self.h1_tags = self.find_tag('h1')
+        self.h2_tags = self.find_tag('h2')
+        self.h3_tags = self.find_tag('h3')
+        self.h4_tags = self.find_tag('h4')
+        self.input_tags = self.find_tag('input')
+        # indexed by heading level - 1 (see has_title)
+        self.title_tags = [self.h1_tags, self.h2_tags, self.h3_tags, self.h4_tags]
+
+    def _iterstr(self, tag):
+        """return the iterfind expression for `tag`, namespace-qualified
+        when the page has a default namespace"""
+        if self.default_ns is None:
+            return ".//%s" % tag
+        else:
+            return ".//{%s}%s" % (self.default_ns, tag)
+
+    def matching_nodes(self, tag, **attrs):
+        """yield elements named `tag` whose attributes include all of `attrs`"""
+        for elt in self.etree.iterfind(self._iterstr(tag)):
+            eltattrs = elt.attrib
+            for attr, value in attrs.items():
+                try:
+                    if eltattrs[attr] != value:
+                        break
+                except KeyError:
+                    break
+            else: # all attributes match
+                yield elt
+
+    def has_tag(self, tag, nboccurs=1, **attrs):
+        """returns True if tag with given attributes appears in the page
+        exactly `nboccurs` times (any number of times if None)
+        """
+        for elt in self.matching_nodes(tag, **attrs):
+            if nboccurs is None: # no need to check number of occurrences
+                return True
+            if not nboccurs: # too many occurrences
+                return False
+            nboccurs -= 1
+        if nboccurs == 0: # correct number of occurrences
+            return True
+        return False # no matching tag/attrs
+
+    def find_tag(self, tag, gettext=True):
+        """return a list which contains text of all "tag" elements """
+        iterstr = self._iterstr(tag)
+        # for links and inputs, keep the attributes along with the text
+        if not gettext or tag in ('a', 'input'):
+            return [(elt.text, elt.attrib)
+                    for elt in self.etree.iterfind(iterstr)]
+        return [u''.join(elt.xpath('.//text()'))
+                for elt in self.etree.iterfind(iterstr)]
+
+    def appears(self, text):
+        """returns True if `text` appears in the page's text content"""
+        return text in self.raw_text
+
+    def __contains__(self, text):
+        # membership test against the raw (unparsed) source
+        return text in self.source
+
+    def has_title(self, text, level=None):
+        """returns True if `text` is the text of a title (h1..h4) element
+
+        :param level: the title's level (1 for h1, 2 for h2, etc.)
+        """
+        if level is None:
+            for hlist in self.title_tags:
+                if text in hlist:
+                    return True
+            return False
+        else:
+            hlist = self.title_tags[level - 1]
+            return text in hlist
+
+    def has_title_regexp(self, pattern, level=None):
+        """returns True if a title (h1..h4) element matches `pattern`"""
+        sre = re.compile(pattern)
+        if level is None:
+            for hlist in self.title_tags:
+                for title in hlist:
+                    if sre.match(title):
+                        return True
+            return False
+        else:
+            hlist = self.title_tags[level - 1]
+            for title in hlist:
+                if sre.match(title):
+                    return True
+            return False
+
+    def has_link(self, text, url=None):
+        """returns True if a link with the exact `text` (and, when given,
+        the exact href `url`) was found in the page"""
+        for link_text, attrs in self.a_tags:
+            if text == link_text:
+                if url is None:
+                    return True
+                try:
+                    href = attrs['href']
+                    if href == url:
+                        return True
+                except KeyError:
+                    continue
+        return False
+
+    def has_link_regexp(self, pattern, url=None):
+        """returns True if a link matching `pattern` (and, when given, the
+        exact href `url`) was found in the page"""
+        sre = re.compile(pattern)
+        for link_text, attrs in self.a_tags:
+            if sre.match(link_text):
+                if url is None:
+                    return True
+                try:
+                    href = attrs['href']
+                    if href == url:
+                        return True
+                except KeyError:
+                    continue
+        return False
+
+# map a validator keyword to its validator class; None means no validation
+VALMAP = {None: None,
+          'dtd': DTDValidator,
+          'xml': XMLValidator,
+          'html': HTMLValidator,
+          }
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/httptest.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/httptest.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,170 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see .
+"""this module contains base classes and utilities for integration with running
+http server
+"""
+from __future__ import print_function
+
+__docformat__ = "restructuredtext en"
+
+import random
+import threading
+import socket
+
+from six.moves import range, http_client
+from six.moves.urllib.parse import urlparse
+
+
+from cubicweb.devtools.testlib import CubicWebTC
+from cubicweb.devtools import ApptestConfiguration
+
+
+def get_available_port(ports_scan):
+ """return the first available port from the given ports range
+
+ Try to connect port by looking for refused connection (111) or transport
+ endpoint already connected (106) errors
+
+ Raise a RuntimeError if no port can be found
+
+ :type ports_range: list
+ :param ports_range: range of ports to test
+ :rtype: int
+
+ .. see:: :func:`test.test_support.bind_port`
+ """
+ ports_scan = list(ports_scan)
+ random.shuffle(ports_scan) # lower the chance of race condition
+ for port in ports_scan:
+ try:
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock = s.connect(("localhost", port))
+ except socket.error as err:
+ if err.args[0] in (111, 106):
+ return port
+ finally:
+ s.close()
+ raise RuntimeError('get_available_port([ports_range]) cannot find an available port')
+
+
+class CubicWebServerTC(CubicWebTC):
+ """Class for running a Twisted-based test web server.
+ """
+ ports_range = range(7000, 8000)
+
+ def start_server(self):
+ from twisted.internet import reactor
+ from cubicweb.etwist.server import run
+ # use a semaphore to avoid starting test while the http server isn't
+ # fully initilialized
+ semaphore = threading.Semaphore(0)
+ def safe_run(*args, **kwargs):
+ try:
+ run(*args, **kwargs)
+ finally:
+ semaphore.release()
+
+ reactor.addSystemEventTrigger('after', 'startup', semaphore.release)
+ t = threading.Thread(target=safe_run, name='cubicweb_test_web_server',
+ args=(self.config, True), kwargs={'repo': self.repo})
+ self.web_thread = t
+ t.start()
+ semaphore.acquire()
+ if not self.web_thread.isAlive():
+ # XXX race condition with actual thread death
+ raise RuntimeError('Could not start the web server')
+ #pre init utils connection
+ parseurl = urlparse(self.config['base-url'])
+ assert parseurl.port == self.config['port'], (self.config['base-url'], self.config['port'])
+ self._web_test_cnx = http_client.HTTPConnection(parseurl.hostname,
+ parseurl.port)
+ self._ident_cookie = None
+
+ def stop_server(self, timeout=15):
+ """Stop the webserver, waiting for the thread to return"""
+ from twisted.internet import reactor
+ if self._web_test_cnx is None:
+ self.web_logout()
+ self._web_test_cnx.close()
+ try:
+ reactor.stop()
+ self.web_thread.join(timeout)
+ assert not self.web_thread.isAlive()
+
+ finally:
+ reactor.__init__()
+
+ def web_login(self, user=None, passwd=None):
+ """Log the current http session for the provided credential
+
+ If no user is provided, admin connection are used.
+ """
+ if user is None:
+ user = self.admlogin
+ passwd = self.admpassword
+ if passwd is None:
+ passwd = user
+ response = self.web_get("login?__login=%s&__password=%s" %
+ (user, passwd))
+ assert response.status == http_client.SEE_OTHER, response.status
+ self._ident_cookie = response.getheader('Set-Cookie')
+ assert self._ident_cookie
+ return True
+
+ def web_logout(self, user='admin', pwd=None):
+ """Log out current http user"""
+ if self._ident_cookie is not None:
+ response = self.web_get('logout')
+ self._ident_cookie = None
+
+ def web_request(self, path='', method='GET', body=None, headers=None):
+ """Return an http_client.HTTPResponse object for the specified path
+
+ Use available credential if available.
+ """
+ if headers is None:
+ headers = {}
+ if self._ident_cookie is not None:
+ assert 'Cookie' not in headers
+ headers['Cookie'] = self._ident_cookie
+ self._web_test_cnx.request(method, '/' + path, headers=headers, body=body)
+ response = self._web_test_cnx.getresponse()
+ response.body = response.read() # to chain request
+ response.read = lambda : response.body
+ return response
+
+ def web_get(self, path='', body=None, headers=None):
+ return self.web_request(path=path, body=body, headers=headers)
+
+ def setUp(self):
+ super(CubicWebServerTC, self).setUp()
+ port = self.config['port'] or get_available_port(self.ports_range)
+ self.config.global_set_option('port', port) # force rewrite here
+ self.config.global_set_option('base-url', 'http://127.0.0.1:%d/' % port)
+ # call load_configuration again to let the config reset its datadir_url
+ self.config.load_configuration()
+ self.start_server()
+
+ def tearDown(self):
+ from twisted.internet import error
+ try:
+ self.stop_server()
+ except error.ReactorNotRunning as err:
+ # Server could be launched manually
+ print(err)
+ super(CubicWebServerTC, self).tearDown()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/instrument.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/instrument.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,225 @@
+# copyright 2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr -- mailto:contact@logilab.fr
+#
+# This program is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with this program. If not, see .
+"""Instrumentation utilities"""
+from __future__ import print_function
+
+import os
+
+try:
+ import pygraphviz
+except ImportError:
+ pygraphviz = None
+
+from cubicweb.cwvreg import CWRegistryStore
+from cubicweb.devtools.devctl import DevConfiguration
+
+
+# palette cycled through by get_color()
+ALL_COLORS = [
+    "00FF00", "0000FF", "FFFF00", "FF00FF", "00FFFF", "000000",
+    "800000", "008000", "000080", "808000", "800080", "008080", "808080",
+    "C00000", "00C000", "0000C0", "C0C000", "C000C0", "00C0C0", "C0C0C0",
+    "400000", "004000", "000040", "404000", "400040", "004040", "404040",
+    "200000", "002000", "000020", "202000", "200020", "002020", "202020",
+    "600000", "006000", "000060", "606000", "600060", "006060", "606060",
+    "A00000", "00A000", "0000A0", "A0A000", "A000A0", "00A0A0", "A0A0A0",
+    "E00000", "00E000", "0000E0", "E0E000", "E000E0", "00E0E0", "E0E0E0",
+    ]
+# cache of colors already attributed to keys, filled by get_color()
+_COLORS = {}
+def get_color(key):
+    """return a stable '#RRGGBB' color for `key`, attributing palette
+    entries in order of first request"""
+    try:
+        return _COLORS[key]
+    except KeyError:
+        _COLORS[key] = '#'+ALL_COLORS[len(_COLORS) % len(ALL_COLORS)]
+        return _COLORS[key]
+
+def warn(msg, *args):
+    """print a %-formatted warning message to stdout"""
+    print('WARNING: %s' % (msg % args))
+
+def info(msg):
+    """print an informational message to stdout"""
+    print('INFO: ' + msg)
+
+
+class PropagationAnalyzer(object):
+ """Abstract propagation analyzer, providing utility function to extract
+ entities involved in propagation from a schema, as well as propagation
+ rules from hooks (provided they use intrumentalized sets, see
+ :class:`CubeTracerSet`).
+
+ Concrete classes should at least define `prop_rel` class attribute and
+ implements the `is_root` method.
+
+ See `localperms` or `nosylist` cubes for example usage (`ccplugin` module).
+ """
+ prop_rel = None # name of the propagation relation
+
+ def init(self, cube):
+ """Initialize analyze for the given cube, returning the (already loaded)
+ vregistry and a set of entities which we're interested in.
+ """
+ config = DevConfiguration(cube)
+ schema = config.load_schema()
+ vreg = CWRegistryStore(config)
+ vreg.set_schema(schema) # set_schema triggers objects registrations
+ eschemas = set(eschema for eschema in schema.entities()
+ if self.should_include(eschema))
+ return vreg, eschemas
+
+ def is_root(self, eschema):
+ """Return `True` if given entity schema is a root of the graph"""
+ raise NotImplementedError()
+
+ def should_include(self, eschema):
+ """Return `True` if given entity schema should be included by the graph.
+ """
+
+ if self.prop_rel in eschema.subjrels or self.is_root(eschema):
+ return True
+ return False
+
+ def prop_edges(self, s_rels, o_rels, eschemas):
+ """Return a set of edges where propagation has been detected.
+
+ Each edge is defined by a 4-uple (from node, to node, rtype, package)
+ where `rtype` is the relation type bringing from to and `package` is the cube adding the rule to the propagation
+ control set (see see :class:`CubeTracerSet`).
+ """
+ schema = iter(eschemas).next().schema
+ prop_edges = set()
+ for rtype in s_rels:
+ found = False
+ for subj, obj in schema.rschema(rtype).rdefs:
+ if subj in eschemas and obj in eschemas:
+ found = True
+ prop_edges.add( (subj, obj, rtype, s_rels.value_cube[rtype]) )
+ if not found:
+ warn('no rdef match for %s', rtype)
+ for rtype in o_rels:
+ found = False
+ for subj, obj in schema.rschema(rtype).rdefs:
+ if subj in eschemas and obj in eschemas:
+ found = True
+ prop_edges.add( (obj, subj, rtype, o_rels.value_cube[rtype]) )
+ if not found:
+ warn('no rdef match for %s', rtype)
+ return prop_edges
+
+ def detect_problems(self, eschemas, edges):
+ """Given the set of analyzed entity schemas and edges between them,
+ return a set of entity schemas where a problem has been detected.
+ """
+ problematic = set()
+ for eschema in eschemas:
+ if self.has_problem(eschema, edges):
+ problematic.add(eschema)
+ not_problematic = set(eschemas).difference(problematic)
+ if not_problematic:
+ info('nothing problematic in: %s' %
+ ', '.join(e.type for e in not_problematic))
+ return problematic
+
+ def has_problem(self, eschema, edges):
+ """Return `True` if the given schema is considered problematic,
+ considering base propagation rules.
+ """
+ root = self.is_root(eschema)
+ has_prop_rel = self.prop_rel in eschema.subjrels
+ # root but no propagation relation
+ if root and not has_prop_rel:
+ warn('%s is root but miss %s', eschema, self.prop_rel)
+ return True
+ # propagated but without propagation relation / not propagated but
+ # with propagation relation
+ if not has_prop_rel and \
+ any(edge for edge in edges if edge[1] == eschema):
+ warn("%s miss %s but is reached by propagation",
+ eschema, self.prop_rel)
+ return True
+ elif has_prop_rel and not root:
+ rdef = eschema.rdef(self.prop_rel, takefirst=True)
+ edges = [edge for edge in edges if edge[1] == eschema]
+ if not edges:
+ warn("%s has %s but isn't reached by "
+ "propagation", eschema, self.prop_rel)
+ return True
+ # require_permission relation / propagation rule not added by
+ # the same cube
+ elif not any(edge for edge in edges if edge[-1] == rdef.package):
+ warn('%s has %s relation / propagation rule'
+ ' not added by the same cube (%s / %s)', eschema,
+ self.prop_rel, rdef.package, edges[0][-1])
+ return True
+ return False
+
+ def init_graph(self, eschemas, edges, problematic):
+ """Initialize and return graph, adding given nodes (entity schemas) and
+ edges between them.
+
+ Require pygraphviz installed.
+ """
+ if pygraphviz is None:
+ raise RuntimeError('pygraphviz is not installed')
+ graph = pygraphviz.AGraph(strict=False, directed=True)
+ for eschema in eschemas:
+ if eschema in problematic:
+ params = {'color': '#ff0000', 'fontcolor': '#ff0000'}
+ else:
+ params = {}#'color': get_color(eschema.package)}
+ graph.add_node(eschema.type, **params)
+ for subj, obj, rtype, package in edges:
+ graph.add_edge(str(subj), str(obj), label=rtype,
+ color=get_color(package))
+ return graph
+
+ def add_colors_legend(self, graph):
+ """Add a legend of used colors to the graph."""
+ for package, color in sorted(_COLORS.items()):
+ graph.add_node(package, color=color, fontcolor=color, shape='record')
+
+
+class CubeTracerSet(object):
+    """Dumb set implementation whose purpose is to keep track of which cube is
+    being loaded when something is added to the set.
+
+    Results will be found in the `value_cube` attribute dictionary.
+
+    See `localperms` or `nosylist` cubes for example usage (`hooks` module).
+    """
+    def __init__(self, vreg, wrapped):
+        self.vreg = vreg          # registry providing currently_loading_cube
+        self.wrapped = wrapped    # the actual underlying set
+        self.value_cube = {}      # value -> name of the cube that added it
+
+    def add(self, value):
+        """add `value` to the wrapped set, recording the currently loading
+        cube; warn when two cubes register the same value"""
+        self.wrapped.add(value)
+        cube = self.vreg.currently_loading_cube
+        if value in self.value_cube:
+            warn('%s is propagated by cube %s and cube %s',
+                 value, self.value_cube[value], cube)
+        else:
+            self.value_cube[value] = cube
+
+    def __iter__(self):
+        return iter(self.wrapped)
+
+    def __ior__(self, other):
+        # route |= through add() so cube tracking is preserved
+        for value in other:
+            self.add(value)
+        return self
+
+    def __ror__(self, other):
+        # plain union into a foreign set: no tracking possible there
+        other |= self.wrapped
+        return other
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/qunit.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/qunit.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,293 @@
+# copyright 2010-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see .
+from __future__ import absolute_import
+
+import os, os.path as osp
+import errno
+from tempfile import mkdtemp
+from subprocess import Popen, PIPE, STDOUT
+
+from six.moves.queue import Queue, Empty
+
+# imported by default to simplify further import statements
+from logilab.common.testlib import unittest_main, with_tempdir, InnerTest, Tags
+import webtest.http
+
+import cubicweb
+from cubicweb.view import View
+from cubicweb.web.controller import Controller
+from cubicweb.web.views.staticcontrollers import StaticFileController, STATIC_CONTROLLERS
+from cubicweb.devtools import webtest as cwwebtest
+
+
+class FirefoxHelper(object):
+    """Drive a firefox instance used to execute the QUnit test pages.
+
+    On posix systems firefox is wrapped by the bundled xvfb-run.sh script so
+    it runs against a virtual X display.
+    """
+
+    def __init__(self, url=None):
+        # `url` is accepted but unused here; the url is given to start()
+        self._process = None
+        # dedicated profile so the test browser doesn't touch the user's one
+        self._profile_dir = mkdtemp(prefix='cwtest-ffxprof-')
+        self.firefox_cmd = ['firefox', '-no-remote']
+        if os.name == 'posix':
+            self.firefox_cmd = [osp.join(osp.dirname(__file__), 'data', 'xvfb-run.sh'),
+                                '-a', '-s', '-noreset -screen 0 800x600x24'] + self.firefox_cmd
+
+    def test(self):
+        """Return (available, reason): whether firefox can be executed."""
+        try:
+            proc = Popen(['firefox', '--help'], stdout=PIPE, stderr=STDOUT)
+            stdout, _ = proc.communicate()
+            return proc.returncode == 0, stdout
+        except OSError as exc:
+            if exc.errno == errno.ENOENT:
+                # firefox binary not found on PATH
+                msg = '[%s] %s' % (errno.errorcode[exc.errno], exc.strerror)
+                return False, msg
+            raise
+
+    def start(self, url):
+        """Launch firefox on `url`, discarding its output."""
+        self.stop()
+        cmd = self.firefox_cmd + ['-silent', '--profile', self._profile_dir,
+                                  '-url', url]
+        with open(os.devnull, 'w') as fnull:
+            self._process = Popen(cmd, stdout=fnull, stderr=fnull)
+
+    def stop(self):
+        """Terminate the browser process, if any is running."""
+        if self._process is not None:
+            # returncode must still be None, i.e. process not already dead
+            assert self._process.returncode is None, self._process.returncode
+            self._process.terminate()
+            self._process.wait()
+            self._process = None
+
+    def __del__(self):
+        self.stop()
+
+
+class QUnitTestCase(cwwebtest.CubicWebTestTC):
+    """Base test case running javascript QUnit test files through firefox.
+
+    Concrete classes list their test files in `all_js_tests`; the browser
+    posts results back to QUnitResultController which feeds `test_queue`.
+    """
+
+    tags = cwwebtest.CubicWebTestTC.tags | Tags(('qunit',))
+
+    # testfile, (dep_a, dep_b)
+    all_js_tests = ()
+
+    def setUp(self):
+        super(QUnitTestCase, self).setUp()
+        self.test_queue = Queue()
+        # bind the result controller to this test case and queue through a
+        # throw-away subclass, registered only for this test's duration
+        class MyQUnitResultController(QUnitResultController):
+            tc = self
+            test_queue = self.test_queue
+        self._qunit_controller = MyQUnitResultController
+        self.webapp.app.appli.vreg.register(MyQUnitResultController)
+        self.webapp.app.appli.vreg.register(QUnitView)
+        self.webapp.app.appli.vreg.register(CWDevtoolsStaticController)
+        # real HTTP server so the external browser can reach the application
+        self.server = webtest.http.StopableWSGIServer.create(self.webapp.app)
+        self.config.global_set_option('base-url', self.server.application_url)
+
+    def tearDown(self):
+        self.server.shutdown()
+        self.webapp.app.appli.vreg.unregister(self._qunit_controller)
+        self.webapp.app.appli.vreg.unregister(QUnitView)
+        self.webapp.app.appli.vreg.unregister(CWDevtoolsStaticController)
+        super(QUnitTestCase, self).tearDown()
+
+    def test_javascripts(self):
+        # generative test: yields one inner test per QUnit result
+        for args in self.all_js_tests:
+            self.assertIn(len(args), (1, 2))
+            test_file = args[0]
+            if len(args) > 1:
+                depends = args[1]
+            else:
+                depends = ()
+            for js_test in self._test_qunit(test_file, depends):
+                yield js_test
+
+    @with_tempdir
+    def _test_qunit(self, test_file, depends=(), timeout=10):
+        """Run `test_file` in firefox, yielding InnerTest results.
+
+        `timeout` is the per-result inactivity limit in seconds.
+        """
+        QUnitView.test_file = test_file
+        QUnitView.depends = depends
+
+        # drain results possibly left over by a previous run
+        while not self.test_queue.empty():
+            self.test_queue.get(False)
+
+        browser = FirefoxHelper()
+        isavailable, reason = browser.test()
+        if not isavailable:
+            self.fail('firefox not available or not working properly (%s)' % reason)
+        browser.start(self.config['base-url'] + "?vid=qunit")
+        test_count = 0
+        error = False
+        def raise_exception(cls, *data):
+            raise cls(*data)
+        while not error:
+            try:
+                result, test_name, msg = self.test_queue.get(timeout=timeout)
+                test_name = '%s (%s)' % (test_name, test_file)
+                self.set_description(test_name)
+                if result is None:
+                    # (None, None, None) sentinel pushed by handle_done
+                    break
+                test_count += 1
+                if result:
+                    yield InnerTest(test_name, lambda : 1)
+                else:
+                    yield InnerTest(test_name, self.fail, msg)
+            except Empty:
+                error = True
+                msg = '%s inactivity timeout (%is). %i test results received'
+                yield InnerTest(test_file, raise_exception, RuntimeError,
+                                msg % (test_file, timeout, test_count))
+        browser.stop()
+        if test_count <= 0 and not error:
+            # browser reported 'done' without running anything
+            yield InnerTest(test_name, raise_exception, RuntimeError,
+                            'No test yielded by qunit for %s' % test_file)
+
+class QUnitResultController(Controller):
+    """Collect results posted back by the browser-side QUnit harness and
+    forward them to the running test case's queue.
+
+    The `tc` and `test_queue` attributes are supplied by the subclass built
+    in QUnitTestCase.setUp.
+    """
+
+    __regid__ = 'qunit_result'
+
+    # Class variables to circumvent the instantiation of a new Controller for each request.
+    _log_stack = [] # store QUnit log messages
+    _current_module_name = '' # store the current QUnit module name
+
+    def publish(self, rset=None):
+        """Dispatch to handle_<event> according to the posted 'event' field."""
+        event = self._cw.form['event']
+        getattr(self, 'handle_%s' % event)()
+        return b''
+
+    def handle_module_start(self):
+        # remember the module name for subsequent test_done events
+        self.__class__._current_module_name = self._cw.form.get('name', '')
+
+    def handle_test_done(self):
+        """Push one (success, name, message) result onto the test queue."""
+        name = '%s // %s' % (self._current_module_name, self._cw.form.get('name', ''))
+        failures = int(self._cw.form.get('failures', 0))
+        total = int(self._cw.form.get('total', 0))
+
+        self._log_stack.append('%i/%i assertions failed' % (failures, total))
+        msg = '\n'.join(self._log_stack)
+
+        if failures:
+            self.tc.test_queue.put((False, name, msg))
+        else:
+            self.tc.test_queue.put((True, name, msg))
+        # reset accumulated logs for the next test (in place, shared list)
+        self._log_stack[:] = []
+
+    def handle_done(self):
+        # end-of-run sentinel consumed by QUnitTestCase._test_qunit
+        self.tc.test_queue.put((None, None, None))
+
+    def handle_log(self):
+        """Accumulate one QUnit assertion log entry onto the log stack."""
+        result = self._cw.form['result']
+        message = self._cw.form.get('message', '')
+        actual = self._cw.form.get('actual')
+        expected = self._cw.form.get('expected')
+        source = self._cw.form.get('source')
+        log = '%s: %s' % (result, message)
+        if result == 'false' and actual is not None and expected is not None:
+            log += ' (got: %s, expected: %s)' % (actual, expected)
+        if source is not None:
+            log += '\n' + source
+        self._log_stack.append(log)
+
+
+class QUnitView(View):
+    """View rendering the QUnit harness page loading `test_file` and its
+    `depends` scripts (both set by QUnitTestCase._test_qunit).
+    """
+    __regid__ = 'qunit'
+
+    templatable = False
+
+    # set on the class by QUnitTestCase._test_qunit before browser start
+    depends = None
+    test_file = None
+
+    def call(self, **kwargs):
+        # NOTE(review): the markup below appears truncated (several empty
+        # w(u'') calls and bare format strings); the original <script>/<link>
+        # tags were most likely stripped when this patch was captured --
+        # compare with the upstream revision before relying on this body.
+        w = self.w
+        req = self._cw
+        w(u'''
+
+
+
+
+
+
+
+        ''')
+        w(u'')
+        w(u'')
+        w(u'')
+
+        for dep in self.depends:
+            w(u'   \n' % dep)
+
+        w(u'  ')
+        w(u'  ' % self.test_file)
+        w(u'''
+
+
+
+
+        ''')
+
+
+class CWDevtoolsStaticController(StaticFileController):
+    """Static file controller serving files from cubicweb/devtools/data
+    under the /devtools URL path (resources needed by the QUnit harness).
+    """
+    __regid__ = 'devtools'
+
+    def publish(self, rset=None):
+        staticdir = osp.join(osp.dirname(__file__), 'data')
+        # strip the leading 'devtools/' segment from the requested path
+        relpath = self.relpath[len(self.__regid__) + 1:]
+        return self.static_file(osp.join(staticdir, relpath))
+
+
+STATIC_CONTROLLERS.append(CWDevtoolsStaticController)
+
+
+if __name__ == '__main__':
+    unittest_main()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/realdbtest.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/realdbtest.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,59 @@
+# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see .
+from cubicweb import toolsutils
+from cubicweb.devtools import DEFAULT_SOURCES, BaseApptestConfiguration
+
+class RealDatabaseConfiguration(BaseApptestConfiguration):
+    """Test configuration running against a pre-existing database rather
+    than a freshly initialized one.
+    """
+    # never (re)create the database content
+    init_repository = False
+    sourcesdef = DEFAULT_SOURCES.copy()
+
+    def sources(self):
+        """
+        By default, we run tests with the sqlite DB backend.
+        One may use its own configuration by just creating a
+        'sources' file in the test directory from which tests are
+        launched.
+        """
+        self._sources = self.sourcesdef
+        return self._sources
+
+
+def buildconfig(dbuser, dbpassword, dbname, adminuser, adminpassword, dbhost=None):
+    """convenience function that builds a real-db configuration class
+
+    Return a new RealDatabaseConfiguration subclass whose `sourcesdef`
+    targets the given postgres database with the given admin credentials.
+    """
+    sourcesdef = {'system': {'adapter' : 'native',
+                             'db-encoding' : 'UTF-8', #'ISO-8859-1',
+                             'db-user' : dbuser,
+                             'db-password' : dbpassword,
+                             'db-name' : dbname,
+                             'db-driver' : 'postgres',
+                             'db-host' : dbhost,
+                             },
+                  'admin' : {'login': adminuser,
+                             'password': adminpassword,
+                             },
+                  }
+    return type('MyRealDBConfig', (RealDatabaseConfiguration,),
+                {'sourcesdef': sourcesdef})
+
+
+def loadconfig(filename):
+    """convenience function that builds a real-db configuration class
+    from a file
+
+    `filename` is parsed with toolsutils.read_config and used verbatim
+    as the `sourcesdef` of the returned configuration class.
+    """
+    return type('MyRealDBConfig', (RealDatabaseConfiguration,),
+                {'sourcesdef': toolsutils.read_config(filename)})
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/repotest.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/repotest.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,353 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see .
+"""some utilities to ease repository testing
+
+This module contains functions to initialize a new repository.
+"""
+from __future__ import print_function
+
+__docformat__ = "restructuredtext en"
+
+from pprint import pprint
+
+from logilab.common.testlib import SkipTest
+
+def tuplify(mylist):
+    """Return `mylist` with every item converted to a tuple (eases
+    comparison of query results in tests)."""
+    return [tuple(item) for item in mylist]
+
+def snippet_key(a):
+    """Sort key for security snippets, giving a deterministic order."""
+    # a[0] may be a dict or a key/value tuple
+    return (sorted(dict(a[0]).items()), [e.expression for e in a[1]])
+
+def test_plan(self, rql, expected, kwargs=None):
+    """Check that the plan built for `rql` matches the `expected` steps.
+
+    Written as a function taking `self` so it can be attached to querier
+    test cases (which must provide `session`, `_prepare_plan` and
+    `planner`).  Dumps the actual steps on mismatch before re-raising.
+    """
+    with self.session.new_cnx() as cnx:
+        plan = self._prepare_plan(cnx, rql, kwargs)
+        self.planner.build_plan(plan)
+        try:
+            self.assertEqual(len(plan.steps), len(expected),
+                             'expected %s steps, got %s' % (len(expected), len(plan.steps)))
+            # step order is important
+            for i, step in enumerate(plan.steps):
+                compare_steps(self, step.test_repr(), expected[i])
+        except AssertionError:
+            pprint([step.test_repr() for step in plan.steps])
+            raise
+
+def compare_steps(self, step, expected):
+    """Recursively compare one plan step representation with its expectation.
+
+    A step repr is (type, [optional queries], characteristics..., children).
+    Prints the offending step before re-raising on mismatch.
+    """
+    try:
+        self.assertEqual(step[0], expected[0], 'expected step type %s, got %s' % (expected[0], step[0]))
+        if len(step) > 2 and isinstance(step[1], list) and isinstance(expected[1], list):
+            # step carries a list of (rql, solutions) queries: compare them
+            # one by one, solutions in a normalized (sorted) form
+            queries, equeries = step[1], expected[1]
+            self.assertEqual(len(queries), len(equeries),
+                             'expected %s queries, got %s' % (len(equeries), len(queries)))
+            for i, (rql, sol) in enumerate(queries):
+                self.assertEqual(rql, equeries[i][0])
+                self.assertEqual(sorted(sorted(x.items()) for x in sol), sorted(sorted(x.items()) for x in equeries[i][1]))
+            idx = 2
+        else:
+            idx = 1
+        self.assertEqual(step[idx:-1], expected[idx:-1],
+                         'expected step characteristic \n%s\n, got\n%s' % (expected[1:-1], step[1:-1]))
+        self.assertEqual(len(step[-1]), len(expected[-1]),
+                         'got %s child steps, expected %s' % (len(step[-1]), len(expected[-1])))
+    except AssertionError:
+        print('error on step ', end=' ')
+        pprint(step[:-1])
+        raise
+    children = step[-1]
+    if step[0] in ('UnionFetchStep', 'UnionStep'):
+        # sort children: their order is not significant for these steps
+        children = sorted(children)
+        expectedchildren = sorted(expected[-1])
+    else:
+        expectedchildren = expected[-1]
+    for i, substep in enumerate(children):
+        compare_steps(self, substep, expectedchildren[i])
+
+
+class DumbOrderedDict(list):
+    """List of (key, value) pairs exposing a read-only dict-like interface
+    while preserving item order (used for predictable test output).
+    """
+    def __iter__(self):
+        return self.iterkeys()
+    def __contains__(self, key):
+        return key in self.iterkeys()
+    def __getitem__(self, key):
+        # linear scan; fine for the small mappings used in tests
+        for key_, value in list.__iter__(self):
+            if key == key_:
+                return value
+        raise KeyError(key)
+    def iterkeys(self):
+        return (x for x, y in list.__iter__(self))
+    def iteritems(self):
+        # note: yields the (key, value) pairs, mirroring dict.iteritems
+        return (x for x in list.__iter__(self))
+    def items(self):
+        return [x for x in list.__iter__(self)]
+
+class DumbOrderedDict2(object):
+    """Proxy over a dict whose iteration order is made deterministic by
+    sorting keys with `sortkey`; other attribute access is delegated to
+    the original dict.
+    """
+    def __init__(self, origdict, sortkey):
+        self.origdict = origdict
+        self.sortkey = sortkey
+    def __getattr__(self, attr):
+        return getattr(self.origdict, attr)
+    def __iter__(self):
+        return iter(sorted(self.origdict, key=self.sortkey))
+
+def schema_eids_idx(schema):
+    """return a dictionary mapping schema types to their eids so we can reread
+    it from the fs instead of the db (too costly) between tests
+    """
+    schema_eids = {}
+    for x in schema.entities():
+        schema_eids[x] = x.eid
+    for x in schema.relations():
+        schema_eids[x] = x.eid
+        for rdef in x.rdefs.values():
+            # relation definitions are keyed by (subject, rtype, object)
+            schema_eids[(rdef.subject, rdef.rtype, rdef.object)] = rdef.eid
+    return schema_eids
+
+def restore_schema_eids_idx(schema, schema_eids):
+    """rebuild schema eid index
+
+    Inverse of schema_eids_idx: reassign eids saved in `schema_eids` to the
+    schema objects and refresh the schema's internal _eid_index.
+    """
+    for x in schema.entities():
+        x.eid = schema_eids[x]
+        schema._eid_index[x.eid] = x
+    for x in schema.relations():
+        x.eid = schema_eids[x]
+        schema._eid_index[x.eid] = x
+        for rdef in x.rdefs.values():
+            rdef.eid = schema_eids[(rdef.subject, rdef.rtype, rdef.object)]
+            schema._eid_index[rdef.eid] = rdef
+
+
+from logilab.common.testlib import TestCase, mock_object
+from logilab.database import get_db_helper
+
+from rql import RQLHelper
+
+from cubicweb.devtools.fake import FakeRepo, FakeConfig, FakeSession
+from cubicweb.server import set_debug, debugged
+from cubicweb.server.querier import QuerierHelper
+from cubicweb.server.session import Session
+from cubicweb.server.sources.rql2sql import SQLGenerator, remove_unused_solutions
+
+class RQLGeneratorTC(TestCase):
+    """Base test case for RQL-to-SQL generation, backed by a fake repository.
+
+    Concrete classes must set the `schema` and `backend` class attributes.
+    """
+    schema = backend = None # set this in concrete class
+
+    @classmethod
+    def setUpClass(cls):
+        if cls.backend is not None:
+            try:
+                cls.dbhelper = get_db_helper(cls.backend)
+            except ImportError as ex:
+                # backend driver not installed: skip the whole class
+                raise SkipTest(str(ex))
+
+    def setUp(self):
+        self.repo = FakeRepo(self.schema, config=FakeConfig(apphome=self.datadir))
+        self.repo.system_source = mock_object(dbdriver=self.backend)
+        self.rqlhelper = RQLHelper(self.schema,
+                                   special_relations={'eid': 'uid',
+                                                      'has_text': 'fti'},
+                                   backend=self.backend)
+        self.qhelper = QuerierHelper(self.repo, self.schema)
+        # bypass security checks and force deterministic principal selection
+        # (restored in tearDown; helpers defined in the monkey patch section
+        # at the end of this module)
+        ExecutionPlan._check_permissions = _dummy_check_permissions
+        rqlannotation._select_principal = _select_principal
+        if self.backend is not None:
+            self.o = SQLGenerator(self.schema, self.dbhelper)
+
+    def tearDown(self):
+        ExecutionPlan._check_permissions = _orig_check_permissions
+        rqlannotation._select_principal = _orig_select_principal
+
+    def set_debug(self, debug):
+        set_debug(debug)
+    def debugged(self, debug):
+        return debugged(debug)
+
+    def _prepare(self, rql):
+        """Parse, solve, simplify and preprocess `rql`; return the union."""
+        #print '******************** prepare', rql
+        union = self.rqlhelper.parse(rql)
+        #print '********* parsed', union.as_string()
+        self.rqlhelper.compute_solutions(union)
+        #print '********* solutions', solutions
+        self.rqlhelper.simplify(union)
+        #print '********* simplified', union.as_string()
+        plan = self.qhelper.plan_factory(union, {}, FakeSession(self.repo))
+        plan.preprocess(union)
+        for select in union.children:
+            # sort solutions for predictable comparison in tests
+            select.solutions.sort(key=lambda x: list(x.items()))
+        #print '********* ppsolutions', solutions
+        return union
+
+
+class BaseQuerierTC(TestCase):
+    """Base test case running queries against a live repository's querier.
+
+    Concrete classes must set the `repo` class attribute.
+    """
+    repo = None # set this in concrete class
+
+    def setUp(self):
+        self.o = self.repo.querier
+        self.session = next(iter(self.repo._sessions.values()))
+        self.ueid = self.session.user.eid
+        assert self.ueid != -1
+        self.repo._type_source_cache = {} # clear cache
+        # remember the highest eid so cleanup() only deletes test entities
+        self.maxeid = self.get_max_eid()
+        do_monkey_patch()
+        self._dumb_sessions = []
+
+    def get_max_eid(self):
+        with self.session.new_cnx() as cnx:
+            return cnx.execute('Any MAX(X)')[0][0]
+
+    def cleanup(self):
+        """Delete every entity created since setUp."""
+        with self.session.new_cnx() as cnx:
+            cnx.execute('DELETE Any X WHERE X eid > %s' % self.maxeid)
+            cnx.commit()
+
+    def tearDown(self):
+        undo_monkey_patch()
+        self.cleanup()
+        assert self.session.user.eid != -1
+
+    def set_debug(self, debug):
+        set_debug(debug)
+    def debugged(self, debug):
+        return debugged(debug)
+
+    def _rqlhelper(self):
+        rqlhelper = self.repo.vreg.rqlhelper
+        # reset uid_func so it don't try to get type from eids
+        rqlhelper._analyser.uid_func = None
+        rqlhelper._analyser.uid_func_mapping = {}
+        return rqlhelper
+
+    def _prepare_plan(self, cnx, rql, kwargs=None, simplify=True):
+        """Parse and solve `rql`, returning an (unexecuted) execution plan."""
+        rqlhelper = self._rqlhelper()
+        rqlst = rqlhelper.parse(rql)
+        rqlhelper.compute_solutions(rqlst, kwargs=kwargs)
+        if simplify:
+            rqlhelper.simplify(rqlst)
+        for select in rqlst.children:
+            # sort solutions for predictable comparison in tests
+            select.solutions.sort(key=lambda x: list(x.items()))
+        return self.o.plan_factory(rqlst, kwargs, cnx)
+
+    def _prepare(self, cnx, rql, kwargs=None):
+        """Return the preprocessed syntax tree for `rql` (first select)."""
+        plan = self._prepare_plan(cnx, rql, kwargs, simplify=False)
+        plan.preprocess(plan.rqlst)
+        rqlst = plan.rqlst.children[0]
+        rqlst.solutions = remove_unused_solutions(rqlst, rqlst.solutions, {}, self.repo.schema)[0]
+        return rqlst
+
+    def user_groups_session(self, *groups):
+        """lightweight session using the current user with hi-jacked groups"""
+        # use self.session.user.eid to get correct owned_by relation, unless explicit eid
+        with self.session.new_cnx() as cnx:
+            u = self.repo._build_user(cnx, self.session.user.eid)
+            u._groups = set(groups)
+        s = Session(u, self.repo)
+        return s
+
+    def qexecute(self, rql, args=None, build_descr=True):
+        """Execute `rql`, committing automatically for write queries."""
+        with self.session.new_cnx() as cnx:
+            try:
+                return self.o.execute(cnx, rql, args, build_descr)
+            finally:
+                if rql.startswith(('INSERT', 'DELETE', 'SET')):
+                    cnx.commit()
+
+
+class BasePlannerTC(BaseQuerierTC):
+    """Base test case for execution planner tests.
+
+    NOTE: `setup` (lower-case) is not run automatically by unittest; it is
+    invoked explicitly by concrete test classes, hence the name is kept.
+    """
+
+    def setup(self):
+        # XXX source_defs
+        self.o = self.repo.querier
+        # dict views are not indexable on Python 3, so .values()[0] would
+        # raise TypeError; use next(iter(...)) as BaseQuerierTC.setUp does
+        self.session = next(iter(self.repo._sessions.values()))
+        self.schema = self.o.schema
+        self.system = self.repo.system_source
+        do_monkey_patch()
+        self.repo.vreg.rqlhelper.backend = 'postgres' # so FTIRANK is considered
+
+    def tearDown(self):
+        undo_monkey_patch()
+
+    def _prepare_plan(self, cnx, rql, kwargs=None):
+        """Parse, solve and annotate `rql`, returning an execution plan."""
+        rqlst = self.o.parse(rql, annotate=True)
+        self.o.solutions(cnx, rqlst, kwargs)
+        if rqlst.TYPE == 'select':
+            self.repo.vreg.rqlhelper.annotate(rqlst)
+            for select in rqlst.children:
+                select.solutions.sort(key=lambda x: list(x.items()))
+        else:
+            rqlst.solutions.sort(key=lambda x: list(x.items()))
+        return self.o.plan_factory(rqlst, kwargs, cnx)
+
+
+# monkey patch some methods to get predictable results #######################
+
+from cubicweb import rqlrewrite
+# keep references to the original implementations so the monkey patches
+# below can delegate to them and undo_monkey_patch() can restore them
+_orig_iter_relations = rqlrewrite.iter_relations
+_orig_insert_snippets = rqlrewrite.RQLRewriter.insert_snippets
+_orig_build_variantes = rqlrewrite.RQLRewriter.build_variantes
+
+def _insert_snippets(self, snippets, varexistsmap=None):
+    # sort snippets first so insertion order is deterministic in tests
+    _orig_insert_snippets(self, sorted(snippets, key=snippet_key), varexistsmap)
+
+def _build_variantes(self, newsolutions):
+    """Wrap RQLRewriter.build_variantes to return variantes sorted in a
+    stable order (wrapped in DumbOrderedDict for deterministic iteration)."""
+    variantes = _orig_build_variantes(self, newsolutions)
+    sortedvariantes = []
+    for variante in variantes:
+        orderedkeys = sorted((k[1], k[2], v) for k, v in variante.items())
+        variante = DumbOrderedDict(sorted(variante.items(),
+                                          key=lambda a: (a[0][1], a[0][2], a[1])))
+        sortedvariantes.append( (orderedkeys, variante) )
+    return [v for ok, v in sorted(sortedvariantes)]
+
+from cubicweb.server.querier import ExecutionPlan
+_orig_check_permissions = ExecutionPlan._check_permissions
+
+def _check_permissions(*args, **kwargs):
+    """Wrap ExecutionPlan._check_permissions to sort its result mapping,
+    so plan comparisons in tests are deterministic."""
+    res, restricted = _orig_check_permissions(*args, **kwargs)
+    res = DumbOrderedDict(sorted(res.items(), key=lambda x: [y.items() for y in x[1]]))
+    return res, restricted
+
+def _dummy_check_permissions(self, rqlst):
+    # grant everything: used by RQLGeneratorTC to bypass security rewriting
+    return {(): rqlst.solutions}, set()
+
+from cubicweb.server import rqlannotation
+_orig_select_principal = rqlannotation._select_principal
+
+def _select_principal(scope, relations):
+    """Wrap rqlannotation._select_principal with a deterministic relation
+    sort order so the selected principal is stable across runs."""
+    def sort_key(something):
+        try:
+            return something.r_type
+        except AttributeError:
+            # (relation, something) tuples instead of bare relations
+            return (something[0].r_type, something[1])
+    return _orig_select_principal(scope, relations,
+                                  _sort=lambda rels: sorted(rels, key=sort_key))
+
+
+def _ordered_iter_relations(stinfo):
+    # sorted variant of rqlrewrite.iter_relations, for predictable output
+    return sorted(_orig_iter_relations(stinfo), key=lambda x:x.r_type)
+
+def do_monkey_patch():
+    """Install the deterministic wrappers defined above (see
+    undo_monkey_patch for the inverse operation)."""
+    rqlrewrite.iter_relations = _ordered_iter_relations
+    rqlrewrite.RQLRewriter.insert_snippets = _insert_snippets
+    rqlrewrite.RQLRewriter.build_variantes = _build_variantes
+    ExecutionPlan._check_permissions = _check_permissions
+    ExecutionPlan.tablesinorder = None
+
+def undo_monkey_patch():
+    """Restore the original implementations saved at import time.
+
+    NOTE(review): ExecutionPlan.tablesinorder, set by do_monkey_patch, is
+    not removed here -- presumably harmless, but worth verifying.
+    """
+    rqlrewrite.iter_relations = _orig_iter_relations
+    rqlrewrite.RQLRewriter.insert_snippets = _orig_insert_snippets
+    rqlrewrite.RQLRewriter.build_variantes = _orig_build_variantes
+    ExecutionPlan._check_permissions = _orig_check_permissions
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/stresstester.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/stresstester.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,196 @@
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see .
+""" Usage: %s [OPTIONS]
+
+Stress test a CubicWeb repository
+
+OPTIONS:
+ -h / --help
+ Display this help message and exit.
+
+ -u / --user
+ Connect as instead of being prompted to give it.
+ -p / --password
+ Automatically give for authentication instead of being prompted
+ to give it.
+
+ -n / --nb-times
+ Repeat queries times.
+ -t / --nb-threads
+ Execute queries in parallel threads.
+ -P / --profile
+ dumps profile results (hotshot) in
+ -o / --report-output
+ Write profiler report into rather than on stdout
+
+Copyright (c) 2003-2011 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
+http://www.logilab.fr/ -- mailto:contact@logilab.fr
+"""
+from __future__ import print_function
+
+import os
+import sys
+import threading
+import getopt
+import traceback
+from getpass import getpass
+from os.path import basename
+from time import clock
+
+from logilab.common.fileutils import lines
+from logilab.common.ureports import Table, TextWriter
+from cubicweb.server.repository import Repository
+from cubicweb.dbapi import Connection
+
+TB_LOCK = threading.Lock()
+
+class QueryExecutor:
+    """Run a list of queries `times` times through `cursor`, optionally
+    reporting per-query timings to a ProfileReporter.
+    """
+    def __init__(self, cursor, times, queries, reporter = None):
+        self._cursor = cursor
+        self._times = times
+        self._queries = queries
+        self._reporter = reporter
+
+    def run(self):
+        """Execute all queries repeatedly; stop at the first error."""
+        cursor = self._cursor
+        times = self._times
+        while times:
+            for index, query in enumerate(self._queries):
+                # time.clock(): CPU time on posix, wall time on windows
+                start = clock()
+                try:
+                    cursor.execute(query)
+                except Exception:
+                    # serialize traceback printing between worker threads
+                    TB_LOCK.acquire()
+                    traceback.print_exc()
+                    TB_LOCK.release()
+                    return
+                if self._reporter is not None:
+                    self._reporter.add_proftime(clock() - start, index)
+            times -= 1
+
+def usage(status=0):
+    """print usage string and exit
+
+    `status` is the process exit code (non-zero signals a usage error).
+    """
+    print(__doc__ % basename(sys.argv[0]))
+    sys.exit(status)
+
+
+class ProfileReporter:
+    """a profile reporter gathers all profile informations from several
+    threads and can write a report that summarizes all profile informations
+    """
+    # class-level lock: one reporter instance is shared by all worker threads
+    profiler_lock = threading.Lock()
+
+    def __init__(self, queries):
+        self._queries = tuple(queries)
+        # per-query (cumulated time, run count) pairs
+        self._profile_results = [(0., 0)] * len(self._queries)
+        # self._table_report = Table(3, rheaders = True)
+        len_max = max([len(query) for query in self._queries]) + 5
+        self._query_fmt = '%%%ds' % len_max
+
+    def add_proftime(self, elapsed_time, query_index):
+        """add a new time measure for query"""
+        ProfileReporter.profiler_lock.acquire()
+        cumul_time, times = self._profile_results[query_index]
+        cumul_time += elapsed_time
+        times += 1.
+        self._profile_results[query_index] = (cumul_time, times)
+        ProfileReporter.profiler_lock.release()
+
+    def dump_report(self, output = sys.stdout):
+        """dump report in 'output'"""
+        # flat list: 3 cells per row, consumed by the 3-column Table below
+        table_elems = ['RQL Query', 'Times', 'Avg Time']
+        total_time = 0.
+        for query, (cumul_time, times) in zip(self._queries, self._profile_results):
+            avg_time = cumul_time / float(times)
+            table_elems += [str(query), '%f' % times, '%f' % avg_time ]
+            total_time += cumul_time
+        table_elems.append('Total time :')
+        table_elems.append(str(total_time))
+        table_elems.append(' ')
+        table_layout = Table(3, rheaders = True, children = table_elems)
+        TextWriter().format(table_layout, output)
+        # output.write('\n'.join(tmp_output))
+ # output.write('\n'.join(tmp_output))
+
+
+def run(args):
+    """run the command line tool
+
+    args[0] is the instance id, args[1] the queries file (one query per
+    line, '#' lines being comments); see module docstring for options.
+    """
+    try:
+        opts, args = getopt.getopt(args, 'hn:t:u:p:P:o:', ['help', 'user=', 'password=',
+                                                           'nb-times=', 'nb-threads=',
+                                                           'profile', 'report-output=',])
+    except Exception as ex:
+        print(ex)
+        usage(1)
+    repeat = 100
+    threads = 1
+    user = os.environ.get('USER', os.environ.get('LOGNAME'))
+    password = None
+    report_output = sys.stdout
+    prof_file = None
+    for opt, val in opts:
+        if opt in ('-h', '--help'):
+            usage()
+        if opt in ('-u', '--user'):
+            user = val
+        elif opt in ('-p', '--password'):
+            password = val
+        elif opt in ('-n', '--nb-times'):
+            repeat = int(val)
+        elif opt in ('-t', '--nb-threads'):
+            threads = int(val)
+        elif opt in ('-P', '--profile'):
+            prof_file = val
+        elif opt in ('-o', '--report-output'):
+            report_output = open(val, 'w')
+    if len(args) != 2:
+        usage(1)
+    queries = [query for query in lines(args[1]) if not query.startswith('#')]
+    if user is None:
+        # NOTE(review): raw_input is python 2 only although this module
+        # imports print_function; port to six.moves.input for python 3
+        user = raw_input('login: ')
+    if password is None:
+        password = getpass('password: ')
+    from cubicweb.cwconfig import instance_configuration
+    config = instance_configuration(args[0])
+    # get local access to the repository
+    print("Creating repo", prof_file)
+    repo = Repository(config, prof_file)
+    cnxid = repo.connect(user, password=password)
+    # connection to the CubicWeb repository
+    repo_cnx = Connection(repo, cnxid)
+    repo_cursor = repo_cnx.cursor()
+    reporter = ProfileReporter(queries)
+    if threads > 1:
+        # one executor per thread, all sharing the same cursor and reporter
+        executors = []
+        while threads:
+            qe = QueryExecutor(repo_cursor, repeat, queries, reporter = reporter)
+            executors.append(qe)
+            thread = threading.Thread(target=qe.run)
+            qe.thread = thread
+            thread.start()
+            threads -= 1
+        for qe in executors:
+            qe.thread.join()
+##         for qe in executors:
+##             print qe.thread, repeat - qe._times, 'times'
+    else:
+        QueryExecutor(repo_cursor, repeat, queries, reporter = reporter).run()
+    reporter.dump_report(report_output)
+
+
+if __name__ == '__main__':
+ run(sys.argv[1:])
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/test/data/bootstrap_cubes
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/test/data/bootstrap_cubes Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1 @@
+person
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/test/data/cubes/__init__.py
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/test/data/cubes/i18ntestcube/__init__.py
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/test/data/cubes/i18ntestcube/__pkginfo__.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/test/data/cubes/i18ntestcube/__pkginfo__.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,18 @@
+# pylint: disable=W0622
+"""cubicweb i18n test cube application packaging information"""
+
+modname = 'i18ntestcube'
+distname = 'cubicweb-i18ntestcube'
+
+numversion = (0, 1, 0)
+version = '.'.join(str(num) for num in numversion)
+
+license = 'LGPL'
+author = 'LOGILAB S.A. (Paris, FRANCE)'
+author_email = 'contact@logilab.fr'
+description = 'forum'
+web = 'http://www.cubicweb.org/project/%s' % distname
+
+__depends__ = {'cubicweb': '>= 3.16.4',
+ }
+__recommends__ = {}
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/test/data/cubes/i18ntestcube/i18n/en.po.ref
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/test/data/cubes/i18ntestcube/i18n/en.po.ref Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,182 @@
+msgid ""
+msgstr ""
+"Project-Id-Version: cubicweb 3.16.5\n"
+"PO-Revision-Date: 2008-03-28 18:14+0100\n"
+"Last-Translator: Logilab Team \n"
+"Language-Team: fr \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: cubicweb-devtools\n"
+"Plural-Forms: nplurals=2; plural=(n > 1);\n"
+
+# schema pot file, generated on 2013-07-12 16:18:12
+#
+# singular and plural forms for each entity type
+# subject and object forms for each relation type
+# (no object form for final or symmetric relation types)
+msgid "Forum"
+msgstr ""
+
+msgid "Forum_plural"
+msgstr ""
+
+msgid "This Forum"
+msgstr ""
+
+msgid "This Forum:"
+msgstr ""
+
+msgid "New Forum"
+msgstr ""
+
+msgctxt "inlined:Forum.in_forum.object"
+msgid "add a ForumThread"
+msgstr ""
+
+msgctxt "inlined:Forum.in_forum.object"
+msgid "ForumThread"
+msgstr ""
+
+msgid "add ForumThread in_forum Forum object"
+msgstr ""
+
+msgid "add a Forum"
+msgstr ""
+
+msgid "add a ForumThread"
+msgstr ""
+
+msgid "creating ForumThread (ForumThread in_forum Forum %(linkto)s)"
+msgstr ""
+
+msgid "ForumThread"
+msgstr ""
+
+msgid "ForumThread_plural"
+msgstr ""
+
+msgid "This ForumThread"
+msgstr ""
+
+msgid "This ForumThread:"
+msgstr ""
+
+msgid "New ForumThread"
+msgstr ""
+
+msgid "content"
+msgstr ""
+
+msgctxt "ForumThread"
+msgid "content"
+msgstr ""
+
+msgid "content_format"
+msgstr ""
+
+msgctxt "ForumThread"
+msgid "content_format"
+msgstr ""
+
+msgctxt "Forum"
+msgid "description"
+msgstr ""
+
+msgctxt "Forum"
+msgid "description_format"
+msgstr ""
+
+msgid "in_forum"
+msgstr ""
+
+msgctxt "ForumThread"
+msgid "in_forum"
+msgstr ""
+
+msgctxt "Forum"
+msgid "in_forum_object"
+msgstr ""
+
+msgid "in_forum_object"
+msgstr ""
+
+msgid "interested_in"
+msgstr ""
+
+msgctxt "CWUser"
+msgid "interested_in"
+msgstr ""
+
+msgctxt "ForumThread"
+msgid "interested_in_object"
+msgstr ""
+
+msgctxt "Forum"
+msgid "interested_in_object"
+msgstr ""
+
+msgid "interested_in_object"
+msgstr ""
+
+msgid "nosy_list"
+msgstr ""
+
+msgctxt "ForumThread"
+msgid "nosy_list"
+msgstr ""
+
+msgctxt "Forum"
+msgid "nosy_list"
+msgstr ""
+
+msgctxt "CWUser"
+msgid "nosy_list_object"
+msgstr ""
+
+msgid "nosy_list_object"
+msgstr ""
+
+msgctxt "ForumThread"
+msgid "title"
+msgstr ""
+
+msgid "topic"
+msgstr ""
+
+msgctxt "Forum"
+msgid "topic"
+msgstr ""
+
+msgid "Topic"
+msgstr ""
+
+msgid "Description"
+msgstr ""
+
+msgid "Number of threads"
+msgstr ""
+
+msgid "Last activity"
+msgstr ""
+
+msgid ""
+"a long\n"
+"tranlated line\n"
+"hop."
+msgstr ""
+
+msgid "Subject"
+msgstr ""
+
+msgid "Created"
+msgstr ""
+
+msgid "Answers"
+msgstr ""
+
+msgid "Last answered"
+msgstr ""
+
+msgid "This forum does not have any thread yet."
+msgstr ""
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/test/data/cubes/i18ntestcube/schema.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/test/data/cubes/i18ntestcube/schema.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+# copyright 2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr -- mailto:contact@logilab.fr
+#
+# This program is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""cubicweb-forum schema"""
+
+from yams.buildobjs import (String, RichString, EntityType,
+ RelationDefinition, SubjectRelation)
+from yams.reader import context
+
+class Forum(EntityType):
+ topic = String(maxsize=50, required=True, unique=True)
+ description = RichString()
+
+class ForumThread(EntityType):
+ __permissions__ = {
+ 'read': ('managers', 'users'),
+ 'add': ('managers', 'users'),
+ 'update': ('managers', 'owners'),
+ 'delete': ('managers', 'owners')
+ }
+ title = String(required=True, fulltextindexed=True, maxsize=256)
+ content = RichString(required=True, fulltextindexed=True)
+ in_forum = SubjectRelation('Forum', cardinality='1*', inlined=True,
+ composite='object')
+class interested_in(RelationDefinition):
+ subject = 'CWUser'
+ object = ('ForumThread', 'Forum')
+
+class nosy_list(RelationDefinition):
+ subject = ('Forum', 'ForumThread')
+ object = 'CWUser'
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/test/data/cubes/i18ntestcube/views.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/test/data/cubes/i18ntestcube/views.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,61 @@
+# copyright 2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr -- mailto:contact@logilab.fr
+#
+# This program is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""cubicweb-forum views/forms/actions/components for web ui"""
+
+from cubicweb import view
+from cubicweb.predicates import is_instance
+from cubicweb.web.views import primary, baseviews, uicfg
+from cubicweb.web.views.uicfg import autoform_section as afs
+
+class MyAFS(uicfg.AutoformSectionRelationTags):
+ __select__ = is_instance('ForumThread')
+
+_myafs = MyAFS()
+
+_myafs.tag_object_of(('*', 'in_forum', 'Forum'), 'main', 'inlined')
+
+afs.tag_object_of(('*', 'in_forum', 'Forum'), 'main', 'inlined')
+
+
+class ForumSameETypeListView(baseviews.SameETypeListView):
+ __select__ = baseviews.SameETypeListView.__select__ & is_instance('Forum')
+
+ def call(self, **kwargs):
+ _ = self._cw._
+ _('Topic'), _('Description')
+ _('Number of threads'), _('Last activity')
+ _('''a long
+tranlated line
+hop.''')
+
+
+class ForumLastActivity(view.EntityView):
+ __regid__ = 'forum_last_activity'
+ __select__ = view.EntityView.__select__ & is_instance('Forum')
+
+
+class ForumPrimaryView(primary.PrimaryView):
+ __select__ = primary.PrimaryView.__select__ & is_instance('Forum')
+
+ def render_entity_attributes(self, entity):
+ _ = self._cw._
+ _('Subject'), _('Created'), _('Answers'),
+ _('Last answered')
+ _('This forum does not have any thread yet.')
+
+class ForumThreadPrimaryView(primary.PrimaryView):
+ __select__ = primary.PrimaryView.__select__ & is_instance('ForumThread')
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/test/data/firstnames.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/test/data/firstnames.txt Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1599 @@
+ash
+pasqualino
+asl
+benjy
+wolodymyr
+dionysos
+launce
+khaleel
+sondra
+maaike
+lavinia
+giosu
+daisy
+xiang
+belgin
+edda
+olympia
+treasa
+katya
+misi
+ville
+mahon
+yngve
+moritz
+elder
+gawel
+horsa
+blossom
+deanne
+imelda
+deanna
+cairbre
+eddy
+horst
+gaenor
+breanne
+hewie
+breanna
+jarvis
+jamin
+loise
+jamil
+fingall
+giselle
+jamie
+shinju
+gisella
+akilina
+jordan
+gertie
+cardea
+eiran
+valdemar
+sebestyen
+galia
+bride
+greg
+fausta
+eniola
+rudo
+pratibha
+kisha
+mickey
+charlotte
+karp
+charlotta
+nunzia
+nunzio
+patrice
+kara
+hallam
+collyn
+kari
+karl
+dusan
+lia
+cherokee
+lim
+lin
+yvain
+madlyn
+liv
+lir
+lis
+tullio
+norma
+liz
+lettice
+kae
+kaj
+kai
+tatyanna
+kam
+freddie
+elton
+meinir
+blaise
+kat
+japeth
+alpha
+kay
+mack
+jayna
+jayne
+hormazed
+lupita
+humbert
+vitya
+neoptolemus
+richardine
+hallvard
+diogo
+larkin
+ravi
+louiza
+hermogenes
+alanis
+yadira
+leandra
+milburga
+leandro
+sorin
+randi
+kaleb
+rogerio
+sanna
+kalea
+justice
+kaleo
+dijana
+shprintza
+randy
+colby
+otthild
+mariamne
+patrycja
+darwin
+christal
+khalida
+kaley
+allegria
+vidya
+renaud
+sisel
+suibhne
+lonny
+julienne
+calliope
+rocco
+alexander
+aristide
+edwige
+xzavier
+rajesh
+egil
+gell
+mahavir
+charline
+sigi
+theophania
+maurice
+afon
+konnor
+kiran
+angie
+jalila
+tolly
+havva
+metody
+engel
+philander
+lancelot
+nathalie
+leilah
+dane
+elm
+chatzkel
+keaton
+ashlie
+kudret
+rava
+danette
+eachann
+wilburn
+jeff
+kazimiera
+rukmini
+lauryn
+femie
+mahvash
+berkant
+alesha
+daedalus
+aphra
+karla
+tetty
+agostinho
+bolivar
+savitri
+karly
+forbes
+vencesl
+bahija
+walter
+imam
+iman
+krzys
+imad
+elsa
+neville
+tracie
+else
+anthony
+shevon
+katherine
+marylou
+wojtek
+oddmund
+tristand
+areli
+valkyrie
+garfield
+wyatt
+luanne
+ossia
+luanna
+luciana
+guido
+luciano
+shachar
+astraea
+paco
+leland
+avra
+amenhotep
+kekoa
+gorden
+sameera
+boutros
+ruaidhr
+friedemann
+darrell
+hideaki
+petar
+donatien
+fannie
+eliana
+iason
+fedora
+grant
+shay
+estee
+marcelle
+marcella
+lothair
+shae
+ester
+marcello
+estev
+cassian
+allyson
+dima
+goodwin
+cezar
+blair
+monique
+elwin
+ihsan
+olufunmilayo
+arturo
+nanaia
+greetje
+clovia
+beowulf
+vassily
+madail
+emmeline
+guendolen
+nandag
+eilish
+sakari
+elisheva
+crispin
+aksel
+alvin
+cernunnos
+feardorcha
+heshel
+afra
+iqbal
+pryce
+siddhartha
+mikkel
+alvis
+myrtie
+khajag
+yesenia
+nikki
+grigory
+grigore
+maeve
+rebeca
+diederick
+maeva
+grigori
+cheryl
+rahim
+marco
+marci
+stein
+trista
+olufemi
+emmanuelle
+nadezhda
+wahid
+marcy
+vanda
+lavra
+alida
+amara
+hipolito
+valent
+renatus
+moira
+donny
+lucretia
+donna
+vesta
+cadoc
+reetta
+erma
+markku
+rosamond
+gracia
+tuyet
+sieffre
+gracie
+kodey
+debra
+photine
+jacek
+yanick
+isiah
+khordad
+rui
+stef
+rub
+foma
+sten
+kassy
+rue
+nelly
+merrick
+ayn
+macy
+vincente
+anargyros
+rut
+lenox
+jenessa
+faith
+barnaby
+manny
+jyotsana
+hasan
+iakopa
+edvard
+narcisa
+loredana
+ida
+torborg
+rollo
+stamatios
+pero
+natalya
+maudie
+carlton
+paulina
+aliyah
+lanty
+tadg
+deiniol
+dwayne
+alison
+fabius
+rbj
+latasha
+maarit
+roxanna
+katinka
+publius
+augustijn
+ferdy
+khadiga
+akosua
+rees
+quetzalcoatl
+kristian
+larry
+reed
+krystal
+micheil
+paolo
+chelsey
+ute
+paola
+hamilcar
+malin
+deangelo
+munir
+velma
+malik
+utz
+malie
+govad
+chelsea
+malia
+willem
+seetha
+andrina
+rupert
+myrrine
+theodoros
+tito
+ivonne
+nan
+beryl
+nat
+tawnie
+korn
+marzena
+tinek
+hermine
+kora
+frances
+william
+tianna
+evan
+kory
+merletta
+kort
+nevan
+naheed
+heath
+tyreek
+shona
+amyas
+urjasz
+katy
+gu
+gr
+hilde
+mehmud
+gy
+hilda
+psyche
+olive
+nuno
+vinnie
+ga
+kato
+kata
+jeunesse
+kate
+chandrakant
+caoilainn
+arik
+rhonda
+leocadio
+euan
+aric
+leocadia
+aria
+bronwen
+marcellin
+vladislav
+ferapont
+nichole
+kizzy
+duilio
+jafet
+maas
+tue
+felicity
+mansoor
+rfhlaith
+brigitta
+fishke
+akua
+izabela
+olaf
+vittore
+michael
+skar
+ryan
+gretta
+alvena
+olav
+brigitte
+euterpe
+barbara
+aiolos
+carter
+khalifa
+tziporah
+honora
+feich
+marilena
+onesime
+theo
+gunvor
+sa'id
+katlyn
+nicholas
+preeti
+etzel
+ekewaka
+vinal
+jubal
+ramsey
+rowley
+jocelin
+alfsigr
+kalliope
+micah
+frantisek
+holger
+alysha
+chant
+derry
+corin
+janus
+morcant
+chang
+corie
+gena
+randa
+joost
+vasile
+clark
+clare
+wim
+wil
+clara
+danika
+jory
+eleonoora
+ayelet
+caligula
+zakiah
+kilie
+meliora
+ottavio
+idoya
+ninette
+hudson
+deon
+gawdat
+frida
+jonathan
+reynold
+laocadia
+cerise
+cosmo
+hezekiah
+winston
+isak
+allyn
+noelene
+trajan
+vijaya
+cosma
+tresha
+astrithr
+priya
+astrophel
+pocahontas
+eliphalet
+stafford
+salah
+salal
+pauliina
+lazer
+feidhlim
+jackalyn
+kenny
+alayna
+wilfried
+wasim
+blaine
+femke
+jehu
+kenna
+lenore
+nkechi
+letizia
+kian
+kayleigh
+spartacus
+manuela
+leyton
+lesley
+georg
+ferdinand
+cuauhtemoc
+aeron
+lavrenti
+nyx
+ronald
+yoshiko
+gundula
+eluf
+toma
+riccardo
+ruadh
+matylda
+winter
+mayson
+llew
+clytia
+jamila
+fariha
+aegle
+octavio
+steafan
+jacqui
+mikelo
+dovid
+modestus
+blake
+jeanna
+alessa
+conway
+brook
+sunday
+kizzie
+hande
+catherine
+eckhard
+rr
+gwyneth
+aukusti
+placid
+rufino
+kyleigh
+helah
+benoite
+eluned
+sanaz
+cnaeus
+ettie
+benaiah
+brendan
+wenonah
+nye
+candela
+dragan
+sanda
+naveen
+margar
+naveed
+austen
+sandu
+britta
+brodie
+morton
+kamilla
+sandy
+guilherme
+dorothea
+calix
+braxton
+wigburg
+tryphena
+ricky
+may
+sylwia
+libor
+marek
+ece
+trinity
+katsuro
+tercero
+'ismat
+mared
+jill
+amato
+achim
+princess
+jaquelyn
+eustathios
+tapio
+aglea
+kees
+evstathios
+edwyna
+austin
+cristian
+jouko
+nikandros
+leonora
+kaitlynn
+christoph
+mai
+parthalan
+tancredo
+rosaleen
+lynnette
+yasamin
+encarnacion
+gerolt
+ionut
+harmon
+ailbhe
+islwyn
+muirenn
+nyah
+mariana
+viktor
+greta
+kreszentia
+grete
+hormazd
+foka
+poseidon
+kazimir
+ultan
+ben
+sudhir
+bea
+bee
+saburo
+elnora
+ber
+michelyne
+clytemnestra
+yardena
+gavrel
+michelangelo
+wystan
+odhiambo
+miquel
+bertha
+su
+berthe
+alisia
+kelley
+leonhard
+rodger
+ewald
+oluwaseyi
+celandine
+kunegunda
+luisa
+khayyam
+iisakki
+luise
+ligia
+zaina
+tatiana
+siarl
+jorge
+bronislaw
+bronislav
+montana
+edric
+miloslava
+achilles
+donaldina
+wilfredo
+laurens
+haifa
+stelian
+glenice
+calvino
+rodica
+hulda
+indy
+uri
+laurena
+tzeitel
+laurene
+urs
+danita
+platon
+parker
+chadwick
+lorne
+narinder
+theodoric
+florentina
+ambrosine
+nikephoros
+kapel
+aeolus
+cenek
+hadi
+perle
+alyona
+cyril
+perla
+cicely
+darby
+madhav
+hector
+ethan
+aretha
+ilker
+avdotya
+boris
+sassa
+misty
+bonaventure
+kiefer
+emmet
+arkadios
+farrah
+tivoli
+pietari
+mohammed
+shoshana
+felipe
+felipa
+maurene
+tancred
+raymonde
+sho
+faron
+arundhati
+esteri
+silvanus
+nuha
+aloisia
+baris
+tammie
+fabricio
+lux
+luz
+driskoll
+tyra
+luc
+marsha
+luk
+aron
+joye
+ken
+gethsemane
+kelan
+yuko
+merry
+proserpine
+precious
+suibne
+mindy
+vitus
+olga
+jia
+kalysta
+angharad
+ciera
+careen
+inglebert
+apphia
+muadhnait
+christen
+rebekah
+dominique
+gita
+tori
+harmonie
+anatolius
+harmonia
+denise
+johann
+johano
+denisa
+viktoria
+padmini
+johana
+christer
+barakat
+willy
+sari
+fitzroy
+yaw
+sara
+yan
+quim
+quin
+yaa
+katelin
+pontus
+raelene
+alexus
+gwandoya
+venceslav
+ott
+artemidoros
+zaynab
+folant
+salman
+ealdgy
+randal
+macey
+heriberto
+kimball
+ekin
+dema
+evelyn
+demi
+pip
+simona
+daniil
+emmerson
+kausalya
+kortney
+gavriil
+yered
+parth
+fido
+solange
+oona
+anka
+renie
+anke
+habakkuk
+linwood
+teofilo
+grazyna
+enitan
+bhaskar
+finnian
+perseus
+mordechai
+fyodor
+ashley
+philo
+i
+hecate
+phile
+theodor
+kiaran
+ashlee
+dollie
+savannah
+upton
+sofia
+noak
+sofie
+laurel
+lauren
+dubaku
+zacharjasz
+patricio
+trudi
+sophus
+vida
+patricia
+trudy
+tapani
+mavreena
+jesper
+sandrine
+sonia
+livio
+mikolaj
+laurine
+livia
+finnegan
+oprah
+waheed
+lavonne
+perdita
+liviu
+imen
+attila
+lincoln
+fernanda
+evrard
+fernande
+jaana
+artair
+fernando
+candy
+cande
+kazimierz
+kaija
+shamgar
+laxmi
+martie
+page
+candi
+brody
+piaras
+shea
+herbie
+shem
+kristaps
+sher
+cleveland
+carreen
+margaid
+phinehas
+justina
+wendi
+linus
+wenda
+matrona
+christiane
+wendy
+kerensa
+roch
+fergal
+fanny
+kamila
+oswin
+camilo
+everette
+katashi
+myron
+ridley
+shavonne
+blythe
+nader
+marlowe
+miha
+carolyn
+glenn
+gadar
+rainard
+sybella
+raquel
+rozabela
+serhat
+bashemath
+jing
+gobnet
+yentl
+sylvana
+dolores
+sanjit
+tamsin
+sanjiv
+innes
+daniela
+daniele
+margr
+keysha
+rogelio
+ean
+hj
+philipp
+valerian
+marge
+gail
+margh
+gaia
+engelbert
+kathie
+artemisia
+margo
+stefan
+pansy
+swanhilda
+swanhilde
+alessio
+beata
+beate
+babur
+beatrice
+eris
+erin
+maura
+camryn
+conan
+erik
+krysia
+nigelia
+mauri
+averill
+draco
+eric
+sophronius
+mauro
+diego
+simcha
+malachy
+barth
+maoilios
+germaine
+malachi
+katariina
+lianne
+ferdinando
+donagh
+kelemen
+taletta
+star
+gilah
+faustus
+lfwine
+rayna
+gotthard
+sa'd
+stan
+klemen
+pranay
+howie
+dewey
+tiarnan
+katherina
+uzma
+jabril
+hakan
+martin
+elsie
+cleve
+imani
+moshe
+padma
+inmaculada
+augustine
+trenton
+ghislain
+aiden
+alfhild
+ireneus
+gottschalk
+andra
+jahzeel
+andro
+fredrik
+wynter
+kohar
+tobin
+giustino
+buddy
+marcos
+mieszko
+giustina
+khalil
+aur
+helladius
+riccarda
+elettra
+glykeria
+yeva
+trahaearn
+ulisse
+wilfred
+sorrel
+saara
+ekwueme
+sarita
+finella
+waldo
+herbert
+elissa
+bevan
+lavern
+till
+ruxandra
+lavender
+ghalib
+eldon
+masterman
+tameka
+mihajlo
+mahin
+neo
+asim
+jordon
+pace
+ned
+giampiero
+asia
+nea
+haze
+bearach
+cheng
+pieter
+yonah
+chikako
+maverick
+fonsie
+ozzy
+meg
+mitxel
+filbert
+mel
+neves
+henrik
+mei
+hilaire
+drew
+deemer
+liborio
+dubhghlas
+bogdan
+dipak
+rapha
+golda
+maighread
+masha
+pranciskis
+mitchell
+titilayo
+aydin
+ippolit
+toiba
+omar
+cindy
+alexandrina
+lyubov
+hiltraud
+joshua
+moray
+baptiste
+bahiyya
+marquita
+benedicta
+reagan
+latifah
+scevola
+ardashir
+pakpao
+topaz
+janine
+omolara
+janina
+morag
+euripides
+lennart
+orb
+helmuth
+armo
+diederik
+lennard
+raeburn
+oscar
+odell
+ualan
+noemi
+melba
+berlin
+lazarus
+merla
+meera
+anastas
+rhamantus
+yussel
+meshullam
+esdras
+kumar
+flora
+norwood
+rio
+apollinaris
+oleg
+rim
+nadzeija
+akio
+akim
+efisio
+jayda
+olek
+rowanne
+honey
+karola
+chetana
+candelas
+friede
+phaedrus
+frieda
+joann
+braidy
+hitomi
+kieron
+dakarai
+teofil
+dervila
+ria
+pietrina
+becky
+alechjo
+santos
+egon
+olwin
+ove
+balthazar
+reeta
+becka
+tillo
+royce
+peninnah
+earnestine
+janis
+jakab
+janie
+rosalba
+hosanna
+aharon
+fife
+zacharias
+fifi
+aleesha
+murray
+helena
+helene
+rashmi
+afia
+oswald
+zachariah
+shawnee
+pius
+zdenek
+kichiro
+melchiorre
+erland
+yaroslava
+anushka
+cree
+iser
+rachel
+anik
+fabiola
+ania
+aneurin
+hernando
+ernesto
+ernesta
+astor
+manasseh
+naphtali
+shai
+lorena
+lazar
+luce
+lorenz
+luca
+briana
+rosemary
+dawid
+nava
+payton
+linos
+aida
+gunne
+milan
+tuomas
+sahar
+doug
+mikala
+dawn
+vincenza
+saturninus
+channah
+mandy
+reuven
+cormag
+cormac
+mandi
+sachie
+ladonna
+phuong
+tasha
+ramon
+hashim
+fachtna
+euphemia
+tisha
+jozafat
+horatius
+imke
+venus
+rodolf
+binyamin
+cosmin
+oluwafunmilayo
+nekane
+loup
+kohinoor
+teuvo
+xue
+innokenti
+vincenzo
+kiley
+isa
+hannibal
+vijay
+kornelia
+afanasy
+vittorio
+tuor
+adalia
+damayanti
+afanasi
+grady
+evangelos
+ermete
+brock
+bonita
+arisha
+pelagia
+solvej
+parthenope
+peggie
+kierra
+jozefa
+garry
+giuditta
+ladislas
+jozefo
+swietoslaw
+yildiz
+nasira
+eshe
+helen
+gretchen
+shekhar
+daren
+lenuta
+dymphna
+daina
+matteo
+berjouhi
+jerusha
+solomon
+gernot
+murtagh
+meaveen
+godwin
+ladislao
+minh
+hachiro
+farquhar
+ichabod
+mina
+caleb
+veera
+ginger
+ming
+jaynie
+sharyn
+seonag
+ferdie
+ilana
+gabriela
+gabriele
+lloren
+hooda
+mabelle
+timeus
+teagan
+gorka
+ulrich
+philadelphia
+razvan
+lamprecht
+marit
+kean
+marin
+mario
+rhonwen
+vilko
+konstantin
+tyr
+maria
+fastred
+kazuki
+krister
+don
+dom
+iekika
+ruben
+m
+calanthe
+luchjo
+vicki
+sheryl
+afanen
+kirabo
+dov
+kristel
+dot
+kristen
+pavao
+donelle
+antti
+donella
+katerina
+liza
+wladyslaw
+gerlach
+hrodohaidis
+samnang
+ashok
+raelyn
+tipene
+kallias
+kun
+gebhard
+folke
+katica
+lennie
+rupinder
+maryann
+adolphus
+lachtna
+petri
+monica
+kyriakos
+brannon
+deforest
+shankara
+hourig
+haniyya
+christopher
+griogair
+saturn
+tola
+earl
+decebal
+bas
+petra
+adelia
+cleto
+bao
+bal
+bai
+julien
+clarette
+dimitar
+fioralba
+tommie
+domhnall
+ragnhei
+gunnar
+ailill
+juliet
+pete
+vasya
+peta
+duff
+imaculada
+peti
+manola
+kolab
+petr
+neriah
+manolo
+edoardo
+onora
+elisud
+graciano
+fayza
+as'ad
+romola
+vernon
+pluto
+genevra
+yahweh
+mukesh
+fiacre
+sudarshana
+shahriar
+athanasius
+una
+casimir
+derval
+ernst
+sherilyn
+taranis
+enzo
+bedelia
+winnie
+kalyan
+jinan
+plamen
+quinn
+monat
+alcaeus
+mathieu
+aindri
+raffaella
+armin
+lovell
+cyrus
+chelo
+sidonius
+basia
+tina
+basil
+basim
+fuad
+riley
+tracee
+chun
+talia
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/test/data/schema.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/test/data/schema.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,32 @@
+# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+"""
+from yams.buildobjs import EntityType, SubjectRelation, String, Int, Date
+
+from cubes.person.schema import Person
+
+Person.add_relation(Date(), 'birthday')
+
+class Bug(EntityType):
+ title = String(maxsize=64, required=True, fulltextindexed=True)
+ severity = String(vocabulary=('important', 'normal', 'minor'), default='normal')
+ cost = Int()
+ description = String(maxsize=4096, fulltextindexed=True)
+ identical_to = SubjectRelation('Bug', symmetric=True)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/test/data/static/js_examples/dep_1.js
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/test/data/static/js_examples/dep_1.js Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1 @@
+a = 4;
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/test/data/static/js_examples/deps_2.js
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/test/data/static/js_examples/deps_2.js Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1 @@
+b = a +2;
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/test/data/static/js_examples/test_simple_failure.js
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/test/data/static/js_examples/test_simple_failure.js Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,18 @@
+$(document).ready(function() {
+
+ QUnit.module("air");
+
+ QUnit.test("test 1", function (assert) {
+ assert.equal(2, 4);
+ });
+
+ QUnit.test("test 2", function (assert) {
+ assert.equal('', '45');
+ assert.equal('1024', '32');
+ });
+
+ QUnit.module("able");
+ QUnit.test("test 3", function (assert) {
+ assert.deepEqual(1, 1);
+ });
+});
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/test/data/static/js_examples/test_simple_success.js
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/test/data/static/js_examples/test_simple_success.js Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,17 @@
+$(document).ready(function() {
+
+ QUnit.module("air");
+
+ QUnit.test("test 1", function (assert) {
+ assert.equal(2, 2);
+ });
+
+ QUnit.test("test 2", function (assert) {
+ assert.equal('45', '45');
+ });
+
+ QUnit.module("able");
+ QUnit.test("test 3", function (assert) {
+ assert.deepEqual(1, 1);
+ });
+});
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/test/data/static/js_examples/test_with_dep.js
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/test/data/static/js_examples/test_with_dep.js Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,9 @@
+$(document).ready(function() {
+
+ QUnit.module("air");
+
+ QUnit.test("test 1", function (assert) {
+ assert.equal(a, 4);
+ });
+
+});
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/test/data/static/js_examples/test_with_ordered_deps.js
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/test/data/static/js_examples/test_with_ordered_deps.js Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,9 @@
+$(document).ready(function() {
+
+ QUnit.module("air");
+
+ QUnit.test("test 1", function (assert) {
+ assert.equal(b, 6);
+ });
+
+});
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/test/data/static/js_examples/utils.js
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/test/data/static/js_examples/utils.js Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,29 @@
+function datetuple(d) {
+ return [d.getFullYear(), d.getMonth()+1, d.getDate(),
+ d.getHours(), d.getMinutes()];
+}
+
+function pprint(obj) {
+ print('{');
+ for(k in obj) {
+ print(' ' + k + ' = ' + obj[k]);
+ }
+ print('}');
+}
+
+function arrayrepr(array) {
+ return '[' + array.join(', ') + ']';
+}
+
+function assertArrayEquals(array1, array2) {
+ if (array1.length != array2.length) {
+ throw new crosscheck.AssertionFailure(array1.join(', ') + ' != ' + array2.join(', '));
+ }
+    for (var i=0; i<array1.length; i++) {
+	if (array1[i] != array2[i]) {
+	    throw new crosscheck.AssertionFailure(arrayrepr(array1) + ' != ' + arrayrepr(array2));
+	}
+    }
+}
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/test/data/views.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/test/data/views.py	Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,46 @@
+# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""only for unit tests !"""
+
+from cubicweb.view import EntityView
+from cubicweb.predicates import is_instance
+
+HTML_PAGE = u"""<html>
+ <head><title>need a title</title></head>
+ <body>
+  <h1>Hello World !</h1>
+ </body>
+</html>
+"""
+
+class SimpleView(EntityView):
+ __regid__ = 'simple'
+ __select__ = is_instance('Bug',)
+
+ def call(self, **kwargs):
+ self.cell_call(0, 0)
+
+ def cell_call(self, row, col):
+ self.w(HTML_PAGE)
+
+class RaisingView(EntityView):
+ __regid__ = 'raising'
+ __select__ = is_instance('Bug',)
+
+ def cell_call(self, row, col):
+ raise ValueError()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/test/requirements.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/test/requirements.txt Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,3 @@
+Twisted
+webtest
+cubicweb-person
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/test/unittest_dbfill.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/test/unittest_dbfill.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,121 @@
+# -*- coding: iso-8859-1 -*-
+# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""unit tests for database value generator"""
+
+import os.path as osp
+import re
+import datetime
+import io
+
+from six.moves import range
+
+from logilab.common.testlib import TestCase, unittest_main
+
+from cubicweb.devtools.fill import ValueGenerator, make_tel
+from cubicweb.devtools import ApptestConfiguration
+
+DATADIR = osp.join(osp.abspath(osp.dirname(__file__)), 'data')
+ISODATE_SRE = re.compile('(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})$')
+
+
+class MyValueGenerator(ValueGenerator):
+
+ def generate_Bug_severity(self, entity, index):
+ return u'dangerous'
+
+ def generate_Any_description(self, entity, index, format=None):
+ return u'yo'
+
+
+class ValueGeneratorTC(TestCase):
+ """test case for ValueGenerator"""
+
+ def _choice_func(self, etype, attrname):
+ try:
+ return getattr(self, '_available_%s_%s' % (etype, attrname))(etype, attrname)
+ except AttributeError:
+ return None
+
+ def _available_Person_firstname(self, etype, attrname):
+ return [f.strip() for f in io.open(osp.join(DATADIR, 'firstnames.txt'), encoding='latin1')]
+
+ def setUp(self):
+ config = ApptestConfiguration('data', apphome=DATADIR)
+ config.bootstrap_cubes()
+ schema = config.load_schema()
+ e_schema = schema.eschema('Person')
+ self.person_valgen = ValueGenerator(e_schema, self._choice_func)
+ e_schema = schema.eschema('Bug')
+ self.bug_valgen = MyValueGenerator(e_schema)
+ self.config = config
+
+ def test_string(self):
+ """test string generation"""
+ surname = self.person_valgen.generate_attribute_value({}, 'surname', 12)
+ self.assertEqual(surname, u'&surname12')
+
+ def test_domain_value(self):
+ """test value generation from a given domain value"""
+ firstname = self.person_valgen.generate_attribute_value({}, 'firstname', 12)
+ possible_choices = self._choice_func('Person', 'firstname')
+ self.assertTrue(firstname in possible_choices,
+ '%s not in %s' % (firstname, possible_choices))
+
+ def test_choice(self):
+ """test choice generation"""
+ # Test for random index
+ for index in range(5):
+ sx_value = self.person_valgen.generate_attribute_value({}, 'civility', index)
+ self.assertTrue(sx_value in ('Mr', 'Mrs', 'Ms'))
+
+ def test_integer(self):
+ """test integer generation"""
+ # Test for random index
+ for index in range(5):
+ cost_value = self.bug_valgen.generate_attribute_value({}, 'cost', index)
+ self.assertIn(cost_value, list(range(index+1)))
+
+ def test_date(self):
+ """test date generation"""
+ # Test for random index
+ for index in range(10):
+ date_value = self.person_valgen.generate_attribute_value({}, 'birthday', index)
+ self.assertTrue(isinstance(date_value, datetime.date))
+
+ def test_phone(self):
+ """tests make_tel utility"""
+ self.assertEqual(make_tel(22030405), '22 03 04 05')
+
+ def test_customized_generation(self):
+ self.assertEqual(self.bug_valgen.generate_attribute_value({}, 'severity', 12),
+ u'dangerous')
+ self.assertEqual(self.bug_valgen.generate_attribute_value({}, 'description', 12),
+ u'yo')
+ self.assertEqual(self.person_valgen.generate_attribute_value({}, 'description', 12),
+ u'yo')
+
+
+class ConstraintInsertionTC(TestCase):
+
+ def test_writeme(self):
+ self.skipTest('Test automatic insertion / Schema Constraints')
+
+
+if __name__ == '__main__':
+ unittest_main()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/test/unittest_devctl.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/test/unittest_devctl.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,48 @@
+# copyright 2003-2015 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""unit tests for cubicweb-ctl commands from devtools"""
+
+import os.path as osp
+import sys
+import tempfile
+import shutil
+from subprocess import Popen, PIPE, STDOUT
+from unittest import TestCase
+
+
+class CubicWebCtlTC(TestCase):
+ """test case for devtools commands"""
+
+ def test_newcube(self):
+ cwctl = osp.abspath(osp.join(osp.dirname(__file__),
+ '../../../bin/cubicweb-ctl'))
+
+ tmpdir = tempfile.mkdtemp(prefix="temp-cwctl-newcube")
+ try:
+ cmd = [sys.executable, cwctl, 'newcube',
+ '--directory', tmpdir, 'foo']
+ proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
+ stdout, _ = proc.communicate(b'short_desc\n')
+ finally:
+ shutil.rmtree(tmpdir, ignore_errors=True)
+ self.assertEqual(proc.returncode, 0, msg=stdout)
+
+
+if __name__ == '__main__':
+ from unittest import main
+ main()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/test/unittest_fill.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/test/unittest_fill.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,70 @@
+# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""unit tests for cubicweb.devtools.fill module
+
+"""
+import re
+
+from logilab.common.testlib import TestCase, unittest_main
+
+from cubicweb.devtools.fill import ValueGenerator, _ValueGenerator
+
+ISODATE_SRE = re.compile('(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})$')
+
+
+class AutoExtendableTC(TestCase):
+
+ def setUp(self):
+ self.attrvalues = dir(_ValueGenerator)
+
+ def tearDown(self):
+ attrvalues = set(dir(_ValueGenerator))
+ for attrname in attrvalues - set(self.attrvalues):
+ delattr(_ValueGenerator, attrname)
+
+
+ def test_autoextend(self):
+ self.assertNotIn('generate_server', dir(ValueGenerator))
+ class MyValueGenerator(ValueGenerator):
+ def generate_server(self, index):
+ return attrname
+ self.assertIn('generate_server', dir(ValueGenerator))
+
+
+ def test_bad_signature_detection(self):
+ self.assertNotIn('generate_server', dir(ValueGenerator))
+ try:
+ class MyValueGenerator(ValueGenerator):
+ def generate_server(self):
+ pass
+ except TypeError:
+ self.assertNotIn('generate_server', dir(ValueGenerator))
+ else:
+ self.fail('TypeError not raised')
+
+
+ def test_signature_extension(self):
+ self.assertNotIn('generate_server', dir(ValueGenerator))
+ class MyValueGenerator(ValueGenerator):
+ def generate_server(self, index, foo):
+ pass
+ self.assertIn('generate_server', dir(ValueGenerator))
+
+
+if __name__ == '__main__':
+ unittest_main()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/test/unittest_httptest.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/test/unittest_httptest.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,70 @@
+# copyright 2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""unittest for cubicweb.devtools.httptest module"""
+
+from six.moves import http_client
+
+from logilab.common.testlib import Tags
+from cubicweb.devtools.httptest import CubicWebServerTC
+
+
+class TwistedCWAnonTC(CubicWebServerTC):
+
+ def test_response(self):
+ try:
+ response = self.web_get()
+ except http_client.NotConnected as ex:
+ self.fail("Can't connection to test server: %s" % ex)
+
+ def test_response_anon(self):
+ response = self.web_get()
+ self.assertEqual(response.status, http_client.OK)
+
+ def test_base_url(self):
+ if self.config['base-url'] not in self.web_get().read():
+ self.fail('no mention of base url in retrieved page')
+
+
+class TwistedCWIdentTC(CubicWebServerTC):
+ test_db_id = 'httptest-cwident'
+ anonymous_allowed = False
+ tags = CubicWebServerTC.tags | Tags(('auth',))
+
+ def test_response_denied(self):
+ response = self.web_get()
+ self.assertEqual(response.status, http_client.FORBIDDEN)
+
+ def test_login(self):
+ response = self.web_get()
+ if response.status != http_client.FORBIDDEN:
+ self.skipTest('Already authenticated, "test_response_denied" must have failed')
+ # login
+ self.web_login(self.admlogin, self.admpassword)
+ response = self.web_get()
+ self.assertEqual(response.status, http_client.OK, response.body)
+ # logout
+ self.web_logout()
+ response = self.web_get()
+ self.assertEqual(response.status, http_client.FORBIDDEN, response.body)
+
+
+
+
+if __name__ == '__main__':
+ from logilab.common.testlib import unittest_main
+ unittest_main()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/test/unittest_i18n.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/test/unittest_i18n.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,77 @@
+# -*- coding: iso-8859-1 -*-
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""unit tests for i18n messages generator"""
+
+import os, os.path as osp
+import sys
+import subprocess
+
+from unittest import TestCase, main
+
+from cubicweb.cwconfig import CubicWebNoAppConfiguration
+
+DATADIR = osp.join(osp.abspath(osp.dirname(__file__)), 'data')
+
+def load_po(fname):
+ """load a po file and return a set of encountered (msgid, msgctx)"""
+ msgs = set()
+ msgid = msgctxt = None
+ with open(fname) as fobj:
+ for line in fobj:
+ if line.strip() in ('', '#'):
+ continue
+ if line.startswith('msgstr'):
+ assert not (msgid, msgctxt) in msgs
+ msgs.add( (msgid, msgctxt) )
+ msgid = msgctxt = None
+ elif line.startswith('msgid'):
+ msgid = line.split(' ', 1)[1][1:-1]
+ elif line.startswith('msgctx'):
+ msgctxt = line.split(' ', 1)[1][1: -1]
+ elif msgid is not None:
+ msgid += line[1:-1]
+ elif msgctxt is not None:
+ msgctxt += line[1:-1]
+ return msgs
+
+
+class cubePotGeneratorTC(TestCase):
+ """test case for i18n pot file generator"""
+
+ def test_i18ncube(self):
+ env = os.environ.copy()
+ env['CW_CUBES_PATH'] = osp.join(DATADIR, 'cubes')
+ if 'PYTHONPATH' in env:
+ env['PYTHONPATH'] += os.pathsep
+ else:
+ env['PYTHONPATH'] = ''
+ env['PYTHONPATH'] += DATADIR
+ cwctl = osp.abspath(osp.join(osp.dirname(__file__),
+ '../../../bin/cubicweb-ctl'))
+ with open(os.devnull, 'w') as devnull:
+ subprocess.check_call([sys.executable, cwctl, 'i18ncube', 'i18ntestcube'],
+ env=env, stdout=devnull)
+ cube = osp.join(DATADIR, 'cubes', 'i18ntestcube')
+ msgs = load_po(osp.join(cube, 'i18n', 'en.po.ref'))
+ newmsgs = load_po(osp.join(cube, 'i18n', 'en.po'))
+ self.assertEqual(msgs, newmsgs)
+
+
+if __name__ == '__main__':
+ main()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/test/unittest_qunit.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/test/unittest_qunit.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,27 @@
+from cubicweb.devtools import qunit
+
+
+def js(name):
+ return '/static/js_examples/' + name
+
+class QUnitTestCaseTC(qunit.QUnitTestCase):
+
+ all_js_tests = (
+ (js('test_simple_success.js'),),
+ (js('test_with_dep.js'), (js('dep_1.js'),)),
+ (js('test_with_ordered_deps.js'), (js('dep_1.js'), js('deps_2.js'),)),
+ )
+
+
+ def test_simple_failure(self):
+ js_tests = list(self._test_qunit(js('test_simple_failure.js')))
+ self.assertEqual(len(js_tests), 3)
+ test_1, test_2, test_3 = js_tests
+ self.assertRaises(self.failureException, test_1[0], *test_1[1:])
+ self.assertRaises(self.failureException, test_2[0], *test_2[1:])
+ test_3[0](*test_3[1:])
+
+
+if __name__ == '__main__':
+ from unittest import main
+ main()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/test/unittest_testlib.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/test/unittest_testlib.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,298 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""unittests for cw.devtools.testlib module"""
+
+from io import BytesIO, StringIO
+from unittest import TextTestRunner
+
+from six import PY2
+
+from logilab.common.testlib import TestSuite, TestCase, unittest_main
+from logilab.common.registry import yes
+
+from cubicweb.devtools import htmlparser
+from cubicweb.devtools.testlib import CubicWebTC
+from cubicweb.pytestconf import clean_repo_test_cls
+
+class FakeFormTC(TestCase):
+ def test_fake_form(self):
+ class entity:
+ cw_etype = 'Entity'
+ eid = 0
+ sio = BytesIO(b'hop\n')
+ form = CubicWebTC.fake_form('import',
+ {'file': ('filename.txt', sio),
+ 'encoding': u'utf-8',
+ }, [(entity, {'field': 'value'})])
+ self.assertEqual(form, {'__form_id': 'import',
+ '__maineid': 0,
+ '__type:0': 'Entity',
+ '_cw_entity_fields:0': '__type,field',
+ '_cw_fields': 'encoding,file',
+ 'eid': [0],
+ 'encoding': u'utf-8',
+ 'field:0': 'value',
+ 'file': ('filename.txt', sio)})
+
+class WebTestTC(TestCase):
+
+ def setUp(self):
+ output = BytesIO() if PY2 else StringIO()
+ self.runner = TextTestRunner(stream=output)
+
+ def test_error_raised(self):
+ class MyWebTest(CubicWebTC):
+
+ def test_error_view(self):
+ with self.admin_access.web_request() as req:
+ req.create_entity('Bug', title=u"bt")
+ self.view('raising', req.execute('Bug B'), template=None, req=req)
+
+ def test_correct_view(self):
+ with self.admin_access.web_request() as req:
+ self.view('primary', req.execute('CWUser U'), template=None, req=req)
+
+ tests = [MyWebTest('test_error_view'), MyWebTest('test_correct_view')]
+ result = self.runner.run(TestSuite(tests))
+ self.assertEqual(result.testsRun, 2)
+ self.assertEqual(len(result.errors), 0)
+ self.assertEqual(len(result.failures), 1)
+ clean_repo_test_cls(MyWebTest)
+
+
+class RepoInstancesConsistencyTC(CubicWebTC):
+ test_db_id = 'RepoInstancesConsistencyTC'
+
+ def pre_setup_database(self, cnx, config):
+ self.assertIs(cnx.repo, config.repository())
+
+ def test_pre_setup(self):
+ pass
+
+
+HTML_PAGE = u"""<html>
+ <head>
+  <title>need a title</title>
+ </head>
+ <body>
+  <h1>Hello World !</h1>
+ </body>
+</html>
+"""
+
+HTML_PAGE2 = u"""<html>
+ <head><title>need a title</title></head>
+ <body>
+  <h1>Test</h1><h1>Hello <a href="http://www.google.com/">world</a> !</h1>
+  <h2>h2 title</h2><h3>h3 title</h3>
+  <p><a href="http://www.logilab.org">Logilab</a> introduces CW !</p>
+ </body>
+</html>"""
+
+
+class HTMLPageInfoTC(TestCase):
+ """test cases for PageInfo"""
+
+ def setUp(self):
+ parser = htmlparser.HTMLValidator()
+ # disable cleanup that would remove doctype
+ parser.preprocess_data = lambda data: data
+ self.page_info = parser.parse_string(HTML_PAGE2)
+
+ def test_source1(self):
+ """make sure source is stored correctly"""
+ self.assertEqual(self.page_info.source, HTML_PAGE2)
+
+ def test_source2(self):
+ """make sure source is stored correctly - raise exception"""
+ parser = htmlparser.DTDValidator()
+ self.assertRaises(AssertionError, parser.parse_string, HTML_PAGE_ERROR)
+
+ def test_has_title_no_level(self):
+ """tests h? tags information"""
+ self.assertEqual(self.page_info.has_title('Test'), True)
+ self.assertEqual(self.page_info.has_title('Test '), False)
+ self.assertEqual(self.page_info.has_title('Tes'), False)
+ self.assertEqual(self.page_info.has_title('Hello world !'), True)
+
+ def test_has_title_level(self):
+ """tests h? tags information"""
+ self.assertEqual(self.page_info.has_title('Test', level = 1), True)
+ self.assertEqual(self.page_info.has_title('Test', level = 2), False)
+ self.assertEqual(self.page_info.has_title('Test', level = 3), False)
+ self.assertEqual(self.page_info.has_title('Test', level = 4), False)
+ self.assertRaises(IndexError, self.page_info.has_title, 'Test', level = 5)
+
+ def test_has_title_regexp_no_level(self):
+ """tests has_title_regexp() with no particular level specified"""
+ self.assertEqual(self.page_info.has_title_regexp('h[23] title'), True)
+
+ def test_has_title_regexp_level(self):
+ """tests has_title_regexp() with a particular level specified"""
+ self.assertEqual(self.page_info.has_title_regexp('h[23] title', 2), True)
+ self.assertEqual(self.page_info.has_title_regexp('h[23] title', 3), True)
+ self.assertEqual(self.page_info.has_title_regexp('h[23] title', 4), False)
+
+ def test_appears(self):
+ """tests PageInfo.appears()"""
+ self.assertEqual(self.page_info.appears('CW'), True)
+ self.assertEqual(self.page_info.appears('Logilab'), True)
+ self.assertEqual(self.page_info.appears('Logilab introduces'), True)
+ self.assertEqual(self.page_info.appears('H2 title'), False)
+
+ def test_has_link(self):
+ """tests has_link()"""
+ self.assertEqual(self.page_info.has_link('Logilab'), True)
+ self.assertEqual(self.page_info.has_link('logilab'), False)
+ self.assertEqual(self.page_info.has_link('Logilab', 'http://www.logilab.org'), True)
+ self.assertEqual(self.page_info.has_link('Logilab', 'http://www.google.com'), False)
+
+ def test_has_link_regexp(self):
+ """test has_link_regexp()"""
+ self.assertEqual(self.page_info.has_link_regexp('L[oi]gilab'), True)
+ self.assertEqual(self.page_info.has_link_regexp('L[ai]gilab'), False)
+
+
+class CWUtilitiesTC(CubicWebTC):
+
+ def test_temporary_permissions_eschema(self):
+ eschema = self.schema['CWUser']
+ with self.temporary_permissions(CWUser={'read': ()}):
+ self.assertEqual(eschema.permissions['read'], ())
+ self.assertTrue(eschema.permissions['add'])
+ self.assertTrue(eschema.permissions['read'], ())
+
+ def test_temporary_permissions_rdef(self):
+ rdef = self.schema['CWUser'].rdef('in_group')
+ with self.temporary_permissions((rdef, {'read': ()})):
+ self.assertEqual(rdef.permissions['read'], ())
+ self.assertTrue(rdef.permissions['add'])
+ self.assertTrue(rdef.permissions['read'], ())
+
+ def test_temporary_permissions_rdef_with_exception(self):
+ rdef = self.schema['CWUser'].rdef('in_group')
+ try:
+ with self.temporary_permissions((rdef, {'read': ()})):
+ self.assertEqual(rdef.permissions['read'], ())
+ self.assertTrue(rdef.permissions['add'])
+ raise ValueError('goto')
+ except ValueError:
+ self.assertTrue(rdef.permissions['read'], ())
+ else:
+ self.fail('exception was caught unexpectedly')
+
+ def test_temporary_appobjects_registered(self):
+
+ class AnAppobject(object):
+ __registries__ = ('hip',)
+ __regid__ = 'hop'
+ __select__ = yes()
+ registered = None
+
+ @classmethod
+ def __registered__(cls, reg):
+ cls.registered = reg
+
+ with self.temporary_appobjects(AnAppobject):
+ self.assertEqual(self.vreg['hip'], AnAppobject.registered)
+ self.assertIn(AnAppobject, self.vreg['hip']['hop'])
+ self.assertNotIn(AnAppobject, self.vreg['hip']['hop'])
+
+ def test_login(self):
+ """Calling login should not break hook control"""
+ with self.admin_access.repo_cnx() as cnx:
+ self.hook_executed = False
+ self.create_user(cnx, 'babar')
+ cnx.commit()
+
+ from cubicweb.server import hook
+ from cubicweb.predicates import is_instance
+
+ class MyHook(hook.Hook):
+ __regid__ = 'whatever'
+ __select__ = hook.Hook.__select__ & is_instance('CWProperty')
+ category = 'test-hook'
+ events = ('after_add_entity',)
+ test = self
+
+ def __call__(self):
+ self.test.hook_executed = True
+
+ with self.new_access('babar').repo_cnx() as cnx:
+ with self.temporary_appobjects(MyHook):
+ with cnx.allow_all_hooks_but('test-hook'):
+ prop = cnx.create_entity('CWProperty', pkey=u'ui.language', value=u'en')
+ cnx.commit()
+ self.assertFalse(self.hook_executed)
+
+
+class RepoAccessTC(CubicWebTC):
+
+ def test_repo_connection(self):
+ acc = self.new_access('admin')
+ with acc.repo_cnx() as cnx:
+ rset = cnx.execute('Any X WHERE X is CWUser')
+ self.assertTrue(rset)
+
+ def test_client_connection(self):
+ acc = self.new_access('admin')
+ with acc.client_cnx() as cnx:
+ rset = cnx.execute('Any X WHERE X is CWUser')
+ self.assertTrue(rset)
+
+ def test_web_request(self):
+ acc = self.new_access('admin')
+ with acc.web_request(elephant='babar') as req:
+ rset = req.execute('Any X WHERE X is CWUser')
+ self.assertTrue(rset)
+ self.assertEqual('babar', req.form['elephant'])
+
+ def test_close(self):
+ acc = self.new_access('admin')
+ acc.close()
+
+ def test_admin_access(self):
+ with self.admin_access.client_cnx() as cnx:
+ self.assertEqual('admin', cnx.user.login)
+
+
+if __name__ == '__main__':
+ unittest_main()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/test/unittest_webtest.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/test/unittest_webtest.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,41 @@
+from six.moves import http_client
+
+from logilab.common.testlib import Tags
+from cubicweb.devtools.webtest import CubicWebTestTC
+
+
+class CWTTC(CubicWebTestTC):
+ def test_response(self):
+ response = self.webapp.get('/')
+ self.assertEqual(200, response.status_int)
+
+ def test_base_url(self):
+ if self.config['base-url'] not in self.webapp.get('/').text:
+ self.fail('no mention of base url in retrieved page')
+
+
+class CWTIdentTC(CubicWebTestTC):
+ test_db_id = 'webtest-ident'
+ anonymous_allowed = False
+ tags = CubicWebTestTC.tags | Tags(('auth',))
+
+ def test_reponse_denied(self):
+ res = self.webapp.get('/', expect_errors=True)
+ self.assertEqual(http_client.FORBIDDEN, res.status_int)
+
+ def test_login(self):
+ res = self.webapp.get('/', expect_errors=True)
+ self.assertEqual(http_client.FORBIDDEN, res.status_int)
+
+ self.login(self.admlogin, self.admpassword)
+ res = self.webapp.get('/')
+ self.assertEqual(http_client.OK, res.status_int)
+
+ self.logout()
+ res = self.webapp.get('/', expect_errors=True)
+ self.assertEqual(http_client.FORBIDDEN, res.status_int)
+
+
+if __name__ == '__main__':
+ from logilab.common.testlib import unittest_main
+ unittest_main()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/testlib.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/testlib.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1335 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""this module contains base classes and utilities for cubicweb tests"""
+from __future__ import print_function
+
+import sys
+import re
+from os.path import dirname, join, abspath
+from math import log
+from contextlib import contextmanager
+from itertools import chain
+
+from six import text_type, string_types
+from six.moves import range
+from six.moves.urllib.parse import urlparse, parse_qs, unquote as urlunquote
+
+import yams.schema
+
+from logilab.common.testlib import TestCase, InnerTest, Tags
+from logilab.common.pytest import nocoverage, pause_trace
+from logilab.common.debugger import Debugger
+from logilab.common.umessage import message_from_string
+from logilab.common.decorators import cached, classproperty, clear_cache, iclassmethod
+from logilab.common.deprecation import deprecated, class_deprecated
+from logilab.common.shellutils import getlogin
+
+from cubicweb import (ValidationError, NoSelectableObject, AuthenticationError,
+ BadConnectionId)
+from cubicweb import cwconfig, devtools, web, server, repoapi
+from cubicweb.utils import json
+from cubicweb.sobjects import notification
+from cubicweb.web import Redirect, application, eid_param
+from cubicweb.server.hook import SendMailOp
+from cubicweb.server.session import Session
+from cubicweb.devtools import SYSTEM_ENTITIES, SYSTEM_RELATIONS, VIEW_VALIDATORS
+from cubicweb.devtools import fake, htmlparser, DEFAULT_EMPTY_DB_ID
+
+
+# low-level utilities ##########################################################
+
+class CubicWebDebugger(Debugger):
+ """special debugger class providing a 'view' function which saves some
+ html into a temporary file and open a web browser to examinate it.
+ """
+ def do_view(self, arg):
+ import webbrowser
+ data = self._getval(arg)
+ with open('/tmp/toto.html', 'w') as toto:
+ toto.write(data)
+ webbrowser.open('file:///tmp/toto.html')
+
+
+def line_context_filter(line_no, center, before=3, after=None):
+ """return true if line are in context
+
+ if after is None: after = before
+ """
+ if after is None:
+ after = before
+ return center - before <= line_no <= center + after
+
+
+def unprotected_entities(schema, strict=False):
+ """returned a set of each non final entity type, excluding "system" entities
+ (eg CWGroup, CWUser...)
+ """
+ if strict:
+ protected_entities = yams.schema.BASE_TYPES
+ else:
+ protected_entities = yams.schema.BASE_TYPES.union(SYSTEM_ENTITIES)
+ return set(schema.entities()) - protected_entities
+
+
+class JsonValidator(object):
+ def parse_string(self, data):
+ return json.loads(data.decode('ascii'))
+
+
+@contextmanager
+def real_error_handling(app):
+ """By default, CubicWebTC `app` attribute (ie the publisher) is monkey
+ patched so that unexpected error are raised rather than going through the
+ `error_handler` method.
+
+ By using this context manager you disable this monkey-patching temporarily.
+ Hence when publishihng a request no error will be raised, you'll get
+ req.status_out set to an HTTP error status code and the generated page will
+ usually hold a traceback as HTML.
+
+ >>> with real_error_handling(app):
+ >>> page = app.handle_request(req)
+ """
+ # remove the monkey patched error handler
+ fake_error_handler = app.error_handler
+ del app.error_handler
+ # return the app
+ yield app
+ # restore
+ app.error_handler = fake_error_handler
+
+
+# email handling, to test emails sent by an application ########################
+
+MAILBOX = []
+
+
+class Email(object):
+ """you'll get instances of Email into MAILBOX during tests that trigger
+ some notification.
+
+ * `msg` is the original message object
+
+ * `recipients` is a list of email address which are the recipients of this
+ message
+ """
+ def __init__(self, fromaddr, recipients, msg):
+ self.fromaddr = fromaddr
+ self.recipients = recipients
+ self.msg = msg
+
+ @property
+ def message(self):
+ return message_from_string(self.msg)
+
+ @property
+ def subject(self):
+ return self.message.get('Subject')
+
+ @property
+ def content(self):
+ return self.message.get_payload(decode=True)
+
+ def __repr__(self):
+        return '<Email to %s with subject %s>' % (','.join(self.recipients),
+                                                  self.message.get('Subject'))
+
+
+# the trick to get email into MAILBOX instead of actually sent: monkey patch
+# cwconfig.SMTP object
+class MockSMTP:
+
+ def __init__(self, server, port):
+ pass
+
+ def close(self):
+ pass
+
+ def sendmail(self, fromaddr, recipients, msg):
+ MAILBOX.append(Email(fromaddr, recipients, msg))
+
+cwconfig.SMTP = MockSMTP
+
+
+# Repoaccess utility ###############################################3###########
+
+class RepoAccess(object):
+ """An helper to easily create object to access the repo as a specific user
+
+ Each RepoAccess have it own session.
+
+ A repo access can create three type of object:
+
+ .. automethod:: cubicweb.testlib.RepoAccess.cnx
+ .. automethod:: cubicweb.testlib.RepoAccess.web_request
+
+ The RepoAccess need to be closed to destroy the associated Session.
+ TestCase usually take care of this aspect for the user.
+
+ .. automethod:: cubicweb.testlib.RepoAccess.close
+ """
+
+ def __init__(self, repo, login, requestcls):
+ self._repo = repo
+ self._login = login
+ self.requestcls = requestcls
+ self._session = self._unsafe_connect(login)
+
+ def _unsafe_connect(self, login, **kwargs):
+ """ a completely unsafe connect method for the tests """
+ # use an internal connection
+ with self._repo.internal_cnx() as cnx:
+ # try to get a user object
+ user = cnx.find('CWUser', login=login).one()
+ user.groups
+ user.properties
+ user.login
+ session = Session(user, self._repo)
+ self._repo._sessions[session.sessionid] = session
+ user._cw = user.cw_rset.req = session
+ with session.new_cnx() as cnx:
+ self._repo.hm.call_hooks('session_open', cnx)
+ # commit connection at this point in case write operation has been
+ # done during `session_open` hooks
+ cnx.commit()
+ return session
+
+ @contextmanager
+ def cnx(self):
+ """Context manager returning a server side connection for the user"""
+ with self._session.new_cnx() as cnx:
+ yield cnx
+
+ # aliases for bw compat
+ client_cnx = repo_cnx = cnx
+
+ @contextmanager
+ def web_request(self, url=None, headers={}, method='GET', **kwargs):
+ """Context manager returning a web request pre-linked to a client cnx
+
+ To commit and rollback use::
+
+ req.cnx.commit()
+ req.cnx.rolback()
+ """
+ req = self.requestcls(self._repo.vreg, url=url, headers=headers,
+ method=method, form=kwargs)
+ with self._session.new_cnx() as cnx:
+ req.set_cnx(cnx)
+ yield req
+
+ def close(self):
+ """Close the session associated to the RepoAccess"""
+ if self._session is not None:
+ self._repo.close(self._session.sessionid)
+ self._session = None
+
+ @contextmanager
+ def shell(self):
+ from cubicweb.server.migractions import ServerMigrationHelper
+ with self._session.new_cnx() as cnx:
+ mih = ServerMigrationHelper(None, repo=self._repo, cnx=cnx,
+ interactive=False,
+ # hack so it don't try to load fs schema
+ schema=1)
+ yield mih
+ cnx.commit()
+
+
+# base class for cubicweb tests requiring a full cw environments ###############
+
+class CubicWebTC(TestCase):
+ """abstract class for test using an apptest environment
+
+ attributes:
+
+ * `vreg`, the vregistry
+ * `schema`, self.vreg.schema
+ * `config`, cubicweb configuration
+ * `cnx`, repoapi connection to the repository using an admin user
+ * `session`, server side session associated to `cnx`
+ * `app`, the cubicweb publisher (for web testing)
+ * `repo`, the repository object
+ * `admlogin`, login of the admin user
+ * `admpassword`, password of the admin user
+ * `shell`, create and use shell environment
+ * `anonymous_allowed`: flag telling if anonymous browsing should be allowed
+ """
+ appid = 'data'
+ configcls = devtools.ApptestConfiguration
+ requestcls = fake.FakeRequest
+ tags = TestCase.tags | Tags('cubicweb', 'cw_repo')
+ test_db_id = DEFAULT_EMPTY_DB_ID
+
+ # anonymous is logged by default in cubicweb test cases
+ anonymous_allowed = True
+
+ def __init__(self, *args, **kwargs):
+ self._admin_session = None
+ self.repo = None
+ self._open_access = set()
+ super(CubicWebTC, self).__init__(*args, **kwargs)
+
+ # repository connection handling ###########################################
+
+ def new_access(self, login):
+ """provide a new RepoAccess object for a given user
+
+ The access is automatically closed at the end of the test."""
+ login = text_type(login)
+ access = RepoAccess(self.repo, login, self.requestcls)
+ self._open_access.add(access)
+ return access
+
+ def _close_access(self):
+ while self._open_access:
+ try:
+ self._open_access.pop().close()
+ except BadConnectionId:
+ continue # already closed
+
+ @property
+ def session(self):
+ """return admin session"""
+ return self._admin_session
+
+ # XXX this doesn't need to a be classmethod anymore
+ def _init_repo(self):
+ """init the repository and connection to it.
+ """
+ # get or restore and working db.
+ db_handler = devtools.get_test_db_handler(self.config, self.init_config)
+ db_handler.build_db_cache(self.test_db_id, self.pre_setup_database)
+ db_handler.restore_database(self.test_db_id)
+ self.repo = db_handler.get_repo(startup=True)
+ # get an admin session (without actual login)
+ login = text_type(db_handler.config.default_admin_config['login'])
+ self.admin_access = self.new_access(login)
+ self._admin_session = self.admin_access._session
+
+ # config management ########################################################
+
+ @classproperty
+ def config(cls):
+ """return the configuration object
+
+ Configuration is cached on the test class.
+ """
+ if cls is CubicWebTC:
+ # Prevent direct use of CubicWebTC directly to avoid database
+ # caching issues
+ return None
+ try:
+ return cls.__dict__['_config']
+ except KeyError:
+ home = abspath(join(dirname(sys.modules[cls.__module__].__file__), cls.appid))
+ config = cls._config = cls.configcls(cls.appid, apphome=home)
+ config.mode = 'test'
+ return config
+
+ @classmethod # XXX could be turned into a regular method
+ def init_config(cls, config):
+ """configuration initialization hooks.
+
+ You may only want to override here the configuraton logic.
+
+ Otherwise, consider to use a different :class:`ApptestConfiguration`
+ defined in the `configcls` class attribute.
+
+ This method will be called by the database handler once the config has
+ been properly bootstrapped.
+ """
+ admincfg = config.default_admin_config
+ cls.admlogin = text_type(admincfg['login'])
+ cls.admpassword = admincfg['password']
+ # uncomment the line below if you want rql queries to be logged
+ # config.global_set_option('query-log-file',
+ # '/tmp/test_rql_log.' + `os.getpid()`)
+ config.global_set_option('log-file', None)
+ # set default-dest-addrs to a dumb email address to avoid mailbox or
+ # mail queue pollution
+ config.global_set_option('default-dest-addrs', ['whatever'])
+ send_to = '%s@logilab.fr' % getlogin()
+ config.global_set_option('sender-addr', send_to)
+ config.global_set_option('default-dest-addrs', send_to)
+ config.global_set_option('sender-name', 'cubicweb-test')
+ config.global_set_option('sender-addr', 'cubicweb-test@logilab.fr')
+ # default_base_url on config class isn't enough for TestServerConfiguration
+ config.global_set_option('base-url', config.default_base_url())
+ # web resources
+ try:
+ config.global_set_option('embed-allowed', re.compile('.*'))
+ except Exception: # not in server only configuration
+ pass
+
+ @property
+ def vreg(self):
+ return self.repo.vreg
+
+ # global resources accessors ###############################################
+
+ @property
+ def schema(self):
+ """return the application schema"""
+ return self.vreg.schema
+
+ def set_option(self, optname, value):
+ self.config.global_set_option(optname, value)
+
+ def set_debug(self, debugmode):
+ server.set_debug(debugmode)
+
+ def debugged(self, debugmode):
+ return server.debugged(debugmode)
+
+ # default test setup and teardown #########################################
+
+ def setUp(self):
+ # monkey patch send mail operation so emails are sent synchronously
+ self._patch_SendMailOp()
+ with pause_trace():
+ previous_failure = self.__class__.__dict__.get('_repo_init_failed')
+ if previous_failure is not None:
+ self.skipTest('repository is not initialised: %r' % previous_failure)
+ try:
+ self._init_repo()
+ except Exception as ex:
+ self.__class__._repo_init_failed = ex
+ raise
+ self.addCleanup(self._close_access)
+ self.config.set_anonymous_allowed(self.anonymous_allowed)
+ self.setup_database()
+ MAILBOX[:] = [] # reset mailbox
+
+ def tearDown(self):
+ # XXX hack until logilab.common.testlib is fixed
+ if self._admin_session is not None:
+ self.repo.close(self._admin_session.sessionid)
+ self._admin_session = None
+ while self._cleanups:
+ cleanup, args, kwargs = self._cleanups.pop(-1)
+ cleanup(*args, **kwargs)
+ self.repo.turn_repo_off()
+
+ def _patch_SendMailOp(self):
+ # monkey patch send mail operation so emails are sent synchronously
+ _old_mail_postcommit_event = SendMailOp.postcommit_event
+ SendMailOp.postcommit_event = SendMailOp.sendmails
+
+ def reverse_SendMailOp_monkey_patch():
+ SendMailOp.postcommit_event = _old_mail_postcommit_event
+
+ self.addCleanup(reverse_SendMailOp_monkey_patch)
+
+ def setup_database(self):
+ """add your database setup code by overriding this method"""
+
+ @classmethod
+ def pre_setup_database(cls, cnx, config):
+ """add your pre database setup code by overriding this method
+
+ Do not forget to set the cls.test_db_id value to enable caching of the
+ result.
+ """
+
+ # user / session management ###############################################
+
+ @deprecated('[3.19] explicitly use RepoAccess object in test instead')
+ def user(self, req=None):
+ """return the application schema"""
+ if req is None:
+ return self.request().user
+ else:
+ return req.user
+
+ @iclassmethod # XXX turn into a class method
+ def create_user(self, req, login=None, groups=('users',), password=None,
+ email=None, commit=True, **kwargs):
+ """create and return a new user entity"""
+ if password is None:
+ password = login
+ if login is not None:
+ login = text_type(login)
+ user = req.create_entity('CWUser', login=login,
+ upassword=password, **kwargs)
+ req.execute('SET X in_group G WHERE X eid %%(x)s, G name IN(%s)'
+ % ','.join(repr(str(g)) for g in groups),
+ {'x': user.eid})
+ if email is not None:
+ req.create_entity('EmailAddress', address=text_type(email),
+ reverse_primary_email=user)
+ user.cw_clear_relation_cache('in_group', 'subject')
+ if commit:
+ try:
+ req.commit() # req is a session
+ except AttributeError:
+ req.cnx.commit()
+ return user
+
+ # other utilities #########################################################
+
+ @contextmanager
+ def temporary_appobjects(self, *appobjects):
+ self.vreg._loadedmods.setdefault(self.__module__, {})
+ for obj in appobjects:
+ self.vreg.register(obj)
+ registered = getattr(obj, '__registered__', None)
+ if registered:
+ for registry in obj.__registries__:
+ registered(self.vreg[registry])
+ try:
+ yield
+ finally:
+ for obj in appobjects:
+ self.vreg.unregister(obj)
+
+ @contextmanager
+ def temporary_permissions(self, *perm_overrides, **perm_kwoverrides):
+ """Set custom schema permissions within context.
+
+ There are two ways to call this method, which may be used together :
+
+ * using positional argument(s):
+
+ .. sourcecode:: python
+
+ rdef = self.schema['CWUser'].rdef('login')
+ with self.temporary_permissions((rdef, {'read': ()})):
+ ...
+
+
+ * using named argument(s):
+
+ .. sourcecode:: python
+
+ with self.temporary_permissions(CWUser={'read': ()}):
+ ...
+
+ Usually the former will be preferred to override permissions on a
+ relation definition, while the latter is well suited for entity types.
+
+ The allowed keys in the permission dictionary depend on the schema type
+ (entity type / relation definition). Resulting permissions will be
+ similar to `orig_permissions.update(partial_perms)`.
+ """
+ torestore = []
+ for erschema, etypeperms in chain(perm_overrides, perm_kwoverrides.items()):
+ if isinstance(erschema, string_types):
+ erschema = self.schema[erschema]
+ for action, actionperms in etypeperms.items():
+ origperms = erschema.permissions[action]
+ erschema.set_action_permissions(action, actionperms)
+ torestore.append([erschema, action, origperms])
+ try:
+ yield
+ finally:
+ for erschema, action, permissions in torestore:
+ if action is None:
+ erschema.permissions = permissions
+ else:
+ erschema.set_action_permissions(action, permissions)
+
+ def assertModificationDateGreater(self, entity, olddate):
+ entity.cw_attr_cache.pop('modification_date', None)
+ self.assertGreater(entity.modification_date, olddate)
+
+ def assertMessageEqual(self, req, params, expected_msg):
+ msg = req.session.data[params['_cwmsgid']]
+ self.assertEqual(expected_msg, msg)
+
+ # workflow utilities #######################################################
+
+ def assertPossibleTransitions(self, entity, expected):
+ transitions = entity.cw_adapt_to('IWorkflowable').possible_transitions()
+ self.assertListEqual(sorted(tr.name for tr in transitions),
+ sorted(expected))
+
+ # views and actions registries inspection ##################################
+
+ def pviews(self, req, rset):
+ return sorted((a.__regid__, a.__class__)
+ for a in self.vreg['views'].possible_views(req, rset=rset))
+
+ def pactions(self, req, rset,
+ skipcategories=('addrelated', 'siteactions', 'useractions',
+ 'footer', 'manage')):
+ return [(a.__regid__, a.__class__)
+ for a in self.vreg['actions'].poss_visible_objects(req, rset=rset)
+ if a.category not in skipcategories]
+
+ def pactions_by_cats(self, req, rset, categories=('addrelated',)):
+ return [(a.__regid__, a.__class__)
+ for a in self.vreg['actions'].poss_visible_objects(req, rset=rset)
+ if a.category in categories]
+
+ def pactionsdict(self, req, rset,
+ skipcategories=('addrelated', 'siteactions', 'useractions',
+ 'footer', 'manage')):
+ res = {}
+ for a in self.vreg['actions'].poss_visible_objects(req, rset=rset):
+ if a.category not in skipcategories:
+ res.setdefault(a.category, []).append(a.__class__)
+ return res
+
+ def action_submenu(self, req, rset, id):
+ return self._test_action(self.vreg['actions'].select(id, req, rset=rset))
+
+ def _test_action(self, action):
+ class fake_menu(list):
+ @property
+ def items(self):
+ return self
+
+ class fake_box(object):
+ def action_link(self, action, **kwargs):
+ return (action.title, action.url())
+ submenu = fake_menu()
+ action.fill_menu(fake_box(), submenu)
+ return submenu
+
+ def list_views_for(self, rset):
+ """returns the list of views that can be applied on `rset`"""
+ req = rset.req
+ only_once_vids = ('primary', 'secondary', 'text')
+ req.data['ex'] = ValueError("whatever")
+ viewsvreg = self.vreg['views']
+ for vid, views in viewsvreg.items():
+ if vid[0] == '_':
+ continue
+ if rset.rowcount > 1 and vid in only_once_vids:
+ continue
+ views = [view for view in views
+ if view.category != 'startupview'
+ and not issubclass(view, notification.NotificationView)
+ and not isinstance(view, class_deprecated)]
+ if views:
+ try:
+ view = viewsvreg._select_best(views, req, rset=rset)
+ if view is None:
+ raise NoSelectableObject((req,), {'rset': rset}, views)
+ if view.linkable():
+ yield view
+ else:
+ not_selected(self.vreg, view)
+ # else the view is expected to be used as subview and should
+ # not be tested directly
+ except NoSelectableObject:
+ continue
+
+ def list_actions_for(self, rset):
+ """returns the list of actions that can be applied on `rset`"""
+ req = rset.req
+ for action in self.vreg['actions'].possible_objects(req, rset=rset):
+ yield action
+
+ def list_boxes_for(self, rset):
+ """returns the list of boxes that can be applied on `rset`"""
+ req = rset.req
+ for box in self.vreg['ctxcomponents'].possible_objects(req, rset=rset):
+ yield box
+
+ def list_startup_views(self):
+ """returns the list of startup views"""
+ with self.admin_access.web_request() as req:
+ for view in self.vreg['views'].possible_views(req, None):
+ if view.category == 'startupview':
+ yield view.__regid__
+ else:
+ not_selected(self.vreg, view)
+
+ # web ui testing utilities #################################################
+
+ @property
+ @cached
+ def app(self):
+ """return a cubicweb publisher"""
+ publisher = application.CubicWebPublisher(self.repo, self.config)
+
+ def raise_error_handler(*args, **kwargs):
+ raise
+
+ publisher.error_handler = raise_error_handler
+ return publisher
+
+ @deprecated('[3.19] use the .remote_calling method')
+ def remote_call(self, fname, *args):
+ """remote json call simulation"""
+ dump = json.dumps
+ args = [dump(arg) for arg in args]
+ req = self.request(fname=fname, pageid='123', arg=args)
+ ctrl = self.vreg['controllers'].select('ajax', req)
+ return ctrl.publish(), req
+
+ @contextmanager
+ def remote_calling(self, fname, *args):
+ """remote json call simulation"""
+ args = [json.dumps(arg) for arg in args]
+ with self.admin_access.web_request(fname=fname, pageid='123', arg=args) as req:
+ ctrl = self.vreg['controllers'].select('ajax', req)
+ yield ctrl.publish(), req
+
+ def app_handle_request(self, req, path='view'):
+ return self.app.core_handle(req, path)
+
+ @deprecated("[3.15] app_handle_request is the new and better way"
+ " (beware of small semantic changes)")
+ def app_publish(self, *args, **kwargs):
+ return self.app_handle_request(*args, **kwargs)
+
+ def ctrl_publish(self, req, ctrl='edit', rset=None):
+ """call the publish method of the edit controller"""
+ ctrl = self.vreg['controllers'].select(ctrl, req, appli=self.app)
+ try:
+ result = ctrl.publish(rset)
+ req.cnx.commit()
+ except web.Redirect:
+ req.cnx.commit()
+ raise
+ return result
+
+ @staticmethod
+ def fake_form(formid, field_dict=None, entity_field_dicts=()):
+ """Build _cw.form dictionnary to fake posting of some standard cubicweb form
+
+ * `formid`, the form id, usually form's __regid__
+
+ * `field_dict`, dictionary of name:value for fields that are not tied to an entity
+
+ * `entity_field_dicts`, list of (entity, dictionary) where dictionary contains name:value
+ for fields that are not tied to the given entity
+ """
+ assert field_dict or entity_field_dicts, \
+ 'field_dict and entity_field_dicts arguments must not be both unspecified'
+ if field_dict is None:
+ field_dict = {}
+ form = {'__form_id': formid}
+ fields = []
+ for field, value in field_dict.items():
+ fields.append(field)
+ form[field] = value
+
+ def _add_entity_field(entity, field, value):
+ entity_fields.append(field)
+ form[eid_param(field, entity.eid)] = value
+
+ for entity, field_dict in entity_field_dicts:
+ if '__maineid' not in form:
+ form['__maineid'] = entity.eid
+ entity_fields = []
+ form.setdefault('eid', []).append(entity.eid)
+ _add_entity_field(entity, '__type', entity.cw_etype)
+ for field, value in field_dict.items():
+ _add_entity_field(entity, field, value)
+ if entity_fields:
+ form[eid_param('_cw_entity_fields', entity.eid)] = ','.join(entity_fields)
+ if fields:
+ form['_cw_fields'] = ','.join(sorted(fields))
+ return form
+
+ @deprecated('[3.19] use .admin_request_from_url instead')
+ def req_from_url(self, url):
+ """parses `url` and builds the corresponding CW-web request
+
+ req.form will be setup using the url's query string
+ """
+ req = self.request(url=url)
+ if isinstance(url, unicode):
+ url = url.encode(req.encoding) # req.setup_params() expects encoded strings
+ querystring = urlparse(url)[-2]
+ params = parse_qs(querystring)
+ req.setup_params(params)
+ return req
+
+ @contextmanager
+ def admin_request_from_url(self, url):
+ """parses `url` and builds the corresponding CW-web request
+
+ req.form will be setup using the url's query string
+ """
+ with self.admin_access.web_request(url=url) as req:
+ if isinstance(url, unicode):
+ url = url.encode(req.encoding) # req.setup_params() expects encoded strings
+ querystring = urlparse(url)[-2]
+ params = parse_qs(querystring)
+ req.setup_params(params)
+ yield req
+
+ def url_publish(self, url, data=None):
+ """takes `url`, uses application's app_resolver to find the appropriate
+ controller and result set, then publishes the result.
+
+ To simulate post of www-form-encoded data, give a `data` dictionary
+ containing desired key/value associations.
+
+ This should pretty much correspond to what occurs in a real CW server
+ except the apache-rewriter component is not called.
+ """
+ with self.admin_request_from_url(url) as req:
+ if data is not None:
+ req.form.update(data)
+ ctrlid, rset = self.app.url_resolver.process(req, req.relative_path(False))
+ return self.ctrl_publish(req, ctrlid, rset)
+
+ def http_publish(self, url, data=None):
+ """like `url_publish`, except this returns a http response, even in case
+ of errors. You may give form parameters using the `data` argument.
+ """
+ with self.admin_request_from_url(url) as req:
+ if data is not None:
+ req.form.update(data)
+ with real_error_handling(self.app):
+ result = self.app_handle_request(req, req.relative_path(False))
+ return result, req
+
+ @staticmethod
+ def _parse_location(req, location):
+ try:
+ path, params = location.split('?', 1)
+ except ValueError:
+ path = location
+ params = {}
+ else:
+ cleanup = lambda p: (p[0], urlunquote(p[1]))
+ params = dict(cleanup(p.split('=', 1)) for p in params.split('&') if p)
+ if path.startswith(req.base_url()): # may be relative
+ path = path[len(req.base_url()):]
+ return path, params
+
+ def expect_redirect(self, callback, req):
+ """call the given callback with req as argument, expecting to get a
+ Redirect exception
+ """
+ try:
+ callback(req)
+ except Redirect as ex:
+ return self._parse_location(req, ex.location)
+ else:
+ self.fail('expected a Redirect exception')
+
+ def expect_redirect_handle_request(self, req, path='edit'):
+ """call the publish method of the application publisher, expecting to
+ get a Redirect exception
+ """
+ self.app_handle_request(req, path)
+ self.assertTrue(300 <= req.status_out < 400, req.status_out)
+ location = req.get_response_header('location')
+ return self._parse_location(req, location)
+
+ @deprecated("[3.15] expect_redirect_handle_request is the new and better way"
+ " (beware of small semantic changes)")
+ def expect_redirect_publish(self, *args, **kwargs):
+ return self.expect_redirect_handle_request(*args, **kwargs)
+
+ def set_auth_mode(self, authmode, anonuser=None):
+ self.set_option('auth-mode', authmode)
+ self.set_option('anonymous-user', anonuser)
+ if anonuser is None:
+ self.config.anonymous_credential = None
+ else:
+ self.config.anonymous_credential = (anonuser, anonuser)
+
+ def init_authentication(self, authmode, anonuser=None):
+ self.set_auth_mode(authmode, anonuser)
+ req = self.requestcls(self.vreg, url='login')
+ sh = self.app.session_handler
+ authm = sh.session_manager.authmanager
+ authm.anoninfo = self.vreg.config.anonymous_user()
+ authm.anoninfo = authm.anoninfo[0], {'password': authm.anoninfo[1]}
+ # not properly cleaned between tests
+ self.open_sessions = sh.session_manager._sessions = {}
+ return req, self.session
+
+ def assertAuthSuccess(self, req, origsession, nbsessions=1):
+ sh = self.app.session_handler
+ session = self.app.get_session(req)
+ cnx = repoapi.Connection(session)
+ req.set_cnx(cnx)
+ self.assertEqual(len(self.open_sessions), nbsessions, self.open_sessions)
+ self.assertEqual(session.login, origsession.login)
+ self.assertEqual(session.anonymous_session, False)
+
+ def assertAuthFailure(self, req, nbsessions=0):
+ with self.assertRaises(AuthenticationError):
+ self.app.get_session(req)
+ # +0 since we do not track the opened session
+ self.assertEqual(len(self.open_sessions), nbsessions)
+ clear_cache(req, 'get_authorization')
+
+ # content validation #######################################################
+
+ # validators are used to validate (XML, DTD, whatever) view's content
+ # validators availables are :
+ # DTDValidator : validates XML + declared DTD
+ # SaxOnlyValidator : guarantees XML is well formed
+ # None : do not try to validate anything
+    # validators used must be imported from cubicweb.devtools.htmlparser
+ content_type_validators = {
+ # maps MIME type : validator name
+ #
+ # do not set html validators here, we need HTMLValidator for html
+ # snippets
+ # 'text/html': DTDValidator,
+ # 'application/xhtml+xml': DTDValidator,
+ 'application/xml': htmlparser.XMLValidator,
+ 'text/xml': htmlparser.XMLValidator,
+ 'application/json': JsonValidator,
+ 'text/plain': None,
+ 'text/comma-separated-values': None,
+ 'text/x-vcard': None,
+ 'text/calendar': None,
+ 'image/png': None,
+ }
+ # maps vid : validator name (override content_type_validators)
+ vid_validators = dict((vid, htmlparser.VALMAP[valkey])
+ for vid, valkey in VIEW_VALIDATORS.items())
+
+ def view(self, vid, rset=None, req=None, template='main-template',
+ **kwargs):
+ """This method tests the view `vid` on `rset` using `template`
+
+ If no error occurred while rendering the view, the HTML is analyzed
+ and parsed.
+
+ :returns: an instance of `cubicweb.devtools.htmlparser.PageInfo`
+ encapsulation the generated HTML
+ """
+ if req is None:
+ if rset is None:
+ req = self.request()
+ else:
+ req = rset.req
+ req.form['vid'] = vid
+ viewsreg = self.vreg['views']
+ view = viewsreg.select(vid, req, rset=rset, **kwargs)
+ # set explicit test description
+ if rset is not None:
+ # coerce to "bytes" on py2 because the description will be sent to
+ # sys.stdout/stderr which takes "bytes" on py2 and "unicode" on py3
+ rql = str(rset.printable_rql())
+ self.set_description("testing vid=%s defined in %s with (%s)" % (
+ vid, view.__module__, rql))
+ else:
+ self.set_description("testing vid=%s defined in %s without rset" % (
+ vid, view.__module__))
+ if template is None: # raw view testing, no template
+ viewfunc = view.render
+ else:
+ kwargs['view'] = view
+ viewfunc = lambda **k: viewsreg.main_template(req, template,
+ rset=rset, **kwargs)
+ return self._test_view(viewfunc, view, template, kwargs)
+
+ def _test_view(self, viewfunc, view, template='main-template', kwargs={}):
+ """this method does the actual call to the view
+
+ If no error occurred while rendering the view, the HTML is analyzed
+ and parsed.
+
+ :returns: an instance of `cubicweb.devtools.htmlparser.PageInfo`
+ encapsulation the generated HTML
+ """
+ try:
+ output = viewfunc(**kwargs)
+ except Exception:
+ # hijack exception: generative tests stop when the exception
+ # is not an AssertionError
+ klass, exc, tcbk = sys.exc_info()
+ try:
+ msg = '[%s in %s] %s' % (klass, view.__regid__, exc)
+ except Exception:
+ msg = '[%s in %s] undisplayable exception' % (klass, view.__regid__)
+ exc = AssertionError(msg)
+ exc.__traceback__ = tcbk
+ raise exc
+ return self._check_html(output, view, template)
+
+ def get_validator(self, view=None, content_type=None, output=None):
+ if view is not None:
+ try:
+ return self.vid_validators[view.__regid__]()
+ except KeyError:
+ if content_type is None:
+ content_type = view.content_type
+ if content_type is None:
+ content_type = 'text/html'
+ if content_type in ('text/html', 'application/xhtml+xml') and output:
+            if output.startswith(b'<!DOCTYPE html>'):
+                # only check XML well-formness since HTMLValidator isn't html5
+                # compatible and won't like various other extensions
+                default_validator = htmlparser.XMLSyntaxValidator
+            elif output.startswith(b'<?xml'):
+                default_validator = htmlparser.DTDValidator
+            else:
+                default_validator = htmlparser.HTMLValidator
+        else:
+            default_validator = None
+        validatorclass = self.content_type_validators.get(content_type,
+                                                          default_validator)
+        if validatorclass is None:
+            return
+        return validatorclass()
+
+    def _check_html(self, output, view, template='main-template'):
+        """raises an exception if the HTML is invalid"""
+        output = output.strip()
+        validator = self.get_validator(view, output=output)
+        if validator is None:
+            return output  # return raw output if no validator is defined
+        if isinstance(validator, htmlparser.DTDValidator):
+            # XXX remove <canvas> used in progress widget, unknown in html dtd
+            output = re.sub('<canvas.*?></canvas>', '', output)
+        return self.assertWellFormed(validator, output.strip(), context=view.__regid__)
+
+ def assertWellFormed(self, validator, content, context=None):
+ try:
+ return validator.parse_string(content)
+ except Exception:
+ # hijack exception: generative tests stop when the exception
+ # is not an AssertionError
+ klass, exc, tcbk = sys.exc_info()
+ if context is None:
+ msg = u'[%s]' % (klass,)
+ else:
+ msg = u'[%s in %s]' % (klass, context)
+ msg = msg.encode(sys.getdefaultencoding(), 'replace')
+
+ try:
+ str_exc = str(exc)
+ except Exception:
+ str_exc = 'undisplayable exception'
+ msg += str_exc.encode(sys.getdefaultencoding(), 'replace')
+ if content is not None:
+ position = getattr(exc, "position", (0,))[0]
+ if position:
+ # define filter
+ if isinstance(content, str):
+ content = unicode(content, sys.getdefaultencoding(), 'replace')
+ content = validator.preprocess_data(content)
+ content = content.splitlines()
+ width = int(log(len(content), 10)) + 1
+ line_template = " %" + ("%i" % width) + "i: %s"
+ # XXX no need to iterate the whole file except to get
+ # the line number
+ content = u'\n'.join(line_template % (idx + 1, line)
+ for idx, line in enumerate(content)
+ if line_context_filter(idx+1, position))
+ msg += u'\nfor content:\n%s' % content
+ exc = AssertionError(msg)
+ exc.__traceback__ = tcbk
+ raise exc
+
+ def assertDocTestFile(self, testfile):
+ # doctest returns tuple (failure_count, test_count)
+ with self.admin_access.shell() as mih:
+ result = mih.process_script(testfile)
+ if result[0] and result[1]:
+ raise self.failureException("doctest file '%s' failed"
+ % testfile)
+
+ # notifications ############################################################
+
+ def assertSentEmail(self, subject, recipients=None, nb_msgs=None):
+ """test recipients in system mailbox for given email subject
+
+ :param subject: email subject to find in mailbox
+ :param recipients: list of email recipients
+ :param nb_msgs: expected number of entries
+ :returns: list of matched emails
+ """
+ messages = [email for email in MAILBOX
+ if email.message.get('Subject') == subject]
+ if recipients is not None:
+ sent_to = set()
+ for msg in messages:
+ sent_to.update(msg.recipients)
+ self.assertSetEqual(set(recipients), sent_to)
+ if nb_msgs is not None:
+ self.assertEqual(len(MAILBOX), nb_msgs)
+ return messages
+
+
+# auto-populating test classes and utilities ###################################
+
+from cubicweb.devtools.fill import insert_entity_queries, make_relations_queries
+
+# XXX cleanup unprotected_entities & all mess
+
+
+def how_many_dict(schema, cnx, how_many, skip):
+ """given a schema, compute how many entities by type we need to be able to
+ satisfy relations cardinality.
+
+ The `how_many` argument tells how many entities of which type we want at
+ least.
+
+ Return a dictionary with entity types as key, and the number of entities for
+ this type as value.
+ """
+ relmap = {}
+ for rschema in schema.relations():
+ if rschema.final:
+ continue
+ for subj, obj in rschema.rdefs:
+ card = rschema.rdef(subj, obj).cardinality
+ # if the relation is mandatory, we'll need at least as many subj and
+ # obj to satisfy it
+ if card[0] in '1+' and card[1] in '1?':
+ # subj has to be linked to at least one obj,
+ # but obj can be linked to only one subj
+ # -> we need at least as many subj as obj to satisfy
+ # cardinalities for this relation
+ relmap.setdefault((rschema, subj), []).append(str(obj))
+ if card[1] in '1+' and card[0] in '1?':
+ # reverse subj and obj in the above explanation
+ relmap.setdefault((rschema, obj), []).append(str(subj))
+ unprotected = unprotected_entities(schema)
+ for etype in skip: # XXX (syt) duh? explain or kill
+ unprotected.add(etype)
+ howmanydict = {}
+ # step 1, compute a base number of each entity types: number of already
+ # existing entities of this type + `how_many`
+ for etype in unprotected_entities(schema, strict=True):
+ howmanydict[str(etype)] = cnx.execute('Any COUNT(X) WHERE X is %s' % etype)[0][0]
+ if etype in unprotected:
+ howmanydict[str(etype)] += how_many
+ # step 2, augment nb entity per types to satisfy cardinality constraints,
+ # by recomputing for each relation that constrained an entity type:
+ #
+ # new num for etype = max(current num, sum(num for possible target etypes))
+ #
+ # XXX we should first check there is no cycle then propagate changes
+ for (rschema, etype), targets in relmap.items():
+ relfactor = sum(howmanydict[e] for e in targets)
+ howmanydict[str(etype)] = max(relfactor, howmanydict[etype])
+ return howmanydict
+
+
+class AutoPopulateTest(CubicWebTC):
+ """base class for test with auto-populating of the database"""
+ __abstract__ = True
+
+ test_db_id = 'autopopulate'
+
+ tags = CubicWebTC.tags | Tags('autopopulated')
+
+ pdbclass = CubicWebDebugger
+ # this is a hook to be able to define a list of rql queries
+ # that are application dependent and cannot be guessed automatically
+ application_rql = []
+
+ no_auto_populate = ()
+ ignored_relations = set()
+
+ def to_test_etypes(self):
+ return unprotected_entities(self.schema, strict=True)
+
+ def custom_populate(self, how_many, cnx):
+ pass
+
+ def post_populate(self, cnx):
+ pass
+
+ @nocoverage
+ def auto_populate(self, how_many):
+ """this method populates the database with `how_many` entities
+ of each possible type. It also inserts random relations between them
+ """
+ with self.admin_access.cnx() as cnx:
+ with cnx.security_enabled(read=False, write=False):
+ self._auto_populate(cnx, how_many)
+ cnx.commit()
+
+ def _auto_populate(self, cnx, how_many):
+ self.custom_populate(how_many, cnx)
+ vreg = self.vreg
+ howmanydict = how_many_dict(self.schema, cnx, how_many, self.no_auto_populate)
+ for etype in unprotected_entities(self.schema):
+ if etype in self.no_auto_populate:
+ continue
+ nb = howmanydict.get(etype, how_many)
+ for rql, args in insert_entity_queries(etype, self.schema, vreg, nb):
+ cnx.execute(rql, args)
+ edict = {}
+ for etype in unprotected_entities(self.schema, strict=True):
+ rset = cnx.execute('%s X' % etype)
+ edict[str(etype)] = set(row[0] for row in rset.rows)
+ existingrels = {}
+ ignored_relations = SYSTEM_RELATIONS | self.ignored_relations
+ for rschema in self.schema.relations():
+ if rschema.final or rschema in ignored_relations:
+ continue
+ rset = cnx.execute('DISTINCT Any X,Y WHERE X %s Y' % rschema)
+ existingrels.setdefault(rschema.type, set()).update((x, y) for x, y in rset)
+ q = make_relations_queries(self.schema, edict, cnx, ignored_relations,
+ existingrels=existingrels)
+ for rql, args in q:
+ try:
+ cnx.execute(rql, args)
+ except ValidationError as ex:
+ # failed to satisfy some constraint
+ print('error in automatic db population', ex)
+ cnx.commit_state = None # reset uncommitable flag
+ self.post_populate(cnx)
+
+ def iter_individual_rsets(self, etypes=None, limit=None):
+ etypes = etypes or self.to_test_etypes()
+ with self.admin_access.web_request() as req:
+ for etype in etypes:
+ if limit:
+ rql = 'Any X LIMIT %s WHERE X is %s' % (limit, etype)
+ else:
+ rql = 'Any X WHERE X is %s' % etype
+ rset = req.execute(rql)
+ for row in range(len(rset)):
+ if limit and row > limit:
+ break
+ # XXX iirk
+ rset2 = rset.limit(limit=1, offset=row)
+ yield rset2
+
+ def iter_automatic_rsets(self, limit=10):
+ """generates basic resultsets for each entity type"""
+ etypes = self.to_test_etypes()
+ if not etypes:
+ return
+ with self.admin_access.web_request() as req:
+ for etype in etypes:
+ yield req.execute('Any X LIMIT %s WHERE X is %s' % (limit, etype))
+ etype1 = etypes.pop()
+ try:
+ etype2 = etypes.pop()
+ except KeyError:
+ etype2 = etype1
+ # test a mixed query (DISTINCT/GROUP to avoid getting duplicate
+ # X which make muledit view failing for instance (html validation fails
+ # because of some duplicate "id" attributes)
+ yield req.execute('DISTINCT Any X, MAX(Y) GROUPBY X WHERE X is %s, Y is %s' %
+ (etype1, etype2))
+ # test some application-specific queries if defined
+ for rql in self.application_rql:
+ yield req.execute(rql)
+
+ def _test_everything_for(self, rset):
+ """this method tries to find everything that can be tested
+ for `rset` and yields a callable test (as needed in generative tests)
+ """
+ propdefs = self.vreg['propertydefs']
+ # make all components visible
+ for k, v in propdefs.items():
+ if k.endswith('visible') and not v['default']:
+ propdefs[k]['default'] = True
+ for view in self.list_views_for(rset):
+ backup_rset = rset.copy(rset.rows, rset.description)
+ yield InnerTest(self._testname(rset, view.__regid__, 'view'),
+ self.view, view.__regid__, rset,
+ rset.req.reset_headers(), 'main-template')
+ # We have to do this because some views modify the
+ # resultset's syntax tree
+ rset = backup_rset
+ for action in self.list_actions_for(rset):
+ yield InnerTest(self._testname(rset, action.__regid__, 'action'),
+ self._test_action, action)
+ for box in self.list_boxes_for(rset):
+ w = [].append
+ yield InnerTest(self._testname(rset, box.__regid__, 'box'), box.render, w)
+
+ @staticmethod
+ def _testname(rset, objid, objtype):
+ return '%s_%s_%s' % ('_'.join(rset.column_types(0)), objid, objtype)
+
+
+# concrete class for automated application testing ############################
+
+class AutomaticWebTest(AutoPopulateTest):
+ """import this if you wan automatic tests to be ran"""
+
+ tags = AutoPopulateTest.tags | Tags('web', 'generated')
+
+ def setUp(self):
+ if self.__class__ is AutomaticWebTest:
+ # Prevent direct use of AutomaticWebTest to avoid database caching
+ # issues.
+ return
+ super(AutomaticWebTest, self).setUp()
+
+ # access to self.app for proper initialization of the authentication
+ # machinery (else some views may fail)
+ self.app
+
+ def test_one_each_config(self):
+ self.auto_populate(1)
+ for rset in self.iter_automatic_rsets(limit=1):
+ for testargs in self._test_everything_for(rset):
+ yield testargs
+
+ def test_ten_each_config(self):
+ self.auto_populate(10)
+ for rset in self.iter_automatic_rsets(limit=10):
+ for testargs in self._test_everything_for(rset):
+ yield testargs
+
+ def test_startup_views(self):
+ for vid in self.list_startup_views():
+ with self.admin_access.web_request() as req:
+ yield self.view, vid, None, req
+
+
+# registry instrumentization ###################################################
+
+def not_selected(vreg, appobject):
+ try:
+ vreg._selected[appobject.__class__] -= 1
+ except (KeyError, AttributeError):
+ pass
+
+
+# def vreg_instrumentize(testclass):
+# # XXX broken
+# from cubicweb.devtools.apptest import TestEnvironment
+# env = testclass._env = TestEnvironment('data', configcls=testclass.configcls)
+# for reg in env.vreg.values():
+# reg._selected = {}
+# try:
+# orig_select_best = reg.__class__.__orig_select_best
+# except Exception:
+# orig_select_best = reg.__class__._select_best
+# def instr_select_best(self, *args, **kwargs):
+# selected = orig_select_best(self, *args, **kwargs)
+# try:
+# self._selected[selected.__class__] += 1
+# except KeyError:
+# self._selected[selected.__class__] = 1
+# except AttributeError:
+# pass # occurs on reg used to restore database
+# return selected
+# reg.__class__._select_best = instr_select_best
+# reg.__class__.__orig_select_best = orig_select_best
+
+
+# def print_untested_objects(testclass, skipregs=('hooks', 'etypes')):
+# for regname, reg in testclass._env.vreg.items():
+# if regname in skipregs:
+# continue
+# for appobjects in reg.values():
+# for appobject in appobjects:
+# if not reg._selected.get(appobject):
+# print 'not tested', regname, appobject
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/devtools/webtest.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/devtools/webtest.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,34 @@
+from __future__ import absolute_import
+
+import webtest
+
+from cubicweb.wsgi import handler
+from cubicweb.devtools.testlib import CubicWebTC
+
+
+class CubicWebTestTC(CubicWebTC):
+ def setUp(self):
+ super(CubicWebTestTC, self).setUp()
+ self.config.global_set_option('base-url', 'http://localhost.local/')
+ # call load_configuration again to let the config reset its datadir_url
+ self.config.load_configuration()
+ webapp = handler.CubicWebWSGIApplication(self.config)
+ self.webapp = webtest.TestApp(webapp)
+
+ def tearDown(self):
+ del self.webapp
+ super(CubicWebTestTC, self).tearDown()
+
+ def login(self, user=None, password=None, **args):
+ if user is None:
+ user = self.admlogin
+ if password is None:
+ password = self.admpassword if user == self.admlogin else user
+ args.update({
+ '__login': user,
+ '__password': password
+ })
+ return self.webapp.get('/login', args)
+
+ def logout(self):
+ return self.webapp.get('/logout')
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/entities/__init__.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/entities/__init__.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,208 @@
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""base application's entities class implementation: `AnyEntity`"""
+
+__docformat__ = "restructuredtext en"
+
+from warnings import warn
+
+from six import text_type, string_types
+
+from logilab.common.decorators import classproperty
+from logilab.common.deprecation import deprecated
+
+from cubicweb import Unauthorized
+from cubicweb.entity import Entity
+
+
+class AnyEntity(Entity):
+    """an entity instance has e_schema automagically set on the class and
+    instances have access to their issuing cursor
+    """
+    __regid__ = 'Any'
+
+    @classproperty
+    def cw_etype(cls):
+        """entity type as a unicode string"""
+        return text_type(cls.__regid__)
+
+    @classmethod
+    def cw_create_url(cls, req, **kwargs):
+        """ return the url of the entity creation form for this entity type"""
+        return req.build_url('add/%s' % cls.__regid__, **kwargs)
+
+    @classmethod
+    @deprecated('[3.22] use cw_fti_index_rql_limit instead')
+    def cw_fti_index_rql_queries(cls, req):
+        """return the list of rql queries to fetch entities to FT-index
+
+        The default is to fetch all entities at once and to prefetch
+        indexable attributes but one could imagine iterating over
+        "smaller" resultsets if the table is very big or returning
+        a subset of entities that match some business-logic condition.
+        """
+        restrictions = ['X is %s' % cls.__regid__]
+        selected = ['X']
+        for attrschema in sorted(cls.e_schema.indexable_attributes()):
+            varname = attrschema.type.upper()
+            restrictions.append('X %s %s' % (attrschema, varname))
+            selected.append(varname)
+        return ['Any %s WHERE %s' % (', '.join(selected),
+                                     ', '.join(restrictions))]
+
+    @classmethod
+    def cw_fti_index_rql_limit(cls, req, limit=1000):
+        """generate rsets of entities to FT-index
+
+        By default, each successive result set is limited to 1000 entities
+        """
+        # honor subclasses still overriding the deprecated
+        # cw_fti_index_rql_queries hook: delegate to it and warn
+        if cls.cw_fti_index_rql_queries.__func__ != AnyEntity.cw_fti_index_rql_queries.__func__:
+            warn("[3.22] cw_fti_index_rql_queries is replaced by cw_fti_index_rql_limit",
+                 DeprecationWarning)
+            for rql in cls.cw_fti_index_rql_queries(req):
+                yield req.execute(rql)
+            return
+        restrictions = ['X is %s' % cls.__regid__]
+        selected = ['X']
+        # eid of the last entity of the previous batch (paging cursor)
+        start = 0
+        for attrschema in sorted(cls.e_schema.indexable_attributes()):
+            varname = attrschema.type.upper()
+            restrictions.append('X %s %s' % (attrschema, varname))
+            selected.append(varname)
+        while True:
+            # page by eid rather than OFFSET so new/deleted rows can't shift pages
+            q_restrictions = restrictions + ['X eid > %s' % start]
+            rset = req.execute('Any %s ORDERBY X LIMIT %s WHERE %s' %
+                               (', '.join(selected),
+                                limit,
+                                ', '.join(q_restrictions)))
+            if rset:
+                start = rset[-1][0]
+                yield rset
+            else:
+                break
+
+    # meta data api ###########################################################
+
+    def dc_title(self):
+        """return a suitable *unicode* title for this entity"""
+        # use the first non-meta attribute holding a value
+        for rschema, attrschema in self.e_schema.attribute_definitions():
+            if rschema.meta:
+                continue
+            value = self.cw_attr_value(rschema.type)
+            if value is not None:
+                # make the value printable (dates, floats, bytes, etc.)
+                return self.printable_value(rschema.type, value, attrschema.type,
+                                            format='text/plain')
+        return u'%s #%s' % (self.dc_type(), self.eid)
+
+    def dc_long_title(self):
+        """return a more detailed title for this entity"""
+        return self.dc_title()
+
+    def dc_description(self, format='text/plain'):
+        """return a suitable description for this entity"""
+        if 'description' in self.e_schema.subjrels:
+            return self.printable_value('description', format=format)
+        return u''
+
+    def dc_authors(self):
+        """return a suitable description for the author(s) of the entity"""
+        try:
+            return ', '.join(u.name() for u in self.owned_by)
+        except Unauthorized:
+            return u''
+
+    def dc_creator(self):
+        """return a suitable description for the creator of the entity"""
+        if self.creator:
+            return self.creator.name()
+        return u''
+
+    def dc_date(self, date_format=None):# XXX default to ISO 8601 ?
+        """return latest modification date of this entity"""
+        return self._cw.format_date(self.modification_date, date_format=date_format)
+
+    def dc_type(self, form=''):
+        """return the display name for the type of this entity (translated)"""
+        return self.e_schema.display_name(self._cw, form)
+
+    def dc_language(self):
+        """return language used by this entity (translated)"""
+        # check if entities has internationalizable attributes
+        # XXX one is enough or check if all String attributes are internationalizable?
+        for rschema, attrschema in self.e_schema.attribute_definitions():
+            if rschema.rdef(self.e_schema, attrschema).internationalizable:
+                return self._cw._(self._cw.user.property_value('ui.language'))
+        return self._cw._(self._cw.vreg.property_value('ui.language'))
+
+    @property
+    def creator(self):
+        """return the CWUser entity which has created this entity, or None if
+        unknown or if the current user doesn't have access to this euser
+        """
+        try:
+            return self.created_by[0]
+        except (Unauthorized, IndexError):
+            return None
+
+    # abstractions making the whole things (well, some at least) working ######
+
+    def sortvalue(self, rtype=None):
+        """return a value which can be used to sort this entity or given
+        entity's attribute
+        """
+        if rtype is None:
+            return self.dc_title().lower()
+        value = self.cw_attr_value(rtype)
+        # do not restrict to `unicode` because Bytes will return a `str` value
+        if isinstance(value, string_types):
+            return self.printable_value(rtype, format='text/plain').lower()
+        return value
+
+
+def fetch_config(fetchattrs, mainattr=None, pclass=AnyEntity, order='ASC'):
+    """function to ease basic configuration of an entity class ORM. Basic usage
+    is:
+
+    .. sourcecode:: python
+
+      class MyEntity(AnyEntity):
+
+          fetch_attrs, cw_fetch_order = fetch_config(['attr1', 'attr2'])
+          # uncomment line below if you want the same sorting for 'unrelated' entities
+          # cw_fetch_unrelated_order = cw_fetch_order
+
+    Using this, when using ORM methods retrieving this type of entity, 'attr1'
+    and 'attr2' will be automatically prefetched and results will be sorted on
+    'attr1' ascending (ie the first attribute in the list).
+
+    This function will automatically add to fetched attributes those defined in
+    parent class given using the `pclass` argument.
+
+    Also, you can use the `mainattr` and `order` arguments to have a different
+    sorting.
+
+    :param fetchattrs: sequence of attribute names to prefetch
+    :param mainattr: attribute to sort on (defaults to the first of `fetchattrs`)
+    :param pclass: parent class whose `fetch_attrs` are appended (None to skip)
+    :param order: 'ASC' or 'DESC' sort direction when `mainattr` is selected
+    :return: (attributes list, `cw_fetch_order` classmethod) pair
+    """
+    if pclass is not None:
+        # build a new list instead of extending in place so that a list
+        # object passed by the caller is never mutated as a side effect
+        fetchattrs = list(fetchattrs) + list(pclass.fetch_attrs)
+    if mainattr is None:
+        mainattr = fetchattrs[0]
+    @classmethod
+    def fetch_order(cls, select, attr, var):
+        """add a sort on `var` when `attr` is the configured main attribute"""
+        if attr == mainattr:
+            select.add_sort_var(var, order == 'ASC')
+    return fetchattrs, fetch_order
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/entities/adapters.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/entities/adapters.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,427 @@
+# copyright 2010-2015 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""some basic entity adapter implementations, for interfaces used in the
+framework itself.
+"""
+from cubicweb import _
+
+from itertools import chain
+from hashlib import md5
+
+from logilab.mtconverter import TransformError
+from logilab.common.decorators import cached
+
+from cubicweb import ValidationError, view, ViolatedConstraint, UniqueTogetherError
+from cubicweb.predicates import is_instance, relation_possible, match_exception
+
+
+class IEmailableAdapter(view.EntityAdapter):
+    """adapter for entities reachable by email through a `primary_email`
+    or `use_email` relation
+    """
+    __regid__ = 'IEmailable'
+    __select__ = relation_possible('primary_email') | relation_possible('use_email')
+
+    def get_email(self):
+        """return the primary email address (preferred) or the first 'used'
+        address, or None when the entity has neither
+        """
+        if getattr(self.entity, 'primary_email', None):
+            return self.entity.primary_email[0].address
+        if getattr(self.entity, 'use_email', None):
+            return self.entity.use_email[0].address
+        return None
+
+    def allowed_massmail_keys(self):
+        """returns a set of allowed email substitution keys
+
+        The default is to return the entity's attribute list but you might
+        override this method to allow extra keys. For instance, a Person
+        class might want to return a `companyname` key.
+        """
+        # Password / Bytes values can't sensibly be substituted into a mail body
+        return set(rschema.type
+                   for rschema, attrtype in self.entity.e_schema.attribute_definitions()
+                   if attrtype.type not in ('Password', 'Bytes'))
+
+    def as_email_context(self):
+        """returns the dictionary as used by the sendmail controller to
+        build email bodies.
+
+        NOTE: the dictionary keys should match the list returned by the
+        `allowed_massmail_keys` method.
+        """
+        return dict((attr, getattr(self.entity, attr))
+                    for attr in self.allowed_massmail_keys())
+
+
+class INotifiableAdapter(view.EntityAdapter):
+    """default adapter controlling notification emails sent for an entity"""
+    __regid__ = 'INotifiable'
+    __select__ = is_instance('Any')
+
+    def notification_references(self, view):
+        """used to control References field of email send on notification
+        for this entity. `view` is the notification view.
+
+        Should return a list of eids which can be used to generate message
+        identifiers of previously sent email(s)
+        """
+        # thread notifications along the entity's tree ancestry when available
+        itree = self.entity.cw_adapt_to('ITree')
+        if itree is not None:
+            return itree.path()[:-1]
+        if view.msgid_timestamp:
+            return (self.entity.eid,)
+        return ()
+
+
+class IFTIndexableAdapter(view.EntityAdapter):
+    """standard adapter to handle fulltext indexing
+
+    .. automethod:: cubicweb.entities.adapters.IFTIndexableAdapter.fti_containers
+    .. automethod:: cubicweb.entities.adapters.IFTIndexableAdapter.get_words
+    """
+    __regid__ = 'IFTIndexable'
+    __select__ = is_instance('Any')
+
+    def fti_containers(self, _done=None):
+        """generate the entities to index when handling ``self.entity``
+
+        The actual entities depend on ``fulltext_container`` usage
+        in the datamodel definition
+        """
+        if _done is None:
+            _done = set()
+        entity = self.entity
+        _done.add(entity.eid)
+        containers = tuple(entity.e_schema.fulltext_containers())
+        if containers:
+            # index the container entities instead of this one, recursing
+            # through fulltext_container relations (_done guards cycles)
+            for rschema, role in containers:
+                if role == 'object':
+                    targets = getattr(entity, rschema.type)
+                else:
+                    targets = getattr(entity, 'reverse_%s' % rschema)
+                for target in targets:
+                    if target.eid in _done:
+                        continue
+                    for container in target.cw_adapt_to('IFTIndexable').fti_containers(_done):
+                        yield container
+        else:
+            yield entity
+
+    # weight in ABCD (full-text ranking classes); `attr_weight` maps an
+    # attribute name to its weight, defaulting to 'C' in get_words
+    entity_weight = 1.0
+    attr_weight = {}
+
+    def get_words(self):
+        """used by the full text indexer to get words to index
+
+        this method should only be used on the repository side since it depends
+        on the logilab.database package
+
+        :rtype: dict
+        :return: mapping of weight class to the indexable words of this entity
+        """
+        from logilab.database.fti import tokenize
+        # take care to cases where we're modifying the schema
+        entity = self.entity
+        pending = self._cw.transaction_data.setdefault('pendingrdefs', set())
+        words = {}
+        for rschema in entity.e_schema.indexable_attributes():
+            if (entity.e_schema, rschema) in pending:
+                continue
+            weight = self.attr_weight.get(rschema, 'C')
+            try:
+                value = entity.printable_value(rschema, format=u'text/plain')
+            except TransformError:
+                continue
+            except Exception:
+                self.exception("can't add value of %s to text index for entity %s",
+                               rschema, entity.eid)
+                continue
+            if value:
+                words.setdefault(weight, []).extend(tokenize(value))
+        # also index words from entities related through fulltext relations
+        for rschema, role in entity.e_schema.fulltext_relations():
+            if role == 'subject':
+                for entity_ in getattr(entity, rschema.type):
+                    merge_weight_dict(words, entity_.cw_adapt_to('IFTIndexable').get_words())
+            else: # if role == 'object':
+                for entity_ in getattr(entity, 'reverse_%s' % rschema.type):
+                    merge_weight_dict(words, entity_.cw_adapt_to('IFTIndexable').get_words())
+        return words
+
+
+def merge_weight_dict(maindict, newdict):
+    """merge *newdict*'s word lists into *maindict* in place, keyed by weight"""
+    for weight in newdict:
+        bucket = maindict.setdefault(weight, [])
+        bucket.extend(newdict[weight])
+
+
+class IDownloadableAdapter(view.EntityAdapter):
+    """interface for downloadable entities
+
+    concrete adapters must implement all the ``download_*`` methods below
+    """
+    __regid__ = 'IDownloadable'
+    __abstract__ = True
+
+    def download_url(self, **kwargs): # XXX not really part of this interface
+        """return a URL to download entity's content
+
+        It should be a unicode object containing url-encoded ASCII.
+        """
+        raise NotImplementedError
+
+    def download_content_type(self):
+        """return MIME type (unicode) of the downloadable content"""
+        raise NotImplementedError
+
+    def download_encoding(self):
+        """return encoding (unicode) of the downloadable content"""
+        raise NotImplementedError
+
+    def download_file_name(self):
+        """return file name (unicode) of the downloadable content"""
+        raise NotImplementedError
+
+    def download_data(self):
+        """return actual data (bytes) of the downloadable content"""
+        raise NotImplementedError
+
+
+# XXX should propose to use two different relations for children/parent
+class ITreeAdapter(view.EntityAdapter):
+    """This adapter provides a tree interface.
+
+    It has to be overridden to be configured using the tree_relation,
+    child_role and parent_role class attributes to benefit from this default
+    implementation.
+
+    This class provides the following methods:
+
+    .. automethod:: iterparents
+    .. automethod:: iterchildren
+    .. automethod:: prefixiter
+
+    .. automethod:: is_leaf
+    .. automethod:: is_root
+
+    .. automethod:: root
+    .. automethod:: parent
+    .. automethod:: children
+    .. automethod:: different_type_children
+    .. automethod:: same_type_children
+    .. automethod:: children_rql
+    .. automethod:: path
+    """
+    __regid__ = 'ITree'
+    __abstract__ = True
+
+    # roles of the tree relation: by default children are subjects pointing
+    # to their parent object (concrete classes must set `tree_relation`)
+    child_role = 'subject'
+    parent_role = 'object'
+
+    def children_rql(self):
+        """Returns RQL to get the children of the entity."""
+        return self.entity.cw_related_rql(self.tree_relation, self.parent_role)
+
+    def different_type_children(self, entities=True):
+        """Return children entities of different type as this entity.
+
+        According to the `entities` parameter, return entity objects or the
+        equivalent result set.
+        """
+        res = self.entity.related(self.tree_relation, self.parent_role,
+                                  entities=entities)
+        eschema = self.entity.e_schema
+        if entities:
+            return [e for e in res if e.e_schema != eschema]
+        return res.filtered_rset(lambda x: x.e_schema != eschema, self.entity.cw_col)
+
+    def same_type_children(self, entities=True):
+        """Return children entities of the same type as this entity.
+
+        According to the `entities` parameter, return entity objects or the
+        equivalent result set.
+        """
+        res = self.entity.related(self.tree_relation, self.parent_role,
+                                  entities=entities)
+        eschema = self.entity.e_schema
+        if entities:
+            return [e for e in res if e.e_schema == eschema]
+        return res.filtered_rset(lambda x: x.e_schema is eschema, self.entity.cw_col)
+
+    def is_leaf(self):
+        """Returns True if the entity does not have any children."""
+        return len(self.children()) == 0
+
+    def is_root(self):
+        """Returns true if the entity is root of the tree (e.g. has no parent).
+        """
+        return self.parent() is None
+
+    def root(self):
+        """Return the root entity of the tree."""
+        return self._cw.entity_from_eid(self.path()[0])
+
+    def parent(self):
+        """Returns the parent entity if any, else None (e.g. if we are on the
+        root).
+        """
+        try:
+            return self.entity.related(self.tree_relation, self.child_role,
+                                       entities=True)[0]
+        except (KeyError, IndexError):
+            # IndexError: no parent; KeyError: relation not usable on this
+            # entity (presumably) -- TODO confirm
+            return None
+
+    def children(self, entities=True, sametype=False):
+        """Return children entities.
+
+        According to the `entities` parameter, return entity objects or the
+        equivalent result set.
+        """
+        if sametype:
+            return self.same_type_children(entities)
+        else:
+            return self.entity.related(self.tree_relation, self.parent_role,
+                                       entities=entities)
+
+    def iterparents(self, strict=True):
+        """Return an iterator on the parents of the entity."""
+        def _uptoroot(self):
+            curr = self
+            while True:
+                curr = curr.parent()
+                if curr is None:
+                    break
+                yield curr
+                # re-adapt: the parent may be of another entity type
+                curr = curr.cw_adapt_to('ITree')
+        if not strict:
+            # non-strict iteration includes the entity itself first
+            return chain([self.entity], _uptoroot(self))
+        return _uptoroot(self)
+
+    def iterchildren(self, _done=None):
+        """Return an iterator over the item's children."""
+        if _done is None:
+            _done = set()
+        for child in self.children():
+            if child.eid in _done:
+                self.error('loop in %s tree: %s', child.cw_etype.lower(), child)
+                continue
+            yield child
+            _done.add(child.eid)
+
+    def prefixiter(self, _done=None):
+        """Return an iterator over the item's descendants in a prefixed order."""
+        if _done is None:
+            _done = set()
+        if self.entity.eid in _done:
+            return
+        _done.add(self.entity.eid)
+        yield self.entity
+        for child in self.same_type_children():
+            for entity in child.cw_adapt_to('ITree').prefixiter(_done):
+                yield entity
+
+    @cached
+    def path(self):
+        """Returns the list of eids from the root object to this object."""
+        path = []
+        adapter = self
+        entity = adapter.entity
+        while entity is not None:
+            if entity.eid in path:
+                self.error('loop in %s tree: %s', entity.cw_etype.lower(), entity)
+                break
+            path.append(entity.eid)
+            try:
+                # check we are not jumping to another tree
+                if (adapter.tree_relation != self.tree_relation or
+                    adapter.child_role != self.child_role):
+                    break
+                entity = adapter.parent()
+                adapter = entity.cw_adapt_to('ITree')
+            except AttributeError:
+                # entity is None or has no ITree adapter: reached the top
+                break
+        path.reverse()
+        return path
+
+
+class ISerializableAdapter(view.EntityAdapter):
+    """Adapter to serialize an entity to a bare python structure that may be
+    directly serialized to e.g. JSON.
+    """
+
+    __regid__ = 'ISerializable'
+    __select__ = is_instance('Any')
+
+    def serialize(self):
+        """return a dict holding the entity's eid, type, source uri and
+        cached attribute values
+        """
+        entity = self.entity
+        entity.complete()
+        data = {
+            'cw_etype': entity.cw_etype,
+            'cw_source': entity.cw_metainformation()['source']['uri'],
+            'eid': entity.eid,
+        }
+        for rschema, __ in entity.e_schema.attribute_definitions():
+            attr = rschema.type
+            try:
+                value = entity.cw_attr_cache[attr]
+            except KeyError:
+                # Bytes
+                continue
+            data[attr] = value
+        return data
+
+
+# error handling adapters ######################################################
+
+
+class IUserFriendlyError(view.EntityAdapter):
+    """abstract base for adapters turning low-level database errors into
+    user-friendly validation errors; instantiated with an `exc` keyword
+    argument holding the original exception
+    """
+    __regid__ = 'IUserFriendlyError'
+    __abstract__ = True
+
+    def __init__(self, *args, **kwargs):
+        self.exc = kwargs.pop('exc')
+        super(IUserFriendlyError, self).__init__(*args, **kwargs)
+
+
+class IUserFriendlyUniqueTogether(IUserFriendlyError):
+    __select__ = match_exception(UniqueTogetherError)
+
+    def raise_user_exception(self):
+        """translate a UniqueTogetherError into a ValidationError carrying
+        one message per relation involved in the violated unicity constraint
+        """
+        rtypes = self.exc.rtypes
+        errors = {}
+        msgargs = {}
+        i18nvalues = []
+        for rtype in rtypes:
+            errors[rtype] = _('%(KEY-rtype)s is part of violated unicity constraint')
+            msgargs[rtype + '-rtype'] = rtype
+            i18nvalues.append(rtype + '-rtype')
+        # global message shown when no specific field can be blamed
+        errors[''] = _('some relations violate a unicity constraint')
+        raise ValidationError(self.entity.eid, errors, msgargs=msgargs, i18nvalues=i18nvalues)
+
+
+class IUserFriendlyCheckConstraint(IUserFriendlyError):
+    __select__ = match_exception(ViolatedConstraint)
+
+    def raise_user_exception(self):
+        """map the violated database constraint name back to the schema
+        constraint it was generated from, then raise a ValidationError with
+        that constraint's failure message
+        """
+        cstrname = self.exc.cstrname
+        eschema = self.entity.e_schema
+        # scan attribute constraints for the one whose generated name
+        # ('cstr' + md5 of etype + rtype + constraint type + serialization)
+        # matches; the for/else/continue/break dance exits both loops as
+        # soon as a match is found
+        for rschema, attrschema in eschema.attribute_definitions():
+            rdef = rschema.rdef(eschema, attrschema)
+            for constraint in rdef.constraints:
+                if cstrname == 'cstr' + md5(
+                        (eschema.type + rschema.type + constraint.type() +
+                         (constraint.serialize() or '')).encode('ascii')).hexdigest():
+                    break
+            else:
+                continue
+            break
+        else:
+            # the db constraint should always originate from the schema
+            assert 0
+        key = rschema.type + '-subject'
+        msg, args = constraint.failed_message(key, self.entity.cw_edited[rschema.type])
+        raise ValidationError(self.entity.eid, {key: msg}, args)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/entities/authobjs.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/entities/authobjs.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,188 @@
+# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""entity classes user and group entities"""
+
+__docformat__ = "restructuredtext en"
+
+from six import string_types, text_type
+
+from logilab.common.decorators import cached
+
+from cubicweb import Unauthorized
+from cubicweb.entities import AnyEntity, fetch_config
+
+class CWGroup(AnyEntity):
+    """entity class for CWGroup entities"""
+    __regid__ = 'CWGroup'
+    fetch_attrs, cw_fetch_order = fetch_config(['name'])
+    cw_fetch_unrelated_order = cw_fetch_order
+
+    def dc_long_title(self):
+        """group name, with its translation in parens when it differs"""
+        name = self.name
+        trname = self._cw._(name)
+        if trname != name:
+            return '%s (%s)' % (name, trname)
+        return name
+
+    @cached
+    def num_users(self):
+        """return the number of users in this group"""
+        return self._cw.execute('Any COUNT(U) WHERE U in_group G, G eid %(g)s',
+                                {'g': self.eid})[0][0]
+
+
+class CWUser(AnyEntity):
+    """entity class for CWUser entities"""
+    __regid__ = 'CWUser'
+    fetch_attrs, cw_fetch_order = fetch_config(['login', 'firstname', 'surname'])
+    cw_fetch_unrelated_order = cw_fetch_order
+
+    # used by repository to check if the user can log in or not
+    AUTHENTICABLE_STATES = ('activated',)
+
+    # low level utilities #####################################################
+    def __init__(self, *args, **kwargs):
+        # `groups` and `properties` may be pre-fetched by the caller to avoid
+        # issuing one query per lazy property later on
+        groups = kwargs.pop('groups', None)
+        properties = kwargs.pop('properties', None)
+        super(CWUser, self).__init__(*args, **kwargs)
+        if groups is not None:
+            self._groups = groups
+        if properties is not None:
+            self._properties = properties
+
+    @property
+    def groups(self):
+        """set of the names of the groups this user belongs to (lazily fetched)"""
+        try:
+            return self._groups
+        except AttributeError:
+            self._groups = set(g.name for g in self.in_group)
+            return self._groups
+
+    @property
+    def properties(self):
+        """dict of this user's CWProperty key/value pairs (lazily fetched)"""
+        try:
+            return self._properties
+        except AttributeError:
+            self._properties = dict(
+                self._cw.execute(
+                    'Any K, V WHERE P for_user U, U eid %(userid)s, '
+                    'P pkey K, P value V',
+                    {'userid': self.eid}))
+            return self._properties
+
+    def prefered_language(self, language=None):
+        """return language used by this user, if explicitly defined (eg not
+        using http negotiation)
+        """
+        language = language or self.property_value('ui.language')
+        vreg = self._cw.vreg
+        try:
+            vreg.config.translations[language]
+        except KeyError:
+            language = vreg.property_value('ui.language')
+            # the site-wide default must have a translation catalog; checking
+            # membership in the translations mapping itself, not in the
+            # catalog tuple it maps to
+            assert language in vreg.config.translations, language
+        return language
+
+    def property_value(self, key):
+        """return this user's value for property `key`, falling back to the
+        site-wide value when unset or invalid
+        """
+        try:
+            # properties stored on the user aren't correctly typed
+            # (e.g. all values are unicode string)
+            return self._cw.vreg.typed_value(key, self.properties[key])
+        except KeyError:
+            pass
+        except ValueError:
+            self.warning('incorrect value for eproperty %s of user %s',
+                         key, self.login)
+        return self._cw.vreg.property_value(key)
+
+    def set_property(self, pkey, value):
+        """set property `pkey` to `value` for this user, creating the
+        CWProperty entity when it doesn't exist yet
+        """
+        # use text_type (not the py2-only `unicode` builtin) for py3 compat
+        value = text_type(value)
+        try:
+            prop = self._cw.execute(
+                'CWProperty X WHERE X pkey %(k)s, X for_user U, U eid %(u)s',
+                {'k': pkey, 'u': self.eid}).get_entity(0, 0)
+        except Exception:
+            # no existing property for this user: create one
+            kwargs = dict(pkey=text_type(pkey), value=value)
+            if self.is_in_group('managers'):
+                kwargs['for_user'] = self
+            self._cw.create_entity('CWProperty', **kwargs)
+        else:
+            prop.cw_set(value=value)
+
+    def matching_groups(self, groups):
+        """return the number of the given group(s) in which the user is
+
+        :type groups: str or iterable(str)
+        :param groups: a group name or an iterable on group names
+        """
+        if isinstance(groups, string_types):
+            groups = frozenset((groups,))
+        elif isinstance(groups, (tuple, list)):
+            groups = frozenset(groups)
+        return len(groups & self.groups) # XXX return the resulting set instead of its size
+
+    def is_in_group(self, group):
+        """convenience / shortcut method to test if the user belongs to `group`
+        """
+        return group in self.groups
+
+    def is_anonymous(self):
+        """ checks if user is an anonymous user"""
+        #FIXME on the web-side anonymous user is detected according
+        # to config['anonymous-user'], we don't have this info on
+        # the server side.
+        return self.groups == frozenset(('guests', ))
+
+    def owns(self, eid):
+        """return whether the entity with `eid` is owned by this user (False
+        when not allowed to know)
+        """
+        try:
+            return self._cw.execute(
+                'Any X WHERE X eid %(x)s, X owned_by U, U eid %(u)s',
+                {'x': eid, 'u': self.eid})
+        except Unauthorized:
+            return False
+    owns = cached(owns, keyarg=1)
+
+    # presentation utilities ##################################################
+
+    def name(self):
+        """construct a name using firstname / surname or login if not defined"""
+
+        if self.firstname and self.surname:
+            return self._cw._('%(firstname)s %(surname)s') % {
+                'firstname': self.firstname, 'surname' : self.surname}
+        if self.firstname:
+            return self.firstname
+        return self.login
+
+    def dc_title(self):
+        return self.login
+
+    dc_long_title = name
+
+    def __call__(self, *args, **kwargs):
+        """ugly hack for compatibility between dbapi and repo api
+
+        In the dbapi, Connection and Session have a ``user`` method to
+        generated a user for a request In the repo api, Connection and Session
+        have a user attribute inherited from SessionRequestBase prototype. This
+        ugly hack allows to not break user of the user method.
+
+        XXX Deprecate me ASAP"""
+        return self
+
+from logilab.common.deprecation import class_renamed
+# deprecated aliases kept for backward compatibility with the pre-"CW" names
+EUser = class_renamed('EUser', CWUser)
+EGroup = class_renamed('EGroup', CWGroup)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/entities/lib.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/entities/lib.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,149 @@
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""entity classes for optional library entities"""
+
+__docformat__ = "restructuredtext en"
+from warnings import warn
+from datetime import datetime
+
+from six.moves import range
+from six.moves.urllib.parse import urlsplit, urlunsplit
+
+from logilab.mtconverter import xml_escape
+
+from cubicweb import UnknownProperty
+from cubicweb.entity import _marker
+from cubicweb.entities import AnyEntity, fetch_config
+
+def mangle_email(address):
+    """return an anti-spam obfuscated rendering of an email address
+    ('x at y dot z'); addresses without a '@' are returned unchanged
+    """
+    parts = address.split('@', 1)
+    if len(parts) != 2:
+        return address
+    local_part, domain = parts
+    return '%s at %s' % (local_part, domain.replace('.', ' dot '))
+
+
+class EmailAddress(AnyEntity):
+    """entity class for EmailAddress entities"""
+    __regid__ = 'EmailAddress'
+    fetch_attrs, cw_fetch_order = fetch_config(['address', 'alias'])
+    rest_attr = 'eid'
+
+    def dc_title(self):
+        if self.alias:
+            return '%s <%s>' % (self.alias, self.display_address())
+        return self.display_address()
+
+    @property
+    def email_of(self):
+        """entity using this address (via reverse `use_email`), or None"""
+        return self.reverse_use_email and self.reverse_use_email[0] or None
+
+    @property
+    def prefered(self):
+        """the preferred form of this address, defaulting to itself"""
+        return self.prefered_form and self.prefered_form[0] or self
+
+    def related_emails(self, skipeids=None):
+        """generate Email entities sent to or from this address, most recent
+        first, skipping eids listed in `skipeids`
+        """
+        # XXX move to eemail
+        # check email relations are in the schema first
+        subjrels = self.e_schema.object_relations()
+        if not ('sender' in subjrels and 'recipients' in subjrels):
+            return
+        rset = self._cw.execute('DISTINCT Any X, S, D ORDERBY D DESC '
+                                'WHERE X sender Y or X recipients Y, '
+                                'X subject S, X date D, Y eid %(y)s',
+                                {'y': self.eid})
+        if skipeids is None:
+            skipeids = set()
+        for i in range(len(rset)):
+            eid = rset[i][0]
+            if eid in skipeids:
+                continue
+            skipeids.add(eid)
+            yield rset.get_entity(i, 0)
+
+    def display_address(self):
+        """return the address, obfuscated when the instance is configured
+        with 'mangle-emails'
+        """
+        if self._cw.vreg.config['mangle-emails']:
+            return mangle_email(self.address)
+        return self.address
+
+    def printable_value(self, attr, value=_marker, attrtype=None,
+                        format='text/html'):
+        """overridden to return displayable address when necessary"""
+        if attr == 'address':
+            address = self.display_address()
+            if format == 'text/html':
+                address = xml_escape(address)
+            return address
+        return super(EmailAddress, self).printable_value(attr, value, attrtype, format)
+
+
+class Bookmark(AnyEntity):
+    """customized class for Bookmark entities"""
+    __regid__ = 'Bookmark'
+    fetch_attrs, cw_fetch_order = fetch_config(['title', 'path'])
+
+    def actual_url(self):
+        """return the url this bookmark points to, with the bookmark's title
+        propagated as a 'vtitle' query parameter
+        """
+        url = self._cw.build_url(self.path)
+        if self.title:
+            # urlparts[3] is the query string of the split url
+            urlparts = list(urlsplit(url))
+            if urlparts[3]:
+                urlparts[3] += '&vtitle=%s' % self._cw.url_quote(self.title)
+            else:
+                urlparts[3] = 'vtitle=%s' % self._cw.url_quote(self.title)
+            url = urlunsplit(urlparts)
+        return url
+
+    def action_url(self):
+        """url of the 'follow' action for this bookmark"""
+        return self.absolute_url() + '/follow'
+
+
+class CWProperty(AnyEntity):
+    """entity class for CWProperty entities"""
+    __regid__ = 'CWProperty'
+
+    fetch_attrs, cw_fetch_order = fetch_config(['pkey', 'value'])
+    rest_attr = 'pkey'
+
+    def typed_value(self):
+        """return the property's value converted to its declared type"""
+        return self._cw.vreg.typed_value(self.pkey, self.value)
+
+    def dc_description(self, format='text/plain'):
+        """return the (translated) help text registered for this property
+        key, or an empty string for unknown keys
+        """
+        try:
+            return self._cw._(self._cw.vreg.property_info(self.pkey)['help'])
+        except UnknownProperty:
+            return u''
+
+
+class CWCache(AnyEntity):
+    """Cache"""
+    __regid__ = 'CWCache'
+    fetch_attrs, cw_fetch_order = fetch_config(['name'])
+
+    def __init__(self, *args, **kwargs):
+        warn('[3.19] CWCache entity type is going away soon. '
+             'Other caching mechanisms can be used more reliably '
+             'to the same effect.',
+             DeprecationWarning)
+        super(CWCache, self).__init__(*args, **kwargs)
+
+    def touch(self):
+        """refresh the cache's timestamp to now"""
+        self._cw.execute('SET X timestamp %(t)s WHERE X eid %(x)s',
+                         {'t': datetime.now(), 'x': self.eid})
+
+    def valid(self, date):
+        """return whether content produced at `date` is still up to date
+        with regard to this cache's timestamp
+        """
+        if date:
+            return date > self.timestamp
+        return False
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/entities/schemaobjs.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/entities/schemaobjs.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,178 @@
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""schema definition related entities"""
+
+__docformat__ = "restructuredtext en"
+
+from logilab.common.decorators import cached
+
+from yams.schema import role_name
+
+from cubicweb import ValidationError
+from cubicweb.schema import ERQLExpression, RRQLExpression
+
+from cubicweb.entities import AnyEntity, fetch_config
+
+
+class CWEType(AnyEntity):
+    """entity class for CWEType (entity type) schema entities"""
+    __regid__ = 'CWEType'
+    fetch_attrs, cw_fetch_order = fetch_config(['name'])
+
+    def dc_title(self):
+        # internal name followed by its translation
+        return u'%s (%s)' % (self.name, self._cw._(self.name))
+
+    def dc_long_title(self):
+        # title decorated with UML-like stereotypes (only 'final' applies)
+        stereotypes = []
+        _ = self._cw._
+        if self.final:
+            stereotypes.append(_('final'))
+        if stereotypes:
+            return u'%s <<%s>>' % (self.dc_title(), ', '.join(stereotypes))
+        return self.dc_title()
+
+
+class CWRType(AnyEntity):
+    """entity class for CWRType (relation type) schema entities"""
+    __regid__ = 'CWRType'
+    fetch_attrs, cw_fetch_order = fetch_config(['name'])
+
+    def dc_title(self):
+        # internal name followed by its translation
+        return u'%s (%s)' % (self.name, self._cw._(self.name))
+
+    def dc_long_title(self):
+        # title decorated with UML-like stereotypes
+        stereotypes = []
+        _ = self._cw._
+        if self.symmetric:
+            stereotypes.append(_('symmetric'))
+        if self.inlined:
+            stereotypes.append(_('inlined'))
+        if self.final:
+            stereotypes.append(_('final'))
+        if stereotypes:
+            return u'%s <<%s>>' % (self.dc_title(), ', '.join(stereotypes))
+        return self.dc_title()
+
+    def check_inlined_allowed(self):
+        """check inlining is possible, raise ValidationError if not possible
+        """
+        # don't use the persistent schema, we may miss cardinality changes
+        # in the same transaction
+        for rdef in self.reverse_relation_type:
+            card = rdef.cardinality[0]
+            # inlining requires subject cardinality '?' or '1'
+            if not card in '?1':
+                qname = role_name('inlined', 'subject')
+                # rtype/stype/otype/card are interpolated via locals() below
+                rtype = self.name
+                stype = rdef.stype
+                otype = rdef.otype
+                msg = self._cw._("can't set inlined=True, "
+                                 "%(stype)s %(rtype)s %(otype)s "
+                                 "has cardinality=%(card)s")
+                raise ValidationError(self.eid, {qname: msg % locals()})
+
+
+class CWRelation(AnyEntity):
+    """entity class for CWRelation (relation definition) schema entities"""
+    __regid__ = 'CWRelation'
+    fetch_attrs = fetch_config(['cardinality'])[0]
+
+    def dc_title(self):
+        # "<subject type> <relation type> <object type>"
+        return u'%s %s %s' % (
+            self.from_entity[0].name,
+            self.relation_type[0].name,
+            self.to_entity[0].name)
+
+    def dc_long_title(self):
+        # as dc_title, with non-'1' cardinalities shown in brackets
+        card = self.cardinality
+        scard, ocard = u'', u''
+        if card[0] != '1':
+            scard = '[%s]' % card[0]
+        if card[1] != '1':
+            ocard = '[%s]' % card[1]
+        return u'%s %s%s%s %s' % (
+            self.from_entity[0].name,
+            scard, self.relation_type[0].name, ocard,
+            self.to_entity[0].name)
+
+    @property
+    def rtype(self):
+        # related relation type entity
+        return self.relation_type[0]
+
+    @property
+    def stype(self):
+        # subject (from) entity type entity
+        return self.from_entity[0]
+
+    @property
+    def otype(self):
+        # object (to) entity type entity
+        return self.to_entity[0]
+
+    def yams_schema(self):
+        """Return the yams relation definition matching this entity."""
+        rschema = self._cw.vreg.schema.rschema(self.rtype.name)
+        return rschema.rdefs[(self.stype.name, self.otype.name)]
+
+
+class CWAttribute(CWRelation):
+    """entity class for CWAttribute (attribute definition) schema entities"""
+    __regid__ = 'CWAttribute'
+
+    def dc_long_title(self):
+        # a leading '+' marks a mandatory (subject cardinality '1') attribute
+        card = self.cardinality
+        scard = u''
+        if card[0] == '1':
+            scard = '+'
+        return u'%s %s%s %s' % (
+            self.from_entity[0].name,
+            scard, self.relation_type[0].name,
+            self.to_entity[0].name)
+
+
+class CWConstraint(AnyEntity):
+    """entity class for CWConstraint schema entities"""
+    __regid__ = 'CWConstraint'
+    fetch_attrs, cw_fetch_order = fetch_config(['value'])
+
+    def dc_title(self):
+        # constraint type name with its serialized value
+        return '%s(%s)' % (self.cstrtype[0].name, self.value or u'')
+
+    @property
+    def type(self):
+        # name of the related constraint type entity
+        return self.cstrtype[0].name
+
+
+class RQLExpression(AnyEntity):
+    """entity class for RQLExpression permission/condition entities"""
+    __regid__ = 'RQLExpression'
+    fetch_attrs, cw_fetch_order = fetch_config(['exprtype', 'mainvars', 'expression'])
+
+    def dc_title(self):
+        return self.expression or u''
+
+    def dc_long_title(self):
+        return '%s(%s)' % (self.exprtype, self.expression or u'')
+
+    @property
+    def expression_of(self):
+        """Return the entity this expression applies to, found through one of
+        the permission relations or 'condition'; None when unattached.
+        """
+        for rel in ('read_permission', 'add_permission', 'delete_permission',
+                    'update_permission', 'condition'):
+            values = getattr(self, 'reverse_%s' % rel)
+            if values:
+                return values[0]
+
+    @cached
+    def _rqlexpr(self):
+        # build and cache the cubicweb.schema expression object matching
+        # exprtype; anything but ERQLExpression is treated as RRQLExpression
+        if self.exprtype == 'ERQLExpression':
+            return ERQLExpression(self.expression, self.mainvars, self.eid)
+        #if self.exprtype == 'RRQLExpression':
+        return RRQLExpression(self.expression, self.mainvars, self.eid)
+
+    def check_expression(self, *args, **kwargs):
+        """Delegate to the underlying RQL expression's check()."""
+        return self._rqlexpr().check(*args, **kwargs)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/entities/sources.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/entities/sources.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,184 @@
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""data source related entities"""
+
+__docformat__ = "restructuredtext en"
+
+import re
+from socket import gethostname
+import logging
+
+from logilab.common.textutils import text_to_dict
+from logilab.common.configuration import OptionError
+from logilab.mtconverter import xml_escape
+
+from cubicweb.entities import AnyEntity, fetch_config
+
+class _CWSourceCfgMixIn(object):
+    """mixin providing configuration parsing/update helpers for entities
+    holding a text `config` attribute
+    """
+    @property
+    def dictconfig(self):
+        # the `config` text parsed into a dict (empty dict when unset)
+        return self.config and text_to_dict(self.config) or {}
+
+    def update_config(self, skip_unknown=False, **config):
+        """Merge **config into the current configuration and persist it.
+
+        Unknown options raise OptionError unless `skip_unknown` is true.
+        """
+        from cubicweb.server import SOURCE_TYPES
+        from cubicweb.server.serverconfig import (SourceConfiguration,
+                                                  generate_source_config)
+        cfg = self.dictconfig
+        cfg.update(config)
+        options = SOURCE_TYPES[self.type].options
+        sconfig = SourceConfiguration(self._cw.vreg.config, options=options)
+        for opt, val in cfg.items():
+            try:
+                sconfig.set_option(opt, val)
+            except OptionError:
+                if skip_unknown:
+                    continue
+                raise
+        # NOTE(review): `unicode` is Python 2 only -- this code predates the
+        # Python 3 port
+        cfgstr = unicode(generate_source_config(sconfig), self._cw.encoding)
+        self.cw_set(config=cfgstr)
+
+
+class CWSource(_CWSourceCfgMixIn, AnyEntity):
+    """entity class for CWSource (data source) entities"""
+    __regid__ = 'CWSource'
+    fetch_attrs, cw_fetch_order = fetch_config(['name', 'type'])
+
+    @property
+    def host_config(self):
+        """Source configuration overridden by every host config whose
+        `match_host` pattern matches the current host name.
+        """
+        dictconfig = self.dictconfig
+        host = gethostname()
+        for hostcfg in self.host_configs:
+            if hostcfg.match(host):
+                self.info('matching host config %s for source %s',
+                          hostcfg.match_host, self.name)
+                dictconfig.update(hostcfg.dictconfig)
+        return dictconfig
+
+    @property
+    def host_configs(self):
+        # CWSourceHostConfig entities attached to this source
+        return self.reverse_cw_host_config_of
+
+    def init_mapping(self, mapping):
+        """Create CWSourceSchemaConfig entities from `mapping`, a sequence of
+        (key, options) pairs where key is a (subject, rtype, object) triple
+        ('*' acting as a wildcard), an entity type name (starts upper-cased)
+        or a relation type name.
+        """
+        for key, options in mapping:
+            if isinstance(key, tuple): # relation definition
+                assert len(key) == 3
+                restrictions = ['X relation_type RT, RT name %(rt)s']
+                kwargs = {'rt': key[1]}
+                if key[0] != '*':
+                    restrictions.append('X from_entity FT, FT name %(ft)s')
+                    kwargs['ft'] = key[0]
+                if key[2] != '*':
+                    restrictions.append('X to_entity TT, TT name %(tt)s')
+                    kwargs['tt'] = key[2]
+                rql = 'Any X WHERE %s' % ','.join(restrictions)
+                schemarset = self._cw.execute(rql, kwargs)
+            elif key[0].isupper(): # entity type
+                schemarset = self._cw.execute('CWEType X WHERE X name %(et)s',
+                                              {'et': key})
+            else: # relation type
+                schemarset = self._cw.execute('CWRType X WHERE X name %(rt)s',
+                                              {'rt': key})
+            for schemaentity in schemarset.entities():
+                self._cw.create_entity('CWSourceSchemaConfig',
+                                       cw_for_source=self,
+                                       cw_schema=schemaentity,
+                                       options=options)
+
+    @property
+    def repo_source(self):
+        """repository only property, not available from the web side (eg
+        self._cw is expected to be a server session)
+        """
+        return self._cw.repo.sources_by_eid[self.eid]
+
+
+class CWSourceHostConfig(_CWSourceCfgMixIn, AnyEntity):
+    """entity class for per-host source configuration entities"""
+    __regid__ = 'CWSourceHostConfig'
+    fetch_attrs, cw_fetch_order = fetch_config(['match_host', 'config'])
+
+    @property
+    def cwsource(self):
+        # the CWSource this host configuration applies to
+        return self.cw_host_config_of[0]
+
+    def match(self, hostname):
+        """Return a regex match object when `hostname` matches the
+        `match_host` pattern (anchored at the start), else None.
+        """
+        return re.match(self.match_host, hostname)
+
+
+class CWSourceSchemaConfig(AnyEntity):
+    """entity class mapping a schema object to a source, with options"""
+    __regid__ = 'CWSourceSchemaConfig'
+    fetch_attrs, cw_fetch_order = fetch_config(['cw_for_source', 'cw_schema', 'options'])
+
+    def dc_title(self):
+        # translated entity type name followed by the eid
+        return self._cw._(self.cw_etype) + ' #%s' % self.eid
+
+    @property
+    def schema(self):
+        # the mapped schema entity
+        return self.cw_schema[0]
+
+    @property
+    def cwsource(self):
+        # the CWSource this configuration belongs to
+        return self.cw_for_source[0]
+
+
+class CWDataImport(AnyEntity):
+    """entity class recording a data import's status and log messages"""
+    __regid__ = 'CWDataImport'
+    repo_source = _logs = None # please pylint
+
+    def init(self):
+        # _logs buffers formatted records until write_log() flushes them
+        self._logs = []
+        self.repo_source = self.cwsource.repo_source
+
+    def dc_title(self):
+        return '%s [%s]' % (self.printable_value('start_timestamp'),
+                            self.printable_value('status'))
+
+    @property
+    def cwsource(self):
+        # the CWSource this import belongs to
+        return self.cw_import_of[0]
+
+    def record_debug(self, msg, path=None, line=None):
+        """Buffer a DEBUG message and forward it to the source logger."""
+        self._log(logging.DEBUG, msg, path, line)
+        self.repo_source.debug(msg)
+
+    def record_info(self, msg, path=None, line=None):
+        """Buffer an INFO message and forward it to the source logger."""
+        self._log(logging.INFO, msg, path, line)
+        self.repo_source.info(msg)
+
+    def record_warning(self, msg, path=None, line=None):
+        """Buffer a WARNING message and forward it to the source logger."""
+        self._log(logging.WARNING, msg, path, line)
+        self.repo_source.warning(msg)
+
+    def record_error(self, msg, path=None, line=None):
+        """Buffer an ERROR message; marks the whole import as failed."""
+        self._status = u'failed'
+        self._log(logging.ERROR, msg, path, line)
+        self.repo_source.error(msg)
+
+    def record_fatal(self, msg, path=None, line=None):
+        """Buffer a FATAL message; marks the whole import as failed."""
+        self._status = u'failed'
+        self._log(logging.FATAL, msg, path, line)
+        self.repo_source.fatal(msg)
+
+    def _log(self, severity, msg, path=None, line=None):
+        # NOTE(review): the record terminator/separator strings here and in
+        # write_log look garbled (trailing space) -- upstream uses an HTML
+        # '<br/>' marker; verify against the original file
+        encodedmsg = u'%s\t%s\t%s\t%s ' % (severity, path or u'',
+                                           line or u'', xml_escape(msg))
+        self._logs.append(encodedmsg)
+
+    def write_log(self, session, **kwargs):
+        """Flush buffered records into the `log` attribute and reset the
+        buffer; `status` defaults to u'success' unless errors were recorded.
+        """
+        if 'status' not in kwargs:
+            kwargs['status'] = getattr(self, '_status', u'success')
+        self.cw_set(log=u' '.join(self._logs), **kwargs)
+        self._logs = []
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/entities/test/data/migration/postcreate.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/entities/test/data/migration/postcreate.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,19 @@
+# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+# minimal workflow for Bookmark, used by the entities tests
+wf = add_workflow(u'bmk wf', 'Bookmark')
+wf.add_state(u'hop', initial=True)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/entities/test/data/schema.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/entities/test/data/schema.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,37 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""entities tests schema"""
+
+from yams.buildobjs import EntityType, String, RichString, Int
+from cubicweb.schema import make_workflowable
+
+class Company(EntityType):
+    # test entity type with a numeric, a plain string and a rich text attribute
+    order = Int()
+    name = String()
+    description = RichString()
+
+class Division(Company):
+    # real yams specialization of Company (not just Python inheritance)
+    __specializes_schema__ = True
+
+class SubDivision(Division):
+    # second level of specialization
+    __specializes_schema__ = True
+
+
+from cubicweb.schemas import bootstrap, Bookmark
+# give CWGroup and Bookmark a workflow so workflow tests can run on them
+make_workflowable(bootstrap.CWGroup)
+make_workflowable(Bookmark.Bookmark)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/entities/test/requirements.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/entities/test/requirements.txt Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1 @@
+docutils
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/entities/test/unittest_base.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/entities/test/unittest_base.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,262 @@
+# -*- coding: utf-8 -*-
+# copyright 2003-2015 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""unit tests for cubicweb.entities.base module
+"""
+
+from logilab.common.testlib import unittest_main
+from logilab.common.decorators import clear_cache
+from logilab.common.registry import yes
+
+from cubicweb.devtools.testlib import CubicWebTC
+
+from cubicweb.entities import AnyEntity
+
+
+class BaseEntityTC(CubicWebTC):
+    """base test case creating a plain 'member' user for the tests"""
+
+    def setup_database(self):
+        with self.admin_access.repo_cnx() as cnx:
+            # eid of the non-manager user, reused by subclasses
+            self.membereid = self.create_user(cnx, 'member').eid
+            cnx.commit()
+
+
+class MetadataTC(BaseEntityTC):
+    """tests for entity metadata (creator, type) and full-text index RQL"""
+
+    def test_creator(self):
+        with self.new_access('member').repo_cnx() as cnx:
+            entity = cnx.create_entity('Bookmark', title=u"hello", path=u'project/cubicweb')
+            cnx.commit()
+            self.assertEqual(entity.creator.eid, self.membereid)
+            self.assertEqual(entity.dc_creator(), u'member')
+
+    def test_type(self):
+        # dc_type may be translated
+        with self.admin_access.client_cnx() as cnx:
+            member = cnx.entity_from_eid(self.membereid)
+            self.assertEqual(member.dc_type(), 'CWUser')
+
+    def test_cw_etype(self):
+        # cw_etype is never translated
+        with self.admin_access.client_cnx() as cnx:
+            member = cnx.entity_from_eid(self.membereid)
+            self.assertEqual(member.cw_etype, 'CWUser')
+
+    def test_entity_meta_attributes(self):
+        # XXX move to yams
+        self.assertEqual(self.schema['CWUser'].meta_attributes(), {})
+        self.assertEqual(dict((str(k), v)
+                              for k, v in self.schema['State'].meta_attributes().items()),
+                         {'description_format': ('format', 'description')})
+
+    def test_fti_rql_method(self):
+        # entity class overriding the deprecated cw_fti_index_rql_queries
+        # classmethod with a custom query
+        class EmailAddress(AnyEntity):
+            __regid__ = 'EmailAddress'
+            __select__ = AnyEntity.__select__ & yes(2)
+
+            @classmethod
+            def cw_fti_index_rql_queries(cls, req):
+                return ['EmailAddress Y']
+
+        with self.admin_access.web_request() as req:
+            req.create_entity('EmailAddress', address=u'foo@bar.com')
+            eclass = self.vreg['etypes'].etype_class('EmailAddress')
+            # deprecated
+            self.assertEqual(['Any X, ADDRESS, ALIAS WHERE X is EmailAddress, '
+                              'X address ADDRESS, X alias ALIAS'],
+                             eclass.cw_fti_index_rql_queries(req))
+
+            self.assertEqual(['Any X, ADDRESS, ALIAS ORDERBY X LIMIT 1000 WHERE X is EmailAddress, '
+                              'X address ADDRESS, X alias ALIAS, X eid > 0'],
+                             [rset.rql for rset in eclass.cw_fti_index_rql_limit(req)])
+
+            # test backwards compatibility with custom method
+            with self.temporary_appobjects(EmailAddress):
+                self.vreg['etypes'].clear_caches()
+                eclass = self.vreg['etypes'].etype_class('EmailAddress')
+                self.assertEqual(['EmailAddress Y'],
+                                 [rset.rql for rset in eclass.cw_fti_index_rql_limit(req)])
+
+
+class EmailAddressTC(BaseEntityTC):
+    """tests for the EmailAddress entity class"""
+
+    def test_canonical_form(self):
+        with self.admin_access.repo_cnx() as cnx:
+            email1 = cnx.execute('INSERT EmailAddress X: '
+                                 'X address "maarten.ter.huurne@philips.com"').get_entity(0, 0)
+            email2 = cnx.execute('INSERT EmailAddress X: '
+                                 'X address "maarten@philips.com"').get_entity(0, 0)
+            email3 = cnx.execute('INSERT EmailAddress X: '
+                                 'X address "toto@logilab.fr"').get_entity(0, 0)
+            email1.cw_set(prefered_form=email2)
+            # 'prefered' follows the prefered_form relation, defaulting to self
+            self.assertEqual(email1.prefered.eid, email2.eid)
+            self.assertEqual(email2.prefered.eid, email2.eid)
+            self.assertEqual(email3.prefered.eid, email3.eid)
+
+    def test_mangling(self):
+        query = 'INSERT EmailAddress X: X address "maarten.ter.huurne@philips.com"'
+        with self.admin_access.repo_cnx() as cnx:
+            email = cnx.execute(query).get_entity(0, 0)
+            self.assertEqual(email.display_address(), 'maarten.ter.huurne@philips.com')
+            self.assertEqual(email.printable_value('address'), 'maarten.ter.huurne@philips.com')
+            self.vreg.config.global_set_option('mangle-emails', True)
+            try:
+                self.assertEqual(email.display_address(), 'maarten.ter.huurne at philips dot com')
+                self.assertEqual(email.printable_value('address'),
+                                 'maarten.ter.huurne at philips dot com')
+                # addresses without a '@' are returned unchanged
+                email = cnx.execute('INSERT EmailAddress X: X address "syt"').get_entity(0, 0)
+                self.assertEqual(email.display_address(), 'syt')
+                self.assertEqual(email.printable_value('address'), 'syt')
+            finally:
+                # restore the global option whatever happened above
+                self.vreg.config.global_set_option('mangle-emails', False)
+
+    def test_printable_value_escape(self):
+        # NOTE(review): both expected values look identical here -- the HTML
+        # variant should presumably contain the escaped '&amp;'; verify
+        # against the original file (possible extraction garbling)
+        with self.admin_access.repo_cnx() as cnx:
+            email = cnx.execute('INSERT EmailAddress X: '
+                                'X address "maarten&ter@philips.com"').get_entity(0, 0)
+            self.assertEqual(email.printable_value('address'),
+                             'maarten&ter@philips.com')
+            self.assertEqual(email.printable_value('address', format='text/plain'),
+                             'maarten&ter@philips.com')
+
+
+class CWUserTC(BaseEntityTC):
+    """tests for the CWUser entity class"""
+
+    def test_complete(self):
+        with self.admin_access.repo_cnx() as cnx:
+            e = cnx.execute('CWUser X WHERE X login "admin"').get_entity(0, 0)
+            e.complete()
+
+    def test_matching_groups(self):
+        with self.admin_access.repo_cnx() as cnx:
+            e = cnx.execute('CWUser X WHERE X login "admin"').get_entity(0, 0)
+            self.assertTrue(e.matching_groups('managers'))
+            self.assertFalse(e.matching_groups('xyz'))
+            # a single matching group in the sequence is enough
+            self.assertTrue(e.matching_groups(('xyz', 'managers')))
+            self.assertFalse(e.matching_groups(('xyz', 'abcd')))
+
+    def test_dc_title_and_name(self):
+        with self.admin_access.repo_cnx() as cnx:
+            e = cnx.execute('CWUser U WHERE U login "member"').get_entity(0, 0)
+            self.assertEqual(e.dc_title(), 'member')
+            self.assertEqual(e.name(), 'member')
+            # name() prefers firstname/surname while dc_title keeps the login
+            e.cw_set(firstname=u'bouah')
+            self.assertEqual(e.dc_title(), 'member')
+            self.assertEqual(e.name(), u'bouah')
+            e.cw_set(surname=u'lôt')
+            self.assertEqual(e.dc_title(), 'member')
+            self.assertEqual(e.name(), u'bouah lôt')
+
+    def test_falsey_dc_title(self):
+        with self.admin_access.repo_cnx() as cnx:
+            # a falsy attribute value (order=0) must still be usable as title
+            e = cnx.create_entity('Company', order=0, name=u'pythonian')
+            cnx.commit()
+            self.assertEqual(u'0', e.dc_title())
+
+    def test_allowed_massmail_keys(self):
+        with self.admin_access.repo_cnx() as cnx:
+            e = cnx.execute('CWUser U WHERE U login "member"').get_entity(0, 0)
+            # Bytes/Password attributes should be omitted
+            self.assertEqual(
+                e.cw_adapt_to('IEmailable').allowed_massmail_keys(),
+                set(('surname', 'firstname', 'login', 'last_login_time',
+                     'creation_date', 'modification_date', 'cwuri', 'eid'))
+            )
+
+    def test_cw_instantiate_object_relation(self):
+        """ a weird non regression test """
+        with self.admin_access.repo_cnx() as cnx:
+            e = cnx.execute('CWUser U WHERE U login "member"').get_entity(0, 0)
+            cnx.create_entity('CWGroup', name=u'logilab', reverse_in_group=e)
+
+
+class HTMLtransformTC(BaseEntityTC):
+    """check that rich text is sanitized when rendered as HTML"""
+
+    def test_sanitized_html(self):
+        # NOTE(review): the '.. raw:: html' payload below looks garbled --
+        # the embedded markup (presumably a <script> calling alert) appears
+        # to have been stripped; verify against the original file
+        with self.admin_access.repo_cnx() as cnx:
+            c = cnx.create_entity('Company', name=u'Babar',
+                                  description=u"""
+Title
+=====
+
+Elephant management best practices.
+
+.. raw:: html
+
+
+""", description_format=u'text/rest')
+            cnx.commit()
+            c.cw_clear_all_caches()
+            # the raw payload must survive the text/plain rendering but be
+            # removed from the sanitized text/html rendering
+            self.assertIn('alert',
+                          c.printable_value('description', format='text/plain'))
+            self.assertNotIn('alert',
+                             c.printable_value('description', format='text/html'))
+
+
+class SpecializedEntityClassesTC(CubicWebTC):
+    """check entity class selection w.r.t. schema specialization"""
+
+    def select_eclass(self, etype):
+        # clear selector cache
+        clear_cache(self.vreg['etypes'], 'etype_class')
+        return self.vreg['etypes'].etype_class(etype)
+
+    def test_etype_class_selection_and_specialization(self):
+        # no specific class for Subdivisions, the default one should be selected
+        eclass = self.select_eclass('SubDivision')
+        self.assertTrue(eclass.__autogenerated__)
+        # self.assertEqual(eclass.__bases__, (AnyEntity,))
+        # build class from most generic to most specific and make
+        # sure the most specific is always selected
+        self.vreg._loadedmods[__name__] = {}
+        for etype in ('Company', 'Division', 'SubDivision'):
+            class Foo(AnyEntity):
+                __regid__ = etype
+            self.vreg.register(Foo)
+            eclass = self.select_eclass('SubDivision')
+            self.assertTrue(eclass.__autogenerated__)
+            self.assertFalse(eclass is Foo)
+            if etype == 'SubDivision':
+                self.assertEqual(eclass.__bases__, (Foo,))
+            else:
+                self.assertEqual(eclass.__bases__[0].__bases__, (Foo,))
+        # check Division eclass is still selected for plain Division entities
+        eclass = self.select_eclass('Division')
+        self.assertEqual(eclass.cw_etype, 'Division')
+
+
+class ISerializableTC(CubicWebTC):
+    """tests for the ISerializable adapter"""
+
+    def test_serialization(self):
+        with self.admin_access.repo_cnx() as cnx:
+            entity = cnx.create_entity('CWGroup', name=u'tmp')
+            cnx.commit()
+            serializer = entity.cw_adapt_to('ISerializable')
+            # serialize() must expose metadata plus plain attributes
+            expected = {
+                'cw_etype': u'CWGroup',
+                'cw_source': 'system',
+                'eid': entity.eid,
+                'cwuri': u'http://testing.fr/cubicweb/%s' % entity.eid,
+                'creation_date': entity.creation_date,
+                'modification_date': entity.modification_date,
+                'name': u'tmp',
+            }
+            self.assertEqual(serializer.serialize(), expected)
+
+
+# allow running this test module directly
+if __name__ == '__main__':
+    unittest_main()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/entities/test/unittest_wfobjs.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/entities/test/unittest_wfobjs.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,705 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+
+from cubicweb import ValidationError
+from cubicweb.devtools.testlib import CubicWebTC
+
+def add_wf(shell, etype, name=None, default=False):
+    """Shortcut adding a workflow for `etype`, named `name` (defaults to the
+    etype name), without checking that the type is workflowable.
+    """
+    if name is None:
+        name = etype
+    return shell.add_workflow(name, etype, default=default,
+                              ensure_workflowable=False)
+
+def parse_hist(wfhist):
+    """Flatten a workflow history into (from state, to state, transition
+    name or None, comment) tuples.
+    """
+    return [(ti.previous_state.name, ti.new_state.name,
+             ti.transition and ti.transition.name, ti.comment)
+            for ti in wfhist]
+
+
+class WorkflowBuildingTC(CubicWebTC):
+    """tests for workflow definition: states, transitions and their unicity
+    constraints
+    """
+
+    def test_wf_construction(self):
+        with self.admin_access.shell() as shell:
+            wf = add_wf(shell, 'Company')
+            foo = wf.add_state(u'foo', initial=True)
+            bar = wf.add_state(u'bar')
+            self.assertEqual(wf.state_by_name('bar').eid, bar.eid)
+            self.assertEqual(wf.state_by_name('barrr'), None)
+            baz = wf.add_transition(u'baz', (foo,), bar, ('managers',))
+            self.assertEqual(wf.transition_by_name('baz').eid, baz.eid)
+            self.assertEqual(len(baz.require_group), 1)
+            self.assertEqual(baz.require_group[0].name, 'managers')
+
+    def test_duplicated_state(self):
+        with self.admin_access.shell() as shell:
+            wf = add_wf(shell, 'Company')
+            wf.add_state(u'foo', initial=True)
+            shell.commit()
+            # a state name must be unique within a given workflow
+            with self.assertRaises(ValidationError) as cm:
+                wf.add_state(u'foo')
+            self.assertEqual({'name': u'%(KEY-rtype)s is part of violated unicity constraint',
+                              'state_of': u'%(KEY-rtype)s is part of violated unicity constraint',
+                              '': u'some relations violate a unicity constraint'},
+                             cm.exception.errors)
+            shell.rollback()
+            # no pb if not in the same workflow
+            wf2 = add_wf(shell, 'Company')
+            foo = wf2.add_state(u'foo', initial=True)
+            shell.commit()
+            # gnark gnark
+            bar = wf.add_state(u'bar')
+            shell.commit()
+            # renaming to a clashing name triggers the same constraint
+            with self.assertRaises(ValidationError) as cm:
+                bar.cw_set(name=u'foo')
+            shell.rollback()
+            self.assertEqual({'name': u'%(KEY-rtype)s is part of violated unicity constraint',
+                              'state_of': u'%(KEY-rtype)s is part of violated unicity constraint',
+                              '': u'some relations violate a unicity constraint'},
+                             cm.exception.errors)
+
+    def test_duplicated_transition(self):
+        with self.admin_access.shell() as shell:
+            wf = add_wf(shell, 'Company')
+            foo = wf.add_state(u'foo', initial=True)
+            bar = wf.add_state(u'bar')
+            wf.add_transition(u'baz', (foo,), bar, ('managers',))
+            # a transition name must be unique within a given workflow
+            with self.assertRaises(ValidationError) as cm:
+                wf.add_transition(u'baz', (bar,), foo)
+            self.assertEqual({'name': u'%(KEY-rtype)s is part of violated unicity constraint',
+                              'transition_of': u'%(KEY-rtype)s is part of violated unicity constraint',
+                              '': u'some relations violate a unicity constraint'},
+                             cm.exception.errors)
+            shell.rollback()
+            # no pb if not in the same workflow
+            wf2 = add_wf(shell, 'Company')
+            foo = wf2.add_state(u'foo', initial=True)
+            bar = wf2.add_state(u'bar')
+            wf2.add_transition(u'baz', (foo,), bar, ('managers',))
+            shell.commit()
+            # gnark gnark
+            biz = wf2.add_transition(u'biz', (bar,), foo)
+            shell.commit()
+            # renaming to a clashing name triggers the same constraint
+            with self.assertRaises(ValidationError) as cm:
+                biz.cw_set(name=u'baz')
+            shell.rollback()
+            self.assertEqual({'name': u'%(KEY-rtype)s is part of violated unicity constraint',
+                              'transition_of': u'%(KEY-rtype)s is part of violated unicity constraint',
+                              '': u'some relations violate a unicity constraint'},
+                             cm.exception.errors)
+
+
+class WorkflowTC(CubicWebTC):
+
+ def setup_database(self):
+ rschema = self.schema['in_state']
+ for rdef in rschema.rdefs.values():
+ self.assertEqual(rdef.cardinality, '1*')
+ with self.admin_access.client_cnx() as cnx:
+ self.member_eid = self.create_user(cnx, 'member').eid
+ cnx.commit()
+
+ def test_workflow_base(self):
+ with self.admin_access.web_request() as req:
+ e = self.create_user(req, 'toto')
+ iworkflowable = e.cw_adapt_to('IWorkflowable')
+ self.assertEqual(iworkflowable.state, 'activated')
+ iworkflowable.change_state('deactivated', u'deactivate 1')
+ req.cnx.commit()
+ iworkflowable.change_state('activated', u'activate 1')
+ req.cnx.commit()
+ iworkflowable.change_state('deactivated', u'deactivate 2')
+ req.cnx.commit()
+ e.cw_clear_relation_cache('wf_info_for', 'object')
+ self.assertEqual([tr.comment for tr in e.reverse_wf_info_for],
+ ['deactivate 1', 'activate 1', 'deactivate 2'])
+ self.assertEqual(iworkflowable.latest_trinfo().comment, 'deactivate 2')
+
+ def test_possible_transitions(self):
+ with self.admin_access.web_request() as req:
+ user = req.execute('CWUser X').get_entity(0, 0)
+ iworkflowable = user.cw_adapt_to('IWorkflowable')
+ trs = list(iworkflowable.possible_transitions())
+ self.assertEqual(len(trs), 1)
+ self.assertEqual(trs[0].name, u'deactivate')
+ self.assertEqual(trs[0].destination(None).name, u'deactivated')
+ # test a std user get no possible transition
+ with self.new_access('member').web_request() as req:
+ # fetch the entity using the new session
+ trs = list(req.user.cw_adapt_to('IWorkflowable').possible_transitions())
+ self.assertEqual(len(trs), 0)
+
+ def _test_manager_deactivate(self, user):
+ iworkflowable = user.cw_adapt_to('IWorkflowable')
+ user.cw_clear_relation_cache('in_state', 'subject')
+ self.assertEqual(len(user.in_state), 1)
+ self.assertEqual(iworkflowable.state, 'deactivated')
+ trinfo = iworkflowable.latest_trinfo()
+ self.assertEqual(trinfo.previous_state.name, 'activated')
+ self.assertEqual(trinfo.new_state.name, 'deactivated')
+ self.assertEqual(trinfo.comment, 'deactivate user')
+ self.assertEqual(trinfo.comment_format, 'text/plain')
+ return trinfo
+
+ def test_change_state(self):
+ with self.admin_access.client_cnx() as cnx:
+ user = cnx.user
+ iworkflowable = user.cw_adapt_to('IWorkflowable')
+ iworkflowable.change_state('deactivated', comment=u'deactivate user')
+ trinfo = self._test_manager_deactivate(user)
+ self.assertEqual(trinfo.transition, None)
+
+ def test_set_in_state_bad_wf(self):
+ with self.admin_access.shell() as shell:
+ wf = add_wf(shell, 'CWUser')
+ s = wf.add_state(u'foo', initial=True)
+ shell.commit()
+ with self.admin_access.repo_cnx() as cnx:
+ with cnx.security_enabled(write=False):
+ with self.assertRaises(ValidationError) as cm:
+ cnx.execute('SET X in_state S WHERE X eid %(x)s, S eid %(s)s',
+ {'x': cnx.user.eid, 's': s.eid})
+ self.assertEqual(cm.exception.errors, {'in_state-subject': "state doesn't belong to entity's workflow. "
+ "You may want to set a custom workflow for this entity first."})
+
+ def test_fire_transition(self):
+ with self.admin_access.client_cnx() as cnx:
+ user = cnx.user
+ iworkflowable = user.cw_adapt_to('IWorkflowable')
+ iworkflowable.fire_transition('deactivate', comment=u'deactivate user')
+ user.cw_clear_all_caches()
+ self.assertEqual(iworkflowable.state, 'deactivated')
+ self._test_manager_deactivate(user)
+ trinfo = self._test_manager_deactivate(user)
+ self.assertEqual(trinfo.transition.name, 'deactivate')
+
+ def test_goback_transition(self):
+ with self.admin_access.web_request() as req:
+ wf = req.user.cw_adapt_to('IWorkflowable').current_workflow
+ asleep = wf.add_state('asleep')
+ wf.add_transition('rest', (wf.state_by_name('activated'),
+ wf.state_by_name('deactivated')),
+ asleep)
+ wf.add_transition('wake up', asleep)
+ user = self.create_user(req, 'stduser')
+ iworkflowable = user.cw_adapt_to('IWorkflowable')
+ iworkflowable.fire_transition('rest')
+ req.cnx.commit()
+ iworkflowable.fire_transition('wake up')
+ req.cnx.commit()
+ self.assertEqual(iworkflowable.state, 'activated')
+ iworkflowable.fire_transition('deactivate')
+ req.cnx.commit()
+ iworkflowable.fire_transition('rest')
+ req.cnx.commit()
+ iworkflowable.fire_transition('wake up')
+ req.cnx.commit()
+ user.cw_clear_all_caches()
+ self.assertEqual(iworkflowable.state, 'deactivated')
+
+ # XXX test managers can change state without matching transition
+
+ def _test_stduser_deactivate(self):
+ with self.admin_access.repo_cnx() as cnx:
+ self.create_user(cnx, 'tutu')
+ with self.new_access('tutu').web_request() as req:
+ iworkflowable = req.entity_from_eid(self.member_eid).cw_adapt_to('IWorkflowable')
+ with self.assertRaises(ValidationError) as cm:
+ iworkflowable.fire_transition('deactivate')
+ self.assertEqual(cm.exception.errors, {'by_transition-subject': "transition may not be fired"})
+ with self.new_access('member').web_request() as req:
+ iworkflowable = req.entity_from_eid(self.member_eid).cw_adapt_to('IWorkflowable')
+ iworkflowable.fire_transition('deactivate')
+ req.cnx.commit()
+ with self.assertRaises(ValidationError) as cm:
+ iworkflowable.fire_transition('activate')
+ self.assertEqual(cm.exception.errors, {'by_transition-subject': "transition may not be fired"})
+
+ def test_fire_transition_owned_by(self):
+ with self.admin_access.repo_cnx() as cnx:
+ cnx.execute('INSERT RQLExpression X: X exprtype "ERQLExpression", '
+ 'X expression "X owned_by U", T condition X '
+ 'WHERE T name "deactivate"')
+ cnx.commit()
+ self._test_stduser_deactivate()
+
+ def test_fire_transition_has_update_perm(self):
+ with self.admin_access.repo_cnx() as cnx:
+ cnx.execute('INSERT RQLExpression X: X exprtype "ERQLExpression", '
+ 'X expression "U has_update_permission X", T condition X '
+ 'WHERE T name "deactivate"')
+ cnx.commit()
+ self._test_stduser_deactivate()
+
+ def test_swf_base(self):
+ """subworkflow
+
+ +-----------+ tr1 +-----------+
+ | swfstate1 | ------>| swfstate2 |
+ +-----------+ +-----------+
+ | tr2 +-----------+
+ `------>| swfstate3 |
+ +-----------+
+
+ main workflow
+
+ +--------+ swftr1 +--------+
+ | state1 | -------[swfstate2]->| state2 |
+ +--------+ | +--------+
+ | +--------+
+ `-[swfstate3]-->| state3 |
+ +--------+
+ """
+ # sub-workflow
+ with self.admin_access.shell() as shell:
+ swf = add_wf(shell, 'CWGroup', name='subworkflow')
+ swfstate1 = swf.add_state(u'swfstate1', initial=True)
+ swfstate2 = swf.add_state(u'swfstate2')
+ swfstate3 = swf.add_state(u'swfstate3')
+ tr1 = swf.add_transition(u'tr1', (swfstate1,), swfstate2)
+ tr2 = swf.add_transition(u'tr2', (swfstate1,), swfstate3)
+ # main workflow
+ mwf = add_wf(shell, 'CWGroup', name='main workflow', default=True)
+ state1 = mwf.add_state(u'state1', initial=True)
+ state2 = mwf.add_state(u'state2')
+ state3 = mwf.add_state(u'state3')
+ swftr1 = mwf.add_wftransition(u'swftr1', swf, state1,
+ [(swfstate2, state2), (swfstate3, state3)])
+ swf.cw_clear_all_caches()
+ self.assertEqual(swftr1.destination(None).eid, swfstate1.eid)
+ # workflows built, begin test
+ with self.admin_access.web_request() as req:
+ group = req.create_entity('CWGroup', name=u'grp1')
+ req.cnx.commit()
+ iworkflowable = group.cw_adapt_to('IWorkflowable')
+ self.assertEqual(iworkflowable.current_state.eid, state1.eid)
+ self.assertEqual(iworkflowable.current_workflow.eid, mwf.eid)
+ self.assertEqual(iworkflowable.main_workflow.eid, mwf.eid)
+ self.assertEqual(iworkflowable.subworkflow_input_transition(), None)
+ iworkflowable.fire_transition('swftr1', u'go')
+ req.cnx.commit()
+ group.cw_clear_all_caches()
+ self.assertEqual(iworkflowable.current_state.eid, swfstate1.eid)
+ self.assertEqual(iworkflowable.current_workflow.eid, swf.eid)
+ self.assertEqual(iworkflowable.main_workflow.eid, mwf.eid)
+ self.assertEqual(iworkflowable.subworkflow_input_transition().eid, swftr1.eid)
+ iworkflowable.fire_transition('tr1', u'go')
+ req.cnx.commit()
+ group.cw_clear_all_caches()
+ self.assertEqual(iworkflowable.current_state.eid, state2.eid)
+ self.assertEqual(iworkflowable.current_workflow.eid, mwf.eid)
+ self.assertEqual(iworkflowable.main_workflow.eid, mwf.eid)
+ self.assertEqual(iworkflowable.subworkflow_input_transition(), None)
+ # force back to swfstate1 is impossible since we can't any more find
+ # subworkflow input transition
+ with self.assertRaises(ValidationError) as cm:
+ iworkflowable.change_state(swfstate1, u'gadget')
+ self.assertEqual(cm.exception.errors, {'to_state-subject': "state doesn't belong to entity's workflow"})
+ req.cnx.rollback()
+ # force back to state1
+ iworkflowable.change_state('state1', u'gadget')
+ iworkflowable.fire_transition('swftr1', u'au')
+ group.cw_clear_all_caches()
+ iworkflowable.fire_transition('tr2', u'chapeau')
+ req.cnx.commit()
+ group.cw_clear_all_caches()
+ self.assertEqual(iworkflowable.current_state.eid, state3.eid)
+ self.assertEqual(iworkflowable.current_workflow.eid, mwf.eid)
+ self.assertEqual(iworkflowable.main_workflow.eid, mwf.eid)
+ self.assertListEqual(parse_hist(iworkflowable.workflow_history),
+ [('state1', 'swfstate1', 'swftr1', 'go'),
+ ('swfstate1', 'swfstate2', 'tr1', 'go'),
+ ('swfstate2', 'state2', 'swftr1', 'exiting from subworkflow subworkflow'),
+ ('state2', 'state1', None, 'gadget'),
+ ('state1', 'swfstate1', 'swftr1', 'au'),
+ ('swfstate1', 'swfstate3', 'tr2', 'chapeau'),
+ ('swfstate3', 'state3', 'swftr1', 'exiting from subworkflow subworkflow'),
+ ])
+
+ def test_swf_exit_consistency(self):
+ with self.admin_access.shell() as shell:
+ # sub-workflow
+ swf = add_wf(shell, 'CWGroup', name='subworkflow')
+ swfstate1 = swf.add_state(u'swfstate1', initial=True)
+ swfstate2 = swf.add_state(u'swfstate2')
+ tr1 = swf.add_transition(u'tr1', (swfstate1,), swfstate2)
+ # main workflow
+ mwf = add_wf(shell, 'CWGroup', name='main workflow', default=True)
+ state1 = mwf.add_state(u'state1', initial=True)
+ state2 = mwf.add_state(u'state2')
+ state3 = mwf.add_state(u'state3')
+ mwf.add_wftransition(u'swftr1', swf, state1,
+ [(swfstate2, state2), (swfstate2, state3)])
+ with self.assertRaises(ValidationError) as cm:
+ shell.commit()
+ self.assertEqual(cm.exception.errors, {'subworkflow_exit-subject': u"can't have multiple exits on the same state"})
+
+ def test_swf_fire_in_a_row(self):
+ with self.admin_access.shell() as shell:
+ # sub-workflow
+ subwf = add_wf(shell, 'CWGroup', name='subworkflow')
+ xsigning = subwf.add_state('xsigning', initial=True)
+ xaborted = subwf.add_state('xaborted')
+ xsigned = subwf.add_state('xsigned')
+ xabort = subwf.add_transition('xabort', (xsigning,), xaborted)
+ xsign = subwf.add_transition('xsign', (xsigning,), xsigning)
+ xcomplete = subwf.add_transition('xcomplete', (xsigning,), xsigned,
+ type=u'auto')
+ # main workflow
+ twf = add_wf(shell, 'CWGroup', name='mainwf', default=True)
+ created = twf.add_state(_('created'), initial=True)
+ identified = twf.add_state(_('identified'))
+ released = twf.add_state(_('released'))
+ closed = twf.add_state(_('closed'))
+ twf.add_wftransition(_('identify'), subwf, (created,),
+ [(xsigned, identified), (xaborted, created)])
+ twf.add_wftransition(_('release'), subwf, (identified,),
+ [(xsigned, released), (xaborted, identified)])
+ twf.add_wftransition(_('close'), subwf, (released,),
+ [(xsigned, closed), (xaborted, released)])
+ shell.commit()
+ with self.admin_access.repo_cnx() as cnx:
+ group = cnx.create_entity('CWGroup', name=u'grp1')
+ cnx.commit()
+ iworkflowable = group.cw_adapt_to('IWorkflowable')
+ for trans in ('identify', 'release', 'close'):
+ iworkflowable.fire_transition(trans)
+ cnx.commit()
+
+
+ def test_swf_magic_tr(self):
+ with self.admin_access.shell() as shell:
+ # sub-workflow
+ subwf = add_wf(shell, 'CWGroup', name='subworkflow')
+ xsigning = subwf.add_state('xsigning', initial=True)
+ xaborted = subwf.add_state('xaborted')
+ xsigned = subwf.add_state('xsigned')
+ xabort = subwf.add_transition('xabort', (xsigning,), xaborted)
+ xsign = subwf.add_transition('xsign', (xsigning,), xsigned)
+ # main workflow
+ twf = add_wf(shell, 'CWGroup', name='mainwf', default=True)
+ created = twf.add_state(_('created'), initial=True)
+ identified = twf.add_state(_('identified'))
+ released = twf.add_state(_('released'))
+ twf.add_wftransition(_('identify'), subwf, created,
+ [(xaborted, None), (xsigned, identified)])
+ twf.add_wftransition(_('release'), subwf, identified,
+ [(xaborted, None)])
+ shell.commit()
+ with self.admin_access.web_request() as req:
+ group = req.create_entity('CWGroup', name=u'grp1')
+ req.cnx.commit()
+ iworkflowable = group.cw_adapt_to('IWorkflowable')
+ for trans, nextstate in (('identify', 'xsigning'),
+ ('xabort', 'created'),
+ ('identify', 'xsigning'),
+ ('xsign', 'identified'),
+ ('release', 'xsigning'),
+ ('xabort', 'identified')
+ ):
+ iworkflowable.fire_transition(trans)
+ req.cnx.commit()
+ group.cw_clear_all_caches()
+ self.assertEqual(iworkflowable.state, nextstate)
+
+ def test_replace_state(self):
+ with self.admin_access.shell() as shell:
+ wf = add_wf(shell, 'CWGroup', name='groupwf', default=True)
+ s_new = wf.add_state('new', initial=True)
+ s_state1 = wf.add_state('state1')
+ wf.add_transition('tr', (s_new,), s_state1)
+ shell.commit()
+
+ with self.admin_access.repo_cnx() as cnx:
+ group = cnx.create_entity('CWGroup', name=u'grp1')
+ cnx.commit()
+
+ iwf = group.cw_adapt_to('IWorkflowable')
+ iwf.fire_transition('tr')
+ cnx.commit()
+ group.cw_clear_all_caches()
+
+ wf = cnx.entity_from_eid(wf.eid)
+ wf.add_state('state2')
+ with cnx.security_enabled(write=False):
+ wf.replace_state('state1', 'state2')
+ cnx.commit()
+
+ self.assertEqual(iwf.state, 'state2')
+ self.assertEqual(iwf.latest_trinfo().to_state[0].name, 'state2')
+
+
+class CustomWorkflowTC(CubicWebTC):
+
+ def setup_database(self):
+ with self.admin_access.repo_cnx() as cnx:
+ self.member_eid = self.create_user(cnx, 'member').eid
+
+ def test_custom_wf_replace_state_no_history(self):
+ """member in initial state with no previous history, state is simply
+ redirected when changing workflow
+ """
+ with self.admin_access.shell() as shell:
+ wf = add_wf(shell, 'CWUser')
+ wf.add_state('asleep', initial=True)
+ with self.admin_access.web_request() as req:
+ req.execute('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
+ {'wf': wf.eid, 'x': self.member_eid})
+ member = req.entity_from_eid(self.member_eid)
+ iworkflowable = member.cw_adapt_to('IWorkflowable')
+ self.assertEqual(iworkflowable.state, 'activated') # no change before commit
+ req.cnx.commit()
+ member.cw_clear_all_caches()
+ self.assertEqual(iworkflowable.current_workflow.eid, wf.eid)
+ self.assertEqual(iworkflowable.state, 'asleep')
+ self.assertEqual(iworkflowable.workflow_history, ())
+
+ def test_custom_wf_replace_state_keep_history(self):
+ """member in initial state with some history, state is redirected and
+ state change is recorded to history
+ """
+ with self.admin_access.web_request() as req:
+ member = req.entity_from_eid(self.member_eid)
+ iworkflowable = member.cw_adapt_to('IWorkflowable')
+ iworkflowable.fire_transition('deactivate')
+ iworkflowable.fire_transition('activate')
+ req.cnx.commit()
+ with self.admin_access.shell() as shell:
+ wf = add_wf(shell, 'CWUser')
+ wf.add_state('asleep', initial=True)
+ shell.rqlexec('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
+ {'wf': wf.eid, 'x': self.member_eid})
+ with self.admin_access.web_request() as req:
+ member = req.entity_from_eid(self.member_eid)
+ iworkflowable = member.cw_adapt_to('IWorkflowable')
+ self.assertEqual(iworkflowable.current_workflow.eid, wf.eid)
+ self.assertEqual(iworkflowable.state, 'asleep')
+ self.assertEqual(parse_hist(iworkflowable.workflow_history),
+ [('activated', 'deactivated', 'deactivate', None),
+ ('deactivated', 'activated', 'activate', None),
+ ('activated', 'asleep', None, 'workflow changed to "CWUser"')])
+
+ def test_custom_wf_no_initial_state(self):
+ """try to set a custom workflow which has no initial state"""
+ with self.admin_access.shell() as shell:
+ wf = add_wf(shell, 'CWUser')
+ wf.add_state('asleep')
+ shell.rqlexec('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
+ {'wf': wf.eid, 'x': self.member_eid})
+ with self.assertRaises(ValidationError) as cm:
+ shell.commit()
+ self.assertEqual(cm.exception.errors, {'custom_workflow-subject': u'workflow has no initial state'})
+
+ def test_custom_wf_bad_etype(self):
+ """try to set a custom workflow which doesn't apply to entity type"""
+ with self.admin_access.shell() as shell:
+ wf = add_wf(shell, 'Company')
+ wf.add_state('asleep', initial=True)
+ shell.rqlexec('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
+ {'wf': wf.eid, 'x': self.member_eid})
+ with self.assertRaises(ValidationError) as cm:
+ shell.commit()
+ self.assertEqual(cm.exception.errors, {'custom_workflow-subject': u"workflow isn't a workflow for this type"})
+
+ def test_del_custom_wf(self):
+ """member in some state shared by the new workflow, nothing has to be
+ done
+ """
+ with self.admin_access.web_request() as req:
+ member = req.entity_from_eid(self.member_eid)
+ iworkflowable = member.cw_adapt_to('IWorkflowable')
+ iworkflowable.fire_transition('deactivate')
+ req.cnx.commit()
+ with self.admin_access.shell() as shell:
+ wf = add_wf(shell, 'CWUser')
+ wf.add_state('asleep', initial=True)
+ shell.rqlexec('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
+ {'wf': wf.eid, 'x': self.member_eid})
+ shell.commit()
+ with self.admin_access.web_request() as req:
+ req.execute('DELETE X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
+ {'wf': wf.eid, 'x': self.member_eid})
+ member = req.entity_from_eid(self.member_eid)
+ iworkflowable = member.cw_adapt_to('IWorkflowable')
+ self.assertEqual(iworkflowable.state, 'asleep')# no change before commit
+ req.cnx.commit()
+ member.cw_clear_all_caches()
+ self.assertEqual(iworkflowable.current_workflow.name, "default user workflow")
+ self.assertEqual(iworkflowable.state, 'activated')
+ self.assertEqual(parse_hist(iworkflowable.workflow_history),
+ [('activated', 'deactivated', 'deactivate', None),
+ ('deactivated', 'asleep', None, 'workflow changed to "CWUser"'),
+ ('asleep', 'activated', None, 'workflow changed to "default user workflow"'),])
+
+
+class AutoTransitionTC(CubicWebTC):
+
+ def setup_custom_wf(self):
+ with self.admin_access.shell() as shell:
+ wf = add_wf(shell, 'CWUser')
+ asleep = wf.add_state('asleep', initial=True)
+ dead = wf.add_state('dead')
+ wf.add_transition('rest', asleep, asleep)
+ wf.add_transition('sick', asleep, dead, type=u'auto',
+ conditions=({'expr': u'X surname "toto"',
+ 'mainvars': u'X'},))
+ return wf
+
+ def test_auto_transition_fired(self):
+ wf = self.setup_custom_wf()
+ with self.admin_access.web_request() as req:
+ user = self.create_user(req, 'member')
+ iworkflowable = user.cw_adapt_to('IWorkflowable')
+ req.execute('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
+ {'wf': wf.eid, 'x': user.eid})
+ req.cnx.commit()
+ user.cw_clear_all_caches()
+ self.assertEqual(iworkflowable.state, 'asleep')
+ self.assertEqual([t.name for t in iworkflowable.possible_transitions()],
+ ['rest'])
+ iworkflowable.fire_transition('rest')
+ req.cnx.commit()
+ user.cw_clear_all_caches()
+ self.assertEqual(iworkflowable.state, 'asleep')
+ self.assertEqual([t.name for t in iworkflowable.possible_transitions()],
+ ['rest'])
+ self.assertEqual(parse_hist(iworkflowable.workflow_history),
+ [('asleep', 'asleep', 'rest', None)])
+ user.cw_set(surname=u'toto') # fulfill condition
+ req.cnx.commit()
+ iworkflowable.fire_transition('rest')
+ req.cnx.commit()
+ user.cw_clear_all_caches()
+ self.assertEqual(iworkflowable.state, 'dead')
+ self.assertEqual(parse_hist(iworkflowable.workflow_history),
+ [('asleep', 'asleep', 'rest', None),
+ ('asleep', 'asleep', 'rest', None),
+ ('asleep', 'dead', 'sick', None),])
+
+ def test_auto_transition_custom_initial_state_fired(self):
+ wf = self.setup_custom_wf()
+ with self.admin_access.web_request() as req:
+ user = self.create_user(req, 'member', surname=u'toto')
+ req.execute('SET X custom_workflow WF WHERE X eid %(x)s, WF eid %(wf)s',
+ {'wf': wf.eid, 'x': user.eid})
+ req.cnx.commit()
+ user.cw_clear_all_caches()
+ iworkflowable = user.cw_adapt_to('IWorkflowable')
+ self.assertEqual(iworkflowable.state, 'dead')
+
+ def test_auto_transition_initial_state_fired(self):
+ with self.admin_access.web_request() as req:
+ wf = req.execute('Any WF WHERE ET default_workflow WF, '
+ 'ET name %(et)s', {'et': 'CWUser'}).get_entity(0, 0)
+ dead = wf.add_state('dead')
+ wf.add_transition('sick', wf.state_by_name('activated'), dead,
+ type=u'auto', conditions=({'expr': u'X surname "toto"',
+ 'mainvars': u'X'},))
+ req.cnx.commit()
+ with self.admin_access.web_request() as req:
+ user = self.create_user(req, 'member', surname=u'toto')
+ req.cnx.commit()
+ iworkflowable = user.cw_adapt_to('IWorkflowable')
+ self.assertEqual(iworkflowable.state, 'dead')
+
+
+class WorkflowHooksTC(CubicWebTC):
+
+ def setUp(self):
+ CubicWebTC.setUp(self)
+ with self.admin_access.web_request() as req:
+ self.wf = req.user.cw_adapt_to('IWorkflowable').current_workflow
+ self.s_activated = self.wf.state_by_name('activated').eid
+ self.s_deactivated = self.wf.state_by_name('deactivated').eid
+ self.s_dummy = self.wf.add_state(u'dummy').eid
+ self.wf.add_transition(u'dummy', (self.s_deactivated,), self.s_dummy)
+ ueid = self.create_user(req, 'stduser', commit=False).eid
+ # test initial state is set
+ rset = req.execute('Any N WHERE S name N, X in_state S, X eid %(x)s',
+ {'x' : ueid})
+ self.assertFalse(rset, rset.rows)
+ req.cnx.commit()
+ initialstate = req.execute('Any N WHERE S name N, X in_state S, X eid %(x)s',
+ {'x' : ueid})[0][0]
+ self.assertEqual(initialstate, u'activated')
+ # give access to users group on the user's wf transitions
+ # so we can test wf enforcing on euser (managers don't have this
+ # enforcement anymore)
+ req.execute('SET X require_group G '
+ 'WHERE G name "users", X transition_of WF, WF eid %(wf)s',
+ {'wf': self.wf.eid})
+ req.cnx.commit()
+
+ # XXX currently, we've to rely on hooks to set initial state, or to use execute
+ # def test_initial_state(self):
+ # cnx = self.login('stduser')
+ # cu = cnx.cursor()
+ # self.assertRaises(ValidationError, cu.execute,
+ # 'INSERT CWUser X: X login "badaboum", X upassword %(pwd)s, '
+ # 'X in_state S WHERE S name "deactivated"', {'pwd': 'oops'})
+ # cnx.close()
+ # # though managers can do whatever they want
+ # self.execute('INSERT CWUser X: X login "badaboum", X upassword %(pwd)s, '
+ # 'X in_state S, X in_group G WHERE S name "deactivated", G name "users"', {'pwd': 'oops'})
+ # self.commit()
+
+ # test that the workflow is correctly enforced
+
+ def _cleanup_msg(self, msg):
+ """remove the variable part of one specific error message"""
+ lmsg = msg.split()
+ lmsg.pop(1)
+ lmsg.pop()
+ return ' '.join(lmsg)
+
+ def test_transition_checking1(self):
+ with self.new_access('stduser').repo_cnx() as cnx:
+ user = cnx.user
+ iworkflowable = user.cw_adapt_to('IWorkflowable')
+ with self.assertRaises(ValidationError) as cm:
+ iworkflowable.fire_transition('activate')
+ self.assertEqual(self._cleanup_msg(cm.exception.errors['by_transition-subject']),
+ u"transition isn't allowed from")
+
+ def test_transition_checking2(self):
+ with self.new_access('stduser').repo_cnx() as cnx:
+ user = cnx.user
+ iworkflowable = user.cw_adapt_to('IWorkflowable')
+ with self.assertRaises(ValidationError) as cm:
+ iworkflowable.fire_transition('dummy')
+ self.assertEqual(self._cleanup_msg(cm.exception.errors['by_transition-subject']),
+ u"transition isn't allowed from")
+
+ def test_transition_checking3(self):
+ with self.new_access('stduser').repo_cnx() as cnx:
+ user = cnx.user
+ iworkflowable = user.cw_adapt_to('IWorkflowable')
+ iworkflowable.fire_transition('deactivate')
+ cnx.commit()
+ with self.assertRaises(ValidationError) as cm:
+ iworkflowable.fire_transition('deactivate')
+ self.assertEqual(self._cleanup_msg(cm.exception.errors['by_transition-subject']),
+ u"transition isn't allowed from")
+ cnx.rollback()
+ # get back now
+ iworkflowable.fire_transition('activate')
+ cnx.commit()
+
+
+if __name__ == '__main__':
+ from logilab.common.testlib import unittest_main
+ unittest_main()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/entities/wfobjs.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/entities/wfobjs.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,589 @@
+# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""workflow handling:
+
+* entity types defining workflow (Workflow, State, Transition...)
+* workflow history (TrInfo)
+* adapter for workflowable entities (IWorkflowableAdapter)
+"""
+from __future__ import print_function
+
+__docformat__ = "restructuredtext en"
+
+from six import text_type, string_types
+
+from logilab.common.decorators import cached, clear_cache
+from logilab.common.deprecation import deprecated
+
+from cubicweb.entities import AnyEntity, fetch_config
+from cubicweb.view import EntityAdapter
+from cubicweb.predicates import relation_possible
+
+
+try:
+ from cubicweb import server
+except ImportError:
+ # We need to lookup DEBUG from there,
+ # however a pure dbapi client may not have it.
+ class server(object): pass
+ server.DEBUG = False
+
+
+class WorkflowException(Exception): pass
+
+class Workflow(AnyEntity):
+ __regid__ = 'Workflow'
+
+ @property
+ def initial(self):
+ """return the initial state for this workflow"""
+ return self.initial_state and self.initial_state[0] or None
+
+ def is_default_workflow_of(self, etype):
+ """return True if this workflow is the default workflow for the given
+ entity type
+ """
+ return any(et for et in self.reverse_default_workflow
+ if et.name == etype)
+
+ def iter_workflows(self, _done=None):
+ """return an iterator on actual workflows, eg this workflow and its
+ subworkflows
+ """
+ # infinite loop safety belt
+ if _done is None:
+ _done = set()
+ yield self
+ _done.add(self.eid)
+ for tr in self._cw.execute('Any T WHERE T is WorkflowTransition, '
+ 'T transition_of WF, WF eid %(wf)s',
+ {'wf': self.eid}).entities():
+ if tr.subwf.eid in _done:
+ continue
+ for subwf in tr.subwf.iter_workflows(_done):
+ yield subwf
+
+ # state / transitions accessors ############################################
+
+ def state_by_name(self, statename):
+ rset = self._cw.execute('Any S, SN WHERE S name SN, S name %(n)s, '
+ 'S state_of WF, WF eid %(wf)s',
+ {'n': statename, 'wf': self.eid})
+ if rset:
+ return rset.get_entity(0, 0)
+ return None
+
+ def state_by_eid(self, eid):
+ rset = self._cw.execute('Any S, SN WHERE S name SN, S eid %(s)s, '
+ 'S state_of WF, WF eid %(wf)s',
+ {'s': eid, 'wf': self.eid})
+ if rset:
+ return rset.get_entity(0, 0)
+ return None
+
+ def transition_by_name(self, trname):
+ rset = self._cw.execute('Any T, TN WHERE T name TN, T name %(n)s, '
+ 'T transition_of WF, WF eid %(wf)s',
+ {'n': text_type(trname), 'wf': self.eid})
+ if rset:
+ return rset.get_entity(0, 0)
+ return None
+
+ def transition_by_eid(self, eid):
+ rset = self._cw.execute('Any T, TN WHERE T name TN, T eid %(t)s, '
+ 'T transition_of WF, WF eid %(wf)s',
+ {'t': eid, 'wf': self.eid})
+ if rset:
+ return rset.get_entity(0, 0)
+ return None
+
+ # wf construction methods ##################################################
+
+ def add_state(self, name, initial=False, **kwargs):
+ """add a state to this workflow"""
+ state = self._cw.create_entity('State', name=text_type(name), **kwargs)
+ self._cw.execute('SET S state_of WF WHERE S eid %(s)s, WF eid %(wf)s',
+ {'s': state.eid, 'wf': self.eid})
+ if initial:
+ assert not self.initial, "Initial state already defined as %s" % self.initial
+ self._cw.execute('SET WF initial_state S '
+ 'WHERE S eid %(s)s, WF eid %(wf)s',
+ {'s': state.eid, 'wf': self.eid})
+ return state
+
+ def _add_transition(self, trtype, name, fromstates,
+ requiredgroups=(), conditions=(), **kwargs):
+ tr = self._cw.create_entity(trtype, name=text_type(name), **kwargs)
+ self._cw.execute('SET T transition_of WF '
+ 'WHERE T eid %(t)s, WF eid %(wf)s',
+ {'t': tr.eid, 'wf': self.eid})
+ assert fromstates, fromstates
+ if not isinstance(fromstates, (tuple, list)):
+ fromstates = (fromstates,)
+ for state in fromstates:
+ if hasattr(state, 'eid'):
+ state = state.eid
+ self._cw.execute('SET S allowed_transition T '
+ 'WHERE S eid %(s)s, T eid %(t)s',
+ {'s': state, 't': tr.eid})
+ tr.set_permissions(requiredgroups, conditions, reset=False)
+ return tr
+
+ def add_transition(self, name, fromstates, tostate=None,
+ requiredgroups=(), conditions=(), **kwargs):
+ """add a transition to this workflow from some state(s) to another"""
+ tr = self._add_transition('Transition', name, fromstates,
+ requiredgroups, conditions, **kwargs)
+ if tostate is not None:
+ if hasattr(tostate, 'eid'):
+ tostate = tostate.eid
+ self._cw.execute('SET T destination_state S '
+ 'WHERE S eid %(s)s, T eid %(t)s',
+ {'t': tr.eid, 's': tostate})
+ return tr
+
+ def add_wftransition(self, name, subworkflow, fromstates, exitpoints=(),
+ requiredgroups=(), conditions=(), **kwargs):
+ """add a workflow transition to this workflow"""
+ tr = self._add_transition('WorkflowTransition', name, fromstates,
+ requiredgroups, conditions, **kwargs)
+ if hasattr(subworkflow, 'eid'):
+ subworkflow = subworkflow.eid
+ assert self._cw.execute('SET T subworkflow WF WHERE WF eid %(wf)s,T eid %(t)s',
+ {'t': tr.eid, 'wf': subworkflow})
+ for fromstate, tostate in exitpoints:
+ tr.add_exit_point(fromstate, tostate)
+ return tr
+
+ def replace_state(self, todelstate, replacement):
+ """migration convenience method"""
+ if not hasattr(todelstate, 'eid'):
+ todelstate = self.state_by_name(todelstate)
+ if not hasattr(replacement, 'eid'):
+ replacement = self.state_by_name(replacement)
+ args = {'os': todelstate.eid, 'ns': replacement.eid}
+ execute = self._cw.execute
+ execute('SET X in_state NS WHERE X in_state OS, '
+ 'NS eid %(ns)s, OS eid %(os)s', args)
+ execute('SET X from_state NS WHERE X from_state OS, '
+ 'OS eid %(os)s, NS eid %(ns)s', args)
+ execute('SET X to_state NS WHERE X to_state OS, '
+ 'OS eid %(os)s, NS eid %(ns)s', args)
+ todelstate.cw_delete()
+
+
+class BaseTransition(AnyEntity):
+ """customized class for abstract transition
+
+ provides a specific may_be_fired method to check if the transition may be
+ fired by the logged user
+ """
+ __regid__ = 'BaseTransition'
+ fetch_attrs, cw_fetch_order = fetch_config(['name', 'type'])
+
+ def __init__(self, *args, **kwargs):
+ if self.cw_etype == 'BaseTransition':
+ raise WorkflowException('should not be instantiated')
+ super(BaseTransition, self).__init__(*args, **kwargs)
+
+ @property
+ def workflow(self):
+ return self.transition_of[0]
+
+ def has_input_state(self, state):
+ if hasattr(state, 'eid'):
+ state = state.eid
+ return any(s for s in self.reverse_allowed_transition if s.eid == state)
+
+ def may_be_fired(self, eid):
+ """return true if the logged user may fire this transition
+
+ `eid` is the eid of the object on which we may fire the transition
+ """
+ DBG = False
+ if server.DEBUG & server.DBG_SEC:
+ if 'transition' in server._SECURITY_CAPS:
+ DBG = True
+ user = self._cw.user
+ # check user is at least in one of the required groups if any
+ groups = frozenset(g.name for g in self.require_group)
+ if groups:
+ matches = user.matching_groups(groups)
+ if matches:
+ if DBG:
+ print('may_be_fired: %r may fire: user matches %s' % (self.name, groups))
+ return matches
+ if 'owners' in groups and user.owns(eid):
+ if DBG:
+ print('may_be_fired: %r may fire: user is owner' % self.name)
+ return True
+ # check one of the rql expression conditions matches if any
+ if self.condition:
+ if DBG:
+ print('my_be_fired: %r: %s' %
+ (self.name, [(rqlexpr.expression,
+ rqlexpr.check_expression(self._cw, eid))
+ for rqlexpr in self.condition]))
+ for rqlexpr in self.condition:
+ if rqlexpr.check_expression(self._cw, eid):
+ return True
+ if self.condition or groups:
+ return False
+ return True
+
+ def set_permissions(self, requiredgroups=(), conditions=(), reset=True):
+ """set or add (if `reset` is False) groups and conditions for this
+ transition
+ """
+ if reset:
+ self._cw.execute('DELETE T require_group G WHERE T eid %(x)s',
+ {'x': self.eid})
+ self._cw.execute('DELETE T condition R WHERE T eid %(x)s',
+ {'x': self.eid})
+ for gname in requiredgroups:
+ rset = self._cw.execute('SET T require_group G '
+ 'WHERE T eid %(x)s, G name %(gn)s',
+ {'x': self.eid, 'gn': text_type(gname)})
+ assert rset, '%s is not a known group' % gname
+ if isinstance(conditions, string_types):
+ conditions = (conditions,)
+ for expr in conditions:
+ if isinstance(expr, string_types):
+ kwargs = {'expr': text_type(expr)}
+ else:
+ assert isinstance(expr, dict)
+ kwargs = expr
+ kwargs['x'] = self.eid
+ kwargs.setdefault('mainvars', u'X')
+ self._cw.execute('INSERT RQLExpression X: X exprtype "ERQLExpression", '
+ 'X expression %(expr)s, X mainvars %(mainvars)s, '
+ 'T condition X WHERE T eid %(x)s', kwargs)
+ # XXX clear caches?
+
+
+class Transition(BaseTransition):
+ """customized class for Transition entities"""
+ __regid__ = 'Transition'
+
+ def dc_long_title(self):
+ return '%s (%s)' % (self.name, self._cw._(self.name))
+
+ def destination(self, entity):
+ try:
+ return self.destination_state[0]
+ except IndexError:
+ return entity.cw_adapt_to('IWorkflowable').latest_trinfo().previous_state
+
+ def potential_destinations(self):
+ try:
+ yield self.destination_state[0]
+ except IndexError:
+ for incomingstate in self.reverse_allowed_transition:
+ for tr in incomingstate.reverse_destination_state:
+ for previousstate in tr.reverse_allowed_transition:
+ yield previousstate
+
+
+class WorkflowTransition(BaseTransition):
+ """customized class for WorkflowTransition entities"""
+ __regid__ = 'WorkflowTransition'
+
+ @property
+ def subwf(self):
+ return self.subworkflow[0]
+
+ def destination(self, entity):
+ return self.subwf.initial
+
+ def potential_destinations(self):
+ yield self.subwf.initial
+
+ def add_exit_point(self, fromstate, tostate):
+ if hasattr(fromstate, 'eid'):
+ fromstate = fromstate.eid
+ if tostate is None:
+ self._cw.execute('INSERT SubWorkflowExitPoint X: T subworkflow_exit X, '
+ 'X subworkflow_state FS WHERE T eid %(t)s, FS eid %(fs)s',
+ {'t': self.eid, 'fs': fromstate})
+ else:
+ if hasattr(tostate, 'eid'):
+ tostate = tostate.eid
+ self._cw.execute('INSERT SubWorkflowExitPoint X: T subworkflow_exit X, '
+ 'X subworkflow_state FS, X destination_state TS '
+ 'WHERE T eid %(t)s, FS eid %(fs)s, TS eid %(ts)s',
+ {'t': self.eid, 'fs': fromstate, 'ts': tostate})
+
+ def get_exit_point(self, entity, stateeid):
+ """if state is an exit point, return its associated destination state"""
+ if hasattr(stateeid, 'eid'):
+ stateeid = stateeid.eid
+ try:
+ tostateeid = self.exit_points()[stateeid]
+ except KeyError:
+ return None
+ if tostateeid is None:
+ # go back to state from which we've entered the subworkflow
+ return entity.cw_adapt_to('IWorkflowable').subworkflow_input_trinfo().previous_state
+ return self._cw.entity_from_eid(tostateeid)
+
+ @cached
+ def exit_points(self):
+ result = {}
+ for ep in self.subworkflow_exit:
+ result[ep.subwf_state.eid] = ep.destination and ep.destination.eid
+ return result
+
+ def cw_clear_all_caches(self):
+ super(WorkflowTransition, self).cw_clear_all_caches()
+ clear_cache(self, 'exit_points')
+
+
+class SubWorkflowExitPoint(AnyEntity):
+ """customized class for SubWorkflowExitPoint entities"""
+ __regid__ = 'SubWorkflowExitPoint'
+
+ @property
+ def subwf_state(self):
+ return self.subworkflow_state[0]
+
+ @property
+ def destination(self):
+ return self.destination_state and self.destination_state[0] or None
+
+
class State(AnyEntity):
    """customized class for State entities"""
    __regid__ = 'State'
    fetch_attrs, cw_fetch_order = fetch_config(['name'])
    rest_attr = 'eid'

    def dc_long_title(self):
        """state name followed by its translation in the request's language"""
        return '%s (%s)' % (self.name, self._cw._(self.name))

    @property
    def workflow(self):
        """workflow this state belongs to

        Take care, may be missing in multi-sources configuration.
        """
        if self.state_of:
            return self.state_of[0]
        return None
+
+
class TrInfo(AnyEntity):
    """customized class for TrInfo (transition information) entities"""
    __regid__ = 'TrInfo'
    # modification_date is deliberately not fetched (hence pclass=None)
    fetch_attrs, cw_fetch_order = fetch_config(['creation_date', 'comment'],
                                               pclass=None)

    @property
    def for_entity(self):
        """the entity whose state changed"""
        return self.wf_info_for[0]

    @property
    def previous_state(self):
        """state the entity was in before the transition"""
        return self.from_state[0]

    @property
    def new_state(self):
        """state the entity reached"""
        return self.to_state[0]

    @property
    def transition(self):
        """the fired transition, or None (e.g. forced state change)"""
        if self.by_transition:
            return self.by_transition[0]
        return None
+
+
+
class IWorkflowableAdapter(EntityAdapter):
    """base adapter providing workflow helper methods for workflowable entities.
    """
    __regid__ = 'IWorkflowable'
    __select__ = relation_possible('in_state')

    @cached
    def cwetype_workflow(self):
        """return the default workflow for entities of this type"""
        # XXX CWEType method
        wfrset = self._cw.execute('Any WF WHERE ET default_workflow WF, '
                                  'ET name %(et)s', {'et': text_type(self.entity.cw_etype)})
        if wfrset:
            return wfrset.get_entity(0, 0)
        self.warning("can't find any workflow for %s", self.entity.cw_etype)
        return None

    @property
    def main_workflow(self):
        """return current workflow applied to this entity"""
        # a per-entity custom workflow takes precedence over the entity
        # type's default workflow
        if self.entity.custom_workflow:
            return self.entity.custom_workflow[0]
        return self.cwetype_workflow()

    @property
    def current_workflow(self):
        """return current workflow applied to this entity"""
        return self.current_state and self.current_state.workflow or self.main_workflow

    @property
    def current_state(self):
        """return current state entity (None if no in_state relation yet)"""
        return self.entity.in_state and self.entity.in_state[0] or None

    @property
    def state(self):
        """return current state name"""
        try:
            return self.current_state.name
        except AttributeError:
            # current_state is None: no state set yet
            self.warning('entity %s has no state', self.entity)
            return None

    @property
    def printable_state(self):
        """return current state name translated to context's language"""
        state = self.current_state
        if state:
            return self._cw._(state.name)
        return u''

    @property
    def workflow_history(self):
        """return the workflow history for this entity (eg ordered list of
        TrInfo entities)
        """
        return self.entity.reverse_wf_info_for

    def latest_trinfo(self):
        """return the latest transition information for this entity"""
        try:
            return self.workflow_history[-1]
        except IndexError:
            # empty history
            return None

    def possible_transitions(self, type='normal'):
        """generates transition that MAY be fired for the given entity,
        expected to be in this state
        used only by the UI
        """
        if self.current_state is None or self.current_workflow is None:
            return
        rset = self._cw.execute(
            'Any T,TT, TN WHERE S allowed_transition T, S eid %(x)s, '
            'T type TT, T type %(type)s, '
            'T name TN, T transition_of WF, WF eid %(wfeid)s',
            {'x': self.current_state.eid, 'type': text_type(type),
             'wfeid': self.current_workflow.eid})
        for tr in rset.entities():
            # each transition further checks its own firing conditions
            if tr.may_be_fired(self.entity.eid):
                yield tr

    def subworkflow_input_trinfo(self):
        """return the TrInfo which has been recorded when this entity went into
        the current sub-workflow
        """
        if self.main_workflow.eid == self.current_workflow.eid:
            return # doesn't make sense
        subwfentries = []
        # scan the history pairing sub-workflow entries with matching exits,
        # so that only still-open entries remain on the stack
        for trinfo in self.workflow_history:
            if (trinfo.transition and
                trinfo.previous_state.workflow.eid != trinfo.new_state.workflow.eid):
                # entering or leaving a subworkflow
                if (subwfentries and
                    subwfentries[-1].new_state.workflow.eid == trinfo.previous_state.workflow.eid and
                    subwfentries[-1].previous_state.workflow.eid == trinfo.new_state.workflow.eid):
                    # leave
                    del subwfentries[-1]
                else:
                    # enter
                    subwfentries.append(trinfo)
        if not subwfentries:
            return None
        return subwfentries[-1]

    def subworkflow_input_transition(self):
        """return the transition which has went through the current sub-workflow
        """
        return getattr(self.subworkflow_input_trinfo(), 'transition', None)

    def _add_trinfo(self, comment, commentformat, treid=None, tseid=None):
        """create and return a TrInfo entity recording a state change;
        `treid` is the eid of the fired transition (if any), `tseid` the eid
        of the forced destination state (if any)"""
        kwargs = {}
        if comment is not None:
            kwargs['comment'] = comment
        if commentformat is not None:
            kwargs['comment_format'] = commentformat
        kwargs['wf_info_for'] = self.entity
        if treid is not None:
            kwargs['by_transition'] = self._cw.entity_from_eid(treid)
        if tseid is not None:
            kwargs['to_state'] = self._cw.entity_from_eid(tseid)
        return self._cw.create_entity('TrInfo', **kwargs)

    def _get_transition(self, tr):
        """resolve `tr` (transition name or entity) against the current
        workflow; assert it actually exists"""
        assert self.current_workflow
        if isinstance(tr, string_types):
            _tr = self.current_workflow.transition_by_name(tr)
            assert _tr is not None, 'not a %s transition: %s' % (
                self.__regid__, tr)
            tr = _tr
        return tr

    def fire_transition(self, tr, comment=None, commentformat=None):
        """change the entity's state by firing given transition (name or entity)
        in entity's workflow
        """
        tr = self._get_transition(tr)
        return self._add_trinfo(comment, commentformat, tr.eid)

    def fire_transition_if_possible(self, tr, comment=None, commentformat=None):
        """change the entity's state by firing given transition (name or entity)
        in entity's workflow if this transition is possible
        """
        tr = self._get_transition(tr)
        if any(tr_ for tr_ in self.possible_transitions()
               if tr_.eid == tr.eid):
            self.fire_transition(tr, comment, commentformat)

    def change_state(self, statename, comment=None, commentformat=None, tr=None):
        """change the entity's state to the given state (name or entity) in
        entity's workflow. This method should only be used by managers to fix an
        entity's state when there is no matching transition, otherwise
        fire_transition should be used.
        """
        assert self.current_workflow
        if hasattr(statename, 'eid'):
            stateeid = statename.eid
        else:
            state = self.current_workflow.state_by_name(statename)
            if state is None:
                raise WorkflowException('not a %s state: %s' % (self.__regid__,
                                                                statename))
            stateeid = state.eid
        # XXX try to find matching transition?
        return self._add_trinfo(comment, commentformat, tr and tr.eid, stateeid)

    def set_initial_state(self, statename):
        """set a newly created entity's state to the given state (name or entity)
        in entity's workflow. This is useful if you don't want it to be the
        workflow's initial state.
        """
        assert self.current_workflow
        if hasattr(statename, 'eid'):
            stateeid = statename.eid
        else:
            state = self.current_workflow.state_by_name(statename)
            if state is None:
                raise WorkflowException('not a %s state: %s' % (self.__regid__,
                                                                statename))
            stateeid = state.eid
        self._cw.execute('SET X in_state S WHERE X eid %(x)s, S eid %(s)s',
                         {'x': self.entity.eid, 's': stateeid})
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/entity.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/entity.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1403 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""Base class for entity objects manipulated in clients"""
+
+__docformat__ = "restructuredtext en"
+
+from warnings import warn
+from functools import partial
+
+from six import text_type, string_types, integer_types
+from six.moves import range
+
+from logilab.common.decorators import cached
+from logilab.common.deprecation import deprecated
+from logilab.common.registry import yes
+from logilab.mtconverter import TransformData, xml_escape
+
+from rql.utils import rqlvar_maker
+from rql.stmts import Select
+from rql.nodes import (Not, VariableRef, Constant, make_relation,
+ Relation as RqlRelation)
+
+from cubicweb import Unauthorized, neg_role
+from cubicweb.utils import support_args
+from cubicweb.rset import ResultSet
+from cubicweb.appobject import AppObject
+from cubicweb.schema import (RQLVocabularyConstraint, RQLConstraint,
+ GeneratedConstraint)
+from cubicweb.rqlrewrite import RQLRewriter
+
+from cubicweb.uilib import soup2xhtml
+from cubicweb.mttransforms import ENGINE
+
+_marker = object()
+
def greater_card(rschema, subjtypes, objtypes, index):
    """Return the greatest cardinality of `rschema`'s relation definitions
    over the given subject/object types: '+' or '*' as soon as one rdef is
    multi-valued at `index` (0 for subject side, 1 for object side), else '1'.
    """
    for subjtype in subjtypes:
        for objtype in objtypes:
            cardinality = rschema.rdef(subjtype, objtype).cardinality[index]
            if cardinality in ('+', '*'):
                return cardinality
    return '1'
+
def can_use_rest_path(value):
    """return True if value can be used at the end of a Rest URL path"""
    if value is None:
        return False
    value = text_type(value)
    # empty values as well as ?, / and & would confuse URL parsing, notably
    # when running behind Apache mod_proxy
    if not value:
        return False
    return not any(forbidden in value for forbidden in (u'?', u'/', u'&'))
+
def rel_vars(rel):
    """return the (subject, object) variables of the relation node `rel`,
    with None standing for a non-variable child (e.g. a constant)"""
    subj = rel.children[0]
    obj = rel.children[1].children[0]
    return ((subj.variable or None) if isinstance(subj, VariableRef) else None,
            (obj.variable or None) if isinstance(obj, VariableRef) else None)
+
def rel_matches(rel, rtype, role, varname, operator='='):
    """If `rel` is an `rtype` relation (with the given comparison operator)
    whose `role` side is the variable named `varname`, return the variable on
    the other side; otherwise return None."""
    if rel.r_type != rtype or rel.children[1].operator != operator:
        return None
    same_role_var_idx = 0 if role == 'subject' else 1
    variables = rel_vars(rel)
    if variables[same_role_var_idx].name == varname:
        return variables[1 - same_role_var_idx]
    return None
+
def build_cstr_with_linkto_infos(cstr, args, searchedvar, evar,
                                 lt_infos, eidvars):
    """restrict vocabulary as much as possible in entity creation,
    based on infos provided by __linkto form param.

    Example based on following schema:

      class works_in(RelationDefinition):
          subject = 'CWUser'
          object = 'Lab'
          cardinality = '1*'
          constraints = [RQLConstraint('S in_group G, O welcomes G')]

      class welcomes(RelationDefinition):
          subject = 'Lab'
          object = 'CWGroup'

    If you create a CWUser in the "scientists" CWGroup you can show
    only the labs that welcome them using :

      lt_infos = {('in_group', 'subject'): 321}

    You get following restriction : 'O welcomes G, G eid 321'

    Return a GeneratedConstraint wrapping the rewritten snippet tree, or
    None when the constraint must be discarded.
    """
    st = cstr.snippet_rqlst.copy()
    # replace relations in ST by eid infos from linkto where possible
    for (info_rtype, info_role), eids in lt_infos.items():
        eid = eids[0] # NOTE: we currently assume a pruned lt_info with only 1 eid
        for rel in st.iget_nodes(RqlRelation):
            targetvar = rel_matches(rel, info_rtype, info_role, evar.name)
            if targetvar is not None:
                if targetvar.name in eidvars:
                    # variable is already restricted to an eid: drop the
                    # redundant relation
                    rel.parent.remove(rel)
                else:
                    eidrel = make_relation(
                        targetvar, 'eid', (targetvar.name, 'Substitute'),
                        Constant)
                    rel.parent.replace(rel, eidrel)
                    args[targetvar.name] = eid
                    eidvars.add(targetvar.name)
    # if modified ST still contains evar references we must discard the
    # constraint, otherwise evar is unknown in the final rql query which can
    # lead to a SQL table cartesian product and multiple occurences of solutions
    evarname = evar.name
    for rel in st.iget_nodes(RqlRelation):
        for variable in rel_vars(rel):
            if variable and evarname == variable.name:
                return
    # else insert snippets into the global tree
    return GeneratedConstraint(st, cstr.mainvars - set(evarname))
+
def pruned_lt_info(eschema, lt_infos):
    """Filter `lt_infos` (mapping (rtype, role) -> eids) down to the entries
    usable for vocabulary restriction: exactly one eid, and a cardinality of
    at most one ('?' or '1') towards the linked entity."""
    pruned = {}
    for (lt_rtype, lt_role), eids in lt_infos.items():
        if len(eids) != 1:
            continue
        card_idx = 0 if lt_role == 'subject' else 1
        if eschema.rdef(lt_rtype, lt_role).cardinality[card_idx] in '?1':
            pruned[(lt_rtype, lt_role)] = eids
    return pruned
+
+
+class Entity(AppObject):
+ """an entity instance has e_schema automagically set on
+ the class and instances has access to their issuing cursor.
+
+ A property is set for each attribute and relation on each entity's type
+ class. Becare that among attributes, 'eid' is *NEITHER* stored in the
+ dict containment (which acts as a cache for other attributes dynamically
+ fetched)
+
+ :type e_schema: `cubicweb.schema.EntitySchema`
+ :ivar e_schema: the entity's schema
+
+ :type rest_attr: str
+ :cvar rest_attr: indicates which attribute should be used to build REST urls
+ If `None` is specified (the default), the first unique attribute will
+ be used ('eid' if none found)
+
+ :type cw_skip_copy_for: list
+ :cvar cw_skip_copy_for: a list of couples (rtype, role) for each relation
+ that should be skipped when copying this kind of entity. Note that some
+ relations such as composite relations or relations that have '?1' as
+ object cardinality are always skipped.
+ """
+ __registry__ = 'etypes'
+ __select__ = yes()
+
+ # class attributes that must be set in class definition
+ rest_attr = None
+ fetch_attrs = None
+ skip_copy_for = () # bw compat (< 3.14), use cw_skip_copy_for instead
+ cw_skip_copy_for = [('in_state', 'subject')]
+ # class attributes set automatically at registration time
+ e_schema = None
+
+ @classmethod
+ def __initialize__(cls, schema):
+ """initialize a specific entity class by adding descriptors to access
+ entity type's attributes and relations
+ """
+ etype = cls.__regid__
+ assert etype != 'Any', etype
+ cls.e_schema = eschema = schema.eschema(etype)
+ for rschema, _ in eschema.attribute_definitions():
+ if rschema.type == 'eid':
+ continue
+ setattr(cls, rschema.type, Attribute(rschema.type))
+ mixins = []
+ for rschema, _, role in eschema.relation_definitions():
+ if role == 'subject':
+ attr = rschema.type
+ else:
+ attr = 'reverse_%s' % rschema.type
+ setattr(cls, attr, Relation(rschema, role))
+
+ fetch_attrs = ('modification_date',)
+
    @classmethod
    def cw_fetch_order(cls, select, attr, var):
        """This class method may be used to control sort order when multiple
        entities of this type are fetched through ORM methods. Its arguments
        are:

        * `select`, the RQL syntax tree

        * `attr`, the attribute being watched

        * `var`, the variable through which this attribute's value may be
          accessed in the query

        When you want to do some sorting on the given attribute, you should
        modify the syntax tree accordingly. For instance:

        .. sourcecode:: python

          from rql import nodes

          class Version(AnyEntity):
              __regid__ = 'Version'

              fetch_attrs = ('num', 'description', 'in_state')

              @classmethod
              def cw_fetch_order(cls, select, attr, var):
                  if attr == 'num':
                      func = nodes.Function('version_sort_value')
                      func.append(nodes.variable_ref(var))
                      sterm = nodes.SortTerm(func, asc=False)
                      select.add_sort_term(sterm)

        The default implementation calls
        :meth:`~cubicweb.entity.Entity.cw_fetch_unrelated_order`
        """
        cls.cw_fetch_unrelated_order(select, attr, var)
+
+ @classmethod
+ def cw_fetch_unrelated_order(cls, select, attr, var):
+ """This class method may be used to control sort order when multiple entities of
+ this type are fetched to use in edition (e.g. propose them to create a
+ new relation on an edited entity).
+
+ See :meth:`~cubicweb.entity.Entity.cw_fetch_unrelated_order` for a
+ description of its arguments and usage.
+
+ By default entities will be listed on their modification date descending,
+ i.e. you'll get entities recently modified first.
+ """
+ if attr == 'modification_date':
+ select.add_sort_var(var, asc=False)
+
+ @classmethod
+ def fetch_rql(cls, user, restriction=None, fetchattrs=None, mainvar='X',
+ settype=True, ordermethod='fetch_order'):
+ st = cls.fetch_rqlst(user, mainvar=mainvar, fetchattrs=fetchattrs,
+ settype=settype, ordermethod=ordermethod)
+ rql = st.as_string()
+ if restriction:
+ # cannot use RQLRewriter API to insert 'X rtype %(x)s' restriction
+ warn('[3.14] fetch_rql: use of `restriction` parameter is '
+ 'deprecated, please use fetch_rqlst and supply a syntax'
+ 'tree with your restriction instead', DeprecationWarning)
+ insert = ' WHERE ' + ','.join(restriction)
+ if ' WHERE ' in rql:
+ select, where = rql.split(' WHERE ', 1)
+ rql = select + insert + ',' + where
+ else:
+ rql += insert
+ return rql
+
    @classmethod
    def fetch_rqlst(cls, user, select=None, mainvar='X', fetchattrs=None,
                    settype=True, ordermethod='fetch_order'):
        """build (or complete) and return the RQL syntax tree fetching
        instances of this entity type with their `fetch_attrs`, readable by
        `user`; `mainvar` is the variable standing for the fetched entity"""
        if select is None:
            select = Select()
            mainvar = select.get_variable(mainvar)
            select.add_selected(mainvar)
        elif isinstance(mainvar, string_types):
            assert mainvar in select.defined_vars
            mainvar = select.get_variable(mainvar)
        # eases string -> syntax tree test transition: please remove once stable
        select._varmaker = rqlvar_maker(defined=select.defined_vars,
                                        aliases=select.aliases, index=26)
        if settype:
            rel = select.add_type_restriction(mainvar, cls.__regid__)
            # should use 'is_instance_of' instead of 'is' so we retrieve
            # subclasses instances as well
            rel.r_type = 'is_instance_of'
        if fetchattrs is None:
            fetchattrs = cls.fetch_attrs
        cls._fetch_restrictions(mainvar, select, fetchattrs, user, ordermethod)
        return select
+
    @classmethod
    def _fetch_ambiguous_rtypes(cls, select, var, fetchattrs, subjtypes, schema):
        """find rtypes in `fetchattrs` that relate different subject etypes
        taken from (`subjtypes`) to different target etypes; these so called
        "ambiguous" relations, are added directly to the `select` syntax tree
        selection but removed from `fetchattrs` to avoid the fetch recursion
        because we have to choose only one targettype for the recursion and
        adding its own fetch attrs to the selection -when we recurse- would
        filter out the other possible target types from the result set
        """
        # iterate on a copy since fetchattrs is mutated in the loop
        for attr in fetchattrs.copy():
            rschema = schema.rschema(attr)
            if rschema.final:
                continue
            ttypes = None
            for subjtype in subjtypes:
                cur_ttypes = set(rschema.objects(subjtype))
                if ttypes is None:
                    ttypes = cur_ttypes
                elif cur_ttypes != ttypes:
                    # we found an ambiguous relation: remove it from fetchattrs
                    fetchattrs.remove(attr)
                    # ... and add it to the selection
                    targetvar = select.make_variable()
                    select.add_selected(targetvar)
                    rel = make_relation(var, attr, (targetvar,), VariableRef)
                    select.add_restriction(rel)
                    break
+
    @classmethod
    def _fetch_restrictions(cls, mainvar, select, fetchattrs,
                            user, ordermethod='fetch_order', visited=None):
        """add restrictions to `select` so that the `fetchattrs` attributes /
        inlinable relations of this entity type (readable by `user`) are
        fetched along with the entity; recurses on relation target types,
        using `visited` to break cycles"""
        eschema = cls.e_schema
        if visited is None:
            visited = set((eschema.type,))
        elif eschema.type in visited:
            # avoid infinite recursion
            return
        else:
            visited.add(eschema.type)
        # NOTE(review): _fetchattrs appears unused below — confirm before removal
        _fetchattrs = []
        for attr in sorted(fetchattrs):
            try:
                rschema = eschema.subjrels[attr]
            except KeyError:
                cls.warning('skipping fetch_attr %s defined in %s (not found in schema)',
                            attr, cls.__regid__)
                continue
            # XXX takefirst=True to remove warning triggered by ambiguous inlined relations
            rdef = eschema.rdef(attr, takefirst=True)
            if not user.matching_groups(rdef.get_groups('read')):
                continue
            if rschema.final or rdef.cardinality[0] in '?1':
                var = select.make_variable()
                select.add_selected(var)
                rel = make_relation(mainvar, attr, (var,), VariableRef)
                select.add_restriction(rel)
            else:
                cls.warning('bad relation %s specified in fetch attrs for %s',
                            attr, cls)
                continue
            if not rschema.final:
                # XXX we need outer join in case the relation is not mandatory
                # (card == '?') *or if the entity is being added*, since in
                # that case the relation may still be missing. As we miss this
                # later information here, systematically add it.
                rel.change_optional('right')
                targettypes = rschema.objects(eschema.type)
                vreg = user._cw.vreg # XXX user._cw.vreg iiiirk
                etypecls = vreg['etypes'].etype_class(targettypes[0])
                if len(targettypes) > 1:
                    # find fetch_attrs common to all destination types
                    fetchattrs = vreg['etypes'].fetch_attrs(targettypes)
                    # ... and handle ambiguous relations
                    cls._fetch_ambiguous_rtypes(select, var, fetchattrs,
                                                targettypes, vreg.schema)
                else:
                    fetchattrs = etypecls.fetch_attrs
                etypecls._fetch_restrictions(var, select, fetchattrs,
                                             user, None, visited=visited)
            if ordermethod is not None:
                # bw compat: accept both the old `fetch_order` returning a
                # string and the new `cw_fetch_order` modifying the tree
                try:
                    cmeth = getattr(cls, ordermethod)
                    warn('[3.14] %s %s class method should be renamed to cw_%s'
                         % (cls.__regid__, ordermethod, ordermethod),
                         DeprecationWarning)
                except AttributeError:
                    cmeth = getattr(cls, 'cw_' + ordermethod)
                if support_args(cmeth, 'select'):
                    cmeth(select, attr, var)
                else:
                    warn('[3.14] %s should now take (select, attr, var) and '
                         'modify the syntax tree when desired instead of '
                         'returning something' % cmeth, DeprecationWarning)
                    orderterm = cmeth(attr, var.name)
                    if orderterm is not None:
                        try:
                            var, order = orderterm.split()
                        except ValueError:
                            if '(' in orderterm:
                                cls.error('ignore %s until %s is upgraded',
                                          orderterm, cmeth)
                                orderterm = None
                            elif not ' ' in orderterm.strip():
                                var = orderterm
                                order = 'ASC'
                        if orderterm is not None:
                            select.add_sort_var(select.get_variable(var),
                                                order=='ASC')
+
+ @classmethod
+ @cached
+ def cw_rest_attr_info(cls):
+ """this class method return an attribute name to be used in URL for
+ entities of this type and a boolean flag telling if its value should be
+ checked for uniqness.
+
+ The attribute returned is, in order of priority:
+
+ * class's `rest_attr` class attribute
+ * an attribute defined as unique in the class'schema
+ * 'eid'
+ """
+ mainattr, needcheck = 'eid', True
+ if cls.rest_attr:
+ mainattr = cls.rest_attr
+ needcheck = not cls.e_schema.has_unique_values(mainattr)
+ else:
+ for rschema in cls.e_schema.subject_relations():
+ if (rschema.final
+ and rschema not in ('eid', 'cwuri')
+ and cls.e_schema.has_unique_values(rschema)
+ and cls.e_schema.rdef(rschema.type).cardinality[0] == '1'):
+ mainattr = str(rschema)
+ needcheck = False
+ break
+ if mainattr == 'eid':
+ needcheck = False
+ return mainattr, needcheck
+
+ @classmethod
+ def _cw_build_entity_query(cls, kwargs):
+ relations = []
+ restrictions = set()
+ pendingrels = []
+ eschema = cls.e_schema
+ qargs = {}
+ attrcache = {}
+ for attr, value in kwargs.items():
+ if attr.startswith('reverse_'):
+ attr = attr[len('reverse_'):]
+ role = 'object'
+ else:
+ role = 'subject'
+ assert eschema.has_relation(attr, role), '%s %s not found on %s' % (attr, role, eschema)
+ rschema = eschema.subjrels[attr] if role == 'subject' else eschema.objrels[attr]
+ if not rschema.final and isinstance(value, (tuple, list, set, frozenset)):
+ if len(value) == 0:
+ continue # avoid crash with empty IN clause
+ elif len(value) == 1:
+ value = next(iter(value))
+ else:
+ # prepare IN clause
+ pendingrels.append( (attr, role, value) )
+ continue
+ if rschema.final: # attribute
+ relations.append('X %s %%(%s)s' % (attr, attr))
+ attrcache[attr] = value
+ elif value is None:
+ pendingrels.append( (attr, role, value) )
+ else:
+ rvar = attr.upper()
+ if role == 'object':
+ relations.append('%s %s X' % (rvar, attr))
+ else:
+ relations.append('X %s %s' % (attr, rvar))
+ restriction = '%s eid %%(%s)s' % (rvar, attr)
+ if not restriction in restrictions:
+ restrictions.add(restriction)
+ if hasattr(value, 'eid'):
+ value = value.eid
+ qargs[attr] = value
+ rql = u''
+ if relations:
+ rql += ', '.join(relations)
+ if restrictions:
+ rql += ' WHERE %s' % ', '.join(restrictions)
+ return rql, qargs, pendingrels, attrcache
+
+ @classmethod
+ def _cw_handle_pending_relations(cls, eid, pendingrels, execute):
+ for attr, role, values in pendingrels:
+ if role == 'object':
+ restr = 'Y %s X' % attr
+ else:
+ restr = 'X %s Y' % attr
+ if values is None:
+ execute('DELETE %s WHERE X eid %%(x)s' % restr, {'x': eid})
+ continue
+ execute('SET %s WHERE X eid %%(x)s, Y eid IN (%s)' % (
+ restr, ','.join(str(getattr(r, 'eid', r)) for r in values)),
+ {'x': eid}, build_descr=False)
+
+ @classmethod
+ def cw_instantiate(cls, execute, **kwargs):
+ """add a new entity of this given type
+
+ Example (in a shell session):
+
+ >>> companycls = vreg['etypes'].etype_class('Company')
+ >>> personcls = vreg['etypes'].etype_class('Person')
+ >>> c = companycls.cw_instantiate(session.execute, name=u'Logilab')
+ >>> p = personcls.cw_instantiate(session.execute, firstname=u'John', lastname=u'Doe',
+ ... works_for=c)
+
+ You can also set relations where the entity has 'object' role by
+ prefixing the relation name by 'reverse_'. Also, relation values may be
+ an entity or eid, a list of entities or eids.
+ """
+ rql, qargs, pendingrels, attrcache = cls._cw_build_entity_query(kwargs)
+ if rql:
+ rql = 'INSERT %s X: %s' % (cls.__regid__, rql)
+ else:
+ rql = 'INSERT %s X' % (cls.__regid__)
+ try:
+ created = execute(rql, qargs).get_entity(0, 0)
+ except IndexError:
+ raise Exception('could not create a %r with %r (%r)' %
+ (cls.__regid__, rql, qargs))
+ created._cw_update_attr_cache(attrcache)
+ cls._cw_handle_pending_relations(created.eid, pendingrels, execute)
+ return created
+
    def __init__(self, req, rset=None, row=None, col=0):
        """initialize the entity from a result set cell (or as a detached,
        eid-less entity when no rset is given)"""
        AppObject.__init__(self, req, rset=rset, row=row, col=col)
        self._cw_related_cache = {}   # cache for related entities
        self._cw_adapters_cache = {}  # interface name -> adapter (see cw_adapt_to)
        if rset is not None:
            self.eid = rset[row][col]
        else:
            self.eid = None
        self._cw_is_saved = True
        self.cw_attr_cache = {}
+
+ def __repr__(self):
+ return '' % (
+ self.e_schema, self.eid, list(self.cw_attr_cache), id(self))
+
    def __lt__(self, other):
        # entities have no meaningful ordering: fail loudly instead of
        # silently falling back to an arbitrary comparison
        raise NotImplementedError('comparison not implemented for %s' % self.__class__)
+
+ def __eq__(self, other):
+ if isinstance(self.eid, integer_types):
+ return self.eid == other.eid
+ return self is other
+
    def __hash__(self):
        # hash consistently with __eq__: by eid once saved, by identity before
        if isinstance(self.eid, integer_types):
            return self.eid
        return super(Entity, self).__hash__()
+
+ def _cw_update_attr_cache(self, attrcache):
+ trdata = self._cw.transaction_data
+ uncached_attrs = trdata.get('%s.storage-special-process-attrs' % self.eid, set())
+ uncached_attrs.update(trdata.get('%s.dont-cache-attrs' % self.eid, set()))
+ for attr in uncached_attrs:
+ attrcache.pop(attr, None)
+ self.cw_attr_cache.pop(attr, None)
+ self.cw_attr_cache.update(attrcache)
+
+ def _cw_dont_cache_attribute(self, attr, repo_side=False):
+ """Called when some attribute has been transformed by a *storage*,
+ hence the original value should not be cached **by anyone**.
+
+ For example we have a special "fs_importing" mode in BFSS
+ where a file path is given as attribute value and stored as is
+ in the data base. Later access to the attribute will provide
+ the content of the file at the specified path. We do not want
+ the "filepath" value to be cached.
+
+ """
+ trdata = self._cw.transaction_data
+ trdata.setdefault('%s.dont-cache-attrs' % self.eid, set()).add(attr)
+ if repo_side:
+ trdata.setdefault('%s.storage-special-process-attrs' % self.eid, set()).add(attr)
+
+ def __json_encode__(self):
+ """custom json dumps hook to dump the entity's eid
+ which is not part of dict structure itself
+ """
+ dumpable = self.cw_attr_cache.copy()
+ dumpable['eid'] = self.eid
+ return dumpable
+
+ def cw_adapt_to(self, interface):
+ """return an adapter the entity to the given interface name.
+
+ return None if it can not be adapted.
+ """
+ cache = self._cw_adapters_cache
+ try:
+ return cache[interface]
+ except KeyError:
+ adapter = self._cw.vreg['adapters'].select_or_none(
+ interface, self._cw, entity=self)
+ cache[interface] = adapter
+ return adapter
+
+ def has_eid(self): # XXX cw_has_eid
+ """return True if the entity has an attributed eid (False
+ meaning that the entity has to be created
+ """
+ try:
+ int(self.eid)
+ return True
+ except (ValueError, TypeError):
+ return False
+
    def cw_is_saved(self):
        """during entity creation, there is some time during which the entity
        has an eid attributed though it's not saved (eg during
        'before_add_entity' hooks). You can use this method to ensure the entity
        has an eid *and* is saved in its source.
        """
        # _cw_is_saved is initialized to True in __init__; presumably toggled
        # off by the creation machinery — confirm where it is reset
        return self.has_eid() and self._cw_is_saved
+
    @cached
    def cw_metainformation(self):
        """return meta information about this entity, with the 'source' entry
        resolved to the full source definition dict"""
        metas = self._cw.entity_metas(self.eid)
        metas['source'] = self._cw.source_defs()[metas['source']]
        return metas
+
    def cw_check_perm(self, action):
        """check the user may apply `action` on this entity (delegates to the
        entity schema's check_perm)"""
        self.e_schema.check_perm(self._cw, action, eid=self.eid)
+
    def cw_has_perm(self, action):
        """return whether the user may apply `action` on this entity
        (delegates to the entity schema's has_perm)"""
        return self.e_schema.has_perm(self._cw, action, eid=self.eid)
+
+ def view(self, __vid, __registry='views', w=None, initargs=None, **kwargs): # XXX cw_view
+ """shortcut to apply a view on this entity"""
+ if initargs is None:
+ initargs = kwargs
+ else:
+ initargs.update(kwargs)
+ view = self._cw.vreg[__registry].select(__vid, self._cw, rset=self.cw_rset,
+ row=self.cw_row, col=self.cw_col,
+ **initargs)
+ return view.render(row=self.cw_row, col=self.cw_col, w=w, **kwargs)
+
    def absolute_url(self, *args, **kwargs): # XXX cw_url
        """return an absolute url to view this entity"""
        # use *args since we don't want first argument to be "anonymous" to
        # avoid potential clash with kwargs
        if args:
            assert len(args) == 1, 'only 0 or 1 non-named-argument expected'
            method = args[0]
        else:
            method = None
        # in linksearch mode, we don't want external urls else selecting
        # the object for use in the relation is tricky
        # XXX search_state is web specific
        use_ext_id = False
        if 'base_url' not in kwargs and \
           getattr(self._cw, 'search_state', ('normal',))[0] == 'normal':
            sourcemeta = self.cw_metainformation()['source']
            if sourcemeta.get('use-cwuri-as-url'):
                return self.cwuri # XXX consider kwargs?
            if sourcemeta.get('base-url'):
                kwargs['base_url'] = sourcemeta['base-url']
                use_ext_id = True
        if method in (None, 'view'):
            kwargs['_restpath'] = self.rest_path(use_ext_id)
        else:
            # non-view methods address the entity through an rql query
            kwargs['rql'] = 'Any X WHERE X eid %s' % self.eid
        return self._cw.build_url(method, **kwargs)
+
    def rest_path(self, use_ext_eid=False): # XXX cw_rest_path
        """returns a REST-like (relative) path for this entity"""
        mainattr, needcheck = self.cw_rest_attr_info()
        etype = str(self.e_schema)
        path = etype.lower()
        # NOTE(review): `fallback` appears unused below — confirm before removal
        fallback = False
        if mainattr != 'eid':
            value = getattr(self, mainattr)
            if not can_use_rest_path(value):
                mainattr = 'eid'
                path = None
            elif needcheck:
                # make sure url is not ambiguous
                try:
                    nbresults = self.__unique
                except AttributeError:
                    rql = 'Any COUNT(X) WHERE X is %s, X %s %%(value)s' % (
                        etype, mainattr)
                    nbresults = self.__unique = self._cw.execute(rql, {'value' : value})[0][0]
                if nbresults != 1: # ambiguity?
                    mainattr = 'eid'
                    path = None
        if mainattr == 'eid':
            if use_ext_eid:
                value = self.cw_metainformation()['extid']
            else:
                value = self.eid
        if path is None:
            # fallback url: <base-url>/<eid> url is used as cw entities uri,
            # prefer it to <base-url>/<etype>/eid/<eid>
            return text_type(value)
        return u'%s/%s' % (path, self._cw.url_quote(value))
+
+ def cw_attr_metadata(self, attr, metadata):
+ """return a metadata for an attribute (None if unspecified)"""
+ value = getattr(self, '%s_%s' % (attr, metadata), None)
+ if value is None and metadata == 'encoding':
+ value = self._cw.vreg.property_value('ui.encoding')
+ return value
+
+ def printable_value(self, attr, value=_marker, attrtype=None,
+ format='text/html', displaytime=True): # XXX cw_printable_value
+ """return a displayable value (i.e. unicode string) which may contains
+ html tags
+ """
+ attr = str(attr)
+ if value is _marker:
+ value = getattr(self, attr)
+ if isinstance(value, string_types):
+ value = value.strip()
+ if value is None or value == '': # don't use "not", 0 is an acceptable value
+ return u''
+ if attrtype is None:
+ attrtype = self.e_schema.destination(attr)
+ props = self.e_schema.rdef(attr)
+ if attrtype == 'String':
+ # internationalized *and* formatted string such as schema
+ # description...
+ if props.internationalizable:
+ value = self._cw._(value)
+ attrformat = self.cw_attr_metadata(attr, 'format')
+ if attrformat:
+ return self._cw_mtc_transform(value, attrformat, format,
+ self._cw.encoding)
+ elif attrtype == 'Bytes':
+ attrformat = self.cw_attr_metadata(attr, 'format')
+ if attrformat:
+ encoding = self.cw_attr_metadata(attr, 'encoding')
+ return self._cw_mtc_transform(value.getvalue(), attrformat, format,
+ encoding)
+ return u''
+ value = self._cw.printable_value(attrtype, value, props,
+ displaytime=displaytime)
+ if format == 'text/html':
+ value = xml_escape(value)
+ return value
+
+ def _cw_mtc_transform(self, data, format, target_format, encoding,
+ _engine=ENGINE):
+ trdata = TransformData(data, format, encoding, appobject=self)
+ data = _engine.convert(trdata, target_format).decode()
+ if target_format == 'text/html':
+ data = soup2xhtml(data, self._cw.encoding)
+ return data
+
+ # entity cloning ##########################################################
+
+ def copy_relations(self, ceid): # XXX cw_copy_relations
+ """copy relations of the object with the given eid on this
+ object (this method is called on the newly created copy, and
+ ceid designates the original entity).
+
+ By default meta and composite relations are skipped.
+ Override this if you want another behaviour
+ """
+ assert self.has_eid()
+ execute = self._cw.execute
+ skip_copy_for = {'subject': set(), 'object': set()}
+ for rtype in self.skip_copy_for:
+ skip_copy_for['subject'].add(rtype)
+ warn('[3.14] skip_copy_for on entity classes (%s) is deprecated, '
+ 'use cw_skip_for instead with list of couples (rtype, role)' % self.cw_etype,
+ DeprecationWarning)
+ for rtype, role in self.cw_skip_copy_for:
+ assert role in ('subject', 'object'), role
+ skip_copy_for[role].add(rtype)
+ for rschema in self.e_schema.subject_relations():
+ if rschema.type in skip_copy_for['subject']:
+ continue
+ if rschema.final or rschema.meta:
+ continue
+ # skip already defined relations
+ if getattr(self, rschema.type):
+ continue
+ # XXX takefirst=True to remove warning triggered by ambiguous relations
+ rdef = self.e_schema.rdef(rschema, takefirst=True)
+ # skip composite relation
+ if rdef.composite:
+ continue
+ # skip relation with card in ?1 else we either change the copied
+ # object (inlined relation) or insert some inconsistency
+ if rdef.cardinality[1] in '?1':
+ continue
+ rql = 'SET X %s V WHERE X eid %%(x)s, Y eid %%(y)s, Y %s V' % (
+ rschema.type, rschema.type)
+ execute(rql, {'x': self.eid, 'y': ceid})
+ self.cw_clear_relation_cache(rschema.type, 'subject')
+ for rschema in self.e_schema.object_relations():
+ if rschema.meta:
+ continue
+ # skip already defined relations
+ if self.related(rschema.type, 'object'):
+ continue
+ if rschema.type in skip_copy_for['object']:
+ continue
+ # XXX takefirst=True to remove warning triggered by ambiguous relations
+ rdef = self.e_schema.rdef(rschema, 'object', takefirst=True)
+ # skip composite relation
+ if rdef.composite:
+ continue
+ # skip relation with card in ?1 else we either change the copied
+ # object (inlined relation) or insert some inconsistency
+ if rdef.cardinality[0] in '?1':
+ continue
+ rql = 'SET V %s X WHERE X eid %%(x)s, Y eid %%(y)s, V %s Y' % (
+ rschema.type, rschema.type)
+ execute(rql, {'x': self.eid, 'y': ceid})
+ self.cw_clear_relation_cache(rschema.type, 'object')
+
+ # data fetching methods ###################################################
+
+ @cached
+ def as_rset(self): # XXX .cw_as_rset
+ """returns a resultset containing `self` information"""
+ rset = ResultSet([(self.eid,)], 'Any X WHERE X eid %(x)s',
+ {'x': self.eid}, [(self.cw_etype,)])
+ rset.req = self._cw
+ return rset
+
+ def _cw_to_complete_relations(self):
+ """by default, complete final relations when calling .complete()"""
+ for rschema in self.e_schema.subject_relations():
+ if rschema.final:
+ continue
+ targets = rschema.objects(self.e_schema)
+ if rschema.inlined:
+ matching_groups = self._cw.user.matching_groups
+ if all(matching_groups(e.get_groups('read')) and
+ rschema.rdef(self.e_schema, e).get_groups('read')
+ for e in targets):
+ yield rschema, 'subject'
+
+ def _cw_to_complete_attributes(self, skip_bytes=True, skip_pwd=True):
+ for rschema, attrschema in self.e_schema.attribute_definitions():
+ # skip binary data by default
+ if skip_bytes and attrschema.type == 'Bytes':
+ continue
+ attr = rschema.type
+ if attr == 'eid':
+ continue
+ # password retrieval is blocked at the repository server level
+ rdef = rschema.rdef(self.e_schema, attrschema)
+ if not self._cw.user.matching_groups(rdef.get_groups('read')) \
+ or (attrschema.type == 'Password' and skip_pwd):
+ self.cw_attr_cache[attr] = None
+ continue
+ yield attr
+
+ _cw_completed = False
+ def complete(self, attributes=None, skip_bytes=True, skip_pwd=True): # XXX cw_complete
+ """complete this entity by adding missing attributes (i.e. query the
+ repository to fill the entity)
+
+ :type skip_bytes: bool
+ :param skip_bytes:
+ if true, attribute of type Bytes won't be considered
+ """
+ assert self.has_eid()
+ if self._cw_completed:
+ return
+ if attributes is None:
+ self._cw_completed = True
+ varmaker = rqlvar_maker()
+ V = next(varmaker)
+ rql = ['WHERE %s eid %%(x)s' % V]
+ selected = []
+ for attr in (attributes or self._cw_to_complete_attributes(skip_bytes, skip_pwd)):
+ # if attribute already in entity, nothing to do
+ if attr in self.cw_attr_cache:
+ continue
+ # case where attribute must be completed, but is not yet in entity
+ var = next(varmaker)
+ rql.append('%s %s %s' % (V, attr, var))
+ selected.append((attr, var))
+ # +1 since this doesn't include the main variable
+ lastattr = len(selected) + 1
+ # don't fetch extra relation if attributes specified or if the entity is
+ # coming from an external source (may lead to error)
+ if attributes is None and self.cw_metainformation()['source']['uri'] == 'system':
+ # fetch additional relations (restricted to 0..1 relations)
+ for rschema, role in self._cw_to_complete_relations():
+ rtype = rschema.type
+ if self.cw_relation_cached(rtype, role):
+ continue
+ # at this point we suppose that:
+ # * this is a inlined relation
+ # * entity (self) is the subject
+ # * user has read perm on the relation and on the target entity
+ assert rschema.inlined
+ assert role == 'subject'
+ var = next(varmaker)
+ # keep outer join anyway, we don't want .complete to crash on
+ # missing mandatory relation (see #1058267)
+ rql.append('%s %s %s?' % (V, rtype, var))
+ selected.append(((rtype, role), var))
+ if selected:
+ # select V, we need it as the left most selected variable
+ # if some outer join are included to fetch inlined relations
+ rql = 'Any %s,%s %s' % (V, ','.join(var for attr, var in selected),
+ ','.join(rql))
+ try:
+ rset = self._cw.execute(rql, {'x': self.eid}, build_descr=False)[0]
+ except IndexError:
+ raise Exception('unable to fetch attributes for entity with eid %s'
+ % self.eid)
+ # handle attributes
+ for i in range(1, lastattr):
+ self.cw_attr_cache[str(selected[i-1][0])] = rset[i]
+ # handle relations
+ for i in range(lastattr, len(rset)):
+ rtype, role = selected[i-1][0]
+ value = rset[i]
+ if value is None:
+ rrset = ResultSet([], rql, {'x': self.eid})
+ rrset.req = self._cw
+ else:
+ rrset = self._cw.eid_rset(value)
+ self.cw_set_relation_cache(rtype, role, rrset)
+
+ def cw_attr_value(self, name):
+ """get value for the attribute relation, query the repository
+ to get the value if necessary.
+
+ :type name: str
+ :param name: name of the attribute to get
+ """
+ try:
+ return self.cw_attr_cache[name]
+ except KeyError:
+ if not self.cw_is_saved():
+ return None
+ rql = "Any A WHERE X eid %%(x)s, X %s A" % name
+ try:
+ rset = self._cw.execute(rql, {'x': self.eid})
+ except Unauthorized:
+ self.cw_attr_cache[name] = value = None
+ else:
+ assert rset.rowcount <= 1, (self, rql, rset.rowcount)
+ try:
+ self.cw_attr_cache[name] = value = rset.rows[0][0]
+ except IndexError:
+ # probably a multisource error
+ self.critical("can't get value for attribute %s of entity with eid %s",
+ name, self.eid)
+ if self.e_schema.destination(name) == 'String':
+ self.cw_attr_cache[name] = value = self._cw._('unaccessible')
+ else:
+ self.cw_attr_cache[name] = value = None
+ return value
+
+ def related(self, rtype, role='subject', limit=None, entities=False, # XXX .cw_related
+ safe=False, targettypes=None):
+ """returns a resultset of related entities
+
+ :param rtype:
+ the name of the relation, aka relation type
+ :param role:
+ the role played by 'self' in the relation ('subject' or 'object')
+ :param limit:
+ resultset's maximum size
+ :param entities:
+ if True, the entities are returned; if False, a result set is returned
+ :param safe:
+ if True, an empty rset/list of entities will be returned in case of
+ :exc:`Unauthorized`, else (the default), the exception is propagated
+ :param targettypes:
+ a tuple of target entity types to restrict the query
+ """
+ rtype = str(rtype)
+ # Caching restricted/limited results is best avoided.
+ cacheable = limit is None and targettypes is None
+ if cacheable:
+ cache_key = '%s_%s' % (rtype, role)
+ if cache_key in self._cw_related_cache:
+ return self._cw_related_cache[cache_key][entities]
+ if not self.has_eid():
+ if entities:
+ return []
+ return self._cw.empty_rset()
+ rql = self.cw_related_rql(rtype, role, limit=limit, targettypes=targettypes)
+ try:
+ rset = self._cw.execute(rql, {'x': self.eid})
+ except Unauthorized:
+ if not safe:
+ raise
+ rset = self._cw.empty_rset()
+ if entities:
+ if cacheable:
+ self.cw_set_relation_cache(rtype, role, rset)
+ return self.related(rtype, role, entities=entities)
+ return list(rset.entities())
+ else:
+ return rset
+
+ def cw_related_rql(self, rtype, role='subject', targettypes=None, limit=None):
+ vreg = self._cw.vreg
+ rschema = vreg.schema[rtype]
+ select = Select()
+ mainvar, evar = select.get_variable('X'), select.get_variable('E')
+ select.add_selected(mainvar)
+ if limit is not None:
+ select.set_limit(limit)
+ select.add_eid_restriction(evar, 'x', 'Substitute')
+ if role == 'subject':
+ rel = make_relation(evar, rtype, (mainvar,), VariableRef)
+ select.add_restriction(rel)
+ if targettypes is None:
+ targettypes = rschema.objects(self.e_schema)
+ else:
+ select.add_constant_restriction(mainvar, 'is',
+ targettypes, 'etype')
+ gcard = greater_card(rschema, (self.e_schema,), targettypes, 0)
+ else:
+ rel = make_relation(mainvar, rtype, (evar,), VariableRef)
+ select.add_restriction(rel)
+ if targettypes is None:
+ targettypes = rschema.subjects(self.e_schema)
+ else:
+ select.add_constant_restriction(mainvar, 'is', targettypes,
+ 'etype')
+ gcard = greater_card(rschema, targettypes, (self.e_schema,), 1)
+ etypecls = vreg['etypes'].etype_class(targettypes[0])
+ if len(targettypes) > 1:
+ fetchattrs = vreg['etypes'].fetch_attrs(targettypes)
+ self._fetch_ambiguous_rtypes(select, mainvar, fetchattrs,
+ targettypes, vreg.schema)
+ else:
+ fetchattrs = etypecls.fetch_attrs
+ etypecls.fetch_rqlst(self._cw.user, select, mainvar, fetchattrs,
+ settype=False)
+ # optimisation: remove ORDERBY if cardinality is 1 or ? (though
+ # greater_card returns 1 for both those cases)
+ if gcard == '1':
+ select.remove_sort_terms()
+ elif not select.orderby:
+ # if modification_date is already retrieved, we use it instead
+ # of adding another variable for sorting. This should not be
+ # problematic, but it is with sqlserver, see ticket #694445
+ for rel in select.where.get_nodes(RqlRelation):
+ if (rel.r_type == 'modification_date'
+ and rel.children[0].variable == mainvar
+ and rel.children[1].operator == '='):
+ var = rel.children[1].children[0].variable
+ select.add_sort_var(var, asc=False)
+ break
+ else:
+ mdvar = select.make_variable()
+ rel = make_relation(mainvar, 'modification_date',
+ (mdvar,), VariableRef)
+ select.add_restriction(rel)
+ select.add_sort_var(mdvar, asc=False)
+ return select.as_string()
+
+ # generic vocabulary methods ##############################################
+
+ def cw_linkable_rql(self, rtype, targettype, role, ordermethod=None,
+ vocabconstraints=True, lt_infos={}, limit=None):
+ """build a rql to fetch targettype entities either related or unrelated
+ to this entity using (rtype, role) relation.
+
+ Consider relation permissions so that returned entities may be actually
+ linked by `rtype`.
+
+ `lt_infos` are supplementary information, usually coming from __linkto
+ parameter, that can help further restricting the results in case current
+ entity is not yet created. It is a dict describing entities the current
+ entity will be linked to, whose keys are (rtype, role) tuples and values
+ are a list of eids.
+ """
+ return self._cw_compute_linkable_rql(rtype, targettype, role, ordermethod=None,
+ vocabconstraints=vocabconstraints,
+ lt_infos=lt_infos, limit=limit,
+ unrelated_only=False)
+
+ def cw_unrelated_rql(self, rtype, targettype, role, ordermethod=None,
+ vocabconstraints=True, lt_infos={}, limit=None):
+ """build a rql to fetch `targettype` entities unrelated to this entity
+ using (rtype, role) relation.
+
+ Consider relation permissions so that returned entities may be actually
+ linked by `rtype`.
+
+ `lt_infos` are supplementary information, usually coming from __linkto
+ parameter, that can help further restricting the results in case current
+ entity is not yet created. It is a dict describing entities the current
+ entity will be linked to, whose keys are (rtype, role) tuples and values
+ are a list of eids.
+ """
+ return self._cw_compute_linkable_rql(rtype, targettype, role, ordermethod=None,
+ vocabconstraints=vocabconstraints,
+ lt_infos=lt_infos, limit=limit,
+ unrelated_only=True)
+
+ def _cw_compute_linkable_rql(self, rtype, targettype, role, ordermethod=None,
+ vocabconstraints=True, lt_infos={}, limit=None,
+ unrelated_only=False):
+ """build a rql to fetch `targettype` entities that may be related to
+ this entity using the (rtype, role) relation.
+
+ By default (unrelated_only=False), this includes the already linked
+ entities as well as the unrelated ones. If `unrelated_only` is True, the
+ rql filters out the already related entities.
+ """
+ ordermethod = ordermethod or 'fetch_unrelated_order'
+ rschema = self._cw.vreg.schema.rschema(rtype)
+ rdef = rschema.role_rdef(self.e_schema, targettype, role)
+ rewriter = RQLRewriter(self._cw)
+ select = Select()
+ # initialize some variables according to the `role` of `self` in the
+ # relation (variable names must respect constraints conventions):
+ # * variable for myself (`evar`)
+ # * variable for searched entities (`searchedvar`)
+ if role == 'subject':
+ evar = subjvar = select.get_variable('S')
+ searchedvar = objvar = select.get_variable('O')
+ else:
+ searchedvar = subjvar = select.get_variable('S')
+ evar = objvar = select.get_variable('O')
+ select.add_selected(searchedvar)
+ if limit is not None:
+ select.set_limit(limit)
+ # initialize some variables according to `self` existence
+ if rdef.role_cardinality(neg_role(role)) in '?1':
+ # if cardinality in '1?', we want a target entity which isn't
+ # already linked using this relation
+ variable = select.make_variable()
+ if role == 'subject':
+ rel = make_relation(variable, rtype, (searchedvar,), VariableRef)
+ else:
+ rel = make_relation(searchedvar, rtype, (variable,), VariableRef)
+ select.add_restriction(Not(rel))
+ elif self.has_eid() and unrelated_only:
+ # elif we have an eid, we don't want a target entity which is
+ # already linked to ourself through this relation
+ rel = make_relation(subjvar, rtype, (objvar,), VariableRef)
+ select.add_restriction(Not(rel))
+ if self.has_eid():
+ rel = make_relation(evar, 'eid', ('x', 'Substitute'), Constant)
+ select.add_restriction(rel)
+ args = {'x': self.eid}
+ if role == 'subject':
+ sec_check_args = {'fromeid': self.eid}
+ else:
+ sec_check_args = {'toeid': self.eid}
+ existant = None # instead of 'SO', improve perfs
+ else:
+ args = {}
+ sec_check_args = {}
+ existant = searchedvar.name
+ # undefine unused evar, or the type resolver will consider it
+ select.undefine_variable(evar)
+ # retrieve entity class for targettype to compute base rql
+ etypecls = self._cw.vreg['etypes'].etype_class(targettype)
+ etypecls.fetch_rqlst(self._cw.user, select, searchedvar,
+ ordermethod=ordermethod)
+ # from now on, we need variable type resolving
+ self._cw.vreg.solutions(self._cw, select, args)
+ # insert RQL expressions for schema constraints into the rql syntax tree
+ if vocabconstraints:
+ cstrcls = (RQLVocabularyConstraint, RQLConstraint)
+ else:
+ cstrcls = RQLConstraint
+ lt_infos = pruned_lt_info(self.e_schema, lt_infos or {})
+ # if there are still lt_infos, use set to keep track of added eid
+ # relations (adding twice the same eid relation is incorrect RQL)
+ eidvars = set()
+ for cstr in rdef.constraints:
+ # consider constraint.mainvars to check if constraint apply
+ if isinstance(cstr, cstrcls) and searchedvar.name in cstr.mainvars:
+ if not self.has_eid():
+ if lt_infos:
+ # we can perhaps further restrict with linkto infos using
+ # a custom constraint built from cstr and lt_infos
+ cstr = build_cstr_with_linkto_infos(
+ cstr, args, searchedvar, evar, lt_infos, eidvars)
+ if cstr is None:
+ continue # could not build constraint -> discard
+ elif evar.name in cstr.mainvars:
+ continue
+ # compute a varmap suitable to RQLRewriter.rewrite argument
+ varmap = dict((v, v) for v in (searchedvar.name, evar.name)
+ if v in select.defined_vars and v in cstr.mainvars)
+ # rewrite constraint by constraint since we want an AND between
+ # expressions.
+ rewriter.rewrite(select, [(varmap, (cstr,))], args, existant)
+ # insert security RQL expressions granting the permission to 'add' the
+ # relation into the rql syntax tree, if necessary
+ rqlexprs = rdef.get_rqlexprs('add')
+ if not self.has_eid():
+ rqlexprs = [rqlexpr for rqlexpr in rqlexprs
+ if searchedvar.name in rqlexpr.mainvars]
+ if rqlexprs and not rdef.has_perm(self._cw, 'add', **sec_check_args):
+ # compute a varmap suitable to RQLRewriter.rewrite argument
+ varmap = dict((v, v) for v in (searchedvar.name, evar.name)
+ if v in select.defined_vars)
+ # rewrite all expressions at once since we want an OR between them.
+ rewriter.rewrite(select, [(varmap, rqlexprs)], args, existant)
+ # ensure we have an order defined
+ if not select.orderby:
+ select.add_sort_var(select.defined_vars[searchedvar.name])
+ # we're done, turn the rql syntax tree as a string
+ rql = select.as_string()
+ return rql, args
+
+ def unrelated(self, rtype, targettype, role='subject', limit=None,
+ ordermethod=None, lt_infos={}): # XXX .cw_unrelated
+ """return a result set of target type objects that may be related
+ by a given relation, with self as subject or object
+ """
+ try:
+ rql, args = self.cw_unrelated_rql(rtype, targettype, role, limit=limit,
+ ordermethod=ordermethod, lt_infos=lt_infos)
+ except Unauthorized:
+ return self._cw.empty_rset()
+ return self._cw.execute(rql, args)
+
+ # relations cache handling #################################################
+
+ def cw_relation_cached(self, rtype, role):
+ """return None if the given relation isn't already cached on the
+ instance, else the content of the cache (a 2-tuple (rset, entities)).
+ """
+ return self._cw_related_cache.get('%s_%s' % (rtype, role))
+
+ def cw_set_relation_cache(self, rtype, role, rset):
+ """set cached values for the given relation"""
+ if rset:
+ related = list(rset.entities(0))
+ rschema = self._cw.vreg.schema.rschema(rtype)
+ if role == 'subject':
+ rcard = rschema.rdef(self.e_schema, related[0].e_schema).cardinality[1]
+ target = 'object'
+ else:
+ rcard = rschema.rdef(related[0].e_schema, self.e_schema).cardinality[0]
+ target = 'subject'
+ if rcard in '?1':
+ for rentity in related:
+ rentity._cw_related_cache['%s_%s' % (rtype, target)] = (
+ self.as_rset(), (self,))
+ else:
+ related = ()
+ self._cw_related_cache['%s_%s' % (rtype, role)] = (rset, related)
+
+ def cw_clear_relation_cache(self, rtype=None, role=None):
+ """clear cached values for the given relation or the entire cache if
+ no relation is given
+ """
+ if rtype is None:
+ self._cw_related_cache.clear()
+ self._cw_adapters_cache.clear()
+ else:
+ assert role
+ self._cw_related_cache.pop('%s_%s' % (rtype, role), None)
+
+ def cw_clear_all_caches(self):
+ """flush all caches on this entity. Further attributes/relations access
+ will trigger new database queries to get back values.
+
+ If you use custom caches on your entity class (take care to @cached!),
+ you should override this method to clear them as well.
+ """
+ # clear attributes cache
+ self._cw_completed = False
+ self.cw_attr_cache.clear()
+ # clear relations cache
+ self.cw_clear_relation_cache()
+ # rest path unique cache
+ try:
+ del self.__unique
+ except AttributeError:
+ pass
+
+ # raw edition utilities ###################################################
+
+ def cw_set(self, **kwargs):
+ """update this entity using given attributes / relation, working in the
+ same fashion as :meth:`cw_instantiate`.
+
+ Example (in a shell session):
+
+ >>> c = rql('Any X WHERE X is Company').get_entity(0, 0)
+ >>> p = rql('Any X WHERE X is Person').get_entity(0, 0)
+ >>> c.cw_set(name=u'Logilab')
+ >>> p.cw_set(firstname=u'John', lastname=u'Doe', works_for=c)
+
+ You can also set relations where the entity has 'object' role by
+ prefixing the relation name by 'reverse_'. Also, relation values may be
+ an entity or eid, a list of entities or eids, or None (meaning that all
+ relations of the given type from or to this object should be deleted).
+ """
+ assert kwargs
+ assert self.cw_is_saved(), "should not call set_attributes while entity "\
+ "hasn't been saved yet"
+ rql, qargs, pendingrels, attrcache = self._cw_build_entity_query(kwargs)
+ if rql:
+ rql = 'SET ' + rql
+ qargs['x'] = self.eid
+ if ' WHERE ' in rql:
+ rql += ', X eid %(x)s'
+ else:
+ rql += ' WHERE X eid %(x)s'
+ self._cw.execute(rql, qargs)
+ # update current local object _after_ the rql query to avoid
+ # interferences between the query execution itself and the cw_edited /
+ # skip_security machinery
+ self._cw_update_attr_cache(attrcache)
+ self._cw_handle_pending_relations(self.eid, pendingrels, self._cw.execute)
+ # XXX update relation cache
+
+ def cw_delete(self, **kwargs):
+ assert self.has_eid(), self.eid
+ self._cw.execute('DELETE %s X WHERE X eid %%(x)s' % self.e_schema,
+ {'x': self.eid}, **kwargs)
+
+ # server side utilities ####################################################
+
+ def _cw_clear_local_perm_cache(self, action):
+ for rqlexpr in self.e_schema.get_rqlexprs(action):
+ self._cw.local_perm_cache.pop((rqlexpr.eid, (('x', self.eid),)), None)
+
+ # deprecated stuff #########################################################
+
+ @deprecated('[3.16] use cw_set() instead of set_attributes()')
+ def set_attributes(self, **kwargs): # XXX cw_set_attributes
+ if kwargs:
+ self.cw_set(**kwargs)
+
+ @deprecated('[3.16] use cw_set() instead of set_relations()')
+ def set_relations(self, **kwargs): # XXX cw_set_relations
+ """add relations to the given object. To set a relation where this entity
+ is the object of the relation, use 'reverse_' as argument name.
+
+ Values may be an entity or eid, a list of entities or eids, or None
+ (meaning that all relations of the given type from or to this object
+ should be deleted).
+ """
+ if kwargs:
+ self.cw_set(**kwargs)
+
+ @deprecated('[3.13] use entity.cw_clear_all_caches()')
+ def clear_all_caches(self):
+ return self.cw_clear_all_caches()
+
+
+# attribute and relation descriptors ##########################################
+
+class Attribute(object):
+ """descriptor that controls schema attribute access"""
+
+ def __init__(self, attrname):
+ assert attrname != 'eid'
+ self._attrname = attrname
+
+ def __get__(self, eobj, eclass):
+ if eobj is None:
+ return self
+ return eobj.cw_attr_value(self._attrname)
+
+ @deprecated('[3.10] assign to entity.cw_attr_cache[attr] or entity.cw_edited[attr]')
+ def __set__(self, eobj, value):
+ if hasattr(eobj, 'cw_edited') and not eobj.cw_edited.saved:
+ eobj.cw_edited[self._attrname] = value
+ else:
+ eobj.cw_attr_cache[self._attrname] = value
+
+
+class Relation(object):
+ """descriptor that controls schema relation access"""
+
+ def __init__(self, rschema, role):
+ self._rtype = rschema.type
+ self._role = role
+
+ def __get__(self, eobj, eclass):
+ if eobj is None:
+ raise AttributeError('%s can only be accessed from instances'
+ % self._rtype)
+ return eobj.related(self._rtype, self._role, entities=True)
+
+ def __set__(self, eobj, value):
+ raise NotImplementedError
+
+
+from logging import getLogger
+from cubicweb import set_log_methods
+set_log_methods(Entity, getLogger('cubicweb.entity'))
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/etwist/__init__.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/etwist/__init__.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,20 @@
+# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+ # with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+""" CW - nevow/twisted client
+
+"""
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/etwist/http.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/etwist/http.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,43 @@
+"""twisted server for CubicWeb web instances
+
+:organization: Logilab
+:copyright: 2001-2011 LOGILAB S.A. (Paris, FRANCE), license is LGPL v2.
+:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
+:license: GNU Lesser General Public License, v2.1 - http://www.gnu.org/licenses
+"""
+
+__docformat__ = "restructuredtext en"
+
+class HTTPResponse(object):
+ """An object representing an HTTP Response to be sent to the client.
+ """
+ def __init__(self, twisted_request, code=None, headers=None, stream=None):
+ self._headers_out = headers
+ self._twreq = twisted_request
+ self._stream = stream
+ self._code = code
+
+ self._init_headers()
+ self._finalize()
+
+ def _init_headers(self):
+ if self._headers_out is None:
+ return
+ # initialize headers
+ for k, values in self._headers_out.getAllRawHeaders():
+ self._twreq.responseHeaders.setRawHeaders(k, values)
+ # add content-length if not present
+ if (self._headers_out.getHeader('content-length') is None
+ and self._stream is not None):
+ self._twreq.setHeader('content-length', len(self._stream))
+
+ def _finalize(self):
+ # we must set code before writing anything, else it's too late
+ if self._code is not None:
+ self._twreq.setResponseCode(self._code)
+ if self._stream is not None:
+ self._twreq.write(str(self._stream))
+ self._twreq.finish()
+
+ def __repr__(self):
+ return "<%s.%s code=%d>" % (self.__module__, self.__class__.__name__, self._code)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/etwist/request.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/etwist/request.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,59 @@
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+ # with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""Twisted request handler for CubicWeb"""
+
+__docformat__ = "restructuredtext en"
+
+
+from cubicweb.web.request import CubicWebRequestBase
+
+
+class CubicWebTwistedRequestAdapter(CubicWebRequestBase):
+ """ from twisted .req to cubicweb .form
+ req.files are put into .form[]
+ """
+ def __init__(self, req, vreg, https):
+ self._twreq = req
+ super(CubicWebTwistedRequestAdapter, self).__init__(
+ vreg, https, req.args, headers=req.received_headers)
+ for key, name_stream_list in req.files.items():
+ for name, stream in name_stream_list:
+ if name is not None:
+ name = unicode(name, self.encoding)
+ self.form.setdefault(key, []).append((name, stream))
+ # 3.16.4 backward compat
+ if len(self.form[key]) == 1:
+ self.form[key] = self.form[key][0]
+ self.content = self._twreq.content # stream
+
+ def http_method(self):
+ """returns 'POST', 'GET', 'HEAD', etc."""
+ return self._twreq.method
+
+ def relative_path(self, includeparams=True):
+ """return the normalized path of the request (ie at least relative to
+ the instance's root, but some other normalization may be needed so that
+ the returned path may be used to compare to generated urls
+
+ :param includeparams:
+ boolean indicating if GET form parameters should be kept in the path
+ """
+ path = self._twreq.uri[1:] # remove the root '/'
+ if not includeparams:
+ path = path.split('?', 1)[0]
+ return path
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/etwist/server.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/etwist/server.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,298 @@
+# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""twisted server for CubicWeb web instances"""
+__docformat__ = "restructuredtext en"
+
+import sys
+import select
+import traceback
+import threading
+from cgi import FieldStorage, parse_header
+
+from six.moves.urllib.parse import urlsplit, urlunsplit
+
+from cubicweb.statsd_logger import statsd_timeit
+
+from twisted.internet import reactor, task, threads
+from twisted.web import http, server
+from twisted.web import resource
+from twisted.web.server import NOT_DONE_YET
+
+
+from logilab.mtconverter import xml_escape
+from logilab.common.decorators import monkeypatch
+
+from cubicweb import ConfigurationError, CW_EVENT_MANAGER
+from cubicweb.utils import json_dumps
+from cubicweb.web import DirectResponse
+from cubicweb.web.application import CubicWebPublisher
+from cubicweb.etwist.request import CubicWebTwistedRequestAdapter
+from cubicweb.etwist.http import HTTPResponse
+
+def start_task(interval, func):
+ lc = task.LoopingCall(func)
+ # wait until interval has expired to actually start the task, else we have
+ # to wait all tasks to be finished for the server to be actually started
+ lc.start(interval, now=False)
+
+
+class CubicWebRootResource(resource.Resource):
+ def __init__(self, config, repo):
+ resource.Resource.__init__(self)
+ self.config = config
+ # instantiate publisher here and not in init_publisher to get some
+ # checks done before daemonization (eg versions consistency)
+ self.appli = CubicWebPublisher(repo, config)
+ self.base_url = config['base-url']
+ self.https_url = config['https-url']
+ global MAX_POST_LENGTH
+ MAX_POST_LENGTH = config['max-post-length']
+
+ def init_publisher(self):
+ config = self.config
+ # when we have an in-memory repository, clean unused sessions every XX
+ # seconds and properly shutdown the server
+ if config['repository-uri'] == 'inmemory://':
+ if config.mode != 'test':
+ reactor.addSystemEventTrigger('before', 'shutdown',
+ self.shutdown_event)
+ self.appli.repo.start_looping_tasks()
+ self.set_url_rewriter()
+ CW_EVENT_MANAGER.bind('after-registry-reload', self.set_url_rewriter)
+
+ def start_service(self):
+ start_task(self.appli.session_handler.clean_sessions_interval,
+ self.appli.session_handler.clean_sessions)
+
+ def set_url_rewriter(self):
+ self.url_rewriter = self.appli.vreg['components'].select_or_none('urlrewriter')
+
+ def shutdown_event(self):
+ """callback fired when the server is shutting down to properly
+ clean opened sessions
+ """
+ self.appli.repo.shutdown()
+
+ def getChild(self, path, request):
+ """Indicate which resource to use to process down the URL's path"""
+ return self
+
+ def render(self, request):
+ """Render a page from the root resource"""
+ # reload modified files in debug mode
+ if self.config.debugmode:
+ self.config.uiprops.reload_if_needed()
+ if self.https_url:
+ self.config.https_uiprops.reload_if_needed()
+ self.appli.vreg.reload_if_needed()
+ if self.config['profile']: # default profiler don't trace threads
+ return self.render_request(request)
+ else:
+ deferred = threads.deferToThread(self.render_request, request)
+ return NOT_DONE_YET
+
+ @statsd_timeit
+ def render_request(self, request):
+ try:
+ # processing HUGE files (hundred of megabytes) in http.processReceived
+ # blocks other HTTP requests processing
+ # due to the clumsy & slow parsing algorithm of cgi.FieldStorage
+ # so we deferred that part to the cubicweb thread
+ request.process_multipart()
+ return self._render_request(request)
+ except Exception:
+ trace = traceback.format_exc()
+            return HTTPResponse(stream='<div class="pyerror"><pre>%s</pre></div>' % xml_escape(trace),
+                                code=500, twisted_request=request)
+
+ def _render_request(self, request):
+ origpath = request.path
+ host = request.host
+ # dual http/https access handling: expect a rewrite rule to prepend
+ # 'https' to the path to detect https access
+ https = False
+ if origpath.split('/', 2)[1] == 'https':
+ origpath = origpath[6:]
+ request.uri = request.uri[6:]
+ https = True
+ if self.url_rewriter is not None:
+ # XXX should occur before authentication?
+ path = self.url_rewriter.rewrite(host, origpath, request)
+ request.uri.replace(origpath, path, 1)
+ else:
+ path = origpath
+ req = CubicWebTwistedRequestAdapter(request, self.appli.vreg, https)
+ try:
+ ### Try to generate the actual request content
+ content = self.appli.handle_request(req, path)
+ except DirectResponse as ex:
+ return ex.response
+ # at last: create twisted object
+ return HTTPResponse(code = req.status_out,
+ headers = req.headers_out,
+ stream = content,
+ twisted_request=req._twreq)
+
+ # these are overridden by set_log_methods below
+ # only defining here to prevent pylint from complaining
+ @classmethod
+ def debug(cls, msg, *a, **kw):
+ pass
+ info = warning = error = critical = exception = debug
+
+
+JSON_PATHS = set(('json',))
+FRAME_POST_PATHS = set(('validateform',))
+
+orig_gotLength = http.Request.gotLength
+@monkeypatch(http.Request)
+def gotLength(self, length):
+ orig_gotLength(self, length)
+ if length > MAX_POST_LENGTH: # length is 0 on GET
+ path = self.channel._path.split('?', 1)[0].rstrip('/').rsplit('/', 1)[-1]
+ self.clientproto = 'HTTP/1.1' # not yet initialized
+ self.channel.persistent = 0 # force connection close on cleanup
+ self.setResponseCode(http.REQUEST_ENTITY_TOO_LARGE)
+ if path in JSON_PATHS: # XXX better json path detection
+ self.setHeader('content-type',"application/json")
+ body = json_dumps({'reason': 'request max size exceeded'})
+ elif path in FRAME_POST_PATHS: # XXX better frame post path detection
+ self.setHeader('content-type',"text/html")
+            body = ('<script type="text/javascript">'
+                    'window.parent.handleFormValidationResponse(null, null, null, %s, null);'
+                    '</script>' % json_dumps( (False, 'request max size exceeded', None) ))
+ else:
+ self.setHeader('content-type',"text/html")
+            body = ('<html><head><title>Processing Failed</title></head>'
+                    '<body><b>request max size exceeded</b></body></html>')
+ self.setHeader('content-length', str(len(body)))
+ self.write(body)
+ # see request.finish(). Done here since we get error due to not full
+ # initialized request
+ self.finished = 1
+ if not self.queued:
+ self._cleanup()
+ for d in self.notifications:
+ d.callback(None)
+ self.notifications = []
+
+@monkeypatch(http.Request)
+def requestReceived(self, command, path, version):
+ """Called by channel when all data has been received.
+
+ This method is not intended for users.
+ """
+ self.content.seek(0, 0)
+ self.args = {}
+ self.files = {}
+ self.stack = []
+ self.method, self.uri = command, path
+ self.clientproto = version
+ x = self.uri.split('?', 1)
+ if len(x) == 1:
+ self.path = self.uri
+ else:
+ self.path, argstring = x
+ self.args = http.parse_qs(argstring, 1)
+ # cache the client and server information, we'll need this later to be
+ # serialized and sent with the request so CGIs will work remotely
+ self.client = self.channel.transport.getPeer()
+ self.host = self.channel.transport.getHost()
+ # Argument processing
+ ctype = self.getHeader('content-type')
+ self._do_process_multipart = False
+ if self.method == "POST" and ctype:
+ key, pdict = parse_header(ctype)
+ if key == 'application/x-www-form-urlencoded':
+ self.args.update(http.parse_qs(self.content.read(), 1))
+ self.content.seek(0)
+ elif key == 'multipart/form-data':
+            # defer this as it can be extremely time consuming
+ # with big files
+ self._do_process_multipart = True
+ self.process()
+
+@monkeypatch(http.Request)
+def process_multipart(self):
+ if not self._do_process_multipart:
+ return
+ form = FieldStorage(self.content, self.received_headers,
+ environ={'REQUEST_METHOD': 'POST'},
+ keep_blank_values=1,
+ strict_parsing=1)
+ for key in form:
+ values = form[key]
+ if not isinstance(values, list):
+ values = [values]
+ for value in values:
+ if value.filename:
+ if value.done != -1: # -1 is transfer has been interrupted
+ self.files.setdefault(key, []).append((value.filename, value.file))
+ else:
+ self.files.setdefault(key, []).append((None, None))
+ else:
+ self.args.setdefault(key, []).append(value.value)
+
+from logging import getLogger
+from cubicweb import set_log_methods
+LOGGER = getLogger('cubicweb.twisted')
+set_log_methods(CubicWebRootResource, LOGGER)
+
+def run(config, debug=None, repo=None):
+    # repo may be passed during test.
+ #
+ # Test has already created a repo object so we should not create a new one.
+ # Explicitly passing the repo object avoid relying on the fragile
+ # config.repository() cache. We could imagine making repo a mandatory
+ # argument and receives it from the starting command directly.
+ if debug is not None:
+ config.debugmode = debug
+ config.check_writeable_uid_directory(config.appdatahome)
+ # create the site
+ if repo is None:
+ repo = config.repository()
+ root_resource = CubicWebRootResource(config, repo)
+ website = server.Site(root_resource)
+ # serve it via standard HTTP on port set in the configuration
+ port = config['port'] or 8080
+ interface = config['interface']
+ reactor.suggestThreadPoolSize(config['webserver-threadpool-size'])
+ reactor.listenTCP(port, website, interface=interface)
+ if not config.debugmode:
+ if sys.platform == 'win32':
+ raise ConfigurationError("Under windows, you must use the service management "
+ "commands (e.g : 'net start my_instance)'")
+ from logilab.common.daemon import daemonize
+ LOGGER.info('instance started in the background on %s', root_resource.base_url)
+ whichproc = daemonize(config['pid-file'], umask=config['umask'])
+ if whichproc: # 1 = orig process, 2 = first fork, None = second fork (eg daemon process)
+ return whichproc # parent process
+ root_resource.init_publisher() # before changing uid
+ if config['uid'] is not None:
+ from logilab.common.daemon import setugid
+ setugid(config['uid'])
+ root_resource.start_service()
+ LOGGER.info('instance started on %s', root_resource.base_url)
+    # avoid annoying warning if not in Main Thread
+ signals = threading.currentThread().getName() == 'MainThread'
+ if config['profile']:
+ import cProfile
+ cProfile.runctx('reactor.run(installSignalHandlers=%s)' % signals,
+ globals(), locals(), config['profile'])
+ else:
+ reactor.run(installSignalHandlers=signals)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/etwist/service.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/etwist/service.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,99 @@
+# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+from __future__ import print_function
+
+import os
+import sys
+
+try:
+ import win32serviceutil
+ import win32service
+except ImportError:
+ print('Win32 extensions for Python are likely not installed.')
+ sys.exit(3)
+
+from os.path import join
+
+from cubicweb.etwist.server import (CubicWebRootResource, reactor, server)
+
+from logilab.common.shellutils import rm
+
+import logging
+from logging import getLogger, handlers
+from cubicweb import set_log_methods
+from cubicweb.cwconfig import CubicWebConfiguration as cwcfg
+
+def _check_env(env):
+ env_vars = ('CW_INSTANCES_DIR', 'CW_INSTANCES_DATA_DIR', 'CW_RUNTIME_DIR')
+ for var in env_vars:
+ if var not in env:
+ raise Exception('The environment variables %s must be set.' % \
+ ', '.join(env_vars))
+ if not env.get('USERNAME'):
+ env['USERNAME'] = 'cubicweb'
+
+class CWService(object, win32serviceutil.ServiceFramework):
+ _svc_name_ = None
+ _svc_display_name_ = None
+ instance = None
+
+ def __init__(self, *args, **kwargs):
+ win32serviceutil.ServiceFramework.__init__(self, *args, **kwargs)
+ cwcfg.load_cwctl_plugins()
+ logger = getLogger('cubicweb')
+ set_log_methods(CubicWebRootResource, logger)
+
+ def SvcStop(self):
+ self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
+ logger = getLogger('cubicweb.twisted')
+ logger.info('stopping %s service' % self.instance)
+ reactor.stop()
+ self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
+
+ def SvcDoRun(self):
+ self.ReportServiceStatus(win32service.SERVICE_START_PENDING)
+ logger = getLogger('cubicweb.twisted')
+ handler = handlers.NTEventLogHandler('cubicweb')
+ handler.setLevel(logging.INFO)
+ logger.addHandler(handler)
+ logger.info('starting %s service' % self.instance)
+ try:
+ _check_env(os.environ)
+ # create the site
+ config = cwcfg.config_for(self.instance)
+ config.init_log(force=True)
+ config.debugmode = False
+ logger.info('starting cubicweb instance %s ', self.instance)
+ config.info('clear ui caches')
+ for cachedir in ('uicache', 'uicachehttps'):
+ rm(join(config.appdatahome, cachedir, '*'))
+ root_resource = CubicWebRootResource(config, config.repository())
+ website = server.Site(root_resource)
+ # serve it via standard HTTP on port set in the configuration
+ port = config['port'] or 8080
+ logger.info('listening on port %s' % port)
+ reactor.listenTCP(port, website)
+ root_resource.init_publisher()
+ root_resource.start_service()
+ logger.info('instance started on %s', root_resource.base_url)
+ self.ReportServiceStatus(win32service.SERVICE_RUNNING)
+ reactor.run()
+ except Exception as e:
+ logger.error('service %s stopped (cause: %s)' % (self.instance, e))
+ logger.exception('what happened ...')
+ self.ReportServiceStatus(win32service.SERVICE_STOPPED)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/etwist/test/data/views.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/etwist/test/data/views.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,29 @@
+# copyright 2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""only for unit tests !"""
+
+from cubicweb.view import View
+from cubicweb.predicates import match_http_method
+
+class PutView(View):
+ __regid__ = 'put'
+ __select__ = match_http_method('PUT') | match_http_method('POST')
+ binary = True
+
+ def call(self):
+ self.w(self._cw.content.read())
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/etwist/test/requirements.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/etwist/test/requirements.txt Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1 @@
+Twisted
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/etwist/test/unittest_server.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/etwist/test/unittest_server.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,38 @@
+# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+
+import os, os.path as osp, glob
+import urllib
+
+from cubicweb.devtools.httptest import CubicWebServerTC
+
+
+class ETwistHTTPTC(CubicWebServerTC):
+ def test_put_content(self):
+ data = {'hip': 'hop'}
+ headers = {'Content-Type': 'application/x-www-form-urlencoded'}
+ body = urllib.urlencode(data)
+ response = self.web_request('?vid=put', method='PUT', body=body)
+ self.assertEqual(body, response.body)
+ response = self.web_request('?vid=put', method='POST', body=body,
+ headers=headers)
+ self.assertEqual(body, response.body)
+
+if __name__ == '__main__':
+ from logilab.common.testlib import unittest_main
+ unittest_main()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/etwist/twconfig.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/etwist/twconfig.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,115 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""twisted server configurations:
+
+* the "all-in-one" configuration to get a web instance running in a twisted
+ web server integrating a repository server in the same process (only available
+ if the repository part of the software is installed
+"""
+__docformat__ = "restructuredtext en"
+
+from os.path import join
+
+from logilab.common.configuration import Method, merge_options
+
+from cubicweb.cwconfig import CONFIGURATIONS
+from cubicweb.web.webconfig import WebConfiguration
+
+
+class WebConfigurationBase(WebConfiguration):
+ """web instance (in a twisted web server) client of a RQL server"""
+
+ options = merge_options((
+ # ctl configuration
+ ('port',
+ {'type' : 'int',
+ 'default': None,
+ 'help': 'http server port number (default to 8080)',
+ 'group': 'web', 'level': 0,
+ }),
+ ('interface',
+ {'type' : 'string',
+ 'default': "",
+ 'help': 'http server address on which to listen (default to everywhere)',
+ 'group': 'web', 'level': 1,
+ }),
+ ('max-post-length',
+ {'type' : 'bytes',
+ 'default': '100MB',
+ 'help': 'maximum length of HTTP request. Default to 100 MB.',
+ 'group': 'web', 'level': 1,
+ }),
+ ('profile',
+ {'type' : 'string',
+ 'default': None,
+ 'help': 'profile code and use the specified file to store stats if this option is set',
+ 'group': 'web', 'level': 3,
+ }),
+ ('host',
+ {'type' : 'string',
+ 'default': None,
+ 'help': 'host name if not correctly detectable through gethostname',
+ 'group': 'main', 'level': 1,
+ }),
+ ('pid-file',
+ {'type' : 'string',
+ 'default': Method('default_pid_file'),
+ 'help': 'repository\'s pid file',
+ 'group': 'main', 'level': 2,
+ }),
+ ('uid',
+ {'type' : 'string',
+ 'default': None,
+ 'help': 'if this option is set, use the specified user to start \
+the repository rather than the user running the command',
+ 'group': 'main', 'level': WebConfiguration.mode == 'system'
+ }),
+ ('webserver-threadpool-size',
+ {'type': 'int',
+ 'default': 4,
+ 'help': "size of twisted's reactor threadpool. It should probably be not too \
+much greater than connection-poolsize",
+ 'group': 'web', 'level': 3,
+ }),
+ ) + WebConfiguration.options)
+
+ def server_file(self):
+ return join(self.apphome, '%s-%s.py' % (self.appid, self.name))
+
+ def default_base_url(self):
+ from socket import getfqdn
+ return 'http://%s:%s/' % (self['host'] or getfqdn().lower(), self['port'] or 8080)
+
+
+try:
+ from cubicweb.server.serverconfig import ServerConfiguration
+
+ class AllInOneConfiguration(WebConfigurationBase, ServerConfiguration):
+ """repository and web instance in the same twisted process"""
+ name = 'all-in-one'
+ options = merge_options(WebConfigurationBase.options
+ + ServerConfiguration.options)
+
+ cubicweb_appobject_path = WebConfigurationBase.cubicweb_appobject_path | ServerConfiguration.cubicweb_appobject_path
+ cube_appobject_path = WebConfigurationBase.cube_appobject_path | ServerConfiguration.cube_appobject_path
+
+
+ CONFIGURATIONS.append(AllInOneConfiguration)
+
+except ImportError:
+ pass
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/etwist/twctl.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/etwist/twctl.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,79 @@
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""cubicweb-ctl handlers for twisted"""
+
+from cubicweb.toolsutils import CommandHandler
+from cubicweb.web.webctl import WebCreateHandler, WebUpgradeHandler
+
+# trigger configuration registration
+import cubicweb.etwist.twconfig # pylint: disable=W0611
+
+class TWCreateHandler(WebCreateHandler):
+ cfgname = 'twisted'
+
+class TWStartHandler(CommandHandler):
+ cmdname = 'start'
+ cfgname = 'twisted'
+
+ def start_server(self, config):
+ from cubicweb.etwist import server
+ return server.run(config)
+
+class TWStopHandler(CommandHandler):
+ cmdname = 'stop'
+ cfgname = 'twisted'
+
+ def poststop(self):
+ pass
+
+class TWUpgradeHandler(WebUpgradeHandler):
+ cfgname = 'twisted'
+
+
+try:
+ from cubicweb.server import serverctl
+ class AllInOneCreateHandler(serverctl.RepositoryCreateHandler,
+ TWCreateHandler):
+ """configuration to get an instance running in a twisted web server
+ integrating a repository server in the same process
+ """
+ cfgname = 'all-in-one'
+
+ def bootstrap(self, cubes, automatic=False, inputlevel=0):
+ """bootstrap this configuration"""
+ serverctl.RepositoryCreateHandler.bootstrap(self, cubes, automatic, inputlevel)
+ TWCreateHandler.bootstrap(self, cubes, automatic, inputlevel)
+
+ class AllInOneStartHandler(TWStartHandler):
+ cmdname = 'start'
+ cfgname = 'all-in-one'
+ subcommand = 'cubicweb-twisted'
+
+ class AllInOneStopHandler(CommandHandler):
+ cmdname = 'stop'
+ cfgname = 'all-in-one'
+ subcommand = 'cubicweb-twisted'
+
+ def poststop(self):
+ pass
+
+ class AllInOneUpgradeHandler(TWUpgradeHandler):
+ cfgname = 'all-in-one'
+
+except ImportError:
+ pass
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/ext/__init__.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/ext/__init__.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,17 @@
+# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/ext/html4zope.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/ext/html4zope.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,174 @@
+# Author: David Goodger
+# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+"""
+# Contact: goodger@users.sourceforge.net
+# Revision: $Revision: 1.2 $
+# Date: $Date: 2005-07-04 16:36:50 $
+# Copyright: This module has been placed in the public domain.
+
+"""
+Simple HyperText Markup Language document tree Writer.
+
+The output conforms to the HTML 4.01 Transitional DTD and to the Extensible
+HTML version 1.0 Transitional DTD (*almost* strict). The output contains a
+minimum of formatting information. A cascading style sheet ("default.css" by
+default) is required for proper viewing with a modern graphical browser.
+
+http://cvs.zope.org/Zope/lib/python/docutils/writers/Attic/html4zope.py?rev=1.1.2.2&only_with_tag=ajung-restructuredtext-integration-branch&content-type=text/vnd.viewcvs-markup
+"""
+
+__docformat__ = 'reStructuredText'
+
+import os
+
+from logilab.mtconverter import xml_escape
+
+from docutils import nodes
+from docutils.writers.html4css1 import Writer as CSS1Writer
+from docutils.writers.html4css1 import HTMLTranslator as CSS1HTMLTranslator
+
+default_level = int(os.environ.get('STX_DEFAULT_LEVEL', 3))
+
+class Writer(CSS1Writer):
+ """css writer using our html translator"""
+ def __init__(self, base_url):
+ CSS1Writer.__init__(self)
+ self.translator_class = URLBinder(base_url, HTMLTranslator)
+
+ def apply_template(self):
+ """overriding this is necessary with docutils >= 0.5"""
+ return self.visitor.astext()
+
+class URLBinder:
+ def __init__(self, url, klass):
+ self.base_url = url
+ self.translator_class = HTMLTranslator
+
+ def __call__(self, document):
+ translator = self.translator_class(document)
+ translator.base_url = self.base_url
+ return translator
+
+class HTMLTranslator(CSS1HTMLTranslator):
+ """ReST tree to html translator"""
+
+ def astext(self):
+ """return the extracted html"""
+ return ''.join(self.body)
+
+ def visit_title(self, node):
+ """Only 6 section levels are supported by HTML."""
+ if isinstance(node.parent, nodes.topic):
+ self.body.append(
+ self.starttag(node, 'p', '', CLASS='topic-title'))
+ if node.parent.hasattr('id'):
+ self.body.append(
+ self.starttag({}, 'a', '', name=node.parent['id']))
+            self.context.append('</a></p>\n')
+ def depart_document(self, node):
+        """syt: i don't want the enclosing <div class="document">"""
+
+ def visit_reference(self, node):
+ """syt: i want absolute urls"""
+ if 'refuri' in node:
+ href = node['refuri']
+ if ( self.settings.cloak_email_addresses
+ and href.startswith('mailto:')):
+ href = self.cloak_mailto(href)
+ self.in_mailto = 1
+ else:
+ assert 'refid' in node, \
+ 'References must have "refuri" or "refid" attribute.'
+ href = '%s#%s' % (self.base_url, node['refid'])
+ atts = {'href': href, 'class': 'reference'}
+ if not isinstance(node.parent, nodes.TextElement):
+ assert len(node) == 1 and isinstance(node[0], nodes.image)
+ atts['class'] += ' image-reference'
+ self.body.append(self.starttag(node, 'a', '', **atts))
+
+ ## override error messages to avoid XHTML problems ########################
+ def visit_problematic(self, node):
+ pass
+
+ def depart_problematic(self, node):
+ pass
+
+ def visit_system_message(self, node):
+ backref_text = ''
+ if len(node['backrefs']):
+ backrefs = node['backrefs']
+ if len(backrefs) == 1:
+ backref_text = '; backlink'
+ else:
+ i = 1
+ backlinks = []
+ for backref in backrefs:
+ backlinks.append(str(i))
+ i += 1
+ backref_text = ('; backlinks: %s'
+ % ', '.join(backlinks))
+ if node.hasattr('line'):
+ line = ', line %s' % node['line']
+ else:
+ line = ''
+ a_start = a_end = ''
+ error = u'System Message: %s%s/%s%s (%s %s)%s\n' % (
+ a_start, node['type'], node['level'], a_end,
+ self.encode(node['source']), line, backref_text)
+        self.body.append(u'<div class="system-message"><b>ReST / HTML errors:</b>%s</div>'
+                         % xml_escape(error))
+
+ def depart_system_message(self, node):
+ pass
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/ext/markdown.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/ext/markdown.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,27 @@
+from __future__ import absolute_import
+import markdown
+
+import logging
+
+log = logging.getLogger(__name__)
+
+
+def markdown_publish(context, data):
+ """publish a string formatted as MarkDown Text to HTML
+
+ :type context: a cubicweb application object
+
+ :type data: str
+ :param data: some MarkDown text
+
+ :rtype: unicode
+ :return:
+ the data formatted as HTML or the original data if an error occurred
+ """
+ md = markdown.Markdown()
+ try:
+ return md.convert(data)
+ except:
+ import traceback; traceback.print_exc()
+ log.exception("Error while converting Markdown to HTML")
+ return data
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/ext/rest.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/ext/rest.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,469 @@
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""rest publishing functions
+
+contains some functions and setup of docutils for cubicweb. Provides the
+following ReST directives:
+
+* `eid`, create link to entity in the repository by their eid
+
+* `card`, create link to card entity in the repository by their wikiid
+  (proposing to create it when the referred card doesn't exist yet)
+
+* `winclude`, reference to a web documentation file (in wdoc/ directories)
+
+* `sourcecode` (if pygments is installed), source code colorization
+
+* `rql-table`, create a table from a RQL query
+
+"""
+__docformat__ = "restructuredtext en"
+
+import sys
+from itertools import chain
+from logging import getLogger
+from os.path import join
+
+from six import text_type
+from six.moves.urllib.parse import urlsplit
+
+from docutils import statemachine, nodes, utils, io
+from docutils.core import Publisher
+from docutils.parsers.rst import Parser, states, directives, Directive
+from docutils.parsers.rst.roles import register_canonical_role, set_classes
+
+from logilab.mtconverter import ESC_UCAR_TABLE, ESC_CAR_TABLE, xml_escape
+
+from cubicweb import UnknownEid
+from cubicweb.ext.html4zope import Writer
+
+from cubicweb.web.views import vid_from_rset # XXX better not to import c.w.views here...
+
+# We provide our own parser as an attempt to get rid of
+# state machine reinstantiation
+
+import re
+# compile states.Body patterns
+for k, v in states.Body.patterns.items():
+ if isinstance(v, str):
+ states.Body.patterns[k] = re.compile(v)
+
+# register ReStructured Text mimetype / extensions
+import mimetypes
+mimetypes.add_type('text/rest', '.rest')
+mimetypes.add_type('text/rest', '.rst')
+
+
+LOGGER = getLogger('cubicweb.rest')
+
+
+def eid_reference_role(role, rawtext, text, lineno, inliner,
+ options={}, content=[]):
+ try:
+ try:
+ eid_num, rest = text.split(u':', 1)
+ except ValueError:
+ eid_num, rest = text, '#'+text
+ eid_num = int(eid_num)
+ if eid_num < 0:
+ raise ValueError
+ except ValueError:
+ msg = inliner.reporter.error(
+ 'EID number must be a positive number; "%s" is invalid.'
+ % text, line=lineno)
+ prb = inliner.problematic(rawtext, rawtext, msg)
+ return [prb], [msg]
+ # Base URL mainly used by inliner.pep_reference; so this is correct:
+ context = inliner.document.settings.context
+ try:
+ refedentity = context._cw.entity_from_eid(eid_num)
+ except UnknownEid:
+ ref = '#'
+ rest += u' ' + context._cw._('(UNEXISTANT EID)')
+ else:
+ ref = refedentity.absolute_url()
+ set_classes(options)
+ return [nodes.reference(rawtext, utils.unescape(rest), refuri=ref,
+ **options)], []
+
+
+def rql_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
+    """``:rql:`<rql-expression>``` or ``:rql:`<rql-expression>:<vid>```
+
+ Example: ``:rql:`Any X,Y WHERE X is CWUser, X login Y:table```
+
+ Replace the directive with the output of applying the view to the resultset
+ returned by the query.
+
+ "X eid %(userid)s" can be used in the RQL query for this query will be
+ executed with the argument {'userid': _cw.user.eid}.
+ """
+ _cw = inliner.document.settings.context._cw
+ text = text.strip()
+ if ':' in text:
+ rql, vid = text.rsplit(u':', 1)
+ rql = rql.strip()
+ else:
+ rql, vid = text, None
+ _cw.ensure_ro_rql(rql)
+ try:
+ rset = _cw.execute(rql, {'userid': _cw.user.eid})
+ if rset:
+ if vid is None:
+ vid = vid_from_rset(_cw, rset, _cw.vreg.schema)
+ else:
+ vid = 'noresult'
+ view = _cw.vreg['views'].select(vid, _cw, rset=rset)
+ content = view.render()
+ except Exception as exc:
+ content = 'an error occurred while interpreting this rql directive: %r' % exc
+ set_classes(options)
+ return [nodes.raw('', content, format='html')], []
+
+
+def bookmark_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
+    """``:bookmark:`<eid>``` or ``:bookmark:`<eid>:<vid>```
+
+ Example: ``:bookmark:`1234:table```
+
+ Replace the directive with the output of applying the view to the resultset
+ returned by the query stored in the bookmark. By default, the view is the one
+ stored in the bookmark, but it can be overridden by the directive as in the
+ example above.
+
+ "X eid %(userid)s" can be used in the RQL query stored in the Bookmark, for
+ this query will be executed with the argument {'userid': _cw.user.eid}.
+ """
+ _cw = inliner.document.settings.context._cw
+ text = text.strip()
+ try:
+ if ':' in text:
+ eid, vid = text.rsplit(u':', 1)
+ eid = int(eid)
+ else:
+ eid, vid = int(text), None
+ except ValueError:
+ msg = inliner.reporter.error(
+ 'EID number must be a positive number; "%s" is invalid.'
+ % text, line=lineno)
+ prb = inliner.problematic(rawtext, rawtext, msg)
+ return [prb], [msg]
+ try:
+ bookmark = _cw.entity_from_eid(eid)
+ except UnknownEid:
+ msg = inliner.reporter.error('Unknown EID %s.' % text, line=lineno)
+ prb = inliner.problematic(rawtext, rawtext, msg)
+ return [prb], [msg]
+ try:
+ params = dict(_cw.url_parse_qsl(urlsplit(bookmark.path).query))
+ rql = params['rql']
+ if vid is None:
+ vid = params.get('vid')
+ except (ValueError, KeyError) as exc:
+ msg = inliner.reporter.error('Could not parse bookmark path %s [%s].'
+ % (bookmark.path, exc), line=lineno)
+ prb = inliner.problematic(rawtext, rawtext, msg)
+ return [prb], [msg]
+ try:
+ rset = _cw.execute(rql, {'userid': _cw.user.eid})
+ if rset:
+ if vid is None:
+ vid = vid_from_rset(_cw, rset, _cw.vreg.schema)
+ else:
+ vid = 'noresult'
+ view = _cw.vreg['views'].select(vid, _cw, rset=rset)
+ content = view.render()
+ except Exception as exc:
+ content = 'An error occurred while interpreting directive bookmark: %r' % exc
+ set_classes(options)
+ return [nodes.raw('', content, format='html')], []
+
+
+def winclude_directive(name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ """Include a reST file as part of the content of this reST file.
+
+ same as standard include directive but using config.locate_doc_resource to
+ get actual file to include.
+
+ Most part of this implementation is copied from `include` directive defined
+ in `docutils.parsers.rst.directives.misc`
+ """
+ context = state.document.settings.context
+ cw = context._cw
+ source = state_machine.input_lines.source(
+ lineno - state_machine.input_offset - 1)
+ #source_dir = os.path.dirname(os.path.abspath(source))
+ fid = arguments[0]
+ for lang in chain((cw.lang, cw.vreg.property_value('ui.language')),
+ cw.vreg.config.available_languages()):
+ rid = '%s_%s.rst' % (fid, lang)
+ resourcedir = cw.vreg.config.locate_doc_file(rid)
+ if resourcedir:
+ break
+ else:
+ severe = state_machine.reporter.severe(
+ 'Problems with "%s" directive path:\nno resource matching %s.'
+ % (name, fid),
+ nodes.literal_block(block_text, block_text), line=lineno)
+ return [severe]
+ path = join(resourcedir, rid)
+ encoding = options.get('encoding', state.document.settings.input_encoding)
+ try:
+ state.document.settings.record_dependencies.add(path)
+ include_file = io.FileInput(
+ source_path=path, encoding=encoding,
+ error_handler=state.document.settings.input_encoding_error_handler,
+ handle_io_errors=None)
+ except IOError as error:
+ severe = state_machine.reporter.severe(
+ 'Problems with "%s" directive path:\n%s: %s.'
+ % (name, error.__class__.__name__, error),
+ nodes.literal_block(block_text, block_text), line=lineno)
+ return [severe]
+ try:
+ include_text = include_file.read()
+ except UnicodeError as error:
+ severe = state_machine.reporter.severe(
+ 'Problem with "%s" directive:\n%s: %s'
+ % (name, error.__class__.__name__, error),
+ nodes.literal_block(block_text, block_text), line=lineno)
+ return [severe]
+ if 'literal' in options:
+ literal_block = nodes.literal_block(include_text, include_text,
+ source=path)
+ literal_block.line = 1
+ return literal_block
+ else:
+ include_lines = statemachine.string2lines(include_text,
+ convert_whitespace=1)
+ state_machine.insert_input(include_lines, path)
+ return []
+
+winclude_directive.arguments = (1, 0, 1)
+winclude_directive.options = {'literal': directives.flag,
+ 'encoding': directives.encoding}
+
+
+class RQLTableDirective(Directive):
+ """rql-table directive
+
+ Example:
+
+ .. rql-table::
+ :vid: mytable
+ :headers: , , progress
+ :colvids: 2=progress
+
+ Any X,U,X WHERE X is Project, X url U
+
+    All fields but the RQL string are optional. The ``:headers:`` option can
+ contain empty column names.
+ """
+
+ required_arguments = 0
+ optional_arguments = 0
+ has_content= True
+ final_argument_whitespace = True
+ option_spec = {'vid': directives.unchanged,
+ 'headers': directives.unchanged,
+ 'colvids': directives.unchanged}
+
+ def run(self):
+ errid = "rql-table directive"
+ self.assert_has_content()
+ if self.arguments:
+ raise self.warning('%s does not accept arguments' % errid)
+ rql = ' '.join([l.strip() for l in self.content])
+ _cw = self.state.document.settings.context._cw
+ _cw.ensure_ro_rql(rql)
+ try:
+ rset = _cw.execute(rql)
+ except Exception as exc:
+ raise self.error("fail to execute RQL query in %s: %r" %
+ (errid, exc))
+ if not rset:
+ raise self.warning("empty result set")
+ vid = self.options.get('vid', 'table')
+ try:
+ view = _cw.vreg['views'].select(vid, _cw, rset=rset)
+ except Exception as exc:
+ raise self.error("fail to select '%s' view in %s: %r" %
+ (vid, errid, exc))
+ headers = None
+ if 'headers' in self.options:
+ headers = [h.strip() for h in self.options['headers'].split(',')]
+ while headers.count(''):
+ headers[headers.index('')] = None
+ if len(headers) != len(rset[0]):
+ raise self.error("the number of 'headers' does not match the "
+ "number of columns in %s" % errid)
+ cellvids = None
+ if 'colvids' in self.options:
+ cellvids = {}
+ for f in self.options['colvids'].split(','):
+ try:
+ idx, vid = f.strip().split('=')
+ except ValueError:
+ raise self.error("malformatted 'colvids' option in %s" %
+ errid)
+ cellvids[int(idx.strip())] = vid.strip()
+ try:
+ content = view.render(headers=headers, cellvids=cellvids)
+ except Exception as exc:
+ raise self.error("Error rendering %s (%s)" % (errid, exc))
+ return [nodes.raw('', content, format='html')]
+
+
+try:
+ from pygments import highlight
+ from pygments.lexers import get_lexer_by_name
+ from pygments.formatters.html import HtmlFormatter
+except ImportError:
+ pygments_directive = None
+else:
+ _PYGMENTS_FORMATTER = HtmlFormatter()
+
+ def pygments_directive(name, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ try:
+ lexer = get_lexer_by_name(arguments[0])
+ except ValueError:
+ # no lexer found
+ lexer = get_lexer_by_name('text')
+ parsed = highlight(u'\n'.join(content), lexer, _PYGMENTS_FORMATTER)
+ # don't fail if no context set on the sourcecode directive
+ try:
+ context = state.document.settings.context
+ context._cw.add_css('pygments.css')
+ except AttributeError:
+ # used outside cubicweb XXX use hasattr instead
+ pass
+ return [nodes.raw('', parsed, format='html')]
+
+ pygments_directive.arguments = (1, 0, 1)
+ pygments_directive.content = 1
+
+
+class CubicWebReSTParser(Parser):
+ """The (customized) reStructuredText parser."""
+
+ def __init__(self):
+ self.initial_state = 'Body'
+ self.state_classes = states.state_classes
+ self.inliner = states.Inliner()
+ self.statemachine = states.RSTStateMachine(
+ state_classes=self.state_classes,
+ initial_state=self.initial_state,
+ debug=0)
+
+ def parse(self, inputstring, document):
+ """Parse `inputstring` and populate `document`, a document tree."""
+ self.setup_parse(inputstring, document)
+ inputlines = statemachine.string2lines(inputstring,
+ convert_whitespace=1)
+ self.statemachine.run(inputlines, document, inliner=self.inliner)
+ self.finish_parse()
+
+
+# XXX docutils keep a ref on context, can't find a correct way to remove it
+class CWReSTPublisher(Publisher):
+ def __init__(self, context, settings, **kwargs):
+ Publisher.__init__(self, **kwargs)
+ self.set_components('standalone', 'restructuredtext', 'pseudoxml')
+ self.process_programmatic_settings(None, settings, None)
+ self.settings.context = context
+
+
+def rest_publish(context, data):
+ """publish a string formatted as ReStructured Text to HTML
+
+ :type context: a cubicweb application object
+
+ :type data: str
+ :param data: some ReST text
+
+ :rtype: unicode
+ :return:
+ the data formatted as HTML or the original data if an error occurred
+ """
+ req = context._cw
+ if isinstance(data, text_type):
+ encoding = 'unicode'
+ # remove unprintable characters unauthorized in xml
+ data = data.translate(ESC_UCAR_TABLE)
+ else:
+ encoding = req.encoding
+ # remove unprintable characters unauthorized in xml
+ data = data.translate(ESC_CAR_TABLE)
+ settings = {'input_encoding': encoding, 'output_encoding': 'unicode',
+ 'warning_stream': False,
+ 'traceback': True, # don't sys.exit
+ 'stylesheet': None, # don't try to embed stylesheet (may cause
+ # obscure bug due to docutils computing
+ # relative path according to the directory
+ # used *at import time*
+ # dunno what's the max, severe is 4, and we never want a crash
+ # (though try/except may be a better option...). May be the
+ # above traceback option will avoid this?
+ 'halt_level': 10,
+ # disable stupid switch to colspan=2 if field name is above a size limit
+ 'field_name_limit': sys.maxsize,
+ }
+ if context:
+ if hasattr(req, 'url'):
+ base_url = req.url()
+ elif hasattr(context, 'absolute_url'):
+ base_url = context.absolute_url()
+ else:
+ base_url = req.base_url()
+ else:
+ base_url = None
+ try:
+ pub = CWReSTPublisher(context, settings,
+ parser=CubicWebReSTParser(),
+ writer=Writer(base_url=base_url),
+ source_class=io.StringInput,
+ destination_class=io.StringOutput)
+ pub.set_source(data)
+ pub.set_destination()
+ res = pub.publish(enable_exit_status=None)
+ # necessary for proper garbage collection, else a ref is kept somewhere in docutils...
+ del pub.settings.context
+ return res
+ except BaseException:
+ LOGGER.exception('error while publishing ReST text')
+ if not isinstance(data, text_type):
+ data = text_type(data, encoding, 'replace')
+ return xml_escape(req._('error while publishing ReST text')
+ + '\n\n' + data)
+
+
+_INITIALIZED = False
+def cw_rest_init():
+ global _INITIALIZED
+ if _INITIALIZED:
+ return
+ _INITIALIZED = True
+ register_canonical_role('eid', eid_reference_role)
+ register_canonical_role('rql', rql_role)
+ register_canonical_role('bookmark', bookmark_role)
+ directives.register_directive('winclude', winclude_directive)
+ if pygments_directive is not None:
+ directives.register_directive('sourcecode', pygments_directive)
+ directives.register_directive('rql-table', RQLTableDirective)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/ext/tal.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/ext/tal.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,273 @@
+# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""provides simpleTAL extensions for CubicWeb
+
+"""
+
+__docformat__ = "restructuredtext en"
+
+import sys
+import re
+from os.path import exists, isdir, join
+from logging import getLogger
+from StringIO import StringIO
+
+from simpletal import simpleTAL, simpleTALES
+
+from logilab.common.decorators import cached
+
+LOGGER = getLogger('cubicweb.tal')
+
+
+class LoggerAdapter(object):
+ def __init__(self, tal_logger):
+ self.tal_logger = tal_logger
+
+ def debug(self, msg):
+ LOGGER.debug(msg)
+
+ def warn(self, msg):
+ LOGGER.warning(msg)
+
+ def __getattr__(self, attrname):
+ return getattr(self.tal_logger, attrname)
+
+
+class CubicWebContext(simpleTALES.Context):
+ """add facilities to access entity / resultset"""
+
+ def __init__(self, options=None, allowPythonPath=1):
+ simpleTALES.Context.__init__(self, options, allowPythonPath)
+ self.log = LoggerAdapter(self.log)
+
+ def update(self, context):
+ for varname, value in context.items():
+ self.addGlobal(varname, value)
+
+ def addRepeat(self, name, var, initialValue):
+ simpleTALES.Context.addRepeat(self, name, var, initialValue)
+
+# XXX FIXME need to find a clean way to define OPCODE values for extensions
+I18N_CONTENT = 18
+I18N_REPLACE = 19
+RQL_EXECUTE = 20
+# simpleTAL uses the OPCODE values to define priority over commands.
+# TAL_ITER should have the same priority as TAL_REPEAT (i.e. 3), but
+# we can't use the same OPCODE for two different commands without changing
+# the simpleTAL implementation. Another solution would be to totally override
+# the REPEAT implementation with the ITER one, but some specific operations
+# (involving len() for instance) are not implemented for ITER, so we prefer
+# to keep both implementations for now, and to fool simpleTAL by using a float
+# number between 3 and 4
+TAL_ITER = 3.1
+
+
+# FIX simpleTAL HTML 4.01 stupidity
+# (simpleTAL never closes tags like INPUT, IMG, HR ...)
+simpleTAL.HTML_FORBIDDEN_ENDTAG.clear()
+
+class CubicWebTemplateCompiler(simpleTAL.HTMLTemplateCompiler):
+ """extends default compiler by adding i18n:content commands"""
+
+ def __init__(self):
+ simpleTAL.HTMLTemplateCompiler.__init__(self)
+ self.commandHandler[I18N_CONTENT] = self.compile_cmd_i18n_content
+ self.commandHandler[I18N_REPLACE] = self.compile_cmd_i18n_replace
+ self.commandHandler[RQL_EXECUTE] = self.compile_cmd_rql
+ self.commandHandler[TAL_ITER] = self.compile_cmd_tal_iter
+
+ def setTALPrefix(self, prefix):
+ simpleTAL.TemplateCompiler.setTALPrefix(self, prefix)
+ self.tal_attribute_map['i18n:content'] = I18N_CONTENT
+ self.tal_attribute_map['i18n:replace'] = I18N_REPLACE
+ self.tal_attribute_map['rql:execute'] = RQL_EXECUTE
+ self.tal_attribute_map['tal:iter'] = TAL_ITER
+
+ def compile_cmd_i18n_content(self, argument):
+ # XXX tal:content structure=, text= should we support this ?
+ structure_flag = 0
+ return (I18N_CONTENT, (argument, False, structure_flag, self.endTagSymbol))
+
+ def compile_cmd_i18n_replace(self, argument):
+ # XXX tal:content structure=, text= should we support this ?
+ structure_flag = 0
+ return (I18N_CONTENT, (argument, True, structure_flag, self.endTagSymbol))
+
+ def compile_cmd_rql(self, argument):
+ return (RQL_EXECUTE, (argument, self.endTagSymbol))
+
+ def compile_cmd_tal_iter(self, argument):
+ original_id, (var_name, expression, end_tag_symbol) = \
+ simpleTAL.HTMLTemplateCompiler.compileCmdRepeat(self, argument)
+ return (TAL_ITER, (var_name, expression, self.endTagSymbol))
+
+ def getTemplate(self):
+ return CubicWebTemplate(self.commandList, self.macroMap, self.symbolLocationTable)
+
+ def compileCmdAttributes (self, argument):
+ """XXX modified to support single attribute
+ definition ending by a ';'
+
+ backport this to simpleTAL
+ """
+ # Compile tal:attributes into attribute command
+ # Argument: [(attributeName, expression)]
+
+ # Break up the list of attribute settings first
+ commandArgs = []
+ # We only want to match semi-colons that are not escaped
+ argumentSplitter = re.compile(r'(?.
+
+
+from cubicweb.web.views import tableview
+
+class CustomRsetTableView(tableview.RsetTableView):
+ __regid__ = 'mytable'
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/ext/test/requirements.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/ext/test/requirements.txt Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1 @@
+docutils
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/ext/test/unittest_rest.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/ext/test/unittest_rest.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,244 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+from six import PY3
+
+from logilab.common.testlib import unittest_main
+from cubicweb.devtools.testlib import CubicWebTC
+
+from cubicweb.ext.rest import rest_publish
+
+class RestTC(CubicWebTC):
+
+ def context(self, req):
+ return req.execute('CWUser X WHERE X login "admin"').get_entity(0, 0)
+
+ def test_eid_role(self):
+ with self.admin_access.web_request() as req:
+ context = self.context(req)
+ self.assertEqual(rest_publish(context, ':eid:`%s`' % context.eid),
+ '
\n')
+
+ def test_bad_rest_no_crash(self):
+ with self.admin_access.web_request() as req:
+ rest_publish(self.context(req), '''
+| card | implication |
+--------------------------
+| 1-1 | N1 = N2 |
+| 1-? | N1 <= N2 |
+| 1-+ | N1 >= N2 |
+| 1-* | N1>0 => N2>0 |
+--------------------------
+| ?-? | N1 # N2 |
+| ?-+ | N1 >= N2 |
+| ?-* | N1 # N2 |
+--------------------------
+| +-+ | N1>0 => N2>0 et |
+| | N2>0 => N1>0 |
+| +-* | N1>+ => N2>0 |
+--------------------------
+| *-* | N1#N2 |
+--------------------------
+
+''')
+
+ def test_disable_field_name_colspan(self):
+ with self.admin_access.web_request() as req:
+ context = self.context(req)
+ value = rest_publish(context, '''my field list:
+
+:a long dumb param name: value
+''')
+ self.assertNotIn('colspan', value)
+
+ def test_rql_role_with_vid(self):
+ with self.admin_access.web_request() as req:
+ context = self.context(req)
+ out = rest_publish(context, ':rql:`Any X WHERE X is CWUser:table`')
+ self.assertTrue(out.endswith('anon\n'
+ '
\n'))
+
+ def test_rql_role_with_vid_empty_rset(self):
+ with self.admin_access.web_request() as req:
+ context = self.context(req)
+ out = rest_publish(context, ':rql:`Any X WHERE X is CWUser, X login "nono":table`')
+ self.assertTrue(out.endswith('
'
+ 'No result matching query
\n\n'))
+
+ def test_rql_role_with_unknown_vid(self):
+ with self.admin_access.web_request() as req:
+ context = self.context(req)
+ out = rest_publish(context, ':rql:`Any X WHERE X is CWUser:toto`')
+ self.assertTrue(out.startswith("
an error occurred while interpreting this "
+ "rql directive: ObjectNotFound(%s'toto',)
" %
+ ('' if PY3 else 'u')),
+ out)
+
+ def test_rql_role_without_vid(self):
+ with self.admin_access.web_request() as req:
+ context = self.context(req)
+ out = rest_publish(context, ':rql:`Any X WHERE X is CWUser`')
+ self.assertEqual(out, u'
\n')
+
+ def test_bookmark_role(self):
+ with self.admin_access.web_request() as req:
+ context = self.context(req)
+ rset = req.execute('INSERT Bookmark X: X title "hello", X path '
+ '"/view?rql=Any X WHERE X is CWUser"')
+ eid = rset[0][0]
+ out = rest_publish(context, ':bookmark:`%s`' % eid)
+ self.assertEqual(out, u'
\n')
+
+ def test_rqltable_nocontent(self):
+ with self.admin_access.web_request() as req:
+ context = self.context(req)
+ out = rest_publish(context, """.. rql-table::""")
+ self.assertIn("System Message: ERROR", out)
+        self.assertIn('Content block expected for the "rql-table" '
+                      'directive; none found', out)
+
+ def test_rqltable_norset(self):
+ with self.admin_access.web_request() as req:
+ context = self.context(req)
+ rql = "Any X WHERE X is CWUser, X firstname 'franky'"
+ out = rest_publish(
+ context, """\
+.. rql-table::
+
+ %(rql)s""" % {'rql': rql})
+ self.assertIn("System Message: WARNING", out)
+ self.assertIn("empty result set", out)
+
+ def test_rqltable_nooptions(self):
+ with self.admin_access.web_request() as req:
+ rql = "Any S,F,L WHERE X is CWUser, X surname S, X firstname F, X login L"
+ out = rest_publish(
+ self.context(req), """\
+.. rql-table::
+
+ %(rql)s
+ """ % {'rql': rql})
+ view = self.vreg['views'].select('table', req, rset=req.execute(rql))
+ self.assertEqual(view.render(w=None)[49:], out[49:])
+
+ def test_rqltable_vid(self):
+ with self.admin_access.web_request() as req:
+ rql = "Any S,F,L WHERE X is CWUser, X surname S, X firstname F, X login L"
+ vid = 'mytable'
+ out = rest_publish(
+ self.context(req), """\
+.. rql-table::
+ :vid: %(vid)s
+
+ %(rql)s
+ """ % {'rql': rql, 'vid': vid})
+ view = self.vreg['views'].select(vid, req, rset=req.execute(rql))
+ self.assertEqual(view.render(w=None)[49:], out[49:])
+ self.assertIn(vid, out[:49])
+
+ def test_rqltable_badvid(self):
+ with self.admin_access.web_request() as req:
+ rql = "Any S,F,L WHERE X is CWUser, X surname S, X firstname F, X login L"
+ vid = 'mytabel'
+ out = rest_publish(
+ self.context(req), """\
+.. rql-table::
+ :vid: %(vid)s
+
+ %(rql)s
+ """ % {'rql': rql, 'vid': vid})
+ self.assertIn("fail to select '%s' view" % vid, out)
+
+ def test_rqltable_headers(self):
+ with self.admin_access.web_request() as req:
+ rql = "Any S,F,L WHERE X is CWUser, X surname S, X firstname F, X login L"
+ headers = ["nom", "prenom", "identifiant"]
+ out = rest_publish(
+ self.context(req), """\
+.. rql-table::
+ :headers: %(headers)s
+
+ %(rql)s
+ """ % {'rql': rql, 'headers': ', '.join(headers)})
+ view = self.vreg['views'].select('table', req, rset=req.execute(rql))
+ view.headers = headers
+ self.assertEqual(view.render(w=None)[49:], out[49:])
+
+ def test_rqltable_headers_missing(self):
+ with self.admin_access.web_request() as req:
+ rql = "Any S,F,L WHERE X is CWUser, X surname S, X firstname F, X login L"
+ headers = ["nom", "", "identifiant"]
+ out = rest_publish(
+ self.context(req), """\
+.. rql-table::
+ :headers: %(headers)s
+
+ %(rql)s
+ """ % {'rql': rql, 'headers': ', '.join(headers)})
+ view = self.vreg['views'].select('table', req, rset=req.execute(rql))
+ view.headers = [headers[0], None, headers[2]]
+ self.assertEqual(view.render(w=None)[49:], out[49:])
+
+ def test_rqltable_headers_missing_edges(self):
+ with self.admin_access.web_request() as req:
+ rql = "Any S,F,L WHERE X is CWUser, X surname S, X firstname F, X login L"
+ headers = [" ", "prenom", ""]
+ out = rest_publish(
+ self.context(req), """\
+.. rql-table::
+ :headers: %(headers)s
+
+ %(rql)s
+ """ % {'rql': rql, 'headers': ', '.join(headers)})
+ view = self.vreg['views'].select('table', req, rset=req.execute(rql))
+ view.headers = [None, headers[1], None]
+ self.assertEqual(view.render(w=None)[49:], out[49:])
+
+ def test_rqltable_colvids(self):
+ with self.admin_access.web_request() as req:
+ rql = "Any X,S,F,L WHERE X is CWUser, X surname S, X firstname F, X login L"
+ colvids = {0: "oneline"}
+ out = rest_publish(
+ self.context(req), """\
+.. rql-table::
+ :colvids: %(colvids)s
+
+ %(rql)s
+ """ % {'rql': rql,
+ 'colvids': ', '.join(["%d=%s" % (k, v)
+ for k, v in colvids.items()])
+ })
+ view = self.vreg['views'].select('table', req, rset=req.execute(rql))
+ view.cellvids = colvids
+ self.assertEqual(view.render(w=None)[49:], out[49:])
+
+
+if __name__ == '__main__':
+ unittest_main()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/hooks/__init__.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/__init__.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,84 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""core hooks registering some maintenance tasks at server startup time"""
+
+__docformat__ = "restructuredtext en"
+
+from datetime import timedelta, datetime
+
+from cubicweb.server import hook
+
+class TransactionsCleanupStartupHook(hook.Hook):
+ """start task to cleanup transaction data"""
+ __regid__ = 'cw.looping-tasks.transactions-cleanup'
+ events = ('server_startup',)
+
+ def __call__(self):
+ # XXX use named args and inner functions to avoid referencing globals
+ # which may cause reloading pb
+ lifetime = timedelta(days=self.repo.config['keep-transaction-lifetime'])
+ def cleanup_old_transactions(repo=self.repo, lifetime=lifetime):
+ mindate = datetime.utcnow() - lifetime
+ with repo.internal_cnx() as cnx:
+ cnx.system_sql(
+ 'DELETE FROM transactions WHERE tx_time < %(time)s',
+ {'time': mindate})
+ cnx.commit()
+ if self.repo.config['undo-enabled']:
+ self.repo.looping_task(60*60*24, cleanup_old_transactions,
+ self.repo)
+
+class UpdateFeedsStartupHook(hook.Hook):
+ """start task to update datafeed based sources"""
+ __regid__ = 'cw.looping-tasks.update-feeds'
+ events = ('server_startup',)
+
+ def __call__(self):
+ def update_feeds(repo):
+ # take a list to avoid iterating on a dictionary whose size may
+ # change
+ for uri, source in list(repo.sources_by_uri.items()):
+ if (uri == 'system'
+ or not repo.config.source_enabled(source)
+ or not source.config['synchronize']):
+ continue
+ with repo.internal_cnx() as cnx:
+ try:
+ source.pull_data(cnx)
+ except Exception as exc:
+ cnx.exception('while trying to update feed %s', source)
+ self.repo.looping_task(60, update_feeds, self.repo)
+
+
+class DataImportsCleanupStartupHook(hook.Hook):
+ """start task to cleanup old data imports (ie datafeed import logs)"""
+ __regid__ = 'cw.looping-tasks.dataimports-cleanup'
+ events = ('server_startup',)
+
+ def __call__(self):
+ def expire_dataimports(repo=self.repo):
+ for uri, source in repo.sources_by_uri.items():
+ if (uri == 'system'
+ or not repo.config.source_enabled(source)):
+ continue
+ with repo.internal_cnx() as cnx:
+ mindate = datetime.utcnow() - timedelta(seconds=source.config['logs-lifetime'])
+ cnx.execute('DELETE CWDataImport X WHERE X start_timestamp < %(time)s',
+ {'time': mindate})
+ cnx.commit()
+ self.repo.looping_task(60*60*24, expire_dataimports, self.repo)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/hooks/bookmark.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/bookmark.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,42 @@
+# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""bookmark related hooks"""
+
+__docformat__ = "restructuredtext en"
+
+from cubicweb.server import hook
+
+
+class AutoDeleteBookmarkOp(hook.Operation):
+    """precommit operation deleting a Bookmark entity once nobody bookmarks
+    it anymore (unless it was already deleted in the transaction)
+    """
+    bookmark = None # make pylint happy
+    def precommit_event(self):
+        if not self.cnx.deleted_in_transaction(self.bookmark.eid):
+            if not self.bookmark.bookmarked_by:
+                self.bookmark.cw_delete()
+
+
+class DelBookmarkedByHook(hook.Hook):
+    """delete a bookmark entity when its last bookmarked_by relation is
+    removed (actual deletion is delayed to precommit by AutoDeleteBookmarkOp)
+    """
+    __regid__ = 'autodelbookmark'
+    __select__ = hook.Hook.__select__ & hook.match_rtype('bookmarked_by',)
+    category = 'bookmark'
+    events = ('after_delete_relation',)
+
+    def __call__(self):
+        AutoDeleteBookmarkOp(self._cw,
+                             bookmark=self._cw.entity_from_eid(self.eidfrom))
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/hooks/email.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/email.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,80 @@
+# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""hooks to ensure use_email / primary_email relations consistency"""
+
+__docformat__ = "restructuredtext en"
+
+from cubicweb.server import hook
+
+
+class SetUseEmailRelationOp(hook.Operation):
+ """delay this operation to commit to avoid conflict with a late rql query
+ already setting the relation
+ """
+ rtype = 'use_email'
+ entity = email = None # make pylint happy
+
+ def condition(self):
+ """check entity has use_email set for the email address"""
+ return not any(e for e in self.entity.use_email
+ if self.email.eid == e.eid)
+
+ def precommit_event(self):
+ if self.cnx.deleted_in_transaction(self.entity.eid):
+ return
+ if self.cnx.deleted_in_transaction(self.email.eid):
+ return
+ if self.condition():
+ self.cnx.execute(
+ 'SET X %s Y WHERE X eid %%(x)s, Y eid %%(y)s' % self.rtype,
+ {'x': self.entity.eid, 'y': self.email.eid})
+
+
+class SetPrimaryEmailRelationOp(SetUseEmailRelationOp):
+ rtype = 'primary_email'
+
+ def condition(self):
+ """check entity has no primary_email set"""
+ return not self.entity.primary_email
+
+
+class SetPrimaryEmailHook(hook.Hook):
+    """when a use_email relation is added, also set it as primary_email if
+    the entity has no primary email address yet
+    """
+    __regid__ = 'setprimaryemail'
+    __select__ = hook.Hook.__select__ & hook.match_rtype('use_email')
+    category = 'email'
+    events = ('after_add_relation',)
+
+    def __call__(self):
+        entity = self._cw.entity_from_eid(self.eidfrom)
+        if 'primary_email' in entity.e_schema.subject_relations():
+            SetPrimaryEmailRelationOp(self._cw, entity=entity,
+                                      email=self._cw.entity_from_eid(self.eidto))
+
+class SetUseEmailHook(hook.Hook):
+ """notify when a bug or story or version has its state modified"""
+ __regid__ = 'setprimaryemail'
+ __select__ = hook.Hook.__select__ & hook.match_rtype('primary_email')
+ category = 'email'
+ events = ('after_add_relation',)
+
+ def __call__(self):
+ entity = self._cw.entity_from_eid(self.eidfrom)
+ if 'use_email' in entity.e_schema.subject_relations():
+ SetUseEmailRelationOp(self._cw, entity=entity,
+ email=self._cw.entity_from_eid(self.eidto))
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/hooks/integrity.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/integrity.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,347 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""Core hooks: check for data integrity according to the instance'schema
+validity
+"""
+
+__docformat__ = "restructuredtext en"
+from cubicweb import _
+
+from threading import Lock
+
+from six import text_type
+
+from cubicweb import validation_error, neg_role
+from cubicweb.schema import (META_RTYPES, WORKFLOW_RTYPES,
+ RQLConstraint, RQLUniqueConstraint)
+from cubicweb.predicates import is_instance, composite_etype
+from cubicweb.uilib import soup2xhtml
+from cubicweb.server import hook
+
+# special relations that don't have to be checked for integrity, usually
+# because they are handled internally by hooks (so we trust ourselves)
+DONT_CHECK_RTYPES_ON_ADD = META_RTYPES | WORKFLOW_RTYPES
+DONT_CHECK_RTYPES_ON_DEL = META_RTYPES | WORKFLOW_RTYPES
+
+_UNIQUE_CONSTRAINTS_LOCK = Lock()
+_UNIQUE_CONSTRAINTS_HOLDER = None
+
+
+def _acquire_unique_cstr_lock(cnx):
+ """acquire the _UNIQUE_CONSTRAINTS_LOCK for the cnx.
+
+ This lock used to avoid potential integrity pb when checking
+ RQLUniqueConstraint in two different transactions, as explained in
+ https://extranet.logilab.fr/3577926
+ """
+ if 'uniquecstrholder' in cnx.transaction_data:
+ return
+ _UNIQUE_CONSTRAINTS_LOCK.acquire()
+ cnx.transaction_data['uniquecstrholder'] = True
+ # register operation responsible to release the lock on commit/rollback
+ _ReleaseUniqueConstraintsOperation(cnx)
+
+def _release_unique_cstr_lock(cnx):
+ if 'uniquecstrholder' in cnx.transaction_data:
+ del cnx.transaction_data['uniquecstrholder']
+ _UNIQUE_CONSTRAINTS_LOCK.release()
+
+class _ReleaseUniqueConstraintsOperation(hook.Operation):
+    """release the global RQLUniqueConstraint lock once the transaction ends,
+    whether it is committed or rolled back
+    """
+    def postcommit_event(self):
+        _release_unique_cstr_lock(self.cnx)
+    def rollback_event(self):
+        _release_unique_cstr_lock(self.cnx)
+
+
+class _CheckRequiredRelationOperation(hook.DataOperationMixIn,
+ hook.LateOperation):
+ """checking relation cardinality has to be done after commit in case the
+ relation is being replaced
+ """
+ containercls = list
+ role = key = base_rql = None
+
+ def precommit_event(self):
+ cnx = self.cnx
+ pendingeids = cnx.transaction_data.get('pendingeids', ())
+ pendingrtypes = cnx.transaction_data.get('pendingrtypes', ())
+ for eid, rtype in self.get_data():
+ # recheck pending eids / relation types
+ if eid in pendingeids:
+ continue
+ if rtype in pendingrtypes:
+ continue
+ if not cnx.execute(self.base_rql % rtype, {'x': eid}):
+ etype = cnx.entity_metas(eid)['type']
+ msg = _('at least one relation %(rtype)s is required on '
+ '%(etype)s (%(eid)s)')
+ raise validation_error(eid, {(rtype, self.role): msg},
+ {'rtype': rtype, 'etype': etype, 'eid': eid},
+ ['rtype', 'etype'])
+
+
+class _CheckSRelationOp(_CheckRequiredRelationOperation):
+ """check required subject relation"""
+ role = 'subject'
+ base_rql = 'Any O WHERE S eid %%(x)s, S %s O'
+
+class _CheckORelationOp(_CheckRequiredRelationOperation):
+ """check required object relation"""
+ role = 'object'
+ base_rql = 'Any S WHERE O eid %%(x)s, S %s O'
+
+
+class IntegrityHook(hook.Hook):
+ __abstract__ = True
+ category = 'integrity'
+
+
+class _EnsureSymmetricRelationsAdd(hook.Hook):
+ """ ensure X r Y => Y r X iff r is symmetric """
+ __regid__ = 'cw.add_ensure_symmetry'
+ __abstract__ = True
+ category = 'activeintegrity'
+ events = ('after_add_relation',)
+ # __select__ is set in the registration callback
+
+ def __call__(self):
+ self._cw.repo.system_source.add_relation(self._cw, self.eidto,
+ self.rtype, self.eidfrom)
+
+
+class _EnsureSymmetricRelationsDelete(hook.Hook):
+ """ ensure X r Y => Y r X iff r is symmetric """
+ __regid__ = 'cw.delete_ensure_symmetry'
+ __abstract__ = True
+ category = 'activeintegrity'
+ events = ('after_delete_relation',)
+ # __select__ is set in the registration callback
+
+ def __call__(self):
+ self._cw.repo.system_source.delete_relation(self._cw, self.eidto,
+ self.rtype, self.eidfrom)
+
+
+class CheckCardinalityHookBeforeDeleteRelation(IntegrityHook):
+ """check cardinalities are satisfied"""
+ __regid__ = 'checkcard_before_delete_relation'
+ events = ('before_delete_relation',)
+
+ def __call__(self):
+ rtype = self.rtype
+ if rtype in DONT_CHECK_RTYPES_ON_DEL:
+ return
+ cnx = self._cw
+ eidfrom, eidto = self.eidfrom, self.eidto
+ rdef = cnx.rtype_eids_rdef(rtype, eidfrom, eidto)
+ if (rdef.subject, rtype, rdef.object) in cnx.transaction_data.get('pendingrdefs', ()):
+ return
+ card = rdef.cardinality
+ if card[0] in '1+' and not cnx.deleted_in_transaction(eidfrom):
+ _CheckSRelationOp.get_instance(cnx).add_data((eidfrom, rtype))
+ if card[1] in '1+' and not cnx.deleted_in_transaction(eidto):
+ _CheckORelationOp.get_instance(cnx).add_data((eidto, rtype))
+
+
+class CheckCardinalityHookAfterAddEntity(IntegrityHook):
+ """check cardinalities are satisfied"""
+ __regid__ = 'checkcard_after_add_entity'
+ events = ('after_add_entity',)
+
+ def __call__(self):
+ eid = self.entity.eid
+ eschema = self.entity.e_schema
+ for rschema, targetschemas, role in eschema.relation_definitions():
+ # skip automatically handled relations
+ if rschema.type in DONT_CHECK_RTYPES_ON_ADD:
+ continue
+ rdef = rschema.role_rdef(eschema, targetschemas[0], role)
+ if rdef.role_cardinality(role) in '1+':
+ if role == 'subject':
+ op = _CheckSRelationOp.get_instance(self._cw)
+ else:
+ op = _CheckORelationOp.get_instance(self._cw)
+ op.add_data((eid, rschema.type))
+
+
+class _CheckConstraintsOp(hook.DataOperationMixIn, hook.LateOperation):
+ """ check a new relation satisfy its constraints """
+ containercls = list
+ def precommit_event(self):
+ cnx = self.cnx
+ for values in self.get_data():
+ eidfrom, rtype, eidto, constraints = values
+ # first check related entities have not been deleted in the same
+ # transaction
+ if cnx.deleted_in_transaction(eidfrom):
+ continue
+ if cnx.deleted_in_transaction(eidto):
+ continue
+ for constraint in constraints:
+ # XXX
+ # * lock RQLConstraint as well?
+ # * use a constraint id to use per constraint lock and avoid
+ # unnecessary commit serialization ?
+ if isinstance(constraint, RQLUniqueConstraint):
+ _acquire_unique_cstr_lock(cnx)
+ try:
+ constraint.repo_check(cnx, eidfrom, rtype, eidto)
+ except NotImplementedError:
+ self.critical('can\'t check constraint %s, not supported',
+ constraint)
+
+
+class CheckConstraintHook(IntegrityHook):
+ """check the relation satisfy its constraints
+
+ this is delayed to a precommit time operation since other relation which
+ will make constraint satisfied (or unsatisfied) may be added later.
+ """
+ __regid__ = 'checkconstraint'
+ events = ('after_add_relation',)
+
+ def __call__(self):
+ # XXX get only RQL[Unique]Constraints?
+ rdef = self._cw.rtype_eids_rdef(self.rtype, self.eidfrom, self.eidto)
+ constraints = rdef.constraints
+ if constraints:
+ _CheckConstraintsOp.get_instance(self._cw).add_data(
+ (self.eidfrom, self.rtype, self.eidto, constraints))
+
+
+class CheckAttributeConstraintHook(IntegrityHook):
+ """check the attribute relation satisfy its constraints
+
+ this is delayed to a precommit time operation since other relation which
+ will make constraint satisfied (or unsatisfied) may be added later.
+ """
+ __regid__ = 'checkattrconstraint'
+ events = ('after_add_entity', 'after_update_entity')
+
+ def __call__(self):
+ eschema = self.entity.e_schema
+ for attr in self.entity.cw_edited:
+ if eschema.subjrels[attr].final:
+ constraints = [c for c in eschema.rdef(attr).constraints
+ if isinstance(c, (RQLUniqueConstraint, RQLConstraint))]
+ if constraints:
+ _CheckConstraintsOp.get_instance(self._cw).add_data(
+ (self.entity.eid, attr, None, constraints))
+
+
+class CheckUniqueHook(IntegrityHook):
+ __regid__ = 'checkunique'
+ events = ('before_add_entity', 'before_update_entity')
+
+ def __call__(self):
+ entity = self.entity
+ eschema = entity.e_schema
+ for attr, val in entity.cw_edited.items():
+ if eschema.subjrels[attr].final and eschema.has_unique_values(attr):
+ if val is None:
+ continue
+ rql = '%s X WHERE X %s %%(val)s' % (entity.e_schema, attr)
+ rset = self._cw.execute(rql, {'val': val})
+ if rset and rset[0][0] != entity.eid:
+ msg = _('the value "%s" is already used, use another one')
+ raise validation_error(entity, {(attr, 'subject'): msg},
+ (val,))
+
+
+class DontRemoveOwnersGroupHook(IntegrityHook):
+    """prevent deletion or renaming of the special 'owners' group"""
+    __regid__ = 'checkownersgroup'
+    __select__ = IntegrityHook.__select__ & is_instance('CWGroup')
+    events = ('before_delete_entity', 'before_update_entity')
+
+    def __call__(self):
+        entity = self.entity
+        if self.event == 'before_delete_entity' and entity.name == 'owners':
+            raise validation_error(entity, {None: _("can't be deleted")})
+        elif self.event == 'before_update_entity' \
+                and 'name' in entity.cw_edited:
+            oldname, newname = entity.cw_edited.oldnewvalue('name')
+            if oldname == 'owners' and newname != oldname:
+                raise validation_error(entity, {('name', 'subject'): _("can't be changed")})
+
+
+class TidyHtmlFields(IntegrityHook):
+ """tidy HTML in rich text strings"""
+ __regid__ = 'htmltidy'
+ events = ('before_add_entity', 'before_update_entity')
+
+ def __call__(self):
+ entity = self.entity
+ metaattrs = entity.e_schema.meta_attributes()
+ edited = entity.cw_edited
+ for metaattr, (metadata, attr) in metaattrs.items():
+ if metadata == 'format' and attr in edited:
+ try:
+ value = edited[attr]
+ except KeyError:
+ continue # no text to tidy
+ if isinstance(value, text_type): # filter out None and Binary
+ if getattr(entity, str(metaattr)) == 'text/html':
+ edited[attr] = soup2xhtml(value, self._cw.encoding)
+
+
+class StripCWUserLoginHook(IntegrityHook):
+ """ensure user logins are stripped"""
+ __regid__ = 'stripuserlogin'
+ __select__ = IntegrityHook.__select__ & is_instance('CWUser')
+ events = ('before_add_entity', 'before_update_entity',)
+
+ def __call__(self):
+ login = self.entity.cw_edited.get('login')
+ if login:
+ self.entity.cw_edited['login'] = login.strip()
+
+
+class DeleteCompositeOrphanHook(hook.Hook):
+ """Delete the composed of a composite relation when the composite is
+ deleted (this is similar to the cascading ON DELETE CASCADE
+ semantics of sql).
+ """
+ __regid__ = 'deletecomposite'
+ __select__ = hook.Hook.__select__ & composite_etype()
+ events = ('before_delete_entity',)
+ category = 'activeintegrity'
+ # give the application's before_delete_entity hooks a chance to run before we cascade
+ order = 99
+
+ def __call__(self):
+ eid = self.entity.eid
+ for rdef, role in self.entity.e_schema.composite_rdef_roles:
+ rtype = rdef.rtype.type
+ target = getattr(rdef, neg_role(role))
+ expr = ('C %s X' % rtype) if role == 'subject' else ('X %s C' % rtype)
+ self._cw.execute('DELETE %s X WHERE C eid %%(c)s, %s' % (target, expr),
+ {'c': eid})
+
+
+def registration_callback(vreg):
+ vreg.register_all(globals().values(), __name__)
+ symmetric_rtypes = [rschema.type for rschema in vreg.schema.relations()
+ if rschema.symmetric]
+ class EnsureSymmetricRelationsAdd(_EnsureSymmetricRelationsAdd):
+ __select__ = _EnsureSymmetricRelationsAdd.__select__ & hook.match_rtype(*symmetric_rtypes)
+ vreg.register(EnsureSymmetricRelationsAdd)
+ class EnsureSymmetricRelationsDelete(_EnsureSymmetricRelationsDelete):
+ __select__ = _EnsureSymmetricRelationsDelete.__select__ & hook.match_rtype(*symmetric_rtypes)
+ vreg.register(EnsureSymmetricRelationsDelete)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/hooks/logstats.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/logstats.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,59 @@
+# copyright 2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+
+"""looping task for dumping instance's stats in a file
+"""
+
+__docformat__ = "restructuredtext en"
+
+from datetime import datetime
+import json
+
+from cubicweb.server import hook
+
+class LogStatsStartHook(hook.Hook):
+ """register task to regularly dump instance's stats in a file
+
+ data are stored as one json entry per row
+ """
+ __regid__ = 'cubicweb.hook.logstats.start'
+ events = ('server_startup',)
+
+ def __call__(self):
+ interval = self.repo.config.get('logstat-interval', 0)
+ if interval <= 0:
+ return
+
+ def dump_stats(repo):
+ statsfile = repo.config.get('logstat-file')
+ with repo.internal_cnx() as cnx:
+ stats = cnx.call_service('repo_stats')
+ gcstats = cnx.call_service('repo_gc_stats', nmax=5)
+
+ allstats = {'resources': stats,
+ 'memory': gcstats,
+ 'timestamp': datetime.utcnow().isoformat(),
+ }
+ try:
+ with open(statsfile, 'ab') as ofile:
+ json.dump(allstats, ofile)
+ ofile.write('\n')
+ except IOError:
+ repo.warning('Cannot open stats file for writing: %s', statsfile)
+
+ self.repo.looping_task(interval, dump_stats, self.repo)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/hooks/metadata.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/metadata.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,219 @@
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""Core hooks: set generic metadata"""
+
+__docformat__ = "restructuredtext en"
+
+from datetime import datetime
+from base64 import b64encode
+
+from pytz import utc
+
+from cubicweb.predicates import is_instance
+from cubicweb.server import hook
+from cubicweb.server.edition import EditedEntity
+
+
+class MetaDataHook(hook.Hook):
+ __abstract__ = True
+ category = 'metadata'
+
+
+class InitMetaAttrsHook(MetaDataHook):
+ """before create a new entity -> set creation and modification date
+
+ this is a conveniency hook, you shouldn't have to disable it
+ """
+ __regid__ = 'metaattrsinit'
+ events = ('before_add_entity',)
+
+ def __call__(self):
+ timestamp = datetime.now(utc)
+ edited = self.entity.cw_edited
+ if not edited.get('creation_date'):
+ edited['creation_date'] = timestamp
+ if not edited.get('modification_date'):
+ edited['modification_date'] = timestamp
+ if not self._cw.transaction_data.get('do-not-insert-cwuri'):
+ cwuri = u'%s%s' % (self._cw.base_url(), self.entity.eid)
+ edited.setdefault('cwuri', cwuri)
+
+
+class UpdateMetaAttrsHook(MetaDataHook):
+ """update an entity -> set modification date"""
+ __regid__ = 'metaattrsupdate'
+ events = ('before_update_entity',)
+
+ def __call__(self):
+ # repairing is true during c-c upgrade/shell and similar commands. We
+ # usually don't want to update modification date in such cases.
+ #
+ # XXX to be really clean, we should turn off modification_date update
+ # explicitly on each command where we do not want that behaviour.
+ if not self._cw.vreg.config.repairing:
+ self.entity.cw_edited.setdefault('modification_date', datetime.now(utc))
+
+
+class SetCreatorOp(hook.DataOperationMixIn, hook.Operation):
+
+ def precommit_event(self):
+ cnx = self.cnx
+ relations = [(eid, cnx.user.eid) for eid in self.get_data()
+ # don't consider entities that have been created and deleted in
+ # the same transaction, nor ones where created_by has been
+ # explicitly set
+ if not cnx.deleted_in_transaction(eid) and \
+ not cnx.entity_from_eid(eid).created_by]
+ cnx.add_relations([('created_by', relations)])
+
+
+class SetOwnershipHook(MetaDataHook):
+ """create a new entity -> set owner and creator metadata"""
+ __regid__ = 'setowner'
+ events = ('after_add_entity',)
+
+ def __call__(self):
+ if not self._cw.is_internal_session:
+ self._cw.add_relation(self.entity.eid, 'owned_by', self._cw.user.eid)
+ SetCreatorOp.get_instance(self._cw).add_data(self.entity.eid)
+
+
+class SyncOwnersOp(hook.DataOperationMixIn, hook.Operation):
+ def precommit_event(self):
+ for compositeeid, composedeid in self.get_data():
+ if self.cnx.deleted_in_transaction(compositeeid):
+ continue
+ if self.cnx.deleted_in_transaction(composedeid):
+ continue
+ self.cnx.execute('SET X owned_by U WHERE C owned_by U, C eid %(c)s,'
+ 'NOT EXISTS(X owned_by U, X eid %(x)s)',
+ {'c': compositeeid, 'x': composedeid})
+
+
+class SyncCompositeOwner(MetaDataHook):
+ """when adding composite relation, the composed should have the same owners
+ has the composite
+ """
+ __regid__ = 'synccompositeowner'
+ events = ('after_add_relation',)
+
+ def __call__(self):
+ if self.rtype == 'wf_info_for':
+ # skip this special composite relation # XXX (syt) why?
+ return
+ eidfrom, eidto = self.eidfrom, self.eidto
+ composite = self._cw.rtype_eids_rdef(self.rtype, eidfrom, eidto).composite
+ if composite == 'subject':
+ SyncOwnersOp.get_instance(self._cw).add_data( (eidfrom, eidto) )
+ elif composite == 'object':
+ SyncOwnersOp.get_instance(self._cw).add_data( (eidto, eidfrom) )
+
+
+class FixUserOwnershipHook(MetaDataHook):
+ """when a user has been created, add owned_by relation on itself"""
+ __regid__ = 'fixuserowner'
+ __select__ = MetaDataHook.__select__ & is_instance('CWUser')
+ events = ('after_add_entity',)
+
+ def __call__(self):
+ self._cw.add_relation(self.entity.eid, 'owned_by', self.entity.eid)
+
+
+class UpdateFTIHook(MetaDataHook):
+ """sync fulltext index text index container when a relation with
+ fulltext_container set is added / removed
+ """
+ __regid__ = 'updateftirel'
+ events = ('after_add_relation', 'after_delete_relation')
+
+ def __call__(self):
+ rtype = self.rtype
+ cnx = self._cw
+ ftcontainer = cnx.vreg.schema.rschema(rtype).fulltext_container
+ if ftcontainer == 'subject':
+ cnx.repo.system_source.index_entity(
+ cnx, cnx.entity_from_eid(self.eidfrom))
+ elif ftcontainer == 'object':
+ cnx.repo.system_source.index_entity(
+ cnx, cnx.entity_from_eid(self.eidto))
+
+
+
+# entity source handling #######################################################
+
+class ChangeEntitySourceUpdateCaches(hook.Operation):
+ oldsource = newsource = entity = None # make pylint happy
+
+ def postcommit_event(self):
+ self.oldsource.reset_caches()
+ repo = self.cnx.repo
+ entity = self.entity
+ extid = entity.cw_metainformation()['extid']
+ repo._type_source_cache[entity.eid] = (
+ entity.cw_etype, None, self.newsource.uri)
+ repo._extid_cache[extid] = -entity.eid
+
+
+class ChangeEntitySourceDeleteHook(MetaDataHook):
+ """support for moving an entity from an external source by watching 'Any
+ cw_source CWSource' relation
+ """
+
+ __regid__ = 'cw.metadata.source-change'
+ __select__ = MetaDataHook.__select__ & hook.match_rtype('cw_source')
+ events = ('before_delete_relation',)
+
+ def __call__(self):
+ if (self._cw.deleted_in_transaction(self.eidfrom)
+ or self._cw.deleted_in_transaction(self.eidto)):
+ return
+ schange = self._cw.transaction_data.setdefault('cw_source_change', {})
+ schange[self.eidfrom] = self.eidto
+
+
+class ChangeEntitySourceAddHook(MetaDataHook):
+    """second half of entity source change: when the new cw_source relation
+    is added, actually move the entity to the system source (the old relation
+    deletion was recorded in transaction data by ChangeEntitySourceDeleteHook)
+    """
+    __regid__ = 'cw.metadata.source-change'
+    __select__ = MetaDataHook.__select__ & hook.match_rtype('cw_source')
+    events = ('before_add_relation',)
+
+    def __call__(self):
+        schange = self._cw.transaction_data.get('cw_source_change')
+        if schange is not None and self.eidfrom in schange:
+            newsource = self._cw.entity_from_eid(self.eidto)
+            if newsource.name != 'system':
+                raise Exception('changing source to something else than the '
+                                'system source is unsupported')
+            syssource = newsource.repo_source
+            oldsource = self._cw.entity_from_eid(schange[self.eidfrom])
+            entity = self._cw.entity_from_eid(self.eidfrom)
+            # we don't want the moved entity to be reimported later. To
+            # distinguish this state, move the record from the 'entities' table
+            # to 'moved_entities'. External source will then have to consider
+            # the case where `extid2eid` returns a negative eid as 'this entity
+            # was known but has been moved, ignore it'.
+            extid = self._cw.entity_metas(entity.eid)['extid']
+            assert extid is not None
+            attrs = {'eid': entity.eid, 'extid': b64encode(extid).decode('ascii')}
+            self._cw.system_sql(syssource.sqlgen.insert('moved_entities', attrs), attrs)
+            attrs = {'type': entity.cw_etype, 'eid': entity.eid, 'extid': None,
+                     'asource': 'system'}
+            self._cw.system_sql(syssource.sqlgen.update('entities', attrs, ['eid']), attrs)
+            # register an operation to update repository/sources caches
+            ChangeEntitySourceUpdateCaches(self._cw, entity=entity,
+                                           oldsource=oldsource.repo_source,
+                                           newsource=syssource)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/hooks/notification.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/notification.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,244 @@
+# copyright 2003-2015 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""some hooks to handle notification on entity's changes"""
+
+__docformat__ = "restructuredtext en"
+
+from logilab.common.textutils import normalize_text
+from logilab.common.deprecation import deprecated
+
+from cubicweb import RegistryNotFound
+from cubicweb.predicates import is_instance
+from cubicweb.server import hook
+from cubicweb.sobjects.supervising import SupervisionMailOp
+
+
+@deprecated('[3.17] use notify_on_commit instead')
+def RenderAndSendNotificationView(cnx, view, viewargs=None):
+ notify_on_commit(cnx, view, viewargs)
+
+
+def notify_on_commit(cnx, view, viewargs=None):
+ """register a notification view (see
+ :class:`~cubicweb.sobjects.notification.NotificationView`) to be sent at
+ post-commit time, ie only if the transaction has succeeded.
+
+ `viewargs` is an optional dictionary containing extra argument to be given
+ to :meth:`~cubicweb.sobjects.notification.NotificationView.render_and_send`
+ """
+ if viewargs is None:
+ viewargs = {}
+ notif_op = _RenderAndSendNotificationOp.get_instance(cnx)
+ notif_op.add_data((view, viewargs))
+
+
+class _RenderAndSendNotificationOp(hook.DataOperationMixIn, hook.Operation):
+ """End of the notification chain. Do render and send views after commit
+
+ All others Operations end up adding data to this Operation.
+ The notification are done on ``postcommit_event`` to make sure to prevent
+ sending notification about rolled back data.
+ """
+
+ containercls = list
+
+ def postcommit_event(self):
+ deleted = self.cnx.deleted_in_transaction
+ for view, viewargs in self.get_data():
+ if view.cw_rset is not None:
+ if not view.cw_rset:
+ # entity added and deleted in the same transaction
+ # (cache effect)
+ continue
+ elif deleted(view.cw_rset[view.cw_row or 0][view.cw_col or 0]):
+ # entity added and deleted in the same transaction
+ continue
+ try:
+ view.render_and_send(**viewargs)
+ except Exception:
+ # error in post commit are not propagated
+ # We keep this logic here to prevent a small notification error
+ # to prevent them all.
+ self.exception('Notification failed')
+
+
+class NotificationHook(hook.Hook):
+ __abstract__ = True
+ category = 'notification'
+
+ def select_view(self, vid, rset, row=0, col=0):
+ try:
+ return self._cw.vreg['views'].select_or_none(vid, self._cw, rset=rset,
+ row=row, col=col)
+ except RegistryNotFound: # can happen in some config
+ # (e.g. repo only config with no
+ # notification views registered by
+ # the instance's cubes)
+ return None
+
+
+class StatusChangeHook(NotificationHook):
+ """notify when a workflowable entity has its state modified"""
+ __regid__ = 'notifystatuschange'
+ __select__ = NotificationHook.__select__ & is_instance('TrInfo')
+ events = ('after_add_entity',)
+
+ def __call__(self):
+ entity = self.entity
+ if not entity.from_state: # not a transition
+ return
+ rset = entity.related('wf_info_for')
+ view = self.select_view('notif_status_change', rset=rset, row=0)
+ if view is None:
+ return
+ comment = entity.printable_value('comment', format='text/plain')
+ # XXX don't try to wrap rest until we've a proper transformation (see
+ # #103822)
+ if comment and entity.comment_format != 'text/rest':
+ comment = normalize_text(comment, 80)
+ viewargs = {'comment': comment,
+ 'previous_state': entity.previous_state.name,
+ 'current_state': entity.new_state.name}
+ notify_on_commit(self._cw, view, viewargs=viewargs)
+
+class RelationChangeHook(NotificationHook):
+ __regid__ = 'notifyrelationchange'
+ events = ('before_add_relation', 'after_add_relation',
+ 'before_delete_relation', 'after_delete_relation')
+
+ def __call__(self):
+ """if a notification view is defined for the event, send notification
+ email defined by the view
+ """
+ rset = self._cw.eid_rset(self.eidfrom)
+ view = self.select_view('notif_%s_%s' % (self.event, self.rtype),
+ rset=rset, row=0)
+ if view is None:
+ return
+ notify_on_commit(self._cw, view)
+
+
+class EntityChangeHook(NotificationHook):
+ """if a notification view is defined for the event, send notification
+ email defined by the view
+ """
+ __regid__ = 'notifyentitychange'
+ events = ('after_add_entity', 'after_update_entity')
+
+ def __call__(self):
+ rset = self.entity.as_rset()
+ view = self.select_view('notif_%s' % self.event, rset=rset, row=0)
+ if view is None:
+ return
+ notify_on_commit(self._cw, view)
+
+
+class EntityUpdatedNotificationOp(hook.SingleLastOperation):
+ """scrap all changed entity to prepare a Notification Operation for them"""
+
+ def precommit_event(self):
+ # precommit event that creates postcommit operation
+ cnx = self.cnx
+ for eid in cnx.transaction_data['changes']:
+ view = cnx.vreg['views'].select('notif_entity_updated', cnx,
+ rset=cnx.eid_rset(eid),
+ row=0)
+ notify_on_commit(self.cnx, view,
+ viewargs={'changes': cnx.transaction_data['changes'][eid]})
+
+
+class EntityUpdateHook(NotificationHook):
+ __regid__ = 'notifentityupdated'
+ __abstract__ = True # do not register by default
+ __select__ = NotificationHook.__select__ & hook.issued_from_user_query()
+ events = ('before_update_entity',)
+ skip_attrs = set()
+
+ def __call__(self):
+ cnx = self._cw
+ if cnx.added_in_transaction(self.entity.eid):
+ return # entity is being created
+ # then compute changes
+ attrs = [k for k in self.entity.cw_edited
+ if not k in self.skip_attrs]
+ if not attrs:
+ return
+ changes = cnx.transaction_data.setdefault('changes', {})
+ thisentitychanges = changes.setdefault(self.entity.eid, set())
+ rqlsel, rqlrestr = [], ['X eid %(x)s']
+ for i, attr in enumerate(attrs):
+ var = chr(65+i)
+ rqlsel.append(var)
+ rqlrestr.append('X %s %s' % (attr, var))
+ rql = 'Any %s WHERE %s' % (','.join(rqlsel), ','.join(rqlrestr))
+ rset = cnx.execute(rql, {'x': self.entity.eid})
+ for i, attr in enumerate(attrs):
+ oldvalue = rset[0][i]
+ newvalue = self.entity.cw_edited[attr]
+ if oldvalue != newvalue:
+ thisentitychanges.add((attr, oldvalue, newvalue))
+ if thisentitychanges:
+ EntityUpdatedNotificationOp(cnx)
+
+
+# supervising ##################################################################
+
+class SomethingChangedHook(NotificationHook):
+ __regid__ = 'supervising'
+ __select__ = NotificationHook.__select__ & hook.issued_from_user_query()
+ events = ('before_add_relation', 'before_delete_relation',
+ 'after_add_entity', 'before_update_entity')
+
+ def __call__(self):
+ dest = self._cw.vreg.config['supervising-addrs']
+ if not dest: # no supervisors, don't do this for nothing...
+ return
+ if self._call():
+ SupervisionMailOp(self._cw)
+
+ def _call(self):
+ event = self.event.split('_', 1)[1]
+ if event == 'update_entity':
+ if self._cw.added_in_transaction(self.entity.eid):
+ return False
+ if self.entity.e_schema == 'CWUser':
+ if not (frozenset(self.entity.cw_edited)
+ - frozenset(('eid', 'modification_date',
+ 'last_login_time'))):
+ # don't record last_login_time update which are done
+ # automatically at login time
+ return False
+ self._cw.transaction_data.setdefault('pendingchanges', []).append(
+ (event, self))
+ return True
+
+
+class EntityDeleteHook(SomethingChangedHook):
+ __regid__ = 'supervisingentitydel'
+ events = ('before_delete_entity',)
+
+ def _call(self):
+ try:
+ title = self.entity.dc_title()
+ except Exception:
+ # may raise an error during deletion process, for instance due to
+ # missing required relation
+ title = '#%s' % self.entity.eid
+ self._cw.transaction_data.setdefault('pendingchanges', []).append(
+ ('delete_entity', (self.entity.eid, self.entity.cw_etype, title)))
+ return True
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/hooks/security.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/security.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,209 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""Security hooks: check permissions to add/delete/update entities according to
+the connected user
+"""
+
+__docformat__ = "restructuredtext en"
+from warnings import warn
+
+from logilab.common.registry import objectify_predicate
+
+from yams import buildobjs
+
+from cubicweb import Unauthorized
+from cubicweb.server import BEFORE_ADD_RELATIONS, ON_COMMIT_ADD_RELATIONS, hook
+
+
+
def check_entity_attributes(cnx, entity, action, editedattrs=None):
    """Check the connected user's permission to perform `action` ('add',
    'update' or 'delete') on `entity`.

    For 'add'/'update', attribute-level permissions are checked for each
    edited attribute; the entity-level permission is checked (at most once)
    for attributes relying on the default permission rule.  Raises
    `Unauthorized` when permission is denied.
    """
    eid = entity.eid
    eschema = entity.e_schema
    if action == 'delete':
        # BUG FIX: this line referenced the undefined name `session` instead
        # of `cnx`, raising NameError whenever the delete branch was taken
        eschema.check_perm(cnx, action, eid=eid)
        return
    # ._cw_skip_security_attributes is there to bypass security for attributes
    # set by hooks by modifying the entity's dictionary
    if editedattrs is None:
        editedattrs = entity.cw_edited
    dontcheck = editedattrs.skip_security
    etypechecked = False
    for attr in editedattrs:
        if attr in dontcheck:
            continue
        rdef = eschema.rdef(attr, takefirst=True)
        if rdef.final: # non final relation are checked by standard hooks
            perms = rdef.permissions.get(action)
            # comparison below works because the default update perm is:
            #
            #  ('managers', ERQLExpression(Any X WHERE U has_update_permission X,
            #                              X eid %(x)s, U eid %(u)s))
            #
            # is deserialized in this order (groups first), and ERQLExpression
            # implements comparison by rql expression.
            if perms == buildobjs.DEFAULT_ATTRPERMS[action]:
                # The default rule is to delegate to the entity
                # rule. This needs to be checked only once.
                if not etypechecked:
                    entity.cw_check_perm(action)
                    etypechecked = True
                continue
            if perms == ():
                # That means an immutable attribute; as an optimization, avoid
                # going through check_perm.
                raise Unauthorized(action, str(rdef))
            rdef.check_perm(cnx, action, eid=eid)

    if action == 'add' and not etypechecked:
        # think about cnx.create_entity('Foo'): the standard metadata were
        # inserted by a hook with a bypass ... we conceptually need to check
        # the eid attribute at *creation* time
        entity.cw_check_perm(action)
+
+
class CheckEntityPermissionOp(hook.DataOperationMixIn, hook.LateOperation):
    """Late operation performing entity-level security checks at commit
    time, over (eid, action, edited attributes) triples collected during
    the transaction."""

    def precommit_event(self):
        connection = self.cnx
        for eid, action, edited in self.get_data():
            check_entity_attributes(connection,
                                    connection.entity_from_eid(eid),
                                    action, edited)
+
+
class CheckRelationPermissionOp(hook.DataOperationMixIn, hook.LateOperation):
    """Late operation performing relation-level security checks at commit
    time, over (action, rschema, eidfrom, eidto) tuples collected during
    the transaction."""

    def precommit_event(self):
        cnx = self.cnx
        for action, rschema, eidfrom, eidto in self.get_data():
            subjtype = cnx.entity_metas(eidfrom)['type']
            objtype = cnx.entity_metas(eidto)['type']
            rschema.rdef(subjtype, objtype).check_perm(
                cnx, action, fromeid=eidfrom, toeid=eidto)
+
+
@objectify_predicate
def write_security_enabled(cls, req, **kwargs):
    """Predicate scoring 1 when write security is enabled on the request's
    connection, 0 otherwise (including when there is no request)."""
    if req is not None and req.write_security:
        return 1
    return 0
+
class SecurityHook(hook.Hook):
    """Abstract base class for security hooks.

    Only selected when write security is enabled on the connection, so the
    whole category can be bypassed through `req.write_security`.
    """
    __abstract__ = True
    # all security hooks may be disabled at once through this category
    category = 'security'
    __select__ = hook.Hook.__select__ & write_security_enabled()
+
+
class AfterAddEntitySecurityHook(SecurityHook):
    """Schedule an 'add' permission check (at commit time) for newly
    created entities."""
    __regid__ = 'securityafteraddentity'
    events = ('after_add_entity',)

    def __call__(self):
        CheckEntityPermissionOp.get_instance(self._cw).add_data(
            (self.entity.eid, 'add', self.entity.cw_edited) )
+
+
class AfterUpdateEntitySecurityHook(SecurityHook):
    """Schedule a permission check (at commit time) for updated entities."""
    __regid__ = 'securityafterupdateentity'
    events = ('after_update_entity',)

    def __call__(self):
        # an entity created then updated within the same transaction only
        # needs the 'add' permission
        if self._cw.added_in_transaction(self.entity.eid):
            action = 'add'
        else:
            action = 'update'
        # keep a reference to the currently edited attributes: the entity may
        # be re-edited later in the same transaction, which would overwrite
        # cw_edited
        CheckEntityPermissionOp.get_instance(self._cw).add_data(
            (self.entity.eid, action, self.entity.cw_edited))
+
+
class BeforeDelEntitySecurityHook(SecurityHook):
    """Check 'delete' permission before an entity is deleted."""
    __regid__ = 'securitybeforedelentity'
    events = ('before_delete_entity',)

    def __call__(self):
        self.entity.cw_check_perm('delete')
+
+
def skip_inlined_relation_security(cnx, rschema, eid):
    """Return True if security for the given inlined relation should be
    skipped, in case where the relation has been set through modification of
    `entity.cw_edited` in a hook.
    """
    assert rschema.inlined
    try:
        # only entities edited in this transaction are in the cache
        edited = getattr(cnx.entity_cache(eid), 'cw_edited', None)
    except KeyError:
        return False
    if edited is None:
        return False
    return rschema.type in edited.skip_security
+
+
class BeforeAddRelationSecurityHook(SecurityHook):
    """Check 'add' permission for relation types that must be checked
    *before* the relation is actually added (those in BEFORE_ADD_RELATIONS);
    all other relation types are handled by `AfterAddRelationSecurityHook`.
    """
    __regid__ = 'securitybeforeaddrelation'
    events = ('before_add_relation',)

    def __call__(self):
        if self.rtype in BEFORE_ADD_RELATIONS:
            # this specific relation instance may have been explicitly
            # whitelisted in the transaction
            nocheck = self._cw.transaction_data.get('skip-security', ())
            if (self.eidfrom, self.rtype, self.eidto) in nocheck:
                return
            rschema = self._cw.repo.schema[self.rtype]
            # inlined relations set through cw_edited by a hook bypass security
            if rschema.inlined and skip_inlined_relation_security(
                    self._cw, rschema, self.eidfrom):
                return
            rdef = rschema.rdef(self._cw.entity_metas(self.eidfrom)['type'],
                                self._cw.entity_metas(self.eidto)['type'])
            rdef.check_perm(self._cw, 'add', fromeid=self.eidfrom, toeid=self.eidto)
+
+
class AfterAddRelationSecurityHook(SecurityHook):
    """Check 'add' permission for relation types not handled by
    `BeforeAddRelationSecurityHook`: either immediately, or at commit time
    for relation types in ON_COMMIT_ADD_RELATIONS.
    """
    __regid__ = 'securityafteraddrelation'
    events = ('after_add_relation',)

    def __call__(self):
        if self.rtype not in BEFORE_ADD_RELATIONS:
            # this specific relation instance may have been explicitly
            # whitelisted in the transaction
            nocheck = self._cw.transaction_data.get('skip-security', ())
            if (self.eidfrom, self.rtype, self.eidto) in nocheck:
                return
            rschema = self._cw.repo.schema[self.rtype]
            # inlined relations set through cw_edited by a hook bypass security
            if rschema.inlined and skip_inlined_relation_security(
                    self._cw, rschema, self.eidfrom):
                return
            if self.rtype in ON_COMMIT_ADD_RELATIONS:
                # defer the actual check to commit time
                CheckRelationPermissionOp.get_instance(self._cw).add_data(
                    ('add', rschema, self.eidfrom, self.eidto) )
            else:
                rdef = rschema.rdef(self._cw.entity_metas(self.eidfrom)['type'],
                                    self._cw.entity_metas(self.eidto)['type'])
                rdef.check_perm(self._cw, 'add', fromeid=self.eidfrom, toeid=self.eidto)
+
+
class BeforeDeleteRelationSecurityHook(SecurityHook):
    """Check 'delete' permission before a relation is deleted."""
    __regid__ = 'securitybeforedelrelation'
    events = ('before_delete_relation',)

    def __call__(self):
        # this specific relation instance may have been explicitly
        # whitelisted in the transaction
        nocheck = self._cw.transaction_data.get('skip-security', ())
        if (self.eidfrom, self.rtype, self.eidto) in nocheck:
            return
        rschema = self._cw.repo.schema[self.rtype]
        # inlined relations set through cw_edited by a hook bypass security
        if rschema.inlined and skip_inlined_relation_security(
                self._cw, rschema, self.eidfrom):
            return
        rdef = rschema.rdef(self._cw.entity_metas(self.eidfrom)['type'],
                            self._cw.entity_metas(self.eidto)['type'])
        rdef.check_perm(self._cw, 'delete', fromeid=self.eidfrom, toeid=self.eidto)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/hooks/synccomputed.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/synccomputed.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,227 @@
+# copyright 2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""Hooks for synchronizing computed attributes"""
+
+__docformat__ = "restructuredtext en"
+from cubicweb import _
+
+from collections import defaultdict
+
+from rql import nodes
+
+from cubicweb.server import hook
+
+
class RecomputeAttributeOperation(hook.DataOperationMixIn, hook.Operation):
    """Operation to recompute caches of computed attributes at commit time,
    depending on what has been modified in the transaction, and avoiding to
    recompute twice the same attribute.
    """
    containercls = dict
    def add_data(self, computed_attribute, eid=None):
        # group eids by computed attribute rdef; a None eid means "recompute
        # for all entities of the attribute's subject type"
        try:
            self._container[computed_attribute].add(eid)
        except KeyError:
            self._container[computed_attribute] = set((eid,))

    def precommit_event(self):
        for computed_attribute_rdef, eids in self.get_data().items():
            attr = computed_attribute_rdef.rtype
            formula  = computed_attribute_rdef.formula
            # build a RQL query from the formula: select the subject entity
            # (X) along with the recomputed value, grouped by X
            select = self.cnx.repo.vreg.rqlhelper.parse(formula).children[0]
            xvar = select.get_variable('X')
            select.add_selected(xvar, index=0)
            select.add_group_var(xvar, index=0)
            if None in eids:
                # recompute for every entity of the subject type
                select.add_type_restriction(xvar, computed_attribute_rdef.subject)
            else:
                # recompute only for entities modified in this transaction
                select.add_eid_restriction(xvar, eids)
            update_rql = 'SET X %s %%(value)s WHERE X eid %%(x)s' % attr
            for eid, value in self.cnx.execute(select.as_string()):
                self.cnx.execute(update_rql, {'value': value, 'x': eid})
+
+
class EntityWithCACreatedHook(hook.Hook):
    """When creating an entity that has some computed attribute, those
    attributes have to be computed.

    Concrete classes of this hook are generated at registration time by
    introspecting the schema.
    """
    __abstract__ = True
    events = ('after_add_entity',)
    # list of computed attribute rdefs that have to be recomputed
    computed_attributes = None

    def __call__(self):
        for rdef in self.computed_attributes:
            RecomputeAttributeOperation.get_instance(self._cw).add_data(
                rdef, self.entity.eid)
+
+
class RelationInvolvedInCAModifiedHook(hook.Hook):
    """When some relation used in a computed attribute is updated, those
    attributes have to be recomputed.

    Concrete classes of this hook are generated at registration time by
    introspecting the schema.
    """
    __abstract__ = True
    events = ('after_add_relation', 'before_delete_relation')
    # list of (computed attribute rdef, optimize_on) that have to be recomputed
    optimized_computed_attributes = None

    def __call__(self):
        for rdef, optimize_on in self.optimized_computed_attributes:
            # optimize_on is 'eidfrom' or 'eidto' when only one side of the
            # relation needs recomputation, else None (recompute for all
            # entities of the subject type)
            if optimize_on is None:
                eid = None
            else:
                eid = getattr(self, optimize_on)
            RecomputeAttributeOperation.get_instance(self._cw).add_data(rdef, eid)
+
+
class AttributeInvolvedInCAModifiedHook(hook.Hook):
    """When some attribute used in a computed attribute is updated, those
    attributes have to be recomputed.

    Concrete classes of this hook are generated at registration time by
    introspecting the schema.
    """
    __abstract__ = True
    events = ('after_update_entity',)
    # list of (computed attribute rdef, attributes of this entity type involved)
    # that may have to be recomputed
    attributes_computed_attributes = None

    def __call__(self):
        edited_attributes = frozenset(self.entity.cw_edited)
        for rdef, used_attributes in self.attributes_computed_attributes.items():
            if edited_attributes.intersection(used_attributes):
                # XXX optimize if the modified attributes belong to the same
                # entity as the computed attribute
                RecomputeAttributeOperation.get_instance(self._cw).add_data(rdef)
+
+
+# code generation at registration time #########################################
+
def _optimize_on(formula_select, rtype):
    """Given a formula and some rtype, tells whether on update of the given
    relation, formula may be recomputed only for the relation's subject
    ('eidfrom' returned), object ('eidto' returned) or None.

    Optimizing is only possible when X is used as direct subject/object of this
    relation, else we may miss some necessary update.
    """
    for rel in formula_select.get_nodes(nodes.Relation):
        if rel.r_type == rtype:
            sub = rel.get_variable_parts()[0]
            obj = rel.get_variable_parts()[1]
            if sub.name == 'X':
                return 'eidfrom'
            elif obj.name == 'X':
                return 'eidto'
            else:
                return None
    # implicit None when the relation does not appear directly in the formula
+
+
class _FormulaDependenciesMatrix(object):
    """This class computes and represents the dependencies of computed
    attributes towards relations and attributes, and generates the concrete
    hook classes keeping them up to date.
    """

    def __init__(self, schema):
        """Analyzes the schema to compute the dependencies"""
        # entity types holding some computed attribute {etype: [computed rdefs]}
        self.computed_attribute_by_etype = defaultdict(list)
        # depending entity types {dep. etype: {computed rdef: dep. etype attributes}}
        self.computed_attribute_by_etype_attrs = defaultdict(lambda: defaultdict(set))
        # depending relations def {dep. rdef: [computed rdefs]
        self.computed_attribute_by_relation = defaultdict(list) # by rdef
        # Walk through all attributes definitions
        for rdef in schema.iter_computed_attributes():
            self.computed_attribute_by_etype[rdef.subject.type].append(rdef)
            # extract the relations it depends upon - `rdef.formula_select` is
            # expected to have been set by finalize_computed_attributes
            select = rdef.formula_select
            for rel_node in select.get_nodes(nodes.Relation):
                if rel_node.is_types_restriction():
                    # "X is SomeType" carries no dependency
                    continue
                rschema = schema.rschema(rel_node.r_type)
                lhs, rhs = rel_node.get_variable_parts()
                for sol in select.solutions:
                    subject_etype = sol[lhs.name]
                    if isinstance(rhs, nodes.VariableRef):
                        # consider all possible types for the object variable
                        object_etypes = set(sol[rhs.name] for sol in select.solutions)
                    else:
                        object_etypes = rschema.objects(subject_etype)
                    for object_etype in object_etypes:
                        if rschema.final:
                            # dependency on an attribute of subject_etype
                            attr_for_computations = self.computed_attribute_by_etype_attrs[subject_etype]
                            attr_for_computations[rdef].add(rschema.type)
                        else:
                            # dependency on a proper relation definition
                            depend_on_rdef = rschema.rdefs[subject_etype, object_etype]
                            self.computed_attribute_by_relation[depend_on_rdef].append(rdef)

    def generate_entity_creation_hooks(self):
        # one hook per entity type holding computed attributes
        for etype, computed_attributes in self.computed_attribute_by_etype.items():
            regid = 'computed_attribute.%s_created' % etype
            selector = hook.is_instance(etype)
            yield type('%sCreatedHook' % etype,
                       (EntityWithCACreatedHook,),
                       {'__regid__': regid,
                        '__select__': hook.Hook.__select__ & selector,
                        'computed_attributes': computed_attributes})

    def generate_relation_change_hooks(self):
        # one hook per relation definition some computed attribute depends on
        for rdef, computed_attributes in self.computed_attribute_by_relation.items():
            regid = 'computed_attribute.%s_modified' % rdef.rtype
            selector = hook.match_rtype(rdef.rtype.type,
                                        frometypes=(rdef.subject.type,),
                                        toetypes=(rdef.object.type,))
            optimized_computed_attributes = []
            for computed_rdef in computed_attributes:
                optimized_computed_attributes.append(
                    (computed_rdef,
                     _optimize_on(computed_rdef.formula_select, rdef.rtype))
                    )
            yield type('%sModifiedHook' % rdef.rtype,
                       (RelationInvolvedInCAModifiedHook,),
                       {'__regid__': regid,
                        '__select__': hook.Hook.__select__ & selector,
                        'optimized_computed_attributes': optimized_computed_attributes})

    def generate_entity_update_hooks(self):
        # one hook per entity type whose attributes are used in some formula
        for etype, attributes_computed_attributes in self.computed_attribute_by_etype_attrs.items():
            regid = 'computed_attribute.%s_updated' % etype
            selector = hook.is_instance(etype)
            yield type('%sModifiedHook' % etype,
                       (AttributeInvolvedInCAModifiedHook,),
                       {'__regid__': regid,
                        '__select__': hook.Hook.__select__ & selector,
                        'attributes_computed_attributes': attributes_computed_attributes})
+
+
def registration_callback(vreg):
    """Register this module's hooks, plus the computed-attribute hooks
    generated from the schema."""
    vreg.register_all(globals().values(), __name__)
    dependencies = _FormulaDependenciesMatrix(vreg.schema)
    for generate in (dependencies.generate_entity_creation_hooks,
                     dependencies.generate_relation_change_hooks,
                     dependencies.generate_entity_update_hooks):
        for hook_class in generate():
            vreg.register(hook_class)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/hooks/syncschema.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/syncschema.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1417 @@
+# copyright 2003-2015 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""schema hooks:
+
+- synchronize the living schema object with the persistent schema
+- perform physical update on the source when necessary
+
+checking for schema consistency is done in hooks.py
+"""
+
+__docformat__ = "restructuredtext en"
+from cubicweb import _
+
+import json
+from copy import copy
+from hashlib import md5
+
+from yams.schema import (BASE_TYPES, BadSchemaDefinition,
+ RelationSchema, RelationDefinitionSchema)
+from yams import buildobjs as ybo, convert_default_value
+
+from logilab.common.decorators import clear_cache
+
+from cubicweb import validation_error
+from cubicweb.predicates import is_instance
+from cubicweb.schema import (SCHEMA_TYPES, META_RTYPES, VIRTUAL_RTYPES,
+ CONSTRAINTS, UNIQUE_CONSTRAINTS, ETYPE_NAME_MAP)
+from cubicweb.server import hook, schemaserial as ss, schema2sql as y2sql
+from cubicweb.server.sqlutils import SQL_PREFIX
+from cubicweb.hooks.synccomputed import RecomputeAttributeOperation
+
# core entity and relation types which can't be removed from the schema,
# whatever the user asks for
CORE_TYPES = BASE_TYPES | SCHEMA_TYPES | META_RTYPES | set(
    ('CWUser', 'CWGroup','login', 'upassword', 'name', 'in_group'))
+
+
def get_constraints(cnx, entity):
    """Return the constraint objects recorded in the transaction data for
    the given rdef entity."""
    constraints = []
    for constraint_eid in cnx.transaction_data.get(entity.eid, ()):
        constraint_entity = cnx.entity_from_eid(constraint_eid)
        constraint = CONSTRAINTS[constraint_entity.type].deserialize(
            constraint_entity.value)
        constraint.eid = constraint_eid
        constraints.append(constraint)
    return constraints
+
def group_mapping(cw):
    """Return the group name to eid mapping, cached per transaction."""
    transaction_data = cw.transaction_data
    if 'groupmap' not in transaction_data:
        transaction_data['groupmap'] = ss.group_mapping(cw)
    return transaction_data['groupmap']
+
def add_inline_relation_column(cnx, etype, rtype):
    """add necessary column and index for an inlined relation"""
    attrkey = '%s.%s' % (etype, rtype)
    createdattrs = cnx.transaction_data.setdefault('createdattrs', set())
    if attrkey in createdattrs:
        return  # already handled in this transaction
    createdattrs.add(attrkey)
    table = SQL_PREFIX + etype
    column = SQL_PREFIX + rtype
    try:
        cnx.system_sql(str('ALTER TABLE %s ADD %s integer REFERENCES entities (eid)' % (table, column)),
                       rollback_on_failure=False)
        cnx.info('added column %s to table %s', column, table)
    except Exception:
        # silent exception here, if this error has not been raised because the
        # column already exists, index creation will fail anyway
        # BUG FIX: arguments were passed as (table, column), swapping the
        # names in the logged message; pass (column, table) to match the
        # format string
        cnx.exception('error while adding column %s to table %s',
                      column, table)
    # create index before alter table which may expectingly fail during test
    # (sqlite) while index creation should never fail (test for index existence
    # is done by the dbhelper)
    cnx.repo.system_source.create_index(cnx, table, column)
    cnx.info('added index on %s(%s)', table, column)
+
+
def insert_rdef_on_subclasses(cnx, eschema, rschema, rdefdef, props):
    """Propagate a freshly added relation definition to specializations
    (subclasses) of `eschema` which don't already have it."""
    # XXX 'infered': True/False, not clear actually
    props.update({'constraints': rdefdef.constraints,
                  'description': rdefdef.description,
                  'cardinality': rdefdef.cardinality,
                  'permissions': rdefdef.get_permissions(),
                  'order': rdefdef.order,
                  'infered': False, 'eid': None
                  })
    cstrtypemap = ss.cstrtype_mapping(cnx)
    groupmap = group_mapping(cnx)
    # renamed from `object` to avoid shadowing the builtin
    objschema = rschema.schema.eschema(rdefdef.object)
    for specialization in eschema.specialized_by(False):
        if (specialization, rdefdef.object) in rschema.rdefs:
            continue
        sperdef = RelationDefinitionSchema(specialization, rschema,
                                           objschema, None, values=props)
        ss.execschemarql(cnx.execute, sperdef,
                         ss.rdef2rql(sperdef, cstrtypemap, groupmap))
+
+
def check_valid_changes(cnx, entity, ro_attrs=('name', 'final')):
    """Raise a validation error if any read-only attribute of `entity` has
    been given a new value."""
    errors = {}
    edited = entity.cw_edited
    # don't use getattr(entity, attr), we would get the modified value if any
    for attr in edited:
        if attr not in ro_attrs:
            continue
        origval, newval = edited.oldnewvalue(attr)
        if newval != origval:
            errors[attr] = _("can't change this attribute")
    if errors:
        raise validation_error(entity, errors)
+
+
class _MockEntity(object): # XXX use a named tuple with python 2.6
    """Lightweight stand-in exposing only an `eid`, as expected by the
    schemaserial helpers."""
    def __init__(self, eid):
        self.eid = eid
+
+
class SyncSchemaHook(hook.Hook):
    """abstract class for schema synchronization hooks (in the `syncschema`
    category)
    """
    __abstract__ = True
    # all schema synchronization hooks may be disabled at once through this
    # category
    category = 'syncschema'
+
+
+# operations for low-level database alteration ################################
+
class DropTable(hook.Operation):
    """actually remove a database from the instance's schema"""
    table = None # make pylint happy

    def precommit_event(self):
        table = self.table
        dropped_tables = self.cnx.transaction_data.setdefault('droppedtables',
                                                              set())
        # drop each table at most once per transaction
        if table not in dropped_tables:
            dropped_tables.add(table)
            self.cnx.system_sql('DROP TABLE %s' % table)
            self.info('dropped table %s', table)

    # XXX revertprecommit_event
+
+
class DropRelationTable(DropTable):
    """Drop the `<rtype>_relation` table and record the relation type as
    pending deletion in the transaction."""
    def __init__(self, cnx, rtype):
        super(DropRelationTable, self).__init__(
            cnx, table='%s_relation' % rtype)
        cnx.transaction_data.setdefault('pendingrtypes', set()).add(rtype)
+
+
class DropColumn(hook.DataOperationMixIn, hook.Operation):
    """actually remove the attribute's column from entity table in the system
    database
    """
    def precommit_event(self):
        cnx = self.cnx
        for etype, attr in self.get_data():
            table = SQL_PREFIX + etype
            column = SQL_PREFIX + attr
            source = cnx.repo.system_source
            # drop index if any
            source.drop_index(cnx, table, column)
            if source.dbhelper.alter_column_support:
                cnx.system_sql('ALTER TABLE %s DROP COLUMN %s' % (table, column),
                               rollback_on_failure=False)
                self.info('dropped column %s from table %s', column, table)
            else:
                # not supported by sqlite for instance
                self.error('dropping column not supported by the backend, handle '
                           'it yourself (%s.%s)', table, column)

    # XXX revertprecommit_event
+
+
+# base operations for in-memory schema synchronization ########################
+
class MemSchemaNotifyChanges(hook.SingleLastOperation):
    """the update schema operation:

    special operation which should be called once and after all other schema
    operations. It will trigger internal structures rebuilding to consider
    schema changes.
    """

    def __init__(self, cnx):
        hook.SingleLastOperation.__init__(self, cnx)

    def precommit_event(self):
        # relation ordering may have changed: drop cached ordered_relations
        # on every non-final entity schema
        for eschema in self.cnx.repo.schema.entities():
            if not eschema.final:
                clear_cache(eschema, 'ordered_relations')

    def postcommit_event(self):
        repo = self.cnx.repo
        # commit event should not raise error, while set_schema has chances to
        # do so because it triggers full vreg reloading
        try:
            repo.schema.rebuild_infered_relations()
            # trigger vreg reload
            repo.set_schema(repo.schema)
            # CWUser class might have changed, update current session users
            cwuser_cls = self.cnx.vreg['etypes'].etype_class('CWUser')
            for session in repo._sessions.values():
                session.user.__class__ = cwuser_cls
        except Exception:
            self.critical('error while setting schema', exc_info=True)

    def rollback_event(self):
        # caches cleared in precommit are stale either way; clear them again
        self.precommit_event()
+
+
class MemSchemaOperation(hook.Operation):
    """base class for schema operations"""
    def __init__(self, cnx, **kwargs):
        hook.Operation.__init__(self, cnx, **kwargs)
        # every schema operation is triggering a schema update
        # (MemSchemaNotifyChanges is a SingleLastOperation, so it is only
        # scheduled once per transaction)
        MemSchemaNotifyChanges(cnx)
+
+
+# operations for high-level source database alteration ########################
+
class CWETypeAddOp(MemSchemaOperation):
    """after adding a CWEType entity:
    * add it to the instance's schema
    * create the necessary table
    * set creation_date and modification_date by creating the necessary
      CWAttribute entities
    * add relation by creating the necessary CWRelation entity
    """
    entity = None # make pylint happy

    def precommit_event(self):
        cnx = self.cnx
        entity = self.entity
        schema = cnx.vreg.schema
        # update the in-memory schema first
        etype = ybo.EntityType(eid=entity.eid, name=entity.name,
                               description=entity.description)
        eschema = schema.add_entity_type(etype)
        # create the necessary table
        tablesql = y2sql.eschema2sql(cnx.repo.system_source.dbhelper,
                                     eschema, prefix=SQL_PREFIX)
        for sql in tablesql.split(';'):
            if sql.strip():
                cnx.system_sql(sql)
        # add meta relations
        gmap = group_mapping(cnx)
        cmap = ss.cstrtype_mapping(cnx)
        for rtype in (META_RTYPES - VIRTUAL_RTYPES):
            try:
                rschema = schema[rtype]
            except KeyError:
                self.critical('rtype %s was not handled at cwetype creation time', rtype)
                continue
            if not rschema.rdefs:
                self.warning('rtype %s has no relation definition yet', rtype)
                continue
            # take any existing rdef of this meta relation as a template for
            # the new entity type's rdef
            sampletype = rschema.subjects()[0]
            desttype = rschema.objects()[0]
            try:
                rdef = copy(rschema.rdef(sampletype, desttype))
            except KeyError:
                # this combo does not exist because this is not a universal META_RTYPE
                continue
            rdef.subject = _MockEntity(eid=entity.eid)
            mock = _MockEntity(eid=None)
            ss.execschemarql(cnx.execute, mock, ss.rdef2rql(rdef, cmap, gmap))

    def revertprecommit_event(self):
        # revert changes on in memory schema
        self.cnx.vreg.schema.del_entity_type(self.entity.name)
        # revert changes on database
        self.cnx.system_sql('DROP TABLE %s%s' % (SQL_PREFIX, self.entity.name))
+
+
class CWETypeRenameOp(MemSchemaOperation):
    """this operation updates physical storage accordingly"""
    oldname = newname = None # make pylint happy

    def rename(self, oldname, newname):
        # update the in-memory schema first
        self.cnx.vreg.schema.rename_entity_type(oldname, newname)
        # we need sql to operate physical changes on the system database
        sqlexec = self.cnx.system_sql
        dbhelper = self.cnx.repo.system_source.dbhelper
        sql = dbhelper.sql_rename_table(SQL_PREFIX+oldname,
                                        SQL_PREFIX+newname)
        sqlexec(sql)
        self.info('renamed table %s to %s', oldname, newname)
        sqlexec('UPDATE entities SET type=%(newname)s WHERE type=%(oldname)s',
                {'newname': newname, 'oldname': oldname})
        # keep the type/source cache consistent with the rename
        for eid, (etype, extid, auri) in self.cnx.repo._type_source_cache.items():
            if etype == oldname:
                self.cnx.repo._type_source_cache[eid] = (newname, extid, auri)
        # XXX transaction records

    def precommit_event(self):
        self.rename(self.oldname, self.newname)

    def revertprecommit_event(self):
        # renaming back restores the previous state
        self.rename(self.newname, self.oldname)
+
+
class CWRTypeUpdateOp(MemSchemaOperation):
    """actually update some properties of a relation definition"""
    rschema = entity = values = None # make pylint happy
    oldvalues = None

    def precommit_event(self):
        rschema = self.rschema
        if rschema.final:
            return # watched changes to final relation type are unexpected
        cnx = self.cnx
        if 'fulltext_container' in self.values:
            # add the container's entity types first so they are reindexed
            # before the contained ones
            op = UpdateFTIndexOp.get_instance(cnx)
            for subjtype, objtype in rschema.rdefs:
                if self.values['fulltext_container'] == 'subject':
                    op.add_data(subjtype)
                    op.add_data(objtype)
                else:
                    op.add_data(objtype)
                    op.add_data(subjtype)
        # update the in-memory schema first
        self.oldvalues = dict( (attr, getattr(rschema, attr)) for attr in self.values)
        self.rschema.__dict__.update(self.values)
        # then make necessary changes to the system source database
        if 'inlined' not in self.values:
            return # nothing to do
        inlined = self.values['inlined']
        # check in-lining is possible when inlined
        if inlined:
            self.entity.check_inlined_allowed()
        # inlined changed, make necessary physical changes!
        sqlexec = self.cnx.system_sql
        rtype = rschema.type
        eidcolumn = SQL_PREFIX + 'eid'
        if not inlined:
            # need to create the relation if it has not been already done by
            # another event of the same transaction
            if not rschema.type in cnx.transaction_data.get('createdtables', ()):
                tablesql = y2sql.rschema2sql(rschema)
                # create the necessary table
                for sql in tablesql.split(';'):
                    if sql.strip():
                        sqlexec(sql)
                cnx.transaction_data.setdefault('createdtables', []).append(
                    rschema.type)
            # copy existant data
            column = SQL_PREFIX + rtype
            for etype in rschema.subjects():
                table = SQL_PREFIX + str(etype)
                sqlexec('INSERT INTO %s_relation SELECT %s, %s FROM %s WHERE NOT %s IS NULL'
                        % (rtype, eidcolumn, column, table, column))
            # drop existant columns
            #if cnx.repo.system_source.dbhelper.alter_column_support:
            for etype in rschema.subjects():
                DropColumn.get_instance(cnx).add_data((str(etype), rtype))
        else:
            for etype in rschema.subjects():
                try:
                    add_inline_relation_column(cnx, str(etype), rtype)
                except Exception as ex:
                    # the column probably already exists. this occurs when the
                    # entity's type has just been added or if the column has not
                    # been previously dropped (eg sqlite)
                    self.error('error while altering table %s: %s', etype, ex)
                # copy existant data.
                # XXX don't use, it's not supported by sqlite (at least at when i tried it)
                #sqlexec('UPDATE %(etype)s SET %(rtype)s=eid_to '
                #        'FROM %(rtype)s_relation '
                #        'WHERE %(etype)s.eid=%(rtype)s_relation.eid_from'
                #        % locals())
                table = SQL_PREFIX + str(etype)
                cursor = sqlexec('SELECT eid_from, eid_to FROM %(table)s, '
                                 '%(rtype)s_relation WHERE %(table)s.%(eidcolumn)s='
                                 '%(rtype)s_relation.eid_from' % locals())
                args = [{'val': eid_to, 'x': eid} for eid, eid_to in cursor.fetchall()]
                if args:
                    column = SQL_PREFIX + rtype
                    cursor.executemany('UPDATE %s SET %s=%%(val)s WHERE %s=%%(x)s'
                                       % (table, column, eidcolumn), args)
                # drop existant table
                # NOTE(review): called once per subject etype, but DropTable
                # deduplicates through transaction_data['droppedtables']
                DropRelationTable(cnx, rtype)

    def revertprecommit_event(self):
        # revert changes on in memory schema
        self.rschema.__dict__.update(self.oldvalues)
        # XXX revert changes on database
+
+
class CWComputedRTypeUpdateOp(MemSchemaOperation):
    """actually update some properties of a computed relation definition"""
    rschema = entity = rule = None # make pylint happy
    old_rule = None

    def precommit_event(self):
        # update the in-memory schema, remembering the previous rule so the
        # change may be reverted
        self.old_rule, self.rschema.rule = self.rschema.rule, self.rule

    def revertprecommit_event(self):
        # revert changes on in memory schema
        self.rschema.rule = self.old_rule
+
+
+class CWAttributeAddOp(MemSchemaOperation):
+ """an attribute relation (CWAttribute) has been added:
+ * add the necessary column
+ * set default on this column if any and possible
+ * register an operation to add the relation definition to the
+ instance's schema on commit
+
+ constraints are handled by specific hooks
+ """
+ entity = None # make pylint happy
+
    def init_rdef(self, **kwargs):
        """Add the relation definition to the live schema, shift ordernum of
        sibling attributes to make room, and return the (definition,
        schema rdef) pair."""
        entity = self.entity
        fromentity = entity.stype
        rdefdef = self.rdefdef = ybo.RelationDefinition(
            str(fromentity.name), entity.rtype.name, str(entity.otype.name),
            description=entity.description, cardinality=entity.cardinality,
            constraints=get_constraints(self.cnx, entity),
            order=entity.ordernum, eid=entity.eid, **kwargs)
        try:
            self.cnx.vreg.schema.add_relation_def(rdefdef)
        except BadSchemaDefinition:
            # rdef has been infered then explicitly added (current consensus is
            # not clear at all versus infered relation handling (and much
            # probably buggy)
            rdef = self.cnx.vreg.schema.rschema(rdefdef.name).rdefs[rdefdef.subject, rdefdef.object]
            assert rdef.infered
        else:
            rdef = self.cnx.vreg.schema.rschema(rdefdef.name).rdefs[rdefdef.subject, rdefdef.object]

        # shift ordernum of attributes at or after the new one's position
        self.cnx.execute('SET X ordernum Y+1 '
                         'WHERE X from_entity SE, SE eid %(se)s, X ordernum Y, '
                         'X ordernum >= %(order)s, NOT X eid %(x)s',
                         {'x': entity.eid, 'se': fromentity.eid,
                          'order': entity.ordernum or 0})
        return rdefdef, rdef
+
+ def precommit_event(self):
+ cnx = self.cnx
+ entity = self.entity
+ # entity.defaultval is a Binary or None, but we need a correctly typed
+ # value
+ default = entity.defaultval
+ if default is not None:
+ default = default.unzpickle()
+ props = {'default': default,
+ 'indexed': entity.indexed,
+ 'fulltextindexed': entity.fulltextindexed,
+ 'internationalizable': entity.internationalizable}
+ if entity.extra_props:
+ props.update(json.loads(entity.extra_props.getvalue().decode('ascii')))
+ # entity.formula may not exist yet if we're migrating to 3.20
+ if hasattr(entity, 'formula'):
+ props['formula'] = entity.formula
+ # update the in-memory schema first
+ rdefdef, rdef = self.init_rdef(**props)
+ # then make necessary changes to the system source database
+ syssource = cnx.repo.system_source
+ attrtype = y2sql.type_from_rdef(syssource.dbhelper, rdef)
+ # XXX should be moved somehow into lgdb: sqlite doesn't support to
+ # add a new column with UNIQUE, it should be added after the ALTER TABLE
+ # using ADD INDEX
+ if syssource.dbdriver == 'sqlite' and 'UNIQUE' in attrtype:
+ extra_unique_index = True
+ attrtype = attrtype.replace(' UNIQUE', '')
+ else:
+ extra_unique_index = False
+ # added some str() wrapping query since some backend (eg psycopg) don't
+ # allow unicode queries
+ table = SQL_PREFIX + rdefdef.subject
+ column = SQL_PREFIX + rdefdef.name
+ try:
+ cnx.system_sql(str('ALTER TABLE %s ADD %s %s'
+ % (table, column, attrtype)),
+ rollback_on_failure=False)
+ self.info('added column %s to table %s', column, table)
+ except Exception as ex:
+ # the column probably already exists. this occurs when
+ # the entity's type has just been added or if the column
+ # has not been previously dropped
+ self.error('error while altering table %s: %s', table, ex)
+ if extra_unique_index or entity.indexed:
+ try:
+ syssource.create_index(cnx, table, column,
+ unique=extra_unique_index)
+ except Exception as ex:
+ self.error('error while creating index for %s.%s: %s',
+ table, column, ex)
+ # final relations are not infered, propagate
+ schema = cnx.vreg.schema
+ try:
+ eschema = schema.eschema(rdefdef.subject)
+ except KeyError:
+ return # entity type currently being added
+ # propagate attribute to children classes
+ rschema = schema.rschema(rdefdef.name)
+ # if relation type has been inserted in the same transaction, its final
+ # attribute is still set to False, so we've to ensure it's False
+ rschema.final = True
+ insert_rdef_on_subclasses(cnx, eschema, rschema, rdefdef, props)
+ # update existing entities with the default value of newly added attribute
+ if default is not None:
+ default = convert_default_value(self.rdefdef, default)
+ cnx.system_sql('UPDATE %s SET %s=%%(default)s' % (table, column),
+ {'default': default})
+ # if attribute is computed, compute it
+ if getattr(entity, 'formula', None):
+ # add rtype attribute for RelationDefinitionSchema api compat, this
+ # is what RecomputeAttributeOperation expect
+ rdefdef.rtype = rdefdef.name
+ RecomputeAttributeOperation.get_instance(cnx).add_data(rdefdef)
+
+ def revertprecommit_event(self):
+ # revert changes on in memory schema
+ if getattr(self, 'rdefdef', None) is None:
+ return
+ self.cnx.vreg.schema.del_relation_def(
+ self.rdefdef.subject, self.rdefdef.name, self.rdefdef.object)
+ # XXX revert changes on database
+
+
+class CWRelationAddOp(CWAttributeAddOp):
+ """an actual relation has been added:
+
+ * add the relation definition to the instance's schema
+
+ * if this is an inlined relation, add the necessary column else if it's the
+ first instance of this relation type, add the necessary table and set
+ default permissions
+
+ constraints are handled by specific hooks
+ """
+ entity = None # make pylint happy
+
+ def precommit_event(self):
+ cnx = self.cnx
+ entity = self.entity
+ # update the in-memory schema first
+ rdefdef, rdef = self.init_rdef(composite=entity.composite)
+ # then make necessary changes to the system source database
+ schema = cnx.vreg.schema
+ rtype = rdefdef.name
+ rschema = schema.rschema(rtype)
+ # this have to be done before permissions setting
+ if rschema.inlined:
+ # need to add a column if the relation is inlined and if this is the
+ # first occurence of "Subject relation Something" whatever Something
+ if len(rschema.objects(rdefdef.subject)) == 1:
+ add_inline_relation_column(cnx, rdefdef.subject, rtype)
+ eschema = schema[rdefdef.subject]
+ insert_rdef_on_subclasses(cnx, eschema, rschema, rdefdef,
+ {'composite': entity.composite})
+ else:
+ if rschema.symmetric:
+ # for symmetric relations, rdefs will store relation definitions
+ # in both ways (i.e. (subj -> obj) and (obj -> subj))
+ relation_already_defined = len(rschema.rdefs) > 2
+ else:
+ relation_already_defined = len(rschema.rdefs) > 1
+ # need to create the relation if no relation definition in the
+ # schema and if it has not been added during other event of the same
+ # transaction
+ if not (relation_already_defined or
+ rtype in cnx.transaction_data.get('createdtables', ())):
+ rschema = schema.rschema(rtype)
+ # create the necessary table
+ for sql in y2sql.rschema2sql(rschema).split(';'):
+ if sql.strip():
+ cnx.system_sql(sql)
+ cnx.transaction_data.setdefault('createdtables', []).append(
+ rtype)
+
+ # XXX revertprecommit_event
+
+
+class RDefDelOp(MemSchemaOperation):
+ """an actual relation has been removed"""
+ rdef = None # make pylint happy
+
+ def precommit_event(self):
+ cnx = self.cnx
+ rdef = self.rdef
+ rschema = rdef.rtype
+ # make necessary changes to the system source database first
+ rdeftype = rschema.final and 'CWAttribute' or 'CWRelation'
+ execute = cnx.execute
+ rset = execute('Any COUNT(X) WHERE X is %s, X relation_type R,'
+ 'R eid %%(x)s' % rdeftype, {'x': rschema.eid})
+ lastrel = rset[0][0] == 0
+ # we have to update physical schema systematically for final and inlined
+ # relations, but only if it's the last instance for this relation type
+ # for other relations
+ if (rschema.final or rschema.inlined):
+ rset = execute('Any COUNT(X) WHERE X is %s, X relation_type R, '
+ 'R eid %%(r)s, X from_entity E, E eid %%(e)s'
+ % rdeftype,
+ {'r': rschema.eid, 'e': rdef.subject.eid})
+ if rset[0][0] == 0 and not cnx.deleted_in_transaction(rdef.subject.eid):
+ ptypes = cnx.transaction_data.setdefault('pendingrtypes', set())
+ ptypes.add(rschema.type)
+ DropColumn.get_instance(cnx).add_data((str(rdef.subject), str(rschema)))
+ elif rschema.inlined:
+ cnx.system_sql('UPDATE %s%s SET %s%s=NULL WHERE '
+ 'EXISTS(SELECT 1 FROM entities '
+ ' WHERE eid=%s%s AND type=%%(to_etype)s)'
+ % (SQL_PREFIX, rdef.subject, SQL_PREFIX, rdef.rtype,
+ SQL_PREFIX, rdef.rtype),
+ {'to_etype': rdef.object.type})
+ elif lastrel:
+ DropRelationTable(cnx, str(rschema))
+ else:
+ cnx.system_sql('DELETE FROM %s_relation WHERE '
+ 'EXISTS(SELECT 1 FROM entities '
+ ' WHERE eid=eid_from AND type=%%(from_etype)s)'
+ ' AND EXISTS(SELECT 1 FROM entities '
+ ' WHERE eid=eid_to AND type=%%(to_etype)s)'
+ % rschema,
+ {'from_etype': rdef.subject.type, 'to_etype': rdef.object.type})
+ # then update the in-memory schema
+ if rdef.subject not in ETYPE_NAME_MAP and rdef.object not in ETYPE_NAME_MAP:
+ rschema.del_relation_def(rdef.subject, rdef.object)
+ # if this is the last relation definition of this type, drop associated
+ # relation type
+ if lastrel and not cnx.deleted_in_transaction(rschema.eid):
+ execute('DELETE CWRType X WHERE X eid %(x)s', {'x': rschema.eid})
+
+ def revertprecommit_event(self):
+ # revert changes on in memory schema
+ #
+ # Note: add_relation_def takes a RelationDefinition, not a
+ # RelationDefinitionSchema, needs to fake it
+ rdef = self.rdef
+ rdef.name = str(rdef.rtype)
+ if rdef.subject not in ETYPE_NAME_MAP and rdef.object not in ETYPE_NAME_MAP:
+ self.cnx.vreg.schema.add_relation_def(rdef)
+
+
+
+class RDefUpdateOp(MemSchemaOperation):
+ """actually update some properties of a relation definition"""
+ rschema = rdefkey = values = None # make pylint happy
+ rdef = oldvalues = None
+ indexed_changed = null_allowed_changed = False
+
+ def precommit_event(self):
+ cnx = self.cnx
+ rdef = self.rdef = self.rschema.rdefs[self.rdefkey]
+ # update the in-memory schema first
+ self.oldvalues = dict( (attr, getattr(rdef, attr)) for attr in self.values)
+ rdef.update(self.values)
+ # then make necessary changes to the system source database
+ syssource = cnx.repo.system_source
+ if 'indexed' in self.values:
+ syssource.update_rdef_indexed(cnx, rdef)
+ self.indexed_changed = True
+ if 'cardinality' in self.values and rdef.rtype.final \
+ and self.values['cardinality'][0] != self.oldvalues['cardinality'][0]:
+ syssource.update_rdef_null_allowed(self.cnx, rdef)
+ self.null_allowed_changed = True
+ if 'fulltextindexed' in self.values:
+ UpdateFTIndexOp.get_instance(cnx).add_data(rdef.subject)
+ if 'formula' in self.values:
+ RecomputeAttributeOperation.get_instance(cnx).add_data(rdef)
+
+ def revertprecommit_event(self):
+ if self.rdef is None:
+ return
+ # revert changes on in memory schema
+ self.rdef.update(self.oldvalues)
+ # revert changes on database
+ syssource = self.cnx.repo.system_source
+ if self.indexed_changed:
+ syssource.update_rdef_indexed(self.cnx, self.rdef)
+ if self.null_allowed_changed:
+ syssource.update_rdef_null_allowed(self.cnx, self.rdef)
+
+
+def _set_modifiable_constraints(rdef):
+ # for proper in-place modification of in-memory schema: if rdef.constraints
+ # is already a list, reuse it (we're updating multiple constraints of the
+ # same rdef in the same transaction)
+ if not isinstance(rdef.constraints, list):
+ rdef.constraints = list(rdef.constraints)
+
+
+class CWConstraintDelOp(MemSchemaOperation):
+ """actually remove a constraint of a relation definition"""
+ rdef = oldcstr = newcstr = None # make pylint happy
+ size_cstr_changed = unique_changed = False
+
+ def precommit_event(self):
+ cnx = self.cnx
+ rdef = self.rdef
+ # in-place modification of in-memory schema first
+ _set_modifiable_constraints(rdef)
+ if self.oldcstr in rdef.constraints:
+ rdef.constraints.remove(self.oldcstr)
+ else:
+ self.critical('constraint %s for rdef %s was missing or already removed',
+ self.oldcstr, rdef)
+ if cnx.deleted_in_transaction(rdef.eid):
+ # don't try to alter a table that's going away (or is already gone)
+ return
+ # then update database: alter the physical schema on size/unique
+ # constraint changes
+ syssource = cnx.repo.system_source
+ cstrtype = self.oldcstr.type()
+ if cstrtype == 'SizeConstraint':
+ # if the size constraint is being replaced with a new max size, we'll
+ # call update_rdef_column in CWConstraintAddOp, skip it here
+ for cstr in cnx.transaction_data.get('newsizecstr', ()):
+ rdefentity = cstr.reverse_constrained_by[0]
+ cstrrdef = cnx.vreg.schema.schema_by_eid(rdefentity.eid)
+ if cstrrdef == rdef:
+ return
+
+ # we found that the size constraint for this rdef is really gone,
+ # not just replaced by another
+ syssource.update_rdef_column(cnx, rdef)
+ self.size_cstr_changed = True
+ elif cstrtype == 'UniqueConstraint':
+ syssource.update_rdef_unique(cnx, rdef)
+ self.unique_changed = True
+ if cstrtype in ('BoundaryConstraint', 'IntervalBoundConstraint', 'StaticVocabularyConstraint'):
+ cstrname = 'cstr' + md5((rdef.subject.type + rdef.rtype.type + cstrtype +
+ (self.oldcstr.serialize() or '')).encode('utf-8')).hexdigest()
+ cnx.system_sql('ALTER TABLE %s%s DROP CONSTRAINT %s' % (SQL_PREFIX, rdef.subject.type, cstrname))
+
+ def revertprecommit_event(self):
+ # revert changes on in memory schema
+ if self.newcstr is not None:
+ self.rdef.constraints.remove(self.newcstr)
+ if self.oldcstr is not None:
+ self.rdef.constraints.append(self.oldcstr)
+ # revert changes on database
+ syssource = self.cnx.repo.system_source
+ if self.size_cstr_changed:
+ syssource.update_rdef_column(self.cnx, self.rdef)
+ if self.unique_changed:
+ syssource.update_rdef_unique(self.cnx, self.rdef)
+
+
+class CWConstraintAddOp(CWConstraintDelOp):
+ """actually update constraint of a relation definition"""
+ entity = None # make pylint happy
+
+ def precommit_event(self):
+ cnx = self.cnx
+ rdefentity = self.entity.reverse_constrained_by[0]
+ # when the relation is added in the same transaction, the constraint
+ # object is created by the operation adding the attribute or relation,
+ # so there is nothing to do here
+ if cnx.added_in_transaction(rdefentity.eid):
+ return
+ rdef = self.rdef = cnx.vreg.schema.schema_by_eid(rdefentity.eid)
+ cstrtype = self.entity.type
+ if cstrtype in UNIQUE_CONSTRAINTS:
+ oldcstr = self.oldcstr = rdef.constraint_by_type(cstrtype)
+ else:
+ oldcstr = None
+ newcstr = self.newcstr = CONSTRAINTS[cstrtype].deserialize(self.entity.value)
+ # in-place modification of in-memory schema first
+ _set_modifiable_constraints(rdef)
+ newcstr.eid = self.entity.eid
+ if oldcstr is not None:
+ rdef.constraints.remove(oldcstr)
+ rdef.constraints.append(newcstr)
+ # then update database: alter the physical schema on size/unique
+ # constraint changes
+ syssource = cnx.repo.system_source
+ if cstrtype == 'SizeConstraint' and (oldcstr is None or
+ oldcstr.max != newcstr.max):
+ syssource.update_rdef_column(cnx, rdef)
+ self.size_cstr_changed = True
+ elif cstrtype == 'UniqueConstraint' and oldcstr is None:
+ syssource.update_rdef_unique(cnx, rdef)
+ self.unique_changed = True
+ if cstrtype in ('BoundaryConstraint', 'IntervalBoundConstraint', 'StaticVocabularyConstraint'):
+ if oldcstr is not None:
+ oldcstrname = 'cstr' + md5((rdef.subject.type + rdef.rtype.type + cstrtype +
+ (self.oldcstr.serialize() or '')).encode('ascii')).hexdigest()
+ cnx.system_sql('ALTER TABLE %s%s DROP CONSTRAINT %s' %
+ (SQL_PREFIX, rdef.subject.type, oldcstrname))
+ cstrname, check = y2sql.check_constraint(rdef.subject, rdef.object, rdef.rtype.type,
+ newcstr, syssource.dbhelper, prefix=SQL_PREFIX)
+ cnx.system_sql('ALTER TABLE %s%s ADD CONSTRAINT %s CHECK(%s)' %
+ (SQL_PREFIX, rdef.subject.type, cstrname, check))
+
+
+class CWUniqueTogetherConstraintAddOp(MemSchemaOperation):
+ entity = None # make pylint happy
+
+ def precommit_event(self):
+ cnx = self.cnx
+ prefix = SQL_PREFIX
+ entity = self.entity
+ table = '%s%s' % (prefix, entity.constraint_of[0].name)
+ cols = ['%s%s' % (prefix, r.name) for r in entity.relations]
+ dbhelper = cnx.repo.system_source.dbhelper
+ sqls = dbhelper.sqls_create_multicol_unique_index(table, cols, entity.name)
+ for sql in sqls:
+ cnx.system_sql(sql)
+
+ def postcommit_event(self):
+ entity = self.entity
+ eschema = self.cnx.vreg.schema.schema_by_eid(entity.constraint_of[0].eid)
+ attrs = [r.name for r in entity.relations]
+ eschema._unique_together.append(attrs)
+
+
+class CWUniqueTogetherConstraintDelOp(MemSchemaOperation):
+ entity = cstrname = None # for pylint
+ cols = () # for pylint
+
+ def insert_index(self):
+ # We need to run before CWConstraintDelOp: if a size constraint is
+ # removed and the column is part of a unique_together constraint, we
+ # remove the unique_together index before changing the column's type.
+ # SQL Server does not support unique indices on unlimited text columns.
+ return 0
+
+ def precommit_event(self):
+ cnx = self.cnx
+ prefix = SQL_PREFIX
+ table = '%s%s' % (prefix, self.entity.type)
+ dbhelper = cnx.repo.system_source.dbhelper
+ cols = ['%s%s' % (prefix, c) for c in self.cols]
+ sqls = dbhelper.sqls_drop_multicol_unique_index(table, cols, self.cstrname)
+ for sql in sqls:
+ cnx.system_sql(sql)
+
+ def postcommit_event(self):
+ eschema = self.cnx.vreg.schema.schema_by_eid(self.entity.eid)
+ cols = set(self.cols)
+ unique_together = [ut for ut in eschema._unique_together
+ if set(ut) != cols]
+ eschema._unique_together = unique_together
+
+
+# operations for in-memory schema synchronization #############################
+
+class MemSchemaCWETypeDel(MemSchemaOperation):
+ """actually remove the entity type from the instance's schema"""
+ etype = None # make pylint happy
+
+ def postcommit_event(self):
+ # del_entity_type also removes entity's relations
+ self.cnx.vreg.schema.del_entity_type(self.etype)
+
+
+class MemSchemaCWRTypeAdd(MemSchemaOperation):
+ """actually add the relation type to the instance's schema"""
+ rtypedef = None # make pylint happy
+
+ def precommit_event(self):
+ self.cnx.vreg.schema.add_relation_type(self.rtypedef)
+
+ def revertprecommit_event(self):
+ self.cnx.vreg.schema.del_relation_type(self.rtypedef.name)
+
+
+class MemSchemaCWRTypeDel(MemSchemaOperation):
+ """actually remove the relation type from the instance's schema"""
+ rtype = None # make pylint happy
+
+ def postcommit_event(self):
+ try:
+ self.cnx.vreg.schema.del_relation_type(self.rtype)
+ except KeyError:
+ # s/o entity type have already been deleted
+ pass
+
+
+class MemSchemaPermissionAdd(MemSchemaOperation):
+ """synchronize schema when a *_permission relation has been added on a group
+ """
+ eid = action = group_eid = expr = None # make pylint happy
+
+ def precommit_event(self):
+ """the observed connections.cnxset has been commited"""
+ try:
+ erschema = self.cnx.vreg.schema.schema_by_eid(self.eid)
+ except KeyError:
+ # duh, schema not found, log error and skip operation
+ self.warning('no schema for %s', self.eid)
+ return
+ perms = list(erschema.action_permissions(self.action))
+ if self.group_eid is not None:
+ perm = self.cnx.entity_from_eid(self.group_eid).name
+ else:
+ perm = erschema.rql_expression(self.expr)
+ try:
+ perms.index(perm)
+ self.warning('%s already in permissions for %s on %s',
+ perm, self.action, erschema)
+ except ValueError:
+ perms.append(perm)
+ erschema.set_action_permissions(self.action, perms)
+
+ # XXX revertprecommit_event
+
+
+class MemSchemaPermissionDel(MemSchemaPermissionAdd):
+ """synchronize schema when a *_permission relation has been deleted from a
+ group
+ """
+
+ def precommit_event(self):
+ """the observed connections set has been commited"""
+ try:
+ erschema = self.cnx.vreg.schema.schema_by_eid(self.eid)
+ except KeyError:
+ # duh, schema not found, log error and skip operation
+ self.warning('no schema for %s', self.eid)
+ return
+ perms = list(erschema.action_permissions(self.action))
+ if self.group_eid is not None:
+ perm = self.cnx.entity_from_eid(self.group_eid).name
+ else:
+ perm = erschema.rql_expression(self.expr)
+ try:
+ perms.remove(perm)
+ erschema.set_action_permissions(self.action, perms)
+ except ValueError:
+ self.error('can\'t remove permission %s for %s on %s',
+ perm, self.action, erschema)
+
+ # XXX revertprecommit_event
+
+
+class MemSchemaSpecializesAdd(MemSchemaOperation):
+ etypeeid = parentetypeeid = None # make pylint happy
+
+ def precommit_event(self):
+ eschema = self.cnx.vreg.schema.schema_by_eid(self.etypeeid)
+ parenteschema = self.cnx.vreg.schema.schema_by_eid(self.parentetypeeid)
+ eschema._specialized_type = parenteschema.type
+ parenteschema._specialized_by.append(eschema.type)
+
+ # XXX revertprecommit_event
+
+
+class MemSchemaSpecializesDel(MemSchemaOperation):
+ etypeeid = parentetypeeid = None # make pylint happy
+
+ def precommit_event(self):
+ try:
+ eschema = self.cnx.vreg.schema.schema_by_eid(self.etypeeid)
+ parenteschema = self.cnx.vreg.schema.schema_by_eid(self.parentetypeeid)
+ except KeyError:
+ # etype removed, nothing to do
+ return
+ eschema._specialized_type = None
+ parenteschema._specialized_by.remove(eschema.type)
+
+ # XXX revertprecommit_event
+
+
+# CWEType hooks ################################################################
+
+class DelCWETypeHook(SyncSchemaHook):
+ """before deleting a CWEType entity:
+ * check that we don't remove a core entity type
+ * cascade to delete related CWAttribute and CWRelation entities
+ * instantiate an operation to delete the entity type on commit
+ """
+ __regid__ = 'syncdelcwetype'
+ __select__ = SyncSchemaHook.__select__ & is_instance('CWEType')
+ events = ('before_delete_entity',)
+
+ def __call__(self):
+ # final entities can't be deleted, don't care about that
+ name = self.entity.name
+ if name in CORE_TYPES:
+ raise validation_error(self.entity, {None: _("can't be deleted")})
+ # delete every entities of this type
+ if name not in ETYPE_NAME_MAP:
+ MemSchemaCWETypeDel(self._cw, etype=name)
+ DropTable(self._cw, table=SQL_PREFIX + name)
+
+
+class AfterDelCWETypeHook(DelCWETypeHook):
+ __regid__ = 'wfcleanup'
+ events = ('after_delete_entity',)
+
+ def __call__(self):
+ # workflow cleanup
+ self._cw.execute('DELETE Workflow X WHERE NOT X workflow_of Y')
+
+
+class AfterAddCWETypeHook(DelCWETypeHook):
+ """after adding a CWEType entity:
+ * create the necessary table
+ * set creation_date and modification_date by creating the necessary
+ CWAttribute entities
+ * add owned_by relation by creating the necessary CWRelation entity
+ * register an operation to add the entity type to the instance's
+ schema on commit
+ """
+ __regid__ = 'syncaddcwetype'
+ events = ('after_add_entity',)
+
+ def __call__(self):
+ entity = self.entity
+ if entity.cw_edited.get('final'):
+ # final entity types don't need a table in the database and are
+ # systematically added by yams at schema initialization time so
+ # there is no need to do further processing. Simply assign its eid.
+ self._cw.vreg.schema[entity.name].eid = entity.eid
+ return
+ CWETypeAddOp(self._cw, entity=entity)
+
+
+class BeforeUpdateCWETypeHook(DelCWETypeHook):
+ """check name change, handle final"""
+ __regid__ = 'syncupdatecwetype'
+ events = ('before_update_entity',)
+
+ def __call__(self):
+ entity = self.entity
+ check_valid_changes(self._cw, entity, ro_attrs=('final',))
+ # don't use getattr(entity, attr), we would get the modified value if any
+ if 'name' in entity.cw_edited:
+ oldname, newname = entity.cw_edited.oldnewvalue('name')
+ if newname.lower() != oldname.lower():
+ CWETypeRenameOp(self._cw, oldname=oldname, newname=newname)
+
+
+# CWRType hooks ################################################################
+
+class DelCWRTypeHook(SyncSchemaHook):
+ """before deleting a CWRType entity:
+ * check that we don't remove a core relation type
+ * cascade to delete related CWAttribute and CWRelation entities
+ * instantiate an operation to delete the relation type on commit
+ """
+ __regid__ = 'syncdelcwrtype'
+ __select__ = SyncSchemaHook.__select__ & is_instance('CWRType')
+ events = ('before_delete_entity',)
+
+ def __call__(self):
+ name = self.entity.name
+ if name in CORE_TYPES:
+ raise validation_error(self.entity, {None: _("can't be deleted")})
+ # delete relation definitions using this relation type
+ self._cw.execute('DELETE CWAttribute X WHERE X relation_type Y, Y eid %(x)s',
+ {'x': self.entity.eid})
+ self._cw.execute('DELETE CWRelation X WHERE X relation_type Y, Y eid %(x)s',
+ {'x': self.entity.eid})
+ MemSchemaCWRTypeDel(self._cw, rtype=name)
+
+
+class AfterAddCWComputedRTypeHook(SyncSchemaHook):
+ """after a CWComputedRType entity has been added:
+ * register an operation to add the relation type to the instance's
+ schema on commit
+
+ We don't know yet this point if a table is necessary
+ """
+ __regid__ = 'syncaddcwcomputedrtype'
+ __select__ = SyncSchemaHook.__select__ & is_instance('CWComputedRType')
+ events = ('after_add_entity',)
+
+ def __call__(self):
+ entity = self.entity
+ rtypedef = ybo.ComputedRelation(name=entity.name,
+ eid=entity.eid,
+ rule=entity.rule)
+ MemSchemaCWRTypeAdd(self._cw, rtypedef=rtypedef)
+
+
+class AfterAddCWRTypeHook(SyncSchemaHook):
+ """after a CWRType entity has been added:
+ * register an operation to add the relation type to the instance's
+ schema on commit
+
+ We don't know yet this point if a table is necessary
+ """
+ __regid__ = 'syncaddcwrtype'
+ __select__ = SyncSchemaHook.__select__ & is_instance('CWRType')
+ events = ('after_add_entity',)
+
+ def __call__(self):
+ entity = self.entity
+ rtypedef = ybo.RelationType(name=entity.name,
+ description=entity.description,
+ inlined=entity.cw_edited.get('inlined', False),
+ symmetric=entity.cw_edited.get('symmetric', False),
+ eid=entity.eid)
+ MemSchemaCWRTypeAdd(self._cw, rtypedef=rtypedef)
+
+
+class BeforeUpdateCWRTypeHook(SyncSchemaHook):
+ """check name change, handle final"""
+ __regid__ = 'syncupdatecwrtype'
+ __select__ = SyncSchemaHook.__select__ & is_instance('CWRType')
+ events = ('before_update_entity',)
+
+ def __call__(self):
+ entity = self.entity
+ check_valid_changes(self._cw, entity)
+ newvalues = {}
+ for prop in ('symmetric', 'inlined', 'fulltext_container'):
+ if prop in entity.cw_edited:
+ old, new = entity.cw_edited.oldnewvalue(prop)
+ if old != new:
+ newvalues[prop] = new
+ if newvalues:
+ rschema = self._cw.vreg.schema.rschema(entity.name)
+ CWRTypeUpdateOp(self._cw, rschema=rschema, entity=entity,
+ values=newvalues)
+
+
+class BeforeUpdateCWComputedRTypeHook(SyncSchemaHook):
+ """check name change, handle final"""
+ __regid__ = 'syncupdatecwcomputedrtype'
+ __select__ = SyncSchemaHook.__select__ & is_instance('CWComputedRType')
+ events = ('before_update_entity',)
+
+ def __call__(self):
+ entity = self.entity
+ check_valid_changes(self._cw, entity)
+ if 'rule' in entity.cw_edited:
+ old, new = entity.cw_edited.oldnewvalue('rule')
+ if old != new:
+ rschema = self._cw.vreg.schema.rschema(entity.name)
+ CWComputedRTypeUpdateOp(self._cw, rschema=rschema,
+ entity=entity, rule=new)
+
+
+class AfterDelRelationTypeHook(SyncSchemaHook):
+ """before deleting a CWAttribute or CWRelation entity:
+ * if this is a final or inlined relation definition, instantiate an
+ operation to drop necessary column, else if this is the last instance
+ of a non final relation, instantiate an operation to drop necessary
+ table
+ * instantiate an operation to delete the relation definition on commit
+ * delete the associated relation type when necessary
+ """
+ __regid__ = 'syncdelrelationtype'
+ __select__ = SyncSchemaHook.__select__ & hook.match_rtype('relation_type')
+ events = ('after_delete_relation',)
+
+ def __call__(self):
+ cnx = self._cw
+ try:
+ rdef = cnx.vreg.schema.schema_by_eid(self.eidfrom)
+ except KeyError:
+ self.critical('cant get schema rdef associated to %s', self.eidfrom)
+ return
+ subjschema, rschema, objschema = rdef.as_triple()
+ pendingrdefs = cnx.transaction_data.setdefault('pendingrdefs', set())
+ # first delete existing relation if necessary
+ if rschema.final:
+ rdeftype = 'CWAttribute'
+ pendingrdefs.add((subjschema, rschema))
+ else:
+ rdeftype = 'CWRelation'
+ pendingrdefs.add((subjschema, rschema, objschema))
+ RDefDelOp(cnx, rdef=rdef)
+
+
+# CWComputedRType hooks #######################################################
+
+class DelCWComputedRTypeHook(SyncSchemaHook):
+ """before deleting a CWComputedRType entity:
+ * check that we don't remove a core relation type
+ * instantiate an operation to delete the relation type on commit
+ """
+ __regid__ = 'syncdelcwcomputedrtype'
+ __select__ = SyncSchemaHook.__select__ & is_instance('CWComputedRType')
+ events = ('before_delete_entity',)
+
+ def __call__(self):
+ name = self.entity.name
+ if name in CORE_TYPES:
+ raise validation_error(self.entity, {None: _("can't be deleted")})
+ MemSchemaCWRTypeDel(self._cw, rtype=name)
+
+
+# CWAttribute / CWRelation hooks ###############################################
+
+class AfterAddCWAttributeHook(SyncSchemaHook):
+ __regid__ = 'syncaddcwattribute'
+ __select__ = SyncSchemaHook.__select__ & is_instance('CWAttribute')
+ events = ('after_add_entity',)
+
+ def __call__(self):
+ CWAttributeAddOp(self._cw, entity=self.entity)
+
+
+class AfterAddCWRelationHook(AfterAddCWAttributeHook):
+ __regid__ = 'syncaddcwrelation'
+ __select__ = SyncSchemaHook.__select__ & is_instance('CWRelation')
+
+ def __call__(self):
+ CWRelationAddOp(self._cw, entity=self.entity)
+
+
+class AfterUpdateCWRDefHook(SyncSchemaHook):
+ __regid__ = 'syncaddcwattribute'
+ __select__ = SyncSchemaHook.__select__ & is_instance('CWAttribute',
+ 'CWRelation')
+ events = ('before_update_entity',)
+
+ def __call__(self):
+ entity = self.entity
+ if self._cw.deleted_in_transaction(entity.eid):
+ return
+ subjtype = entity.stype.name
+ objtype = entity.otype.name
+ if subjtype in ETYPE_NAME_MAP or objtype in ETYPE_NAME_MAP:
+ return
+ rschema = self._cw.vreg.schema[entity.rtype.name]
+ # note: do not access schema rdef here, it may be added later by an
+ # operation
+ newvalues = {}
+ for prop in RelationDefinitionSchema.rproperty_defs(objtype):
+ if prop == 'constraints':
+ continue
+ if prop == 'order':
+ attr = 'ordernum'
+ else:
+ attr = prop
+ if attr in entity.cw_edited:
+ old, new = entity.cw_edited.oldnewvalue(attr)
+ if old != new:
+ newvalues[prop] = new
+ if newvalues:
+ RDefUpdateOp(self._cw, rschema=rschema, rdefkey=(subjtype, objtype),
+ values=newvalues)
+
+
+# constraints synchronization hooks ############################################
+
+class AfterAddCWConstraintHook(SyncSchemaHook):
+ __regid__ = 'syncaddcwconstraint'
+ __select__ = SyncSchemaHook.__select__ & is_instance('CWConstraint')
+ events = ('after_add_entity', 'after_update_entity')
+
+ def __call__(self):
+ if self.entity.cstrtype[0].name == 'SizeConstraint':
+ txdata = self._cw.transaction_data
+ if 'newsizecstr' not in txdata:
+ txdata['newsizecstr'] = set()
+ txdata['newsizecstr'].add(self.entity)
+ CWConstraintAddOp(self._cw, entity=self.entity)
+
+
+class AfterAddConstrainedByHook(SyncSchemaHook):
+ __regid__ = 'syncaddconstrainedby'
+ __select__ = SyncSchemaHook.__select__ & hook.match_rtype('constrained_by')
+ events = ('after_add_relation',)
+
+ def __call__(self):
+ if self._cw.added_in_transaction(self.eidfrom):
+ # used by get_constraints() which is called in CWAttributeAddOp
+ self._cw.transaction_data.setdefault(self.eidfrom, []).append(self.eidto)
+
+
+class BeforeDeleteCWConstraintHook(SyncSchemaHook):
+ __regid__ = 'syncdelcwconstraint'
+ __select__ = SyncSchemaHook.__select__ & is_instance('CWConstraint')
+ events = ('before_delete_entity',)
+
+ def __call__(self):
+ entity = self.entity
+ schema = self._cw.vreg.schema
+ try:
+ # KeyError, e.g. composite chain deletion
+ rdef = schema.schema_by_eid(entity.reverse_constrained_by[0].eid)
+ # IndexError
+ cstr = rdef.constraint_by_eid(entity.eid)
+ except (KeyError, IndexError):
+ self._cw.critical('constraint type no more accessible')
+ else:
+ CWConstraintDelOp(self._cw, rdef=rdef, oldcstr=cstr)
+
+# unique_together constraints
+# XXX: use setoperations and before_add_relation here (on constraint_of and relations)
+class AfterAddCWUniqueTogetherConstraintHook(SyncSchemaHook):
+ __regid__ = 'syncadd_cwuniquetogether_constraint'
+ __select__ = SyncSchemaHook.__select__ & is_instance('CWUniqueTogetherConstraint')
+ events = ('after_add_entity',)
+
+ def __call__(self):
+ CWUniqueTogetherConstraintAddOp(self._cw, entity=self.entity)
+
+
+class BeforeDeleteConstraintOfHook(SyncSchemaHook):
+ __regid__ = 'syncdelconstraintof'
+ __select__ = SyncSchemaHook.__select__ & hook.match_rtype('constraint_of')
+ events = ('before_delete_relation',)
+
+ def __call__(self):
+ if self._cw.deleted_in_transaction(self.eidto):
+ return
+ schema = self._cw.vreg.schema
+ cstr = self._cw.entity_from_eid(self.eidfrom)
+ entity = schema.schema_by_eid(self.eidto)
+ cols = tuple(r.name for r in cstr.relations)
+ CWUniqueTogetherConstraintDelOp(self._cw, entity=entity,
+ cstrname=cstr.name, cols=cols)
+
+
+# permissions synchronization hooks ############################################
+
+class AfterAddPermissionHook(SyncSchemaHook):
+ """added entity/relation *_permission, need to update schema"""
+ __regid__ = 'syncaddperm'
+ __select__ = SyncSchemaHook.__select__ & hook.match_rtype(
+ 'read_permission', 'add_permission', 'delete_permission',
+ 'update_permission')
+ events = ('after_add_relation',)
+
+ def __call__(self):
+ action = self.rtype.split('_', 1)[0]
+ if self._cw.entity_metas(self.eidto)['type'] == 'CWGroup':
+ MemSchemaPermissionAdd(self._cw, action=action, eid=self.eidfrom,
+ group_eid=self.eidto)
+ else: # RQLExpression
+ expr = self._cw.entity_from_eid(self.eidto).expression
+ MemSchemaPermissionAdd(self._cw, action=action, eid=self.eidfrom,
+ expr=expr)
+
+
+class BeforeDelPermissionHook(AfterAddPermissionHook):
+ """delete entity/relation *_permission, need to update schema
+
+ skip the operation if the related type is being deleted
+ """
+ __regid__ = 'syncdelperm'
+ events = ('before_delete_relation',)
+
+ def __call__(self):
+ if self._cw.deleted_in_transaction(self.eidfrom):
+ return
+ action = self.rtype.split('_', 1)[0]
+ if self._cw.entity_metas(self.eidto)['type'] == 'CWGroup':
+ MemSchemaPermissionDel(self._cw, action=action, eid=self.eidfrom,
+ group_eid=self.eidto)
+ else: # RQLExpression
+ expr = self._cw.entity_from_eid(self.eidto).expression
+ MemSchemaPermissionDel(self._cw, action=action, eid=self.eidfrom,
+ expr=expr)
+
+
+
+class UpdateFTIndexOp(hook.DataOperationMixIn, hook.SingleLastOperation):
+ """operation to update full text indexation of entity whose schema change
+
+ We wait after the commit to as the schema in memory is only updated after
+ the commit.
+ """
+ containercls = list
+
+ def postcommit_event(self):
+ cnx = self.cnx
+ source = cnx.repo.system_source
+ schema = cnx.repo.vreg.schema
+ to_reindex = self.get_data()
+ self.info('%i etypes need full text indexed reindexation',
+ len(to_reindex))
+ for etype in to_reindex:
+ rset = cnx.execute('Any X WHERE X is %s' % etype)
+ self.info('Reindexing full text index for %i entity of type %s',
+ len(rset), etype)
+ still_fti = list(schema[etype].indexable_attributes())
+ for entity in rset.entities():
+ source.fti_unindex_entities(cnx, [entity])
+ for container in entity.cw_adapt_to('IFTIndexable').fti_containers():
+ if still_fti or container is not entity:
+ source.fti_unindex_entities(cnx, [container])
+ source.fti_index_entities(cnx, [container])
+ if to_reindex:
+ # Transaction has already been committed
+ cnx.cnxset.commit()
+
+
+
+
+# specializes synchronization hooks ############################################
+
+
+class AfterAddSpecializesHook(SyncSchemaHook):
+ __regid__ = 'syncaddspecializes'
+ __select__ = SyncSchemaHook.__select__ & hook.match_rtype('specializes')
+ events = ('after_add_relation',)
+
+ def __call__(self):
+ MemSchemaSpecializesAdd(self._cw, etypeeid=self.eidfrom,
+ parentetypeeid=self.eidto)
+
+
+class AfterDelSpecializesHook(SyncSchemaHook):
+ __regid__ = 'syncdelspecializes'
+ __select__ = SyncSchemaHook.__select__ & hook.match_rtype('specializes')
+ events = ('after_delete_relation',)
+
+ def __call__(self):
+ MemSchemaSpecializesDel(self._cw, etypeeid=self.eidfrom,
+ parentetypeeid=self.eidto)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/hooks/syncsession.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/syncsession.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,255 @@
+# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""Core hooks: synchronize living session on persistent data changes"""
+
+__docformat__ = "restructuredtext en"
+from cubicweb import _
+
+from cubicweb import UnknownProperty, BadConnectionId, validation_error
+from cubicweb.predicates import is_instance
+from cubicweb.server import hook
+
+
+def get_user_sessions(repo, ueid):
+ for session in repo._sessions.values():
+ if ueid == session.user.eid:
+ yield session
+
+
+class SyncSessionHook(hook.Hook):
+ __abstract__ = True
+ category = 'syncsession'
+
+
+# user/groups synchronisation #################################################
+
+class _GroupOperation(hook.Operation):
+ """base class for group operation"""
+ cnxuser = None # make pylint happy
+
+ def __init__(self, cnx, *args, **kwargs):
+ """override to get the group name before actual groups manipulation:
+
+ we may temporarily lose right access during a commit event, so
+ no query should be emitted while committing
+ """
+ rql = 'Any N WHERE G eid %(x)s, G name N'
+ result = cnx.execute(rql, {'x': kwargs['geid']}, build_descr=False)
+ hook.Operation.__init__(self, cnx, *args, **kwargs)
+ self.group = result[0][0]
+
+
+class _DeleteGroupOp(_GroupOperation):
+ """synchronize user when an in_group relation has been deleted"""
+
+ def postcommit_event(self):
+ """the observed connections set has been committed"""
+ groups = self.cnxuser.groups
+ try:
+ groups.remove(self.group)
+ except KeyError:
+ self.error('user %s not in group %s', self.cnxuser, self.group)
+
+
+class _AddGroupOp(_GroupOperation):
+ """synchronize user when an in_group relation has been added"""
+ def postcommit_event(self):
+ """the observed connections set has been committed"""
+ groups = self.cnxuser.groups
+ if self.group in groups:
+ self.warning('user %s already in group %s', self.cnxuser,
+ self.group)
+ else:
+ groups.add(self.group)
+
+
+class SyncInGroupHook(SyncSessionHook):
+ __regid__ = 'syncingroup'
+ __select__ = SyncSessionHook.__select__ & hook.match_rtype('in_group')
+ events = ('after_delete_relation', 'after_add_relation')
+
+ def __call__(self):
+ if self.event == 'after_delete_relation':
+ opcls = _DeleteGroupOp
+ else:
+ opcls = _AddGroupOp
+ for session in get_user_sessions(self._cw.repo, self.eidfrom):
+ opcls(self._cw, cnxuser=session.user, geid=self.eidto)
+
+
+class _DelUserOp(hook.Operation):
+ """close associated user's session when it is deleted"""
+ def __init__(self, cnx, sessionid):
+ self.sessionid = sessionid
+ hook.Operation.__init__(self, cnx)
+
+ def postcommit_event(self):
+ """the observed connections set has been committed"""
+ try:
+ self.cnx.repo.close(self.sessionid)
+ except BadConnectionId:
+ pass # already closed
+
+
+class CloseDeletedUserSessionsHook(SyncSessionHook):
+ __regid__ = 'closession'
+ __select__ = SyncSessionHook.__select__ & is_instance('CWUser')
+ events = ('after_delete_entity',)
+
+ def __call__(self):
+ """modify user permission, need to update users"""
+ for session in get_user_sessions(self._cw.repo, self.entity.eid):
+ _DelUserOp(self._cw, session.sessionid)
+
+
+# CWProperty hooks #############################################################
+
+class _DelCWPropertyOp(hook.Operation):
+ """a user's custom properties have been deleted"""
+ cwpropdict = key = None # make pylint happy
+
+ def postcommit_event(self):
+ """the observed connections set has been committed"""
+ try:
+ del self.cwpropdict[self.key]
+ except KeyError:
+ self.error('%s has no associated value', self.key)
+
+
+class _ChangeCWPropertyOp(hook.Operation):
+ """a user's custom properties have been added/changed"""
+ cwpropdict = key = value = None # make pylint happy
+
+ def postcommit_event(self):
+ """the observed connections set has been committed"""
+ self.cwpropdict[self.key] = self.value
+
+
+class _AddCWPropertyOp(hook.Operation):
+ """a user's custom properties have been added/changed"""
+ cwprop = None # make pylint happy
+
+ def postcommit_event(self):
+ """the observed connections set has been committed"""
+ cwprop = self.cwprop
+ if not cwprop.for_user:
+ self.cnx.vreg['propertyvalues'][cwprop.pkey] = cwprop.value
+ # if for_user is set, update is handled by a ChangeCWPropertyOp operation
+
+
+class AddCWPropertyHook(SyncSessionHook):
+ __regid__ = 'addcwprop'
+ __select__ = SyncSessionHook.__select__ & is_instance('CWProperty')
+ events = ('after_add_entity',)
+
+ def __call__(self):
+ key, value = self.entity.pkey, self.entity.value
+ if key.startswith('sources.'):
+ return
+ cnx = self._cw
+ try:
+ value = cnx.vreg.typed_value(key, value)
+ except UnknownProperty:
+ msg = _('unknown property key %s')
+ raise validation_error(self.entity, {('pkey', 'subject'): msg}, (key,))
+ except ValueError as ex:
+ raise validation_error(self.entity,
+ {('value', 'subject'): str(ex)})
+ if not cnx.user.matching_groups('managers'):
+ cnx.add_relation(self.entity.eid, 'for_user', cnx.user.eid)
+ else:
+ _AddCWPropertyOp(cnx, cwprop=self.entity)
+
+
+class UpdateCWPropertyHook(AddCWPropertyHook):
+ __regid__ = 'updatecwprop'
+ events = ('after_update_entity',)
+
+ def __call__(self):
+ entity = self.entity
+ if not ('pkey' in entity.cw_edited or
+ 'value' in entity.cw_edited):
+ return
+ key, value = entity.pkey, entity.value
+ if key.startswith('sources.'):
+ return
+ cnx = self._cw
+ try:
+ value = cnx.vreg.typed_value(key, value)
+ except UnknownProperty:
+ return
+ except ValueError as ex:
+ raise validation_error(entity, {('value', 'subject'): str(ex)})
+ if entity.for_user:
+ for session in get_user_sessions(cnx.repo, entity.for_user[0].eid):
+ _ChangeCWPropertyOp(cnx, cwpropdict=session.user.properties,
+ key=key, value=value)
+ else:
+ # site wide properties
+ _ChangeCWPropertyOp(cnx, cwpropdict=cnx.vreg['propertyvalues'],
+ key=key, value=value)
+
+
+class DeleteCWPropertyHook(AddCWPropertyHook):
+ __regid__ = 'delcwprop'
+ events = ('before_delete_entity',)
+
+ def __call__(self):
+ eid = self.entity.eid
+ cnx = self._cw
+ for eidfrom, rtype, eidto in cnx.transaction_data.get('pendingrelations', ()):
+ if rtype == 'for_user' and eidfrom == self.entity.eid:
+ # if for_user was set, delete has already been handled
+ break
+ else:
+ _DelCWPropertyOp(cnx, cwpropdict=cnx.vreg['propertyvalues'],
+ key=self.entity.pkey)
+
+
+class AddForUserRelationHook(SyncSessionHook):
+ __regid__ = 'addcwpropforuser'
+ __select__ = SyncSessionHook.__select__ & hook.match_rtype('for_user')
+ events = ('after_add_relation',)
+
+ def __call__(self):
+ cnx = self._cw
+ eidfrom = self.eidfrom
+ if not cnx.entity_metas(eidfrom)['type'] == 'CWProperty':
+ return
+ key, value = cnx.execute('Any K,V WHERE P eid %(x)s,P pkey K,P value V',
+ {'x': eidfrom})[0]
+ if cnx.vreg.property_info(key)['sitewide']:
+ msg = _("site-wide property can't be set for user")
+ raise validation_error(eidfrom, {('for_user', 'subject'): msg})
+ for session in get_user_sessions(cnx.repo, self.eidto):
+ _ChangeCWPropertyOp(cnx, cwpropdict=session.user.properties,
+ key=key, value=value)
+
+
+class DelForUserRelationHook(AddForUserRelationHook):
+ __regid__ = 'delcwpropforuser'
+ events = ('after_delete_relation',)
+
+ def __call__(self):
+ cnx = self._cw
+ key = cnx.execute('Any K WHERE P eid %(x)s, P pkey K',
+ {'x': self.eidfrom})[0][0]
+ cnx.transaction_data.setdefault('pendingrelations', []).append(
+ (self.eidfrom, self.rtype, self.eidto))
+ for session in get_user_sessions(cnx.repo, self.eidto):
+ _DelCWPropertyOp(cnx, cwpropdict=session.user.properties, key=key)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/hooks/syncsources.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/syncsources.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,208 @@
+# copyright 2010-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""hooks for repository sources synchronization"""
+
+from cubicweb import _
+
+from socket import gethostname
+
+from logilab.common.decorators import clear_cache
+
+from cubicweb import validation_error
+from cubicweb.predicates import is_instance
+from cubicweb.server import SOURCE_TYPES, hook
+
+class SourceHook(hook.Hook):
+ __abstract__ = True
+ category = 'cw.sources'
+
+
+# repo sources synchronization #################################################
+
+class SourceAddedOp(hook.Operation):
+ entity = None # make pylint happy
+ def postcommit_event(self):
+ self.cnx.repo.add_source(self.entity)
+
+class SourceAddedHook(SourceHook):
+ __regid__ = 'cw.sources.added'
+ __select__ = SourceHook.__select__ & is_instance('CWSource')
+ events = ('after_add_entity',)
+ def __call__(self):
+ try:
+ sourcecls = SOURCE_TYPES[self.entity.type]
+ except KeyError:
+ msg = _('Unknown source type')
+ raise validation_error(self.entity, {('type', 'subject'): msg})
+ # ignore creation of the system source done during database
+ # initialisation, as config for this source is in a file and handling
+ # is done separately (no need for the operation either)
+ if self.entity.name != 'system':
+ sourcecls.check_conf_dict(self.entity.eid, self.entity.host_config,
+ fail_if_unknown=not self._cw.vreg.config.repairing)
+ SourceAddedOp(self._cw, entity=self.entity)
+
+
+class SourceRemovedOp(hook.Operation):
+ uri = None # make pylint happy
+ def postcommit_event(self):
+ self.cnx.repo.remove_source(self.uri)
+
+class SourceRemovedHook(SourceHook):
+ __regid__ = 'cw.sources.removed'
+ __select__ = SourceHook.__select__ & is_instance('CWSource')
+ events = ('before_delete_entity',)
+ def __call__(self):
+ if self.entity.name == 'system':
+ msg = _("You cannot remove the system source")
+ raise validation_error(self.entity, {None: msg})
+ SourceRemovedOp(self._cw, uri=self.entity.name)
+
+
+class SourceConfigUpdatedOp(hook.DataOperationMixIn, hook.Operation):
+
+ def precommit_event(self):
+ self.__processed = []
+ for source in self.get_data():
+ if not self.cnx.deleted_in_transaction(source.eid):
+ conf = source.repo_source.check_config(source)
+ self.__processed.append( (source, conf) )
+
+ def postcommit_event(self):
+ for source, conf in self.__processed:
+ source.repo_source.update_config(source, conf)
+
+
+class SourceRenamedOp(hook.LateOperation):
+ oldname = newname = None # make pylint happy
+
+ def precommit_event(self):
+ source = self.cnx.repo.sources_by_uri[self.oldname]
+ sql = 'UPDATE entities SET asource=%(newname)s WHERE asource=%(oldname)s'
+ self.cnx.system_sql(sql, {'oldname': self.oldname,
+ 'newname': self.newname})
+
+ def postcommit_event(self):
+ repo = self.cnx.repo
+ # XXX race condition
+ source = repo.sources_by_uri.pop(self.oldname)
+ source.uri = self.newname
+ source.public_config['uri'] = self.newname
+ repo.sources_by_uri[self.newname] = source
+ repo._type_source_cache.clear()
+ clear_cache(repo, 'source_defs')
+
+
+class SourceUpdatedHook(SourceHook):
+ __regid__ = 'cw.sources.configupdate'
+ __select__ = SourceHook.__select__ & is_instance('CWSource')
+ events = ('before_update_entity',)
+ def __call__(self):
+ if 'name' in self.entity.cw_edited:
+ oldname, newname = self.entity.cw_edited.oldnewvalue('name')
+ if oldname == 'system':
+ msg = _("You cannot rename the system source")
+ raise validation_error(self.entity, {('name', 'subject'): msg})
+ SourceRenamedOp(self._cw, oldname=oldname, newname=newname)
+ if 'config' in self.entity.cw_edited or 'url' in self.entity.cw_edited:
+ if self.entity.name == 'system' and self.entity.config:
+ msg = _("Configuration of the system source goes to "
+ "the 'sources' file, not in the database")
+ raise validation_error(self.entity, {('config', 'subject'): msg})
+ SourceConfigUpdatedOp.get_instance(self._cw).add_data(self.entity)
+
+
+class SourceHostConfigUpdatedHook(SourceHook):
+ __regid__ = 'cw.sources.hostconfigupdate'
+ __select__ = SourceHook.__select__ & is_instance('CWSourceHostConfig')
+ events = ('after_add_entity', 'after_update_entity', 'before_delete_entity',)
+ def __call__(self):
+ if self.entity.match(gethostname()):
+ if self.event == 'after_update_entity' and \
+ not 'config' in self.entity.cw_edited:
+ return
+ try:
+ SourceConfigUpdatedOp.get_instance(self._cw).add_data(self.entity.cwsource)
+ except IndexError:
+ # XXX no source linked to the host config yet
+ pass
+
+
+# source mapping synchronization ###############################################
+#
+# Expect cw_for_source/cw_schema are immutable relations (i.e. can't change from
+# a source or schema to another).
+
+class SourceMappingImmutableHook(SourceHook):
+ """check cw_for_source and cw_schema are immutable relations
+
+ XXX empty delete perms would be enough?
+ """
+ __regid__ = 'cw.sources.mapping.immutable'
+ __select__ = SourceHook.__select__ & hook.match_rtype('cw_for_source', 'cw_schema')
+ events = ('before_add_relation',)
+ def __call__(self):
+ if not self._cw.added_in_transaction(self.eidfrom):
+ msg = _("You can't change this relation")
+ raise validation_error(self.eidfrom, {self.rtype: msg})
+
+
+class SourceMappingChangedOp(hook.DataOperationMixIn, hook.Operation):
+ def check_or_update(self, checkonly):
+ cnx = self.cnx
+ # take care, can't call get_data() twice
+ try:
+ data = self.__data
+ except AttributeError:
+ data = self.__data = self.get_data()
+ for schemacfg, source in data:
+ if source is None:
+ source = schemacfg.cwsource.repo_source
+ if cnx.added_in_transaction(schemacfg.eid):
+ if not cnx.deleted_in_transaction(schemacfg.eid):
+ source.add_schema_config(schemacfg, checkonly=checkonly)
+ elif cnx.deleted_in_transaction(schemacfg.eid):
+ source.del_schema_config(schemacfg, checkonly=checkonly)
+ else:
+ source.update_schema_config(schemacfg, checkonly=checkonly)
+
+ def precommit_event(self):
+ self.check_or_update(True)
+
+ def postcommit_event(self):
+ self.check_or_update(False)
+
+
+class SourceMappingChangedHook(SourceHook):
+ __regid__ = 'cw.sources.schemaconfig'
+ __select__ = SourceHook.__select__ & is_instance('CWSourceSchemaConfig')
+ events = ('after_add_entity', 'after_update_entity')
+ def __call__(self):
+ if self.event == 'after_add_entity' or (
+ self.event == 'after_update_entity' and 'options' in self.entity.cw_edited):
+ SourceMappingChangedOp.get_instance(self._cw).add_data(
+ (self.entity, None) )
+
+class SourceMappingDeleteHook(SourceHook):
+ __regid__ = 'cw.sources.delschemaconfig'
+ __select__ = SourceHook.__select__ & hook.match_rtype('cw_for_source')
+ events = ('before_delete_relation',)
+ def __call__(self):
+ SourceMappingChangedOp.get_instance(self._cw).add_data(
+ (self._cw.entity_from_eid(self.eidfrom),
+ self._cw.entity_from_eid(self.eidto).repo_source) )
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/hooks/test/data-computed/schema.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/test/data-computed/schema.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,46 @@
+# copyright 2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+from yams.buildobjs import EntityType, String, Int, SubjectRelation, RelationDefinition
+
+THISYEAR = 2014
+
+class Person(EntityType):
+ name = String()
+ salaire = Int()
+ birth_year = Int(required=True)
+ travaille = SubjectRelation('Societe')
+ age = Int(formula='Any %d - D WHERE X birth_year D' % THISYEAR)
+
+class Societe(EntityType):
+ nom = String()
+ salaire_total = Int(formula='Any SUM(SA) GROUPBY X WHERE P travaille X, P salaire SA')
+
+
+class Agent(EntityType):
+ asalae_id = String(formula='Any E WHERE M mirror_of X, M extid E')
+
+class MirrorEntity(EntityType):
+ extid = String(required=True, unique=True,
+ description=_('external identifier of the object'))
+
+
+class mirror_of(RelationDefinition):
+ subject = 'MirrorEntity'
+ object = ('Agent', 'Societe')
+ cardinality = '?*'
+ inlined = True
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/hooks/test/data/schema.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/test/data/schema.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,85 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+
+from yams.buildobjs import (RelationDefinition, RelationType, EntityType,
+ String, Datetime, Int)
+from yams.reader import context
+
+from cubicweb.schema import ERQLExpression
+
+from cubicweb import _
+
+class friend(RelationDefinition):
+ subject = ('CWUser', 'CWGroup')
+ object = ('CWUser', 'CWGroup')
+ symmetric = True
+
+class Folder(EntityType):
+ name = String()
+
+class parent(RelationDefinition):
+ subject = 'Folder'
+ object = 'Folder'
+ composite = 'object'
+ cardinality = '?*'
+
+class children(RelationDefinition):
+ subject = 'Folder'
+ object = 'Folder'
+ composite = 'subject'
+
+
+class Email(EntityType):
+ """electronic mail"""
+ subject = String(fulltextindexed=True)
+ date = Datetime(description=_('UTC time on which the mail was sent'))
+ messageid = String(required=True, indexed=True)
+ headers = String(description=_('raw headers'))
+
+
+
+class EmailPart(EntityType):
+ """an email attachment"""
+ __permissions__ = {
+ 'read': ('managers', 'users', 'guests',), # XXX if E parts X, U has_read_permission E
+ 'add': ('managers', ERQLExpression('E parts X, U has_update_permission E'),),
+ 'delete': ('managers', ERQLExpression('E parts X, U has_update_permission E')),
+ 'update': ('managers', 'owners',),
+ }
+
+ content = String(fulltextindexed=True)
+ content_format = String(required=True, maxsize=50)
+ ordernum = Int(required=True)
+
+
+class parts(RelationType):
+ subject = 'Email'
+ object = 'EmailPart'
+ cardinality = '*1'
+ composite = 'subject'
+ fulltext_container = 'subject'
+
+class sender(RelationDefinition):
+ subject = 'Email'
+ object = 'EmailAddress'
+ cardinality = '?*'
+ inlined = True
+
+class recipients(RelationDefinition):
+ subject = 'Email'
+ object = 'EmailAddress'
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/hooks/test/requirements.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/test/requirements.txt Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1 @@
+psycopg2
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/hooks/test/unittest_bookmarks.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/test/unittest_bookmarks.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,38 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+from logilab.common.testlib import unittest_main
+from cubicweb.devtools.testlib import CubicWebTC
+
+class BookmarkHooksTC(CubicWebTC):
+
+
+ def test_auto_delete_bookmarks(self):
+ with self.admin_access.repo_cnx() as cnx:
+ beid = cnx.execute('INSERT Bookmark X: X title "hop", X path "view", X bookmarked_by U '
+ 'WHERE U login "admin"')[0][0]
+ cnx.execute('SET X bookmarked_by U WHERE U login "anon"')
+ cnx.commit()
+ cnx.execute('DELETE X bookmarked_by U WHERE U login "admin"')
+ cnx.commit()
+ self.assertTrue(cnx.execute('Any X WHERE X eid %(x)s', {'x': beid}))
+ cnx.execute('DELETE X bookmarked_by U WHERE U login "anon"')
+ cnx.commit()
+ self.assertFalse(cnx.execute('Any X WHERE X eid %(x)s', {'x': beid}))
+
+if __name__ == '__main__':
+ unittest_main()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/hooks/test/unittest_hooks.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/test/unittest_hooks.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,221 @@
+# -*- coding: utf-8 -*-
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""functional tests for core hooks
+
+Note:
+ syncschema.py hooks are mostly tested in server/test/unittest_migrations.py
+"""
+
+from datetime import datetime
+
+from six import text_type
+
+from pytz import utc
+from cubicweb import ValidationError, AuthenticationError, BadConnectionId
+from cubicweb.devtools.testlib import CubicWebTC
+
+
+class CoreHooksTC(CubicWebTC):
+
+ def test_inlined(self):
+ with self.admin_access.repo_cnx() as cnx:
+ self.assertEqual(self.repo.schema['sender'].inlined, True)
+ cnx.execute('INSERT EmailAddress X: X address "toto@logilab.fr", X alias "hop"')
+ cnx.execute('INSERT EmailPart X: X content_format "text/plain", X ordernum 1, '
+ 'X content "this is a test"')
+ eeid = cnx.execute('INSERT Email X: X messageid "<1234>", X subject "test", '
+ 'X sender Y, X recipients Y, X parts P '
+ 'WHERE Y is EmailAddress, P is EmailPart')[0][0]
+ cnx.execute('SET X sender Y WHERE X is Email, Y is EmailAddress')
+ rset = cnx.execute('Any S WHERE X sender S, X eid %s' % eeid)
+ self.assertEqual(len(rset), 1)
+
+ def test_symmetric(self):
+ with self.admin_access.repo_cnx() as cnx:
+ u1 = self.create_user(cnx, u'1')
+ u2 = self.create_user(cnx, u'2')
+ u3 = self.create_user(cnx, u'3')
+ ga = cnx.create_entity('CWGroup', name=u'A')
+ gb = cnx.create_entity('CWGroup', name=u'B')
+ u1.cw_set(friend=u2)
+ u2.cw_set(friend=u3)
+ ga.cw_set(friend=gb)
+ ga.cw_set(friend=u1)
+ cnx.commit()
+ for l1, l2 in ((u'1', u'2'),
+ (u'2', u'3')):
+ self.assertTrue(cnx.execute('Any U1,U2 WHERE U1 friend U2, U1 login %(l1)s, U2 login %(l2)s',
+ {'l1': l1, 'l2': l2}))
+ self.assertTrue(cnx.execute('Any U1,U2 WHERE U2 friend U1, U1 login %(l1)s, U2 login %(l2)s',
+ {'l1': l1, 'l2': l2}))
+ self.assertTrue(cnx.execute('Any GA,GB WHERE GA friend GB, GA name "A", GB name "B"'))
+ self.assertTrue(cnx.execute('Any GA,GB WHERE GB friend GA, GA name "A", GB name "B"'))
+ self.assertTrue(cnx.execute('Any GA,U1 WHERE GA friend U1, GA name "A", U1 login "1"'))
+ self.assertTrue(cnx.execute('Any GA,U1 WHERE U1 friend GA, GA name "A", U1 login "1"'))
+ self.assertFalse(cnx.execute('Any GA,U WHERE GA friend U, GA name "A", U login "2"'))
+ for l1, l2 in ((u'1', u'3'),
+ (u'3', u'1')):
+ self.assertFalse(cnx.execute('Any U1,U2 WHERE U1 friend U2, U1 login %(l1)s, U2 login %(l2)s',
+ {'l1': l1, 'l2': l2}))
+ self.assertFalse(cnx.execute('Any U1,U2 WHERE U2 friend U1, U1 login %(l1)s, U2 login %(l2)s',
+ {'l1': l1, 'l2': l2}))
+
+ def test_html_tidy_hook(self):
+ with self.admin_access.client_cnx() as cnx:
+ entity = cnx.create_entity('Workflow', name=u'wf1',
+ description_format=u'text/html',
+ description=u'yo')
+ self.assertEqual(u'yo', entity.description)
+ entity = cnx.create_entity('Workflow', name=u'wf2',
+ description_format=u'text/html',
+ description=u'yo')
+ self.assertEqual(u'yo', entity.description)
+ entity = cnx.create_entity('Workflow', name=u'wf3',
+ description_format=u'text/html',
+ description=u'yo')
+ self.assertEqual(u'yo', entity.description)
+ entity = cnx.create_entity('Workflow', name=u'wf4',
+ description_format=u'text/html',
+ description=u'R&D')
+ self.assertEqual(u'R&D', entity.description, )
+ entity = cnx.create_entity('Workflow', name=u'wf5',
+ description_format=u'text/html',
+ description=u"
')
+
+ def test_metadata_cwuri(self):
+ with self.admin_access.repo_cnx() as cnx:
+ entity = cnx.create_entity('Workflow', name=u'wf1')
+ self.assertEqual(entity.cwuri, self.repo.config['base-url'] + str(entity.eid))
+
+ def test_metadata_creation_modification_date(self):
+ with self.admin_access.repo_cnx() as cnx:
+ _now = datetime.now(utc)
+ entity = cnx.create_entity('Workflow', name=u'wf1')
+ self.assertEqual((entity.creation_date - _now).seconds, 0)
+ self.assertEqual((entity.modification_date - _now).seconds, 0)
+
+ def test_metadata_created_by(self):
+ with self.admin_access.repo_cnx() as cnx:
+ entity = cnx.create_entity('Bookmark', title=u'wf1', path=u'/view')
+ cnx.commit() # fire operations
+ self.assertEqual(len(entity.created_by), 1) # make sure we have only one creator
+ self.assertEqual(entity.created_by[0].eid, cnx.user.eid)
+
+ def test_metadata_owned_by(self):
+ with self.admin_access.repo_cnx() as cnx:
+ entity = cnx.create_entity('Bookmark', title=u'wf1', path=u'/view')
+ cnx.commit() # fire operations
+ self.assertEqual(len(entity.owned_by), 1) # make sure we have only one owner
+ self.assertEqual(entity.owned_by[0].eid, cnx.user.eid)
+
+ def test_user_login_stripped(self):
+ with self.admin_access.repo_cnx() as cnx:
+ u = self.create_user(cnx, ' joe ')
+ tname = cnx.execute('Any L WHERE E login L, E eid %(e)s',
+ {'e': u.eid})[0][0]
+ self.assertEqual(tname, 'joe')
+ cnx.execute('SET X login " jijoe " WHERE X eid %(x)s', {'x': u.eid})
+ tname = cnx.execute('Any L WHERE E login L, E eid %(e)s',
+ {'e': u.eid})[0][0]
+ self.assertEqual(tname, 'jijoe')
+
+
+
+class UserGroupHooksTC(CubicWebTC):
+
+ def test_user_group_synchronization(self):
+ with self.admin_access.repo_cnx() as cnx:
+ user = cnx.user
+ self.assertEqual(user.groups, set(('managers',)))
+ cnx.execute('SET X in_group G WHERE X eid %s, G name "guests"' % user.eid)
+ self.assertEqual(user.groups, set(('managers',)))
+ cnx.commit()
+ self.assertEqual(user.groups, set(('managers', 'guests')))
+ cnx.execute('DELETE X in_group G WHERE X eid %s, G name "guests"' % user.eid)
+ self.assertEqual(user.groups, set(('managers', 'guests')))
+ cnx.commit()
+ self.assertEqual(user.groups, set(('managers',)))
+
+ def test_user_composite_owner(self):
+ with self.admin_access.repo_cnx() as cnx:
+ self.create_user(cnx, 'toto').eid
+ # composite of euser should be owned by the euser regardless of who created it
+ cnx.execute('INSERT EmailAddress X: X address "toto@logilab.fr", U use_email X '
+ 'WHERE U login "toto"')
+ cnx.commit()
+ self.assertEqual(cnx.execute('Any A WHERE X owned_by U, U use_email X,'
+ 'U login "toto", X address A')[0][0],
+ 'toto@logilab.fr')
+
+ def test_user_composite_no_owner_on_deleted_entity(self):
+ with self.admin_access.repo_cnx() as cnx:
+ u = self.create_user(cnx, 'toto').eid
+ cnx.commit()
+ e = cnx.create_entity('EmailAddress', address=u'toto@logilab.fr', reverse_use_email=u)
+ e.cw_delete()
+ cnx.commit()
+ self.assertFalse(cnx.system_sql(
+ 'SELECT * FROM owned_by_relation '
+ 'WHERE eid_from NOT IN (SELECT eid FROM entities)').fetchall())
+
+ def test_no_created_by_on_deleted_entity(self):
+ with self.admin_access.repo_cnx() as cnx:
+ eid = cnx.execute('INSERT EmailAddress X: X address "toto@logilab.fr"')[0][0]
+ cnx.execute('DELETE EmailAddress X WHERE X eid %s' % eid)
+ cnx.commit()
+ self.assertFalse(cnx.execute('Any X WHERE X created_by Y, X eid >= %(x)s', {'x': eid}))
+
+
+
+class SchemaHooksTC(CubicWebTC):
+
+ def test_duplicate_etype_error(self):
+ with self.admin_access.repo_cnx() as cnx:
+ # check we can't add a CWEType or CWRType entity if it already exists one
+ # with the same name
+ self.assertRaises(ValidationError,
+ cnx.execute, 'INSERT CWEType X: X name "CWUser"')
+ cnx.rollback()
+ self.assertRaises(ValidationError,
+ cnx.execute, 'INSERT CWRType X: X name "in_group"')
+
+ def test_validation_unique_constraint(self):
+ with self.admin_access.repo_cnx() as cnx:
+ with self.assertRaises(ValidationError) as cm:
+ cnx.execute('INSERT CWUser X: X login "admin"')
+ ex = cm.exception
+ ex.translate(text_type)
+ self.assertIsInstance(ex.entity, int)
+ self.assertEqual(ex.errors, {'login-subject': 'the value "admin" is already used, use another one'})
+
+
+if __name__ == '__main__':
+ from logilab.common.testlib import unittest_main
+ unittest_main()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/hooks/test/unittest_integrity.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/test/unittest_integrity.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,162 @@
+# -*- coding: utf-8 -*-
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""functional tests for integrity hooks"""
+
+from cubicweb import ValidationError
+from cubicweb.devtools.testlib import CubicWebTC
+
+class CoreHooksTC(CubicWebTC):
+
+ def test_delete_internal_entities(self):
+ with self.admin_access.repo_cnx() as cnx:
+ self.assertRaises(ValidationError, cnx.execute,
+ 'DELETE CWEType X WHERE X name "CWEType"')
+ cnx.rollback()
+ self.assertRaises(ValidationError, cnx.execute,
+ 'DELETE CWRType X WHERE X name "relation_type"')
+ cnx.rollback()
+ self.assertRaises(ValidationError, cnx.execute,
+ 'DELETE CWGroup X WHERE X name "owners"')
+
+ def test_delete_required_relations_subject(self):
+ with self.admin_access.repo_cnx() as cnx:
+ cnx.execute('INSERT CWUser X: X login "toto", X upassword "hop", X in_group Y '
+ 'WHERE Y name "users"')
+ cnx.commit()
+ cnx.execute('DELETE X in_group Y WHERE X login "toto", Y name "users"')
+ self.assertRaises(ValidationError, cnx.commit)
+ cnx.rollback()
+ cnx.execute('DELETE X in_group Y WHERE X login "toto"')
+ cnx.execute('SET X in_group Y WHERE X login "toto", Y name "guests"')
+ cnx.commit()
+
+ def test_static_vocabulary_check(self):
+ with self.admin_access.repo_cnx() as cnx:
+ self.assertRaises(ValidationError,
+ cnx.execute,
+ 'SET X composite "whatever" WHERE X from_entity FE, FE name "CWUser", '
+ 'X relation_type RT, RT name "in_group"')
+
+ def test_missing_required_relations_subject_inline(self):
+ with self.admin_access.repo_cnx() as cnx:
+ # missing in_group relation
+ cnx.execute('INSERT CWUser X: X login "toto", X upassword "hop"')
+ self.assertRaises(ValidationError, cnx.commit)
+
+ def test_composite_1(self):
+ with self.admin_access.repo_cnx() as cnx:
+ cnx.execute('INSERT EmailAddress X: X address "toto@logilab.fr", X alias "hop"')
+ cnx.execute('INSERT EmailPart X: X content_format "text/plain", X ordernum 1, '
+ 'X content "this is a test"')
+ cnx.execute('INSERT Email X: X messageid "<1234>", X subject "test", X sender Y, '
+ 'X recipients Y, X parts P '
+ 'WHERE Y is EmailAddress, P is EmailPart')
+ self.assertTrue(cnx.execute('Email X WHERE X sender Y'))
+ cnx.commit()
+ cnx.execute('DELETE Email X')
+ rset = cnx.execute('Any X WHERE X is EmailPart')
+ self.assertEqual(len(rset), 0)
+ cnx.commit()
+ rset = cnx.execute('Any X WHERE X is EmailPart')
+ self.assertEqual(len(rset), 0)
+
+ def test_composite_2(self):
+ with self.admin_access.repo_cnx() as cnx:
+ cnx.execute('INSERT EmailAddress X: X address "toto@logilab.fr", X alias "hop"')
+ cnx.execute('INSERT EmailPart X: X content_format "text/plain", X ordernum 1, '
+ 'X content "this is a test"')
+ cnx.execute('INSERT Email X: X messageid "<1234>", X subject "test", X sender Y, '
+ 'X recipients Y, X parts P '
+ 'WHERE Y is EmailAddress, P is EmailPart')
+ cnx.commit()
+ cnx.execute('DELETE Email X')
+ cnx.execute('DELETE EmailPart X')
+ cnx.commit()
+ rset = cnx.execute('Any X WHERE X is EmailPart')
+ self.assertEqual(len(rset), 0)
+
+ def test_composite_redirection(self):
+ with self.admin_access.repo_cnx() as cnx:
+ cnx.execute('INSERT EmailAddress X: X address "toto@logilab.fr", X alias "hop"')
+ cnx.execute('INSERT EmailPart X: X content_format "text/plain", X ordernum 1, '
+ 'X content "this is a test"')
+ cnx.execute('INSERT Email X: X messageid "<1234>", X subject "test", X sender Y, '
+ 'X recipients Y, X parts P '
+ 'WHERE Y is EmailAddress, P is EmailPart')
+ cnx.execute('INSERT Email X: X messageid "<2345>", X subject "test2", X sender Y, '
+ 'X recipients Y '
+ 'WHERE Y is EmailAddress')
+ cnx.commit()
+ cnx.execute('DELETE X parts Y WHERE X messageid "<1234>"')
+ cnx.execute('SET X parts Y WHERE X messageid "<2345>"')
+ cnx.commit()
+ rset = cnx.execute('Any X WHERE X is EmailPart')
+ self.assertEqual(len(rset), 1)
+ self.assertEqual(rset.get_entity(0, 0).reverse_parts[0].messageid, '<2345>')
+
+ def test_composite_object_relation_deletion(self):
+ with self.admin_access.repo_cnx() as cnx:
+ root = cnx.create_entity('Folder', name=u'root')
+ a = cnx.create_entity('Folder', name=u'a', parent=root)
+ cnx.create_entity('Folder', name=u'b', parent=a)
+ cnx.create_entity('Folder', name=u'c', parent=root)
+ cnx.commit()
+ cnx.execute('DELETE Folder F WHERE F name "a"')
+ cnx.execute('DELETE F parent R WHERE R name "root"')
+ cnx.commit()
+ self.assertEqual([['root'], ['c']],
+ cnx.execute('Any NF WHERE F is Folder, F name NF').rows)
+ self.assertEqual([], cnx.execute('Any NF,NP WHERE F parent P, F name NF, P name NP').rows)
+
+ def test_composite_subject_relation_deletion(self):
+ with self.admin_access.repo_cnx() as cnx:
+ root = cnx.create_entity('Folder', name=u'root')
+ a = cnx.create_entity('Folder', name=u'a')
+ b = cnx.create_entity('Folder', name=u'b')
+ c = cnx.create_entity('Folder', name=u'c')
+ root.cw_set(children=(a, c))
+ a.cw_set(children=b)
+ cnx.commit()
+ cnx.execute('DELETE Folder F WHERE F name "a"')
+ cnx.execute('DELETE R children F WHERE R name "root"')
+ cnx.commit()
+ self.assertEqual([['root'], ['c']],
+ cnx.execute('Any NF WHERE F is Folder, F name NF').rows)
+ self.assertEqual([], cnx.execute('Any NF,NP WHERE F parent P, F name NF, P name NP').rows)
+
+ def test_unsatisfied_constraints(self):
+ with self.admin_access.repo_cnx() as cnx:
+ cnx.execute('SET U in_group G WHERE G name "owners", U login "admin"')[0][0]
+ with self.assertRaises(ValidationError) as cm:
+ cnx.commit()
+ self.assertEqual(cm.exception.errors,
+ {'in_group-object': u'RQLConstraint NOT O name "owners" failed'})
+
+ def test_unique_constraint(self):
+ with self.admin_access.repo_cnx() as cnx:
+ entity = cnx.create_entity('CWGroup', name=u'trout')
+ cnx.commit()
+ self.assertRaises(ValidationError, cnx.create_entity, 'CWGroup', name=u'trout')
+ cnx.rollback()
+ cnx.execute('SET X name "trout" WHERE X eid %(x)s', {'x': entity.eid})
+ cnx.commit()
+
+if __name__ == '__main__':
+ from logilab.common.testlib import unittest_main
+ unittest_main()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/hooks/test/unittest_security.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/test/unittest_security.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,56 @@
+# copyright 2015 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+
+from cubicweb.devtools.testlib import CubicWebTC
+from cubicweb.server import hook
+from cubicweb.predicates import is_instance
+
+
+class SecurityHooksTC(CubicWebTC):
+ def setup_database(self):
+ with self.admin_access.repo_cnx() as cnx:
+ self.add_eid = cnx.create_entity('EmailAddress',
+ address=u'hop@perdu.com',
+ reverse_use_email=cnx.user.eid).eid
+ cnx.commit()
+
+ def test_inlined_cw_edited_relation(self):
+ """modification of cw_edited to add an inlined relation shouldn't trigger a security error.
+
+ Test for https://www.cubicweb.org/ticket/5477315
+ """
+ sender = self.repo.schema['Email'].rdef('sender')
+ with self.temporary_permissions((sender, {'add': ()})):
+
+ class MyHook(hook.Hook):
+ __regid__ = 'test.pouet'
+ __select__ = hook.Hook.__select__ & is_instance('Email')
+ events = ('before_add_entity',)
+
+ def __call__(self):
+ self.entity.cw_edited['sender'] = self._cw.user.primary_email[0].eid
+
+ with self.temporary_appobjects(MyHook):
+ with self.admin_access.repo_cnx() as cnx:
+ email = cnx.create_entity('Email', messageid=u'1234')
+ cnx.commit()
+ self.assertEqual(email.sender[0].eid, self.add_eid)
+
+if __name__ == '__main__':
+ from logilab.common.testlib import unittest_main
+ unittest_main()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/hooks/test/unittest_synccomputed.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/test/unittest_synccomputed.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,146 @@
+# copyright 2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""unit tests for computed attributes/relations hooks"""
+
+from unittest import TestCase
+
+from yams.buildobjs import EntityType, String, Int, SubjectRelation
+
+from cubicweb.devtools.testlib import CubicWebTC
+from cubicweb.schema import build_schema_from_namespace
+
+
+class FormulaDependenciesMatrixTC(TestCase):
+
+ def simple_schema(self):
+ THISYEAR = 2014
+
+ class Person(EntityType):
+ name = String()
+ salary = Int()
+ birth_year = Int(required=True)
+ works_for = SubjectRelation('Company')
+ age = Int(formula='Any %d - D WHERE X birth_year D' % THISYEAR)
+
+ class Company(EntityType):
+ name = String()
+ total_salary = Int(formula='Any SUM(SA) GROUPBY X WHERE P works_for X, P salary SA')
+
+ schema = build_schema_from_namespace(vars().items())
+ return schema
+
+ def setUp(self):
+ from cubicweb.hooks.synccomputed import _FormulaDependenciesMatrix
+ self.schema = self.simple_schema()
+ self.dependencies = _FormulaDependenciesMatrix(self.schema)
+
+ def test_computed_attributes_by_etype(self):
+ comp_by_etype = self.dependencies.computed_attribute_by_etype
+ self.assertEqual(len(comp_by_etype), 2)
+ values = comp_by_etype['Person']
+ self.assertEqual(len(values), 1)
+ self.assertEqual(values[0].rtype, 'age')
+ values = comp_by_etype['Company']
+ self.assertEqual(len(values), 1)
+ self.assertEqual(values[0].rtype, 'total_salary')
+
+ def test_computed_attribute_by_relation(self):
+ comp_by_rdef = self.dependencies.computed_attribute_by_relation
+ self.assertEqual(len(comp_by_rdef), 1)
+ key, values = next(iter(comp_by_rdef.items()))
+ self.assertEqual(key.rtype, 'works_for')
+ self.assertEqual(len(values), 1)
+ self.assertEqual(values[0].rtype, 'total_salary')
+
+ def test_computed_attribute_by_etype_attrs(self):
+ comp_by_attr = self.dependencies.computed_attribute_by_etype_attrs
+ self.assertEqual(len(comp_by_attr), 1)
+ values = comp_by_attr['Person']
+ self.assertEqual(len(values), 2)
+ values = set((rdef.formula, tuple(v))
+ for rdef, v in values.items())
+ self.assertEquals(values,
+ set((('Any 2014 - D WHERE X birth_year D', tuple(('birth_year',))),
+ ('Any SUM(SA) GROUPBY X WHERE P works_for X, P salary SA', tuple(('salary',)))))
+ )
+
+
+class ComputedAttributeTC(CubicWebTC):
+ appid = 'data-computed'
+
+ def setup_entities(self, req):
+ self.societe = req.create_entity('Societe', nom=u'Foo')
+ req.create_entity('Person', name=u'Titi', salaire=1000,
+ travaille=self.societe, birth_year=2001)
+ self.tata = req.create_entity('Person', name=u'Tata', salaire=2000,
+ travaille=self.societe, birth_year=1990)
+
+
+ def test_update_on_add_remove_relation(self):
+ """check the rewriting of a computed attribute"""
+ with self.admin_access.web_request() as req:
+ self.setup_entities(req)
+ req.cnx.commit()
+ rset = req.execute('Any S WHERE X salaire_total S, X nom "Foo"')
+ self.assertEqual(rset[0][0], 3000)
+ # Add relation.
+ toto = req.create_entity('Person', name=u'Toto', salaire=1500,
+ travaille=self.societe, birth_year=1988)
+ req.cnx.commit()
+ rset = req.execute('Any S WHERE X salaire_total S, X nom "Foo"')
+ self.assertEqual(rset[0][0], 4500)
+ # Delete relation.
+ toto.cw_set(travaille=None)
+ req.cnx.commit()
+ rset = req.execute('Any S WHERE X salaire_total S, X nom "Foo"')
+ self.assertEqual(rset[0][0], 3000)
+
+ def test_recompute_on_attribute_update(self):
+ """check the modification of an attribute triggers the update of the
+ computed attributes that depend on it"""
+ with self.admin_access.web_request() as req:
+ self.setup_entities(req)
+ req.cnx.commit()
+ rset = req.execute('Any S WHERE X salaire_total S, X nom "Foo"')
+ self.assertEqual(rset[0][0], 3000)
+ # Update attribute.
+ self.tata.cw_set(salaire=1000)
+ req.cnx.commit()
+ rset = req.execute('Any S WHERE X salaire_total S, X nom "Foo"')
+ self.assertEqual(rset[0][0], 2000)
+
+ def test_init_on_entity_creation(self):
+ """check the computed attribute is initialized on entity creation"""
+ with self.admin_access.web_request() as req:
+ p = req.create_entity('Person', name=u'Tata', salaire=2000,
+ birth_year=1990)
+ req.cnx.commit()
+ rset = req.execute('Any A, X WHERE X age A, X name "Tata"')
+ self.assertEqual(rset[0][0], 2014 - 1990)
+
+
+ def test_recompute_on_ambiguous_relation(self):
+ # check we don't end up with TypeResolverException as in #4901163
+ with self.admin_access.client_cnx() as cnx:
+ societe = cnx.create_entity('Societe', nom=u'Foo')
+ cnx.create_entity('MirrorEntity', mirror_of=societe, extid=u'1')
+ cnx.commit()
+
+if __name__ == '__main__':
+ from logilab.common.testlib import unittest_main
+ unittest_main()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/hooks/test/unittest_syncschema.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/test/unittest_syncschema.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,405 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""cubicweb.server.hooks.syncschema unit and functional tests"""
+
+from logilab.common.testlib import unittest_main
+
+from yams.constraints import BoundaryConstraint
+from cubicweb import ValidationError, Binary
+from cubicweb.schema import META_RTYPES
+from cubicweb.devtools import startpgcluster, stoppgcluster, PostgresApptestConfiguration
+from cubicweb.devtools.testlib import CubicWebTC
+from cubicweb.server.sqlutils import SQL_PREFIX
+from cubicweb.devtools.repotest import schema_eids_idx
+
+
+def setUpModule():
+ startpgcluster(__file__)
+
+
+def tearDownModule(*args):
+ stoppgcluster(__file__)
+ del SchemaModificationHooksTC.schema_eids
+
+
+class SchemaModificationHooksTC(CubicWebTC):
+ configcls = PostgresApptestConfiguration
+
+ def setUp(self):
+ super(SchemaModificationHooksTC, self).setUp()
+ self.repo.set_schema(self.repo.deserialize_schema(), resetvreg=False)
+ self.__class__.schema_eids = schema_eids_idx(self.repo.schema)
+
+ def index_exists(self, cnx, etype, attr, unique=False):
+ dbhelper = self.repo.system_source.dbhelper
+ sqlcursor = cnx.cnxset.cu
+ return dbhelper.index_exists(sqlcursor,
+ SQL_PREFIX + etype,
+ SQL_PREFIX + attr,
+ unique=unique)
+
+ def _set_perms(self, cnx, eid):
+ cnx.execute('SET X read_permission G WHERE X eid %(x)s, G is CWGroup',
+ {'x': eid})
+ cnx.execute('SET X add_permission G WHERE X eid %(x)s, G is CWGroup, '
+ 'G name "managers"', {'x': eid})
+ cnx.execute('SET X delete_permission G WHERE X eid %(x)s, G is CWGroup, '
+ 'G name "owners"', {'x': eid})
+
+ def _set_attr_perms(self, cnx, eid):
+ cnx.execute('SET X read_permission G WHERE X eid %(x)s, G is CWGroup',
+ {'x': eid})
+ cnx.execute('SET X update_permission G WHERE X eid %(x)s, G is CWGroup, G name "managers"',
+ {'x': eid})
+
+ def test_base(self):
+ with self.admin_access.repo_cnx() as cnx:
+ schema = self.repo.schema
+ self.assertFalse(schema.has_entity('Societe2'))
+ self.assertFalse(schema.has_entity('concerne2'))
+            # schema should be updated on insertion (after commit)
+ eeid = cnx.execute('INSERT CWEType X: X name "Societe2", '
+ 'X description "", X final FALSE')[0][0]
+ self._set_perms(cnx, eeid)
+ cnx.execute('INSERT CWRType X: X name "concerne2", X description "", '
+ 'X final FALSE, X symmetric FALSE')
+ self.assertFalse(schema.has_entity('Societe2'))
+ self.assertFalse(schema.has_entity('concerne2'))
+ # have to commit before adding definition relations
+ cnx.commit()
+ self.assertTrue(schema.has_entity('Societe2'))
+ self.assertTrue(schema.has_relation('concerne2'))
+ attreid = cnx.execute('INSERT CWAttribute X: X cardinality "11", '
+ 'X defaultval %(default)s, X indexed TRUE, '
+ 'X relation_type RT, X from_entity E, X to_entity F '
+ 'WHERE RT name "name", E name "Societe2", '
+ 'F name "String"',
+ {'default': Binary.zpickle('noname')})[0][0]
+ self._set_attr_perms(cnx, attreid)
+ concerne2_rdef_eid = cnx.execute(
+ 'INSERT CWRelation X: X cardinality "**", X relation_type RT, '
+ 'X from_entity E, X to_entity E '
+ 'WHERE RT name "concerne2", E name "Societe2"')[0][0]
+ self._set_perms(cnx, concerne2_rdef_eid)
+ self.assertNotIn('name', schema['Societe2'].subject_relations())
+ self.assertNotIn('concerne2', schema['Societe2'].subject_relations())
+ self.assertFalse(self.index_exists(cnx, 'Societe2', 'name'))
+ cnx.commit()
+ self.assertIn('name', schema['Societe2'].subject_relations())
+ self.assertIn('concerne2', schema['Societe2'].subject_relations())
+ self.assertTrue(self.index_exists(cnx, 'Societe2', 'name'))
+ # now we should be able to insert and query Societe2
+ s2eid = cnx.execute('INSERT Societe2 X: X name "logilab"')[0][0]
+ cnx.execute('Societe2 X WHERE X name "logilab"')
+ cnx.execute('SET X concerne2 X WHERE X name "logilab"')
+ rset = cnx.execute('Any X WHERE X concerne2 Y')
+ self.assertEqual(rset.rows, [[s2eid]])
+ # check that when a relation definition is deleted, existing relations are deleted
+ rdefeid = cnx.execute('INSERT CWRelation X: X cardinality "**", X relation_type RT, '
+ ' X from_entity E, X to_entity E '
+ 'WHERE RT name "concerne2", E name "CWUser"')[0][0]
+ self._set_perms(cnx, rdefeid)
+ cnx.commit()
+ cnx.execute('DELETE CWRelation X WHERE X eid %(x)s', {'x': concerne2_rdef_eid})
+ cnx.commit()
+ self.assertIn('concerne2', schema['CWUser'].subject_relations())
+ self.assertNotIn('concerne2', schema['Societe2'].subject_relations())
+ self.assertFalse(cnx.execute('Any X WHERE X concerne2 Y'))
+ # schema should be cleaned on delete (after commit)
+ cnx.execute('DELETE CWEType X WHERE X name "Societe2"')
+ cnx.execute('DELETE CWRType X WHERE X name "concerne2"')
+ self.assertTrue(self.index_exists(cnx, 'Societe2', 'name'))
+ self.assertTrue(schema.has_entity('Societe2'))
+ self.assertTrue(schema.has_relation('concerne2'))
+ cnx.commit()
+ self.assertFalse(self.index_exists(cnx, 'Societe2', 'name'))
+ self.assertFalse(schema.has_entity('Societe2'))
+ self.assertFalse(schema.has_entity('concerne2'))
+ self.assertNotIn('concerne2', schema['CWUser'].subject_relations())
+
+ def test_metartype_with_nordefs(self):
+ with self.admin_access.repo_cnx() as cnx:
+ META_RTYPES.add('custom_meta')
+ cnx.execute('INSERT CWRType X: X name "custom_meta", X description "", '
+ 'X final FALSE, X symmetric FALSE')
+ cnx.commit()
+ eeid = cnx.execute('INSERT CWEType X: X name "NEWEtype", '
+ 'X description "", X final FALSE')[0][0]
+ self._set_perms(cnx, eeid)
+ cnx.commit()
+ META_RTYPES.remove('custom_meta')
+
+ def test_metartype_with_somerdefs(self):
+ with self.admin_access.repo_cnx() as cnx:
+ META_RTYPES.add('custom_meta')
+ cnx.execute('INSERT CWRType X: X name "custom_meta", X description "", '
+ 'X final FALSE, X symmetric FALSE')
+ cnx.commit()
+ rdefeid = cnx.execute('INSERT CWRelation X: X cardinality "**", X relation_type RT, '
+ ' X from_entity E, X to_entity E '
+ 'WHERE RT name "custom_meta", E name "CWUser"')[0][0]
+ self._set_perms(cnx, rdefeid)
+ cnx.commit()
+ eeid = cnx.execute('INSERT CWEType X: X name "NEWEtype", '
+ 'X description "", X final FALSE')[0][0]
+ self._set_perms(cnx, eeid)
+ cnx.commit()
+ META_RTYPES.remove('custom_meta')
+
+ def test_is_instance_of_insertions(self):
+ with self.admin_access.repo_cnx() as cnx:
+ seid = cnx.execute('INSERT Transition T: T name "subdiv"')[0][0]
+ is_etypes = [etype for etype, in cnx.execute('Any ETN WHERE X eid %s, '
+ 'X is ET, ET name ETN' % seid)]
+ self.assertEqual(is_etypes, ['Transition'])
+ instanceof_etypes = [etype
+ for etype, in cnx.execute('Any ETN WHERE X eid %s, '
+ 'X is_instance_of ET, ET name ETN'
+ % seid)]
+ self.assertEqual(sorted(instanceof_etypes), ['BaseTransition', 'Transition'])
+ snames = [name for name, in cnx.execute('Any N WHERE S is BaseTransition, S name N')]
+ self.assertNotIn('subdiv', snames)
+ snames = [name for name, in cnx.execute('Any N WHERE S is_instance_of BaseTransition, '
+ 'S name N')]
+ self.assertIn('subdiv', snames)
+
+
+ def test_perms_synchronization_1(self):
+ with self.admin_access.repo_cnx() as cnx:
+ schema = self.repo.schema
+ self.assertEqual(schema['CWUser'].get_groups('read'), set(('managers', 'users')))
+ self.assertTrue(cnx.execute('Any X, Y WHERE X is CWEType, X name "CWUser", '
+ 'Y is CWGroup, Y name "users"')[0])
+ cnx.execute('DELETE X read_permission Y WHERE X is CWEType, X name "CWUser", Y name "users"')
+ self.assertEqual(schema['CWUser'].get_groups('read'), set(('managers', 'users', )))
+ cnx.commit()
+ self.assertEqual(schema['CWUser'].get_groups('read'), set(('managers',)))
+ cnx.execute('SET X read_permission Y WHERE X is CWEType, '
+ 'X name "CWUser", Y name "users"')
+ cnx.commit()
+ self.assertEqual(schema['CWUser'].get_groups('read'),
+ set(('managers', 'users',)))
+
+ def test_perms_synchronization_2(self):
+ with self.admin_access.repo_cnx() as cnx:
+ schema = self.repo.schema['in_group'].rdefs[('CWUser', 'CWGroup')]
+ self.assertEqual(schema.get_groups('read'),
+ set(('managers', 'users', 'guests')))
+ cnx.execute('DELETE X read_permission Y WHERE X relation_type RT, '
+ 'RT name "in_group", Y name "guests"')
+ self.assertEqual(schema.get_groups('read'),
+ set(('managers', 'users', 'guests')))
+ cnx.commit()
+ self.assertEqual(schema.get_groups('read'),
+ set(('managers', 'users')))
+ cnx.execute('SET X read_permission Y WHERE X relation_type RT, '
+ 'RT name "in_group", Y name "guests"')
+ self.assertEqual(schema.get_groups('read'),
+ set(('managers', 'users')))
+ cnx.commit()
+ self.assertEqual(schema.get_groups('read'),
+ set(('managers', 'users', 'guests')))
+
+ def test_nonregr_user_edit_itself(self):
+ with self.admin_access.repo_cnx() as cnx:
+ ueid = cnx.user.eid
+ groupeids = [eid for eid, in cnx.execute('CWGroup G WHERE G name '
+ 'in ("managers", "users")')]
+ cnx.execute('DELETE X in_group Y WHERE X eid %s' % ueid)
+ cnx.execute('SET X surname "toto" WHERE X eid %s' % ueid)
+ cnx.execute('SET X in_group Y WHERE X eid %s, Y name "managers"' % ueid)
+ cnx.commit()
+ eeid = cnx.execute('Any X WHERE X is CWEType, X name "CWEType"')[0][0]
+ cnx.execute('DELETE X read_permission Y WHERE X eid %s' % eeid)
+ cnx.execute('SET X final FALSE WHERE X eid %s' % eeid)
+ cnx.execute('SET X read_permission Y WHERE X eid %s, Y eid in (%s, %s)'
+ % (eeid, groupeids[0], groupeids[1]))
+ cnx.commit()
+ cnx.execute('Any X WHERE X is CWEType, X name "CWEType"')
+
+ # schema modification hooks tests #########################################
+
+ def test_uninline_relation(self):
+ with self.admin_access.repo_cnx() as cnx:
+ try:
+ self.assertTrue(self.schema['state_of'].inlined)
+ cnx.execute('SET X inlined FALSE WHERE X name "state_of"')
+ self.assertTrue(self.schema['state_of'].inlined)
+ cnx.commit()
+ self.assertFalse(self.schema['state_of'].inlined)
+ self.assertFalse(self.index_exists(cnx, 'State', 'state_of'))
+ rset = cnx.execute('Any X, Y WHERE X state_of Y')
+ self.assertEqual(len(rset), 2) # user states
+ finally:
+ cnx.execute('SET X inlined TRUE WHERE X name "state_of"')
+ self.assertFalse(self.schema['state_of'].inlined)
+ cnx.commit()
+ self.assertTrue(self.schema['state_of'].inlined)
+ self.assertTrue(self.index_exists(cnx, 'State', 'state_of'))
+ rset = cnx.execute('Any X, Y WHERE X state_of Y')
+ self.assertEqual(len(rset), 2)
+
+ def test_indexed_change(self):
+ with self.admin_access.repo_cnx() as cnx:
+ try:
+ cnx.execute('SET X indexed FALSE WHERE X relation_type R, R name "name"')
+ self.assertTrue(self.schema['name'].rdef('Workflow', 'String').indexed)
+ self.assertTrue(self.index_exists(cnx, 'Workflow', 'name'))
+ cnx.commit()
+ self.assertFalse(self.schema['name'].rdef('Workflow', 'String').indexed)
+ self.assertFalse(self.index_exists(cnx, 'Workflow', 'name'))
+ finally:
+ cnx.execute('SET X indexed TRUE WHERE X relation_type R, R name "name"')
+ self.assertFalse(self.schema['name'].rdef('Workflow', 'String').indexed)
+ self.assertFalse(self.index_exists(cnx, 'Workflow', 'name'))
+ cnx.commit()
+ self.assertTrue(self.schema['name'].rdef('Workflow', 'String').indexed)
+ self.assertTrue(self.index_exists(cnx, 'Workflow', 'name'))
+
+ def test_unique_change(self):
+ with self.admin_access.repo_cnx() as cnx:
+ try:
+ eid = cnx.execute('INSERT CWConstraint X: X cstrtype CT, DEF constrained_by X '
+ 'WHERE CT name "UniqueConstraint", DEF relation_type RT, '
+ 'DEF from_entity E, RT name "name", '
+ 'E name "Workflow"').rows[0][0]
+ self.assertFalse(self.schema['Workflow'].has_unique_values('name'))
+ self.assertFalse(self.index_exists(cnx, 'Workflow', 'name', unique=True))
+ cnx.commit()
+ self.assertTrue(self.schema['Workflow'].has_unique_values('name'))
+ self.assertTrue(self.index_exists(cnx, 'Workflow', 'name', unique=True))
+ finally:
+ cnx.execute('DELETE CWConstraint C WHERE C eid %(eid)s', {'eid': eid})
+ cnx.commit()
+ self.assertFalse(self.schema['Workflow'].has_unique_values('name'))
+ self.assertFalse(self.index_exists(cnx, 'Workflow', 'name', unique=True))
+
+ def test_required_change_1(self):
+ with self.admin_access.repo_cnx() as cnx:
+ cnx.execute('SET DEF cardinality "?1" '
+ 'WHERE DEF relation_type RT, DEF from_entity E,'
+ 'RT name "title", E name "Bookmark"')
+ cnx.commit()
+ # should now be able to add bookmark without title
+ cnx.execute('INSERT Bookmark X: X path "/view"')
+ cnx.commit()
+
+ def test_required_change_2(self):
+ with self.admin_access.repo_cnx() as cnx:
+ cnx.execute('SET DEF cardinality "11" '
+ 'WHERE DEF relation_type RT, DEF from_entity E,'
+ 'RT name "surname", E name "CWUser"')
+ cnx.execute('SET U surname "Doe" WHERE U surname NULL')
+ cnx.commit()
+ # should not be able anymore to add cwuser without surname
+ self.assertRaises(ValidationError, self.create_user, cnx, "toto")
+ cnx.rollback()
+ cnx.execute('SET DEF cardinality "?1" '
+ 'WHERE DEF relation_type RT, DEF from_entity E,'
+ 'RT name "surname", E name "CWUser"')
+ cnx.commit()
+
+ def test_add_attribute_to_base_class(self):
+ with self.admin_access.repo_cnx() as cnx:
+ attreid = cnx.execute('INSERT CWAttribute X: X cardinality "11", X defaultval %(default)s, '
+ 'X indexed TRUE, X relation_type RT, X from_entity E, X to_entity F '
+ 'WHERE RT name "messageid", E name "BaseTransition", F name "String"',
+ {'default': Binary.zpickle('noname')})[0][0]
+ assert cnx.execute('SET X read_permission Y WHERE X eid %(x)s, Y name "managers"',
+ {'x': attreid})
+ cnx.commit()
+ self.schema.rebuild_infered_relations()
+ self.assertIn('Transition', self.schema['messageid'].subjects())
+ self.assertIn('WorkflowTransition', self.schema['messageid'].subjects())
+ cnx.execute('Any X WHERE X is_instance_of BaseTransition, X messageid "hop"')
+
+ def test_change_fulltextindexed(self):
+ with self.admin_access.repo_cnx() as cnx:
+ target = cnx.create_entity(u'Email', messageid=u'1234',
+ subject=u'rick.roll@dance.com')
+ cnx.commit()
+ rset = cnx.execute('Any X WHERE X has_text "rick.roll"')
+ self.assertIn(target.eid, [item[0] for item in rset])
+ assert cnx.execute('SET A fulltextindexed FALSE '
+ 'WHERE E is CWEType, E name "Email", A is CWAttribute,'
+ 'A from_entity E, A relation_type R, R name "subject"')
+ cnx.commit()
+ rset = cnx.execute('Any X WHERE X has_text "rick.roll"')
+ self.assertFalse(rset)
+ assert cnx.execute('SET A fulltextindexed TRUE '
+ 'WHERE A from_entity E, A relation_type R, '
+ 'E name "Email", R name "subject"')
+ cnx.commit()
+ rset = cnx.execute('Any X WHERE X has_text "rick.roll"')
+ self.assertIn(target.eid, [item[0] for item in rset])
+
+ def test_change_fulltext_container(self):
+ with self.admin_access.repo_cnx() as cnx:
+ target = cnx.create_entity(u'EmailAddress', address=u'rick.roll@dance.com')
+ target.cw_set(reverse_use_email=cnx.user)
+ cnx.commit()
+ rset = cnx.execute('Any X WHERE X has_text "rick.roll"')
+ self.assertIn(cnx.user.eid, [item[0] for item in rset])
+ assert cnx.execute('SET R fulltext_container NULL '
+ 'WHERE R name "use_email"')
+ cnx.commit()
+ rset = cnx.execute('Any X WHERE X has_text "rick.roll"')
+ self.assertIn(target.eid, [item[0] for item in rset])
+ assert cnx.execute('SET R fulltext_container "subject" '
+ 'WHERE R name "use_email"')
+ cnx.commit()
+ rset = cnx.execute('Any X WHERE X has_text "rick.roll"')
+ self.assertIn(cnx.user.eid, [item[0] for item in rset])
+
+ def test_update_constraint(self):
+ with self.admin_access.repo_cnx() as cnx:
+ rdef = self.schema['Transition'].rdef('type')
+ cstr = rdef.constraint_by_type('StaticVocabularyConstraint')
+ if not getattr(cstr, 'eid', None):
+ # bug in schema reloading, constraint's eid not restored
+ self.skipTest('start me alone')
+ cnx.execute('SET X value %(v)s WHERE X eid %(x)s',
+ {'x': cstr.eid, 'v': u"u'normal', u'auto', u'new'"})
+ cnx.execute('INSERT CWConstraint X: X value %(value)s, X cstrtype CT, '
+ 'EDEF constrained_by X WHERE CT name %(ct)s, EDEF eid %(x)s',
+ {'ct': 'SizeConstraint', 'value': u'max=10', 'x': rdef.eid})
+ cnx.commit()
+ cstr = rdef.constraint_by_type('StaticVocabularyConstraint')
+ self.assertEqual(cstr.values, (u'normal', u'auto', u'new'))
+ cnx.execute('INSERT Transition T: T name "hop", T type "new"')
+
+ def test_add_constraint(self):
+ with self.admin_access.repo_cnx() as cnx:
+ rdef = self.schema['EmailPart'].rdef('ordernum')
+ cstr = BoundaryConstraint('>=', 0)
+ cnx.execute('INSERT CWConstraint X: X value %(v)s, X cstrtype CT, '
+ 'EDEF constrained_by X WHERE CT name %(ct)s, EDEF eid %(x)s',
+ {'ct': cstr.__class__.__name__, 'v': cstr.serialize(), 'x': rdef.eid})
+ cnx.commit()
+ cstr2 = rdef.constraint_by_type('BoundaryConstraint')
+ self.assertEqual(cstr, cstr2)
+ cstr3 = BoundaryConstraint('<=', 1000)
+ cnx.execute('INSERT CWConstraint X: X value %(v)s, X cstrtype CT, '
+ 'EDEF constrained_by X WHERE CT name %(ct)s, EDEF eid %(x)s',
+ {'ct': cstr3.__class__.__name__, 'v': cstr3.serialize(), 'x': rdef.eid})
+ cnx.commit()
+ self.assertCountEqual(rdef.constraints, [cstr, cstr3])
+
+
+if __name__ == '__main__':
+ unittest_main()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/hooks/test/unittest_syncsession.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/test/unittest_syncsession.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -*-
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""functional tests for core hooks
+
+Note:
+ syncschema.py hooks are mostly tested in server/test/unittest_migrations.py
+"""
+
+from six import text_type
+
+from cubicweb import ValidationError
+from cubicweb.devtools.testlib import CubicWebTC
+
+class CWPropertyHooksTC(CubicWebTC):
+    """validation performed by core hooks on CWProperty insertion"""
+
+    def test_unexistant_cwproperty(self):
+        # an unknown property key is rejected, with or without for_user
+        with self.admin_access.web_request() as req:
+            with self.assertRaises(ValidationError) as cm:
+                req.execute('INSERT CWProperty X: X pkey "bla.bla", '
+                            'X value "hop", X for_user U')
+            cm.exception.translate(text_type)  # error messages must be translatable
+            self.assertEqual(cm.exception.errors,
+                             {'pkey-subject': 'unknown property key bla.bla'})
+
+            with self.assertRaises(ValidationError) as cm:
+                req.execute('INSERT CWProperty X: X pkey "bla.bla", X value "hop"')
+            cm.exception.translate(text_type)
+            self.assertEqual(cm.exception.errors,
+                             {'pkey-subject': 'unknown property key bla.bla'})
+
+    def test_site_wide_cwproperty(self):
+        # a site-wide property may not be attached to a specific user
+        with self.admin_access.web_request() as req:
+            with self.assertRaises(ValidationError) as cm:
+                req.execute('INSERT CWProperty X: X pkey "ui.site-title", '
+                            'X value "hop", X for_user U')
+            self.assertEqual(cm.exception.errors,
+                             {'for_user-subject': "site-wide property can't be set for user"})
+
+    def test_system_cwproperty(self):
+        # system properties behave like site-wide ones regarding for_user
+        with self.admin_access.web_request() as req:
+            with self.assertRaises(ValidationError) as cm:
+                req.execute('INSERT CWProperty X: X pkey "system.version.cubicweb", '
+                            'X value "hop", X for_user U')
+            self.assertEqual(cm.exception.errors,
+                             {'for_user-subject': "site-wide property can't be set for user"})
+
+    def test_bad_type_cwproperty(self):
+        # a value outside the property's vocabulary is rejected
+        with self.admin_access.web_request() as req:
+            with self.assertRaises(ValidationError) as cm:
+                req.execute('INSERT CWProperty X: X pkey "ui.language", '
+                            'X value "hop", X for_user U')
+            self.assertEqual(cm.exception.errors,
+                             {'value-subject': u'unauthorized value'})
+            with self.assertRaises(ValidationError) as cm:
+                req.execute('INSERT CWProperty X: X pkey "ui.language", X value "hop"')
+            self.assertEqual(cm.exception.errors, {'value-subject': u'unauthorized value'})
+
+if __name__ == '__main__':
+    # allow running this test module as a script
+    from logilab.common.testlib import unittest_main
+    unittest_main()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/hooks/workflow.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/workflow.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,357 @@
+# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""Core hooks: workflow related hooks"""
+
+__docformat__ = "restructuredtext en"
+from cubicweb import _
+
+from datetime import datetime
+
+
+from cubicweb import RepositoryError, validation_error
+from cubicweb.predicates import is_instance, adaptable
+from cubicweb.server import hook
+
+
+def _change_state(cnx, x, oldstate, newstate):
+    """switch entity `x` from `oldstate` to `newstate` (all given as eids),
+    registering both in_state changes in the transaction's 'skip-security'
+    set so integrity hooks accept them without re-checking
+    """
+    nocheck = cnx.transaction_data.setdefault('skip-security', set())
+    nocheck.add((x, 'in_state', oldstate))
+    nocheck.add((x, 'in_state', newstate))
+    # delete previous state first
+    cnx.delete_relation(x, 'in_state', oldstate)
+    cnx.add_relation(x, 'in_state', newstate)
+
+
+# operations ###################################################################
+
+class _SetInitialStateOp(hook.Operation):
+    """make initial state be a default state"""
+    entity = None # make pylint happy
+
+    def precommit_event(self):
+        cnx = self.cnx
+        entity = self.entity
+        iworkflowable = entity.cw_adapt_to('IWorkflowable')
+        # if there is an initial state and the entity's state is not set,
+        # use the initial state as a default state
+        if not (cnx.deleted_in_transaction(entity.eid) or entity.in_state) \
+               and iworkflowable.current_workflow:
+            state = iworkflowable.current_workflow.initial
+            if state:
+                cnx.add_relation(entity.eid, 'in_state', state.eid)
+                # the initial state may immediately allow an automatic transition
+                _FireAutotransitionOp(cnx, entity=entity)
+
+class _FireAutotransitionOp(hook.Operation):
+    """try to fire auto transition after state changes"""
+    entity = None # make pylint happy
+
+    def precommit_event(self):
+        entity = self.entity
+        iworkflowable = entity.cw_adapt_to('IWorkflowable')
+        autotrs = list(iworkflowable.possible_transitions('auto'))
+        if autotrs:
+            # at most one automatic transition may leave the current state
+            assert len(autotrs) == 1
+            iworkflowable.fire_transition(autotrs[0])
+
+
+class _WorkflowChangedOp(hook.Operation):
+    """fix entity current state when changing its workflow"""
+    eid = wfeid = None # make pylint happy
+
+    def precommit_event(self):
+        # notice that enforcement that new workflow apply to the entity's type is
+        # done by schema rule, no need to check it here
+        cnx = self.cnx
+        pendingeids = cnx.transaction_data.get('pendingeids', ())
+        if self.eid in pendingeids:
+            # entity scheduled for deletion in this transaction, nothing to fix
+            return
+        entity = cnx.entity_from_eid(self.eid)
+        iworkflowable = entity.cw_adapt_to('IWorkflowable')
+        # check custom workflow has not been rechanged to another one in the same
+        # transaction
+        mainwf = iworkflowable.main_workflow
+        if mainwf.eid == self.wfeid:
+            deststate = mainwf.initial
+            if not deststate:
+                msg = _('workflow has no initial state')
+                raise validation_error(entity, {('custom_workflow', 'subject'): msg})
+            if mainwf.state_by_eid(iworkflowable.current_state.eid):
+                # nothing to do
+                return
+            # if there are no history, simply go to new workflow's initial state
+            if not iworkflowable.workflow_history:
+                if iworkflowable.current_state.eid != deststate.eid:
+                    _change_state(cnx, entity.eid,
+                                  iworkflowable.current_state.eid, deststate.eid)
+                    _FireAutotransitionOp(cnx, entity=entity)
+                return
+            # entity has a history: record an explicit state change through a
+            # TrInfo so the transition is traceable
+            msg = cnx._('workflow changed to "%s"')
+            msg %= cnx._(mainwf.name)
+            cnx.transaction_data[(entity.eid, 'customwf')] = self.wfeid
+            iworkflowable.change_state(deststate, msg, u'text/plain')
+
+
+class _CheckTrExitPoint(hook.Operation):
+    """forbid two exit points of the same transition to leave from the same
+    subworkflow state
+    """
+    treid = None # make pylint happy
+
+    def precommit_event(self):
+        tr = self.cnx.entity_from_eid(self.treid)
+        outputs = set()  # subworkflow state eids already used as exit points
+        for ep in tr.subworkflow_exit:
+            if ep.subwf_state.eid in outputs:
+                msg = _("can't have multiple exits on the same state")
+                raise validation_error(self.treid, {('subworkflow_exit', 'subject'): msg})
+            outputs.add(ep.subwf_state.eid)
+
+
+class _SubWorkflowExitOp(hook.Operation):
+    """when an entity in a subworkflow reaches one of the subworkflow's exit
+    points, bring it back into the main workflow's mapped state
+    """
+    forentity = trinfo = None # make pylint happy
+
+    def precommit_event(self):
+        cnx = self.cnx
+        forentity = self.forentity
+        iworkflowable = forentity.cw_adapt_to('IWorkflowable')
+        trinfo = self.trinfo
+        # we're in a subworkflow, check if we've reached an exit point
+        wftr = iworkflowable.subworkflow_input_transition()
+        if wftr is None:
+            # inconsistency detected
+            msg = _("state doesn't belong to entity's current workflow")
+            raise validation_error(self.trinfo, {('to_state', 'subject'): msg})
+        tostate = wftr.get_exit_point(forentity, trinfo.cw_attr_cache['to_state'])
+        if tostate is not None:
+            # reached an exit point: flag the transaction so FireTransitionHook
+            # skips transition checks when re-entering the main workflow
+            msg = _('exiting from subworkflow %s')
+            msg %= cnx._(iworkflowable.current_workflow.name)
+            cnx.transaction_data[(forentity.eid, 'subwfentrytr')] = True
+            iworkflowable.change_state(tostate, msg, u'text/plain', tr=wftr)
+
+
+# hooks ########################################################################
+
+class WorkflowHook(hook.Hook):
+    """abstract base class for workflow related hooks"""
+    __abstract__ = True
+    category = 'metadata'
+
+
+class SetInitialStateHook(WorkflowHook):
+    """schedule setting of the workflow's initial state on entity creation"""
+    __regid__ = 'wfsetinitial'
+    __select__ = WorkflowHook.__select__ & adaptable('IWorkflowable')
+    events = ('after_add_entity',)
+
+    def __call__(self):
+        _SetInitialStateOp(self._cw, entity=self.entity)
+
+
+class FireTransitionHook(WorkflowHook):
+    """check the transition is allowed and add missing information into the
+    TrInfo entity.
+
+    Expect that:
+    * wf_info_for inlined relation is set
+    * by_transition or to_state (managers only) inlined relation is set
+
+    Check for automatic transition to be fired at the end
+    """
+    __regid__ = 'wffiretransition'
+    __select__ = WorkflowHook.__select__ & is_instance('TrInfo')
+    events = ('before_add_entity',)
+
+    def __call__(self):
+        cnx = self._cw
+        entity = self.entity
+        # first retrieve the entity to which the state change applies
+        try:
+            foreid = entity.cw_attr_cache['wf_info_for']
+        except KeyError:
+            msg = _('mandatory relation')
+            raise validation_error(entity, {('wf_info_for', 'subject'): msg})
+        forentity = cnx.entity_from_eid(foreid)
+        # see comment in the TrInfo entity definition
+        entity.cw_edited['tr_count']=len(forentity.reverse_wf_info_for)
+        iworkflowable = forentity.cw_adapt_to('IWorkflowable')
+        # then check it has a workflow set, unless we're in the process of changing
+        # entity's workflow
+        if cnx.transaction_data.get((forentity.eid, 'customwf')):
+            wfeid = cnx.transaction_data[(forentity.eid, 'customwf')]
+            wf = cnx.entity_from_eid(wfeid)
+        else:
+            wf = iworkflowable.current_workflow
+        if wf is None:
+            msg = _('related entity has no workflow set')
+            raise validation_error(entity, {None: msg})
+        # then check it has a state set
+        fromstate = iworkflowable.current_state
+        if fromstate is None:
+            msg = _('related entity has no state')
+            raise validation_error(entity, {None: msg})
+        # True if we are coming back from subworkflow
+        swtr = cnx.transaction_data.pop((forentity.eid, 'subwfentrytr'), None)
+        # managers (or hooks running with security off) may force a destination
+        # state without firing a transition
+        cowpowers = (cnx.user.is_in_group('managers')
+                     or not cnx.write_security)
+        # now investigate the requested state change...
+        try:
+            treid = entity.cw_attr_cache['by_transition']
+        except KeyError:
+            # no transition set, check user is a manager and destination state
+            # is specified (and valid)
+            if not cowpowers:
+                msg = _('mandatory relation')
+                raise validation_error(entity, {('by_transition', 'subject'): msg})
+            deststateeid = entity.cw_attr_cache.get('to_state')
+            if not deststateeid:
+                msg = _('mandatory relation')
+                raise validation_error(entity, {('by_transition', 'subject'): msg})
+            deststate = wf.state_by_eid(deststateeid)
+            if deststate is None:
+                msg = _("state doesn't belong to entity's workflow")
+                raise validation_error(entity, {('to_state', 'subject'): msg})
+        else:
+            # check transition is valid and allowed, unless we're coming back
+            # from subworkflow
+            tr = cnx.entity_from_eid(treid)
+            if swtr is None:
+                qname = ('by_transition', 'subject')
+                if tr is None:
+                    msg = _("transition doesn't belong to entity's workflow")
+                    raise validation_error(entity, {qname: msg})
+                if not tr.has_input_state(fromstate):
+                    msg = _("transition %(tr)s isn't allowed from %(st)s")
+                    raise validation_error(entity, {qname: msg}, {
+                        'tr': tr.name, 'st': fromstate.name}, ['tr', 'st'])
+                if not tr.may_be_fired(foreid):
+                    msg = _("transition may not be fired")
+                    raise validation_error(entity, {qname: msg})
+            deststateeid = entity.cw_attr_cache.get('to_state')
+            if deststateeid is not None:
+                # explicit destination state: must match the transition's own
+                # destination unless the user has manager powers
+                if not cowpowers and deststateeid != tr.destination(forentity).eid:
+                    msg = _("transition isn't allowed")
+                    raise validation_error(entity, {('by_transition', 'subject'): msg})
+                if swtr is None:
+                    deststate = cnx.entity_from_eid(deststateeid)
+                    if not cowpowers and deststate is None:
+                        msg = _("state doesn't belong to entity's workflow")
+                        raise validation_error(entity, {('to_state', 'subject'): msg})
+            else:
+                deststateeid = tr.destination(forentity).eid
+        # everything is ok, add missing information on the trinfo entity
+        entity.cw_edited['from_state'] = fromstate.eid
+        entity.cw_edited['to_state'] = deststateeid
+        nocheck = cnx.transaction_data.setdefault('skip-security', set())
+        nocheck.add((entity.eid, 'from_state', fromstate.eid))
+        nocheck.add((entity.eid, 'to_state', deststateeid))
+        _FireAutotransitionOp(cnx, entity=forentity)
+
+
+class FiredTransitionHook(WorkflowHook):
+    """change related entity state and handle exit of subworkflow"""
+    __regid__ = 'wffiretransition'
+    __select__ = WorkflowHook.__select__ & is_instance('TrInfo')
+    events = ('after_add_entity',)
+
+    def __call__(self):
+        trinfo = self.entity
+        rcache = trinfo.cw_attr_cache
+        # apply the state change recorded on the TrInfo by FireTransitionHook
+        _change_state(self._cw, rcache['wf_info_for'], rcache['from_state'],
+                      rcache['to_state'])
+        forentity = self._cw.entity_from_eid(rcache['wf_info_for'])
+        iworkflowable = forentity.cw_adapt_to('IWorkflowable')
+        assert iworkflowable.current_state.eid == rcache['to_state']
+        if iworkflowable.main_workflow.eid != iworkflowable.current_workflow.eid:
+            # we're in a subworkflow: check whether an exit point was reached
+            _SubWorkflowExitOp(self._cw, forentity=forentity, trinfo=trinfo)
+
+
+class CheckInStateChangeAllowed(WorkflowHook):
+    """check state apply, in case of direct in_state change using unsafe execute
+    """
+    __regid__ = 'wfcheckinstate'
+    __select__ = WorkflowHook.__select__ & hook.match_rtype('in_state')
+    events = ('before_add_relation',)
+    category = 'integrity'
+
+    def __call__(self):
+        cnx = self._cw
+        nocheck = cnx.transaction_data.get('skip-security', ())
+        if (self.eidfrom, 'in_state', self.eidto) in nocheck:
+            # state changed through TrInfo insertion, so we already know it's ok
+            return
+        entity = cnx.entity_from_eid(self.eidfrom)
+        iworkflowable = entity.cw_adapt_to('IWorkflowable')
+        mainwf = iworkflowable.main_workflow
+        if mainwf is None:
+            msg = _('entity has no workflow set')
+            raise validation_error(entity, {None: msg})
+        for wf in mainwf.iter_workflows():
+            if wf.state_by_eid(self.eidto):
+                break
+        else:
+            # for/else: no workflow in the chain owns the target state
+            msg = _("state doesn't belong to entity's workflow. You may "
+                    "want to set a custom workflow for this entity first.")
+            raise validation_error(self.eidfrom, {('in_state', 'subject'): msg})
+        if iworkflowable.current_workflow and wf.eid != iworkflowable.current_workflow.eid:
+            msg = _("state doesn't belong to entity's current workflow")
+            raise validation_error(self.eidfrom, {('in_state', 'subject'): msg})
+
+
+class SetModificationDateOnStateChange(WorkflowHook):
+    """update entity's modification date after changing its state"""
+    __regid__ = 'wfsyncmdate'
+    __select__ = WorkflowHook.__select__ & hook.match_rtype('in_state')
+    events = ('after_add_relation',)
+
+    def __call__(self):
+        if self._cw.added_in_transaction(self.eidfrom):
+            # new entity, not needed
+            return
+        entity = self._cw.entity_from_eid(self.eidfrom)
+        try:
+            entity.cw_set(modification_date=datetime.utcnow())
+        except RepositoryError as ex:
+            # usually occurs if entity is coming from a read-only source
+            # (eg ldap user): log instead of aborting the state change
+            self.warning('cant change modification date for %s: %s', entity, ex)
+
+
+class CheckWorkflowTransitionExitPoint(WorkflowHook):
+    """check that there is no multiple exits from the same state"""
+    __regid__ = 'wfcheckwftrexit'
+    __select__ = WorkflowHook.__select__ & hook.match_rtype('subworkflow_exit')
+    events = ('after_add_relation',)
+
+    def __call__(self):
+        # actual check deferred to commit time, once all exits are added
+        _CheckTrExitPoint(self._cw, treid=self.eidfrom)
+
+
+class SetCustomWorkflow(WorkflowHook):
+    """when a custom workflow is set, fix the entity's current state"""
+    __regid__ = 'wfsetcustom'
+    __select__ = WorkflowHook.__select__ & hook.match_rtype('custom_workflow')
+    events = ('after_add_relation',)
+
+    def __call__(self):
+        _WorkflowChangedOp(self._cw, eid=self.eidfrom, wfeid=self.eidto)
+
+
+class DelCustomWorkflow(SetCustomWorkflow):
+    """when the custom workflow is removed, fall back to the entity type's
+    default workflow if one exists
+    """
+    __regid__ = 'wfdelcustom'
+    events = ('after_delete_relation',)
+
+    def __call__(self):
+        entity = self._cw.entity_from_eid(self.eidfrom)
+        typewf = entity.cw_adapt_to('IWorkflowable').cwetype_workflow()
+        if typewf is not None:
+            _WorkflowChangedOp(self._cw, eid=self.eidfrom, wfeid=typewf.eid)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/hooks/zmq.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/hooks/zmq.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# copyright 2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+
+from cubicweb.server import hook
+
+class ZMQStopHook(hook.Hook):
+    """stop the inter-instance ZMQ bus on repository shutdown"""
+    __regid__ = 'zmqstop'
+    events = ('server_shutdown',)
+
+    def __call__(self):
+        self.repo.app_instances_bus.stop()
+
+class ZMQStartHook(hook.Hook):
+    """on repository startup, set up the ZMQ inter-instance bus when
+    zmq-address-pub / zmq-address-sub is configured
+    """
+    __regid__ = 'zmqstart'
+    events = ('server_startup',)
+    order = -1  # run before other startup hooks which may use the bus
+
+    def __call__(self):
+        config = self.repo.config
+        address_pub = config.get('zmq-address-pub')
+        address_sub = config.get('zmq-address-sub')
+        if not address_pub and not address_sub:
+            # bus not configured for this instance
+            return
+        from cubicweb.server import cwzmq  # deferred import: zmq is optional
+        self.repo.app_instances_bus = cwzmq.ZMQComm()
+        if address_pub:
+            self.repo.app_instances_bus.add_publisher(address_pub)
+        def clear_cache_callback(msg):
+            # invalidate local caches when another instance publishes a 'delete'
+            self.debug('clear_caches: %s', ' '.join(msg))
+            self.repo.clear_caches(msg[1:])
+        self.repo.app_instances_bus.add_subscription('delete', clear_cache_callback)
+        for address in address_sub:
+            self.repo.app_instances_bus.add_subscriber(address)
+        self.repo.app_instances_bus.start()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/i18n.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/i18n.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,117 @@
+# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""Some i18n/gettext utilities."""
+from __future__ import print_function
+
+__docformat__ = "restructuredtext en"
+
+import re
+import os
+from os.path import join, basename, splitext, exists
+from glob import glob
+
+from six import PY2
+
+from cubicweb.toolsutils import create_dir
+
+def extract_from_tal(files, output_file):
+ """extract i18n strings from tal and write them into the given output file
+ using standard python gettext marker (_)
+ """
+ output = open(output_file, 'w')
+ for filepath in files:
+ for match in re.finditer('i18n:(content|replace)="([^"]+)"', open(filepath).read()):
+ output.write('_("%s")' % match.group(2))
+ output.close()
+
+
+def add_msg(w, msgid, msgctx=None):
+ """write an empty pot msgid definition"""
+ if PY2 and isinstance(msgid, unicode):
+ msgid = msgid.encode('utf-8')
+ if msgctx:
+ if PY2 and isinstance(msgctx, unicode):
+ msgctx = msgctx.encode('utf-8')
+ w('msgctxt "%s"\n' % msgctx)
+ msgid = msgid.replace('"', r'\"').splitlines()
+ if len(msgid) > 1:
+ w('msgid ""\n')
+ for line in msgid:
+ w('"%s"' % line.replace('"', r'\"'))
+ else:
+ w('msgid "%s"\n' % msgid[0])
+ w('msgstr ""\n\n')
+
+def execute2(args):
+    """run `args` as a subprocess, raising CalledProcessError on failure
+
+    The raised exception carries the full command list in `cmd` and the
+    captured (stdout, stderr) pair in `data`.
+    """
+    # XXX replace this with check_output in Python 2.7
+    from subprocess import Popen, PIPE, CalledProcessError
+    p = Popen(args, stdout=PIPE, stderr=PIPE)
+    out, err = p.communicate()
+    if p.returncode != 0:
+        exc = CalledProcessError(p.returncode, args[0])
+        exc.cmd = args
+        exc.data = (out, err)
+        raise exc
+
+def available_catalogs(i18ndir=None):
+    """yield (language code, file path) pairs for each .po catalog found in
+    `i18ndir` (or in the current directory when not specified)
+    """
+    if i18ndir is None:
+        wildcard = '*.po'
+    else:
+        wildcard = join(i18ndir, '*.po')
+    for popath in glob(wildcard):
+        # language code is the catalog's base file name, e.g. 'de' for de.po
+        lang = splitext(basename(popath))[0]
+        yield lang, popath
+
+
+def compile_i18n_catalogs(sourcedirs, destdir, langs):
+    """generate .mo files for a set of languages into the `destdir` i18n directory
+
+    :param sourcedirs: directories containing <lang>.po catalogs to merge;
+      msgcat is run with --use-first, so earlier directories take precedence
+    :param destdir: directory where <lang>/LC_MESSAGES/cubicweb.mo is written
+    :param langs: language codes to compile
+    :return: a list of error messages, empty on full success; a failure for
+      one language doesn't prevent the others from being compiled
+    """
+    from subprocess import CalledProcessError
+    from logilab.common.fileutils import ensure_fs_mode
+    print('-> compiling message catalogs to %s' % destdir)
+    errors = []
+    for lang in langs:
+        langdir = join(destdir, lang, 'LC_MESSAGES')
+        if not exists(langdir):
+            create_dir(langdir)
+        # only keep po files that actually exist for this language
+        pofiles = [join(path, '%s.po' % lang) for path in sourcedirs]
+        pofiles = [pof for pof in pofiles if exists(pof)]
+        mergedpo = join(destdir, '%s_merged.po' % lang)
+        try:
+            # merge instance/cubes messages catalogs with the stdlib's one
+            cmd = ['msgcat', '--use-first', '--sort-output', '--strict',
+                   '-o', mergedpo] + pofiles
+            execute2(cmd)
+            # make sure the .mo file is writeable and compiles with *msgfmt*
+            applmo = join(destdir, lang, 'LC_MESSAGES', 'cubicweb.mo')
+            try:
+                ensure_fs_mode(applmo)
+            except OSError:
+                pass # suppose not exists
+            execute2(['msgfmt', mergedpo, '-o', applmo])
+        except CalledProcessError as exc:
+            errors.append(u'while handling language %s:\ncmd:\n%s\nstdout:\n%s\nstderr:\n%s\n' %
+                          (lang, exc.cmd, repr(exc.data[0]), repr(exc.data[1])))
+        except Exception as exc:
+            errors.append(u'while handling language %s: %s' % (lang, exc))
+        try:
+            # clean everything
+            os.unlink(mergedpo)
+        except Exception:
+            # temporary merged catalog may not exist if msgcat failed
+            continue
+    return errors
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/i18n/de.po
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/i18n/de.po Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,4690 @@
+# cubicweb i18n catalog
+# Copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# Logilab
+msgid ""
+msgstr ""
+"Project-Id-Version: 2.0\n"
+"POT-Creation-Date: 2006-01-12 17:35+CET\n"
+"PO-Revision-Date: 2010-09-15 14:55+0200\n"
+"Last-Translator: Dr. Leo \n"
+"Language-Team: English \n"
+"Language: de\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: pygettext.py 1.5\n"
+"Plural-Forms: nplurals=2; plural=(n > 1);\n"
+
+#, python-format
+msgid ""
+"\n"
+"%(user)s changed status from <%(previous_state)s> to <%(current_state)s> for "
+"entity\n"
+"'%(title)s'\n"
+"\n"
+"%(comment)s\n"
+"\n"
+"url: %(url)s\n"
+msgstr ""
+"\n"
+"%(user)s hat den Zustand geändert von <%(previous_state)s> in <"
+"%(current_state)s> für die Entität\n"
+"'%(title)s'\n"
+"\n"
+"%(comment)s\n"
+"\n"
+"url: %(url)s\n"
+
+#, python-format
+msgid " from state %(fromstate)s to state %(tostate)s\n"
+msgstr " aus dem Zustand %(fromstate)s in den Zustand %(tostate)s\n"
+
+msgid " :"
+msgstr " :"
+
+#, python-format
+msgid "\"action\" must be specified in options; allowed values are %s"
+msgstr ""
+
+msgid "\"role=subject\" or \"role=object\" must be specified in options"
+msgstr ""
+
+#, python-format
+msgid "%(KEY-cstr)s constraint failed for value %(KEY-value)r"
+msgstr ""
+
+#, python-format
+msgid "%(KEY-rtype)s is part of violated unicity constraint"
+msgstr ""
+
+#, python-format
+msgid "%(KEY-value)r doesn't match the %(KEY-regexp)r regular expression"
+msgstr ""
+
+#, python-format
+msgid "%(attr)s set to %(newvalue)s"
+msgstr "%(attr)s geändert in %(newvalue)s"
+
+#, python-format
+msgid "%(attr)s updated from %(oldvalue)s to %(newvalue)s"
+msgstr "%(attr)s geändert von %(oldvalue)s in %(newvalue)s"
+
+#, python-format
+msgid "%(etype)s by %(author)s"
+msgstr ""
+
+#, python-format
+msgid "%(firstname)s %(surname)s"
+msgstr "%(firstname)s %(surname)s"
+
+#, python-format
+msgid "%(subject)s %(etype)s #%(eid)s (%(login)s)"
+msgstr "%(subject)s %(etype)s #%(eid)s (%(login)s)"
+
+#, python-format
+msgid "%d days"
+msgstr "%d Tage"
+
+#, python-format
+msgid "%d hours"
+msgstr "%d Stunden"
+
+#, python-format
+msgid "%d minutes"
+msgstr "%d Minuten"
+
+#, python-format
+msgid "%d months"
+msgstr "%d Monate"
+
+#, python-format
+msgid "%d seconds"
+msgstr "%d Sekunden"
+
+#, python-format
+msgid "%d weeks"
+msgstr "%d Wochen"
+
+#, python-format
+msgid "%d years"
+msgstr "%d Jahre"
+
+#, python-format
+msgid "%s could be supported"
+msgstr ""
+
+#, python-format
+msgid "%s error report"
+msgstr "%s Fehlerbericht"
+
+#, python-format
+msgid "%s software version of the database"
+msgstr "Software-Version der Datenbank %s"
+
+#, python-format
+msgid "%s updated"
+msgstr "%s aktualisiert"
+
+#, python-format
+msgid "'%s' action doesn't take any options"
+msgstr ""
+
+#, python-format
+msgid ""
+"'%s' action for in_state relation should at least have 'linkattr=name' option"
+msgstr ""
+
+#, python-format
+msgid "'%s' action requires 'linkattr' option"
+msgstr ""
+
+msgid "(UNEXISTANT EID)"
+msgstr "(EID nicht gefunden)"
+
+#, python-format
+msgid "(suppressed) entity #%d"
+msgstr ""
+
+msgid "**"
+msgstr "0..n 0..n"
+
+msgid "*+"
+msgstr "0..n 1..n"
+
+msgid "*1"
+msgstr "0..n 1"
+
+msgid "*?"
+msgstr "0..n 0..1"
+
+msgid "+*"
+msgstr "1..n 0..n"
+
+msgid "++"
+msgstr "1..n 1..n"
+
+msgid "+1"
+msgstr "1..n 1"
+
+msgid "+?"
+msgstr "1..n 0..1"
+
+msgid "1*"
+msgstr "1 0..n"
+
+msgid "1+"
+msgstr "1 1..n"
+
+msgid "11"
+msgstr "1 1"
+
+msgid "1?"
+msgstr "1 0..1"
+
+#, python-format
+msgid "<%s not specified>"
+msgstr "<%s nicht spezifiziert>"
+
+#, python-format
+msgid ""
+"
Este esquema del modelo de datos no incluye los meta-datos, "
+"pero se puede ver a un modelo completo con meta-datos."
+"div>"
+
+msgid ""
+msgstr ""
+
+msgid ""
+msgstr ""
+
+msgid "?*"
+msgstr "0..1 0..n"
+
+msgid "?+"
+msgstr "0..1 1..n"
+
+msgid "?1"
+msgstr "0..1 1"
+
+msgid "??"
+msgstr "0..1 0..1"
+
+msgid "AND"
+msgstr "Y"
+
+msgid "About this site"
+msgstr "Información del Sistema"
+
+#, python-format
+msgid "Added relation : %(entity_from)s %(rtype)s %(entity_to)s"
+msgstr "Relación agregada : %(entity_from)s %(rtype)s %(entity_to)s"
+
+msgid "Attributes permissions:"
+msgstr "Permisos de atributos:"
+
+# schema pot file, generated on 2009-09-16 16:46:55
+#
+# singular and plural forms for each entity type
+msgid "BaseTransition"
+msgstr "Transición (abstracta)"
+
+msgid "BaseTransition_plural"
+msgstr "Transiciones (abstractas)"
+
+msgid "BigInt"
+msgstr "Big integer"
+
+msgid "BigInt_plural"
+msgstr "Big integers"
+
+msgid "Bookmark"
+msgstr "Favorito"
+
+msgid "Bookmark_plural"
+msgstr "Favoritos"
+
+msgid "Boolean"
+msgstr "Booleano"
+
+msgid "Boolean_plural"
+msgstr "Booleanos"
+
+msgid "BoundConstraint"
+msgstr "Restricción de límite"
+
+msgid "BoundaryConstraint"
+msgstr "Restricción de límite"
+
+msgid "Browse by entity type"
+msgstr "Busca por tipo de entidad"
+
+#, python-format
+msgid "By %(user)s on %(dt)s [%(undo_link)s]"
+msgstr "Por %(user)s en %(dt)s [%(undo_link)s]"
+
+msgid "Bytes"
+msgstr "Bytes"
+
+msgid "Bytes_plural"
+msgstr "Bytes"
+
+msgid "CWAttribute"
+msgstr "Atributo"
+
+msgid "CWAttribute_plural"
+msgstr "Atributos"
+
+msgid "CWCache"
+msgstr "Cache"
+
+msgid "CWCache_plural"
+msgstr "Caches"
+
+msgid "CWComputedRType"
+msgstr ""
+
+msgid "CWComputedRType_plural"
+msgstr ""
+
+msgid "CWConstraint"
+msgstr "Restricción"
+
+msgid "CWConstraintType"
+msgstr "Tipo de Restricción"
+
+msgid "CWConstraintType_plural"
+msgstr "Tipos de Restricción"
+
+msgid "CWConstraint_plural"
+msgstr "Restricciones"
+
+msgid "CWDataImport"
+msgstr "Importación de Datos"
+
+msgid "CWDataImport_plural"
+msgstr "Importaciones de Datos"
+
+msgid "CWEType"
+msgstr "Tipo de entidad"
+
+msgctxt "inlined:CWRelation.from_entity.subject"
+msgid "CWEType"
+msgstr "Tipo de entidad"
+
+msgctxt "inlined:CWRelation.to_entity.subject"
+msgid "CWEType"
+msgstr "Tipo de entidad"
+
+msgid "CWEType_plural"
+msgstr "Tipos de entidades"
+
+msgid "CWGroup"
+msgstr "Groupo"
+
+msgid "CWGroup_plural"
+msgstr "Grupos"
+
+msgid "CWProperty"
+msgstr "Propiedad"
+
+msgid "CWProperty_plural"
+msgstr "Propiedades"
+
+msgid "CWRType"
+msgstr "Tipo de relación"
+
+msgctxt "inlined:CWRelation.relation_type.subject"
+msgid "CWRType"
+msgstr "Tipo de relación"
+
+msgid "CWRType_plural"
+msgstr "Tipos de relación"
+
+msgid "CWRelation"
+msgstr "Relación"
+
+msgid "CWRelation_plural"
+msgstr "Relaciones"
+
+msgid "CWSource"
+msgstr "Fuente de datos"
+
+msgid "CWSourceHostConfig"
+msgstr "Configuración de Fuente"
+
+msgid "CWSourceHostConfig_plural"
+msgstr "Configuraciones de fuente"
+
+msgid "CWSourceSchemaConfig"
+msgstr "Configuraciones de Esquema de Fuente"
+
+msgid "CWSourceSchemaConfig_plural"
+msgstr "Configuraciones de Esquema de Fuente"
+
+msgid "CWSource_plural"
+msgstr "Fuentes de Datos"
+
+msgid "CWUniqueTogetherConstraint"
+msgstr "Restricción de Singularidad"
+
+msgid "CWUniqueTogetherConstraint_plural"
+msgstr "Restricciones de Singularidad"
+
+msgid "CWUser"
+msgstr "Usuario"
+
+msgid "CWUser_plural"
+msgstr "Usuarios"
+
+#, python-format
+msgid ""
+"Can't restore %(role)s relation %(rtype)s to entity %(eid)s which is already "
+"linked using this relation."
+msgstr ""
+"No puede restaurar la relación %(role)s %(rtype)s en la entidad %(eid)s pues "
+"ya esta ligada a otra entidad usando esa relación."
+
+#, python-format
+msgid ""
+"Can't restore relation %(rtype)s between %(subj)s and %(obj)s, that relation "
+"does not exists anymore in the schema."
+msgstr ""
+"No puede restaurar la relación %(rtype)s entre %(subj)s y %(obj)s, esta "
+"relación ya no existe en el esquema."
+
+#, python-format
+msgid ""
+"Can't restore relation %(rtype)s, %(role)s entity %(eid)s doesn't exist "
+"anymore."
+msgstr ""
+"No puede restaurar la relación %(rtype)s, la entidad %(role)s %(eid)s ya no "
+"existe."
+
+#, python-format
+msgid ""
+"Can't undo addition of relation %(rtype)s from %(subj)s to %(obj)s, doesn't "
+"exist anymore"
+msgstr ""
+"No puede anular el agregar la relación %(rtype)s de %(subj)s a %(obj)s, esta "
+"relación ya no existe"
+
+#, python-format
+msgid ""
+"Can't undo creation of entity %(eid)s of type %(etype)s, type no more "
+"supported"
+msgstr ""
+"No puede anular la creación de la entidad %(eid)s de tipo %(etype)s, este "
+"tipo ya no existe"
+
+msgid "Click to sort on this column"
+msgstr "Seleccione para ordenar columna"
+
+msgid ""
+"Configuration of the system source goes to the 'sources' file, not in the "
+"database"
+msgstr ""
+"La configuración de la fuente sistema va en el archivo \"Sources\"/Fuentes, "
+"y no en la base de datos."
+
+#, python-format
+msgid "Created %(etype)s : %(entity)s"
+msgstr "Se creó %(etype)s : %(entity)s"
+
+msgid "DEBUG"
+msgstr "DEPURAR"
+
+msgid "Date"
+msgstr "Fecha"
+
+msgid "Date_plural"
+msgstr "Fechas"
+
+msgid "Datetime"
+msgstr "Fecha y hora"
+
+msgid "Datetime_plural"
+msgstr "Fechas y horas"
+
+msgid "Decimal"
+msgstr "Decimal"
+
+msgid "Decimal_plural"
+msgstr "Decimales"
+
+#, python-format
+msgid "Delete relation : %(entity_from)s %(rtype)s %(entity_to)s"
+msgstr "Eliminar relación : %(entity_from)s %(rtype)s %(entity_to)s"
+
+#, python-format
+msgid "Deleted %(etype)s : %(entity)s"
+msgstr "Se eliminó %(etype)s : %(entity)s"
+
+msgid "Detected problems"
+msgstr "Problemas detectados"
+
+msgid "Do you want to delete the following element(s)?"
+msgstr "Desea eliminar el/los elemento(s) a continuación?"
+
+msgid "Download schema as OWL"
+msgstr "Descargar el esquema en formato OWL"
+
+msgid "ERROR"
+msgstr "ERROR"
+
+msgid "EmailAddress"
+msgstr "Correo Electrónico"
+
+msgctxt "inlined:CWUser.use_email.subject"
+msgid "EmailAddress"
+msgstr "Correo Electrónico"
+
+msgid "EmailAddress_plural"
+msgstr "Direcciones de Correo Electrónico"
+
+msgid "Entities"
+msgstr "Entidades"
+
+#, python-format
+msgid ""
+"Entity %(eid)s has changed since you started to edit it. Reload the page and "
+"reapply your changes."
+msgstr ""
+
+msgid "Entity and relation supported by this source"
+msgstr "Entidades y relaciones aceptadas por esta fuente"
+
+msgid "ExternalUri"
+msgstr "Uri externo"
+
+msgid "ExternalUri_plural"
+msgstr "Uris externos"
+
+msgid "FATAL"
+msgstr "FATAL"
+
+msgid "Float"
+msgstr "Número flotante"
+
+msgid "Float_plural"
+msgstr "Números flotantes"
+
+# schema pot file, generated on 2009-12-03 09:22:35
+#
+# singular and plural forms for each entity type
+msgid "FormatConstraint"
+msgstr "Restricción de Formato"
+
+msgid "Garbage collection information"
+msgstr "Recolector de basura en memoria"
+
+msgid "Help"
+msgstr "Ayuda"
+
+msgid "INFO"
+msgstr "INFO"
+
+msgid "Instance"
+msgstr "Instancia"
+
+msgid "Int"
+msgstr "Número entero"
+
+msgid "Int_plural"
+msgstr "Números enteros"
+
+msgid "Interval"
+msgstr "Duración"
+
+msgid "IntervalBoundConstraint"
+msgstr "Restricción de intervalo"
+
+msgid "Interval_plural"
+msgstr "Duraciones"
+
+msgid "Link:"
+msgstr "Liga:"
+
+msgid "Looked up classes"
+msgstr "Clases buscadas"
+
+msgid "Manage"
+msgstr "Administración"
+
+msgid "Manage security"
+msgstr "Gestión de seguridad"
+
+msgid "Message threshold"
+msgstr "Límite de mensajes"
+
+msgid "Most referenced classes"
+msgstr "Clases más referenciadas"
+
+msgid "New BaseTransition"
+msgstr "Agregar transición abstracta"
+
+msgid "New Bookmark"
+msgstr "Agregar a Favoritos"
+
+msgid "New CWAttribute"
+msgstr "Nueva definición de relación final"
+
+msgid "New CWCache"
+msgstr "Agregar Caché"
+
+msgid "New CWComputedRType"
+msgstr ""
+
+msgid "New CWConstraint"
+msgstr "Agregar Restricción"
+
+msgid "New CWConstraintType"
+msgstr "Agregar tipo de Restricción"
+
+msgid "New CWDataImport"
+msgstr "Nueva importación de datos"
+
+msgid "New CWEType"
+msgstr "Agregar tipo de entidad"
+
+msgid "New CWGroup"
+msgstr "Nuevo grupo"
+
+msgid "New CWProperty"
+msgstr "Agregar Propiedad"
+
+msgid "New CWRType"
+msgstr "Agregar tipo de relación"
+
+msgid "New CWRelation"
+msgstr "Nueva definición de relación final"
+
+msgid "New CWSource"
+msgstr "Nueva fuente"
+
+msgid "New CWSourceHostConfig"
+msgstr "Nueva configuración de fuente"
+
+msgid "New CWSourceSchemaConfig"
+msgstr "Nueva parte de mapeo de fuente"
+
+msgid "New CWUniqueTogetherConstraint"
+msgstr "Nueva restricción de singularidad"
+
+msgid "New CWUser"
+msgstr "Agregar usuario"
+
+msgid "New EmailAddress"
+msgstr "Agregar Email"
+
+msgid "New ExternalUri"
+msgstr "Agregar Uri externa"
+
+msgid "New RQLExpression"
+msgstr "Agregar expresión rql"
+
+msgid "New State"
+msgstr "Agregar Estado"
+
+msgid "New SubWorkflowExitPoint"
+msgstr "Agregar salida de sub-Workflow"
+
+msgid "New TrInfo"
+msgstr "Agregar Información de Transición"
+
+msgid "New Transition"
+msgstr "Agregar transición"
+
+msgid "New Workflow"
+msgstr "Agregar Workflow"
+
+msgid "New WorkflowTransition"
+msgstr "Agregar transición de Workflow"
+
+msgid "No result matching query"
+msgstr "Ningún resultado corresponde a su búsqueda"
+
+msgid "Non exhaustive list of views that may apply to entities of this type"
+msgstr "Lista no exhaustiva de vistas aplicables a este tipo de entidad"
+
+msgid "OR"
+msgstr "O"
+
+msgid "Ownership"
+msgstr "Propiedad"
+
+msgid "Parent class:"
+msgstr "Clase padre:"
+
+msgid "Password"
+msgstr "Contraseña"
+
+msgid "Password_plural"
+msgstr "Contraseñas"
+
+msgid "Please note that this is only a shallow copy"
+msgstr "Recuerde que sólo es una copia superficial"
+
+msgid "Powered by CubicWeb"
+msgstr "Potenciado por CubicWeb"
+
+msgid "RQLConstraint"
+msgstr "Restricción RQL"
+
+msgid "RQLExpression"
+msgstr "Expresión RQL"
+
+msgid "RQLExpression_plural"
+msgstr "Expresiones RQL"
+
+msgid "RQLUniqueConstraint"
+msgstr "Restricción RQL de Unicidad"
+
+msgid "RQLVocabularyConstraint"
+msgstr "Restricción RQL de Vocabulario"
+
+msgid "RegexpConstraint"
+msgstr "restricción expresión regular"
+
+msgid "Registry's content"
+msgstr "Contenido del registro"
+
+msgid "Relations"
+msgstr "Relaciones"
+
+msgid "Repository"
+msgstr "Repositorio"
+
+#, python-format
+msgid "Schema %s"
+msgstr "Esquema %s"
+
+msgid "Schema's permissions definitions"
+msgstr "Definiciones de permisos del esquema"
+
+msgid "Search for"
+msgstr "Buscar"
+
+msgid "Site information"
+msgstr "Información del Sitio"
+
+msgid "SizeConstraint"
+msgstr "Restricción de tamaño"
+
+msgid ""
+"Source's configuration for a particular host. One key=value per line, "
+"authorized keys depending on the source's type, overriding values defined on "
+"the source."
+msgstr ""
+"Configuración de la fuente por un \"host\" específico. Una clave=valor por "
+"línea, las claves permitidas dependen del tipo de fuente. Estos valores son "
+"prioritarios a los valores definidos en la fuente."
+
+msgid "Startup views"
+msgstr "Vistas de inicio"
+
+msgid "State"
+msgstr "Estado"
+
+msgid "State_plural"
+msgstr "Estados"
+
+msgid "StaticVocabularyConstraint"
+msgstr "Restricción de vocabulario"
+
+msgid "String"
+msgstr "Cadena de caracteres"
+
+msgid "String_plural"
+msgstr "Cadenas de caracteres"
+
+msgid "Sub-classes:"
+msgstr "Clases hijas:"
+
+msgid "SubWorkflowExitPoint"
+msgstr "Salida de sub-workflow"
+
+msgid "SubWorkflowExitPoint_plural"
+msgstr "Salidas de sub-workflow"
+
+msgid "Submit bug report"
+msgstr "Enviar un reporte de error (bug)"
+
+msgid "Submit bug report by mail"
+msgstr "Enviar este reporte por email"
+
+msgid "TZDatetime"
+msgstr "Fecha y hora internacional"
+
+msgid "TZDatetime_plural"
+msgstr "Fechas y horas internacionales"
+
+msgid "TZTime"
+msgstr "Hora internacional"
+
+msgid "TZTime_plural"
+msgstr "Horas internacionales"
+
+#, python-format
+msgid "The view %s can not be applied to this query"
+msgstr "La vista %s no puede ser aplicada a esta búsqueda"
+
+#, python-format
+msgid "The view %s could not be found"
+msgstr "La vista %s no ha podido ser encontrada"
+
+msgid "There is no default workflow"
+msgstr "Esta entidad no posee workflow por defecto"
+
+msgid "This BaseTransition:"
+msgstr "Esta transición abstracta:"
+
+msgid "This Bookmark:"
+msgstr "Este favorito:"
+
+msgid "This CWAttribute:"
+msgstr "Esta definición de relación final:"
+
+msgid "This CWCache:"
+msgstr "Este Caché:"
+
+msgid "This CWComputedRType:"
+msgstr ""
+
+msgid "This CWConstraint:"
+msgstr "Esta Restricción:"
+
+msgid "This CWConstraintType:"
+msgstr "Este tipo de Restricción:"
+
+msgid "This CWDataImport:"
+msgstr "Esta importación de datos:"
+
+msgid "This CWEType:"
+msgstr "Este tipo de Entidad:"
+
+msgid "This CWGroup:"
+msgstr "Este grupo:"
+
+msgid "This CWProperty:"
+msgstr "Esta propiedad:"
+
+msgid "This CWRType:"
+msgstr "Este tipo de relación:"
+
+msgid "This CWRelation:"
+msgstr "Esta definición de relación no final:"
+
+msgid "This CWSource:"
+msgstr "Esta fuente:"
+
+msgid "This CWSourceHostConfig:"
+msgstr "Esta configuración de fuente:"
+
+msgid "This CWSourceSchemaConfig:"
+msgstr "Esta parte de mapeo de fuente:"
+
+msgid "This CWUniqueTogetherConstraint:"
+msgstr "Esta restricción de singularidad:"
+
+msgid "This CWUser:"
+msgstr "Este usuario:"
+
+msgid "This EmailAddress:"
+msgstr "Esta dirección electrónica:"
+
+msgid "This ExternalUri:"
+msgstr "Este Uri externo:"
+
+msgid "This RQLExpression:"
+msgstr "Esta expresión RQL:"
+
+msgid "This State:"
+msgstr "Este estado:"
+
+msgid "This SubWorkflowExitPoint:"
+msgstr "Esta Salida de sub-workflow:"
+
+msgid "This TrInfo:"
+msgstr "Esta información de transición:"
+
+msgid "This Transition:"
+msgstr "Esta transición:"
+
+msgid "This Workflow:"
+msgstr "Este Workflow:"
+
+msgid "This WorkflowTransition:"
+msgstr "Esta transición de Workflow:"
+
+msgid ""
+"This action is forbidden. If you think it should be allowed, please contact "
+"the site administrator."
+msgstr ""
+"Esta acción le es prohibida. Si cree que Ud. debería de tener autorización, "
+"favor de contactar al administrador del sitio. "
+
+msgid "This entity type permissions:"
+msgstr "Permisos para este tipo de entidad:"
+
+msgid "Time"
+msgstr "Hora"
+
+msgid "Time_plural"
+msgstr "Horas"
+
+msgid "TrInfo"
+msgstr "Información Transición"
+
+msgid "TrInfo_plural"
+msgstr "Información Transiciones"
+
+msgid "Transition"
+msgstr "Transición"
+
+msgid "Transition_plural"
+msgstr "Transiciones"
+
+msgid "URLs from which content will be imported. You can put one url per line"
+msgstr ""
+"URLs desde los cuales el contenido será importado. Usted puede incluir un URL "
+"por línea."
+
+msgid "Undoable actions"
+msgstr "Acciones anulables"
+
+msgid "Undoing"
+msgstr "Deshaciendo"
+
+msgid "UniqueConstraint"
+msgstr "Restricción de Unicidad"
+
+msgid "Unknown source type"
+msgstr "Tipo de fuente desconocido"
+
+msgid "Unreachable objects"
+msgstr "Objetos inaccesibles"
+
+#, python-format
+msgid "Updated %(etype)s : %(entity)s"
+msgstr "Se actualizó %(etype)s : %(entity)s"
+
+msgid "Used by:"
+msgstr "Utilizado por:"
+
+msgid "Users and groups management"
+msgstr "Administración de usuarios y grupos"
+
+msgid "WARNING"
+msgstr "ADVERTENCIA"
+
+msgid "Web server"
+msgstr "Servidor web"
+
+msgid "Workflow"
+msgstr "Workflow"
+
+msgid "Workflow history"
+msgstr "Histórico del Workflow"
+
+msgid "WorkflowTransition"
+msgstr "Transición de Workflow"
+
+msgid "WorkflowTransition_plural"
+msgstr "Transiciones de Workflow"
+
+msgid "Workflow_plural"
+msgstr "Workflows"
+
+msgid ""
+"You can either submit a new file using the browse button above, or choose to "
+"remove already uploaded file by checking the \"detach attached file\" check-"
+"box, or edit file content online with the widget below."
+msgstr ""
+"Usted puede proponer un nuevo archivo utilizando el botón\n"
+"\"buscar\" aquí arriba, o eliminar el archivo ya elegido al\n"
+"seleccionar el cuadro \"soltar archivo adjunto\", o editar el contenido\n"
+"del archivo en línea con el componente inferior."
+
+msgid ""
+"You can either submit a new file using the browse button above, or edit file "
+"content online with the widget below."
+msgstr ""
+"Puede proponer un nuevo archivo utilizando el botón buscar \n"
+"\"buscar\" en la parte superior, o editar el contenido del archivo en línea\n"
+"en el campo siguiente."
+
+msgid "You can't change this relation"
+msgstr "Usted no puede modificar esta relación"
+
+msgid "You cannot remove the system source"
+msgstr "Usted no puede eliminar la fuente sistema"
+
+msgid "You cannot rename the system source"
+msgstr "Usted no puede renombrar la fuente sistema"
+
+msgid ""
+"You have no access to this view or it can not be used to display the current "
+"data."
+msgstr ""
+"No tiene permisos para acceder a esta vista o no puede utilizarse para "
+"desplegar los datos seleccionados."
+
+msgid ""
+"You're not authorized to access this page. If you think you should, please "
+"contact the site administrator."
+msgstr ""
+"Usted no está autorizado a acceder a esta página. Si Usted cree que \n"
+"hay un error, favor de contactar al administrador del Sistema."
+
+#, python-format
+msgid "[%s supervision] changes summary"
+msgstr "[%s supervision] descripción de cambios"
+
+msgid ""
+"a RQL expression which should return some results, else the transition won't "
+"be available. This query may use X and U variables that will respectivly "
+"represents the current entity and the current user."
+msgstr ""
+"una expresión RQL que debe haber enviado resultados, para que la transición "
+"pueda ser realizada. Esta expresión puede utilizar las variables X y U que "
+"representan respectivamente la entidad en transición y el usuario actual. "
+
+msgid "a URI representing an object in external data store"
+msgstr "una URI designando un objeto en un repositorio de datos externo"
+
+msgid "a float is expected"
+msgstr "un número flotante es requerido"
+
+msgid "a number (in seconds) or 20s, 10min, 24h or 4d are expected"
+msgstr "se espera un número (en segundos) ó 20s, 10min, 24h ó 4d "
+
+msgid ""
+"a simple cache entity characterized by a name and a validity date. The "
+"target application is responsible for updating timestamp when necessary to "
+"invalidate the cache (typically in hooks). Also, checkout the AppObject."
+"get_cache() method."
+msgstr ""
+"un caché simple caracterizado por un nombre y una fecha de validez. Es\n"
+"el código de la instancia quién es responsable de actualizar la fecha de\n"
+"validez mientras el caché debe ser invalidado (en general en un hook).\n"
+"Para recuperar un caché, hace falta utilizar el método\n"
+"get_cache(cachename)."
+
+msgid "abstract base class for transitions"
+msgstr "Clase de base abstracta para las transiciones"
+
+msgid "action menu"
+msgstr "acciones"
+
+msgid "action(s) on this selection"
+msgstr "Acción(es) en esta selección"
+
+msgid "actions"
+msgstr "Acciones"
+
+msgid "activate"
+msgstr "Activar"
+
+msgid "activated"
+msgstr "Activado"
+
+msgid "add"
+msgstr "Agregar"
+
+msgid "add Bookmark bookmarked_by CWUser object"
+msgstr "Agregar a los favoritos "
+
+msgid "add CWAttribute add_permission RQLExpression subject"
+msgstr "Expresión RQL de agregación"
+
+msgid "add CWAttribute constrained_by CWConstraint subject"
+msgstr "Restricción"
+
+msgid "add CWAttribute read_permission RQLExpression subject"
+msgstr "Expresión RQL de lectura"
+
+msgid "add CWAttribute relation_type CWRType object"
+msgstr "Definición de atributo"
+
+msgid "add CWAttribute update_permission RQLExpression subject"
+msgstr "Permiso de actualización"
+
+msgid "add CWEType add_permission RQLExpression subject"
+msgstr "Expresión RQL de agregación"
+
+msgid "add CWEType delete_permission RQLExpression subject"
+msgstr "Expresión RQL de eliminación"
+
+msgid "add CWEType read_permission RQLExpression subject"
+msgstr "Expresión RQL de lectura"
+
+msgid "add CWEType update_permission RQLExpression subject"
+msgstr "Definir una expresión RQL de actualización"
+
+msgid "add CWProperty for_user CWUser object"
+msgstr "Propiedad"
+
+msgid "add CWRelation add_permission RQLExpression subject"
+msgstr "Expresión RQL de agregar"
+
+msgid "add CWRelation constrained_by CWConstraint subject"
+msgstr "Restricción"
+
+msgid "add CWRelation delete_permission RQLExpression subject"
+msgstr "Expresión RQL de supresión"
+
+msgid "add CWRelation read_permission RQLExpression subject"
+msgstr "Expresión RQL de lectura"
+
+msgid "add CWRelation relation_type CWRType object"
+msgstr "Definición de relación"
+
+msgid "add CWSourceHostConfig cw_host_config_of CWSource object"
+msgstr "configuración del host"
+
+msgid "add CWUniqueTogetherConstraint constraint_of CWEType object"
+msgstr "restricción de singularidad"
+
+msgid "add CWUser in_group CWGroup object"
+msgstr "Usuario"
+
+msgid "add CWUser use_email EmailAddress subject"
+msgstr "Email"
+
+msgid "add State allowed_transition Transition object"
+msgstr "Estado en entrada"
+
+msgid "add State allowed_transition Transition subject"
+msgstr "Transición en salida"
+
+msgid "add State allowed_transition WorkflowTransition subject"
+msgstr "Transición workflow en salida"
+
+msgid "add State state_of Workflow object"
+msgstr "Estado"
+
+msgid "add Transition condition RQLExpression subject"
+msgstr "Restricción"
+
+msgid "add Transition destination_state State object"
+msgstr "Transición de entrada"
+
+msgid "add Transition destination_state State subject"
+msgstr "Estado de salida"
+
+msgid "add Transition transition_of Workflow object"
+msgstr "Transición"
+
+msgid "add WorkflowTransition condition RQLExpression subject"
+msgstr "Condición"
+
+msgid "add WorkflowTransition subworkflow_exit SubWorkflowExitPoint subject"
+msgstr "Salida de sub-workflow"
+
+msgid "add WorkflowTransition transition_of Workflow object"
+msgstr "Transición Workflow"
+
+msgid "add a BaseTransition"
+msgstr ""
+
+msgid "add a Bookmark"
+msgstr ""
+
+msgid "add a CWAttribute"
+msgstr ""
+
+msgid "add a CWCache"
+msgstr ""
+
+msgid "add a CWComputedRType"
+msgstr ""
+
+msgid "add a CWConstraint"
+msgstr ""
+
+msgid "add a CWConstraintType"
+msgstr ""
+
+msgid "add a CWDataImport"
+msgstr ""
+
+msgid "add a CWEType"
+msgstr ""
+
+msgctxt "inlined:CWRelation.from_entity.subject"
+msgid "add a CWEType"
+msgstr "Agregar un tipo de entidad"
+
+msgctxt "inlined:CWRelation.to_entity.subject"
+msgid "add a CWEType"
+msgstr "Agregar un tipo de entidad"
+
+msgid "add a CWGroup"
+msgstr ""
+
+msgid "add a CWProperty"
+msgstr ""
+
+msgid "add a CWRType"
+msgstr ""
+
+msgctxt "inlined:CWRelation.relation_type.subject"
+msgid "add a CWRType"
+msgstr "Agregar un tipo de relación"
+
+msgid "add a CWRelation"
+msgstr ""
+
+msgid "add a CWSource"
+msgstr ""
+
+msgid "add a CWSourceHostConfig"
+msgstr ""
+
+msgid "add a CWSourceSchemaConfig"
+msgstr ""
+
+msgid "add a CWUniqueTogetherConstraint"
+msgstr ""
+
+msgid "add a CWUser"
+msgstr ""
+
+msgid "add a EmailAddress"
+msgstr ""
+
+msgctxt "inlined:CWUser.use_email.subject"
+msgid "add a EmailAddress"
+msgstr "Agregar correo electrónico"
+
+msgid "add a ExternalUri"
+msgstr ""
+
+msgid "add a RQLExpression"
+msgstr ""
+
+msgid "add a State"
+msgstr ""
+
+msgid "add a SubWorkflowExitPoint"
+msgstr ""
+
+msgid "add a TrInfo"
+msgstr ""
+
+msgid "add a Transition"
+msgstr ""
+
+msgid "add a Workflow"
+msgstr ""
+
+msgid "add a WorkflowTransition"
+msgstr ""
+
+# subject and object forms for each relation type
+# (no object form for final relation types)
+msgid "add_permission"
+msgstr "Autorización para agregar"
+
+msgctxt "CWAttribute"
+msgid "add_permission"
+msgstr "Permiso de agregar"
+
+# subject and object forms for each relation type
+# (no object form for final relation types)
+msgctxt "CWEType"
+msgid "add_permission"
+msgstr "Permiso de agregar"
+
+msgctxt "CWRelation"
+msgid "add_permission"
+msgstr "Permiso de agregar"
+
+msgid "add_permission_object"
+msgstr "tiene permiso de agregar"
+
+msgctxt "CWGroup"
+msgid "add_permission_object"
+msgstr "tiene permiso de agregar"
+
+msgctxt "RQLExpression"
+msgid "add_permission_object"
+msgstr "tiene permiso de agregar"
+
+msgid "add_relation"
+msgstr "agregar"
+
+#, python-format
+msgid "added %(etype)s #%(eid)s (%(title)s)"
+msgstr "se agregó %(etype)s #%(eid)s (%(title)s)"
+
+#, python-format
+msgid ""
+"added relation %(rtype)s from %(frometype)s #%(eidfrom)s to %(toetype)s #"
+"%(eidto)s"
+msgstr ""
+"la relación %(rtype)s de %(frometype)s #%(eidfrom)s a %(toetype)s #%(eidto)s "
+"ha sido agregada"
+
+msgid "additional type specific properties"
+msgstr "propiedades adicionales específicas al tipo"
+
+msgid "addrelated"
+msgstr "Agregar"
+
+msgid "address"
+msgstr "correo electrónico"
+
+msgctxt "EmailAddress"
+msgid "address"
+msgstr "correo electrónico"
+
+msgid "alias"
+msgstr "alias"
+
+msgctxt "EmailAddress"
+msgid "alias"
+msgstr "alias"
+
+msgid "allow to set a specific workflow for an entity"
+msgstr "permite definir un Workflow específico para una entidad"
+
+msgid "allowed options depends on the source type"
+msgstr "las opciones permitidas dependen del tipo de fuente"
+
+msgid "allowed transitions from this state"
+msgstr "transiciones autorizadas desde este estado"
+
+#, python-format
+msgid "allowed values for \"action\" are %s"
+msgstr "los valores permitidos por \"acción\" son %s"
+
+msgid "allowed_transition"
+msgstr "transiciones autorizadas"
+
+msgctxt "State"
+msgid "allowed_transition"
+msgstr "transiciones autorizadas"
+
+msgid "allowed_transition_object"
+msgstr "Estados de entrada"
+
+msgctxt "BaseTransition"
+msgid "allowed_transition_object"
+msgstr "transición autorizada de"
+
+msgctxt "Transition"
+msgid "allowed_transition_object"
+msgstr "transición autorizada de"
+
+msgctxt "WorkflowTransition"
+msgid "allowed_transition_object"
+msgstr "transición autorizada de"
+
+msgid "an electronic mail address associated to a short alias"
+msgstr "una dirección electrónica asociada a este alias"
+
+msgid "an error occurred"
+msgstr "Ha ocurrido un error"
+
+msgid "an error occurred while processing your request"
+msgstr "un error ocurrió al procesar su demanda"
+
+msgid "an error occurred, the request cannot be fulfilled"
+msgstr "un error ha ocurrido, la búsqueda no ha podido ser realizada"
+
+msgid "an integer is expected"
+msgstr "un número entero es esperado"
+
+msgid "and linked"
+msgstr "y relacionada"
+
+msgid "and/or between different values"
+msgstr "y/o entre los diferentes valores"
+
+msgid "anyrsetview"
+msgstr "vistas rset"
+
+msgid "april"
+msgstr "Abril"
+
+#, python-format
+msgid "archive for %(author)s"
+msgstr "archivo de %(author)s"
+
+#, python-format
+msgid "archive for %(month)s/%(year)s"
+msgstr "archivo del %(month)s/%(year)s"
+
+#, python-format
+msgid "at least one relation %(rtype)s is required on %(etype)s (%(eid)s)"
+msgstr ""
+"La entidad #%(eid)s de tipo %(etype)s debe necesariamente tener al menos una "
+"relación de tipo %(rtype)s"
+
+msgid "attribute"
+msgstr "Atributo"
+
+msgid "august"
+msgstr "Agosto"
+
+msgid "authentication failure"
+msgstr "Usuario o contraseña incorrecta"
+
+msgid "auto"
+msgstr "Automático"
+
+msgid "autocomputed attribute used to ensure transition coherency"
+msgstr ""
+"Atributo automatizado usado para asegurar la coherencia en la transición"
+
+msgid "automatic"
+msgstr "Automático"
+
+#, python-format
+msgid "back to pagination (%s results)"
+msgstr "regresar a paginación (%s resultados)"
+
+msgid "bad value"
+msgstr "Valor erróneo"
+
+msgid "badly formatted url"
+msgstr "URL mal formateada"
+
+msgid "base url"
+msgstr "Url de base"
+
+msgid "bookmark has been removed"
+msgstr "Ha sido eliminado de sus favoritos"
+
+msgid "bookmark this page"
+msgstr "Agregar esta página a los favoritos"
+
+msgid "bookmark this search"
+msgstr "Guardar esta búsqueda"
+
+msgid "bookmarked_by"
+msgstr "está en los Favoritos de"
+
+msgctxt "Bookmark"
+msgid "bookmarked_by"
+msgstr "está en los Favoritos de"
+
+msgid "bookmarked_by_object"
+msgstr "tiene como Favoritos"
+
+msgctxt "CWUser"
+msgid "bookmarked_by_object"
+msgstr "tiene como Favoritos"
+
+msgid "bookmarks"
+msgstr "Favoritos"
+
+msgid "bookmarks are used to have user's specific internal links"
+msgstr "los Favoritos son ligas directas a espacios guardados por el usuario"
+
+msgid "boxes"
+msgstr "Cajas"
+
+msgid "bug report sent"
+msgstr "Reporte de error enviado"
+
+msgid "button_apply"
+msgstr "Aplicar"
+
+msgid "button_cancel"
+msgstr "Cancelar"
+
+msgid "button_delete"
+msgstr "Eliminar"
+
+msgid "button_ok"
+msgstr "Validar"
+
+msgid "by"
+msgstr "por"
+
+msgid "by relation"
+msgstr "por la relación"
+
+msgid "by_transition"
+msgstr "transición"
+
+msgctxt "TrInfo"
+msgid "by_transition"
+msgstr "transición"
+
+msgid "by_transition_object"
+msgstr "cambio de estados"
+
+msgctxt "BaseTransition"
+msgid "by_transition_object"
+msgstr "tiene como información"
+
+msgctxt "Transition"
+msgid "by_transition_object"
+msgstr "tiene como información"
+
+msgctxt "WorkflowTransition"
+msgid "by_transition_object"
+msgstr "tiene como información"
+
+msgid "calendar"
+msgstr "mostrar un calendario"
+
+msgid "can not resolve entity types:"
+msgstr "Imposible de interpretar los tipos de entidades:"
+
+msgid "can only have one url"
+msgstr "solo puede tener un URL"
+
+msgid "can't be changed"
+msgstr "No puede ser modificado"
+
+msgid "can't be deleted"
+msgstr "No puede ser eliminado"
+
+msgid "can't change this attribute"
+msgstr "no puede modificar este atributo"
+
+#, python-format
+msgid "can't display data, unexpected error: %s"
+msgstr "imposible de mostrar los datos, a causa del siguiente error: %s"
+
+msgid "can't have multiple exits on the same state"
+msgstr "no puede tener varias salidas en el mismo estado"
+
+#, python-format
+msgid "can't parse %(value)r (expected %(format)s)"
+msgstr "no puede analizar %(value)r (formato requerido : %(format)s)"
+
+#, python-format
+msgid ""
+"can't restore entity %(eid)s of type %(eschema)s, target of %(rtype)s (eid "
+"%(value)s) does not exist any longer"
+msgstr ""
+"no se pudo restaurar la entidad %(eid)s del tipo %(eschema)s, objetivo de "
+"%(rtype)s (eid %(value)s) pues ésta ya no existe"
+
+#, python-format
+msgid ""
+"can't restore relation %(rtype)s of entity %(eid)s, this relation does not "
+"exist in the schema anymore."
+msgstr ""
+"no se pudo restaurar la relación %(rtype)s de la entidad %(eid)s, esta "
+"relación ya no existe en el esquema. "
+
+#, python-format
+msgid "can't restore state of entity %s, it has been deleted inbetween"
+msgstr ""
+"no se puede restaurar el estado de la entidad %s, se ha borrado desde "
+"entonces"
+
+#, python-format
+msgid ""
+"can't set inlined=True, %(stype)s %(rtype)s %(otype)s has cardinality="
+"%(card)s"
+msgstr ""
+"no puede poner 'inlined' = True, %(stype)s %(rtype)s %(otype)s tiene "
+"cardinalidad %(card)s"
+
+msgid "cancel"
+msgstr "anular"
+
+msgid "cancel select"
+msgstr "Cancelar la selección"
+
+msgid "cancel this insert"
+msgstr "Cancelar esta inserción"
+
+msgid "cardinality"
+msgstr "cardinalidad"
+
+msgctxt "CWAttribute"
+msgid "cardinality"
+msgstr "cardinalidad"
+
+msgctxt "CWRelation"
+msgid "cardinality"
+msgstr "cardinalidad"
+
+msgid "category"
+msgstr "categoría"
+
+#, python-format
+msgid "changed state of %(etype)s #%(eid)s (%(title)s)"
+msgstr "Cambiar del estado de %(etype)s #%(eid)s (%(title)s)"
+
+msgid "changes applied"
+msgstr "Cambios realizados"
+
+msgid "click here to see created entity"
+msgstr "Ver la entidad creada"
+
+msgid "click here to see edited entity"
+msgstr "seleccione aquí para ver la entidad modificada"
+
+msgid "click on the box to cancel the deletion"
+msgstr "Seleccione la zona de edición para cancelar la eliminación"
+
+msgid "click to add a value"
+msgstr "seleccione para agregar un valor"
+
+msgid "click to delete this value"
+msgstr "seleccione para eliminar este valor"
+
+msgid "click to edit this field"
+msgstr "seleccione para editar este campo"
+
+msgid "close all"
+msgstr "cerrar todos"
+
+msgid "comment"
+msgstr "Comentario"
+
+msgctxt "TrInfo"
+msgid "comment"
+msgstr "Comentario"
+
+msgid "comment_format"
+msgstr "Formato"
+
+msgctxt "TrInfo"
+msgid "comment_format"
+msgstr "Formato"
+
+msgid "components"
+msgstr "Componentes"
+
+msgid "components_navigation"
+msgstr "Navegación por página"
+
+msgid "components_navigation_description"
+msgstr ""
+"Componente que permite presentar en varias páginas los resultados de "
+"búsqueda cuando son mayores a un número predeterminado "
+
+msgid "components_rqlinput"
+msgstr "Barra RQL"
+
+msgid "components_rqlinput_description"
+msgstr "La barra para realizar consultas en RQL, en el encabezado de página"
+
+msgid "composite"
+msgstr "composite"
+
+msgctxt "CWRelation"
+msgid "composite"
+msgstr "composite"
+
+msgid "condition"
+msgstr "condición"
+
+msgctxt "BaseTransition"
+msgid "condition"
+msgstr "condición"
+
+msgctxt "Transition"
+msgid "condition"
+msgstr "condición"
+
+msgctxt "WorkflowTransition"
+msgid "condition"
+msgstr "condición"
+
+msgid "condition_object"
+msgstr "condición de"
+
+msgctxt "RQLExpression"
+msgid "condition_object"
+msgstr "condición de"
+
+msgid "conditions"
+msgstr "condiciones"
+
+msgid "config"
+msgstr "configuración"
+
+msgctxt "CWSource"
+msgid "config"
+msgstr "configuración"
+
+msgctxt "CWSourceHostConfig"
+msgid "config"
+msgstr "configuración"
+
+msgid "config mode"
+msgstr "Modo de configuración"
+
+msgid "config type"
+msgstr "Tipo de configuración"
+
+msgid "confirm password"
+msgstr "Confirmar contraseña"
+
+msgid "constrained_by"
+msgstr "Restricción impuesta por"
+
+msgctxt "CWAttribute"
+msgid "constrained_by"
+msgstr "Restricción impuesta por"
+
+msgctxt "CWRelation"
+msgid "constrained_by"
+msgstr "Restricción impuesta por"
+
+msgid "constrained_by_object"
+msgstr "Restricción de"
+
+msgctxt "CWConstraint"
+msgid "constrained_by_object"
+msgstr "Restricción de"
+
+msgid "constraint factory"
+msgstr "Fábrica de restricciones"
+
+msgid "constraint_of"
+msgstr "restricción de"
+
+msgctxt "CWUniqueTogetherConstraint"
+msgid "constraint_of"
+msgstr "restricción de"
+
+msgid "constraint_of_object"
+msgstr "restringida por"
+
+msgctxt "CWEType"
+msgid "constraint_of_object"
+msgstr "restringida por"
+
+msgid "constraints"
+msgstr "Restricciones"
+
+msgid "constraints applying on this relation"
+msgstr "Restricciones que se aplican a esta relación"
+
+msgid "content type"
+msgstr "tipo MIME"
+
+msgid "context"
+msgstr "Contexto"
+
+msgid "context where this box should be displayed"
+msgstr "Contexto en el cual la caja debe aparecer en el sistema"
+
+msgid "context where this component should be displayed"
+msgstr "Contexto en el cual el componente debe aparecer en el sistema"
+
+msgid "context where this facet should be displayed, leave empty for both"
+msgstr ""
+"Contexto en el cual esta faceta debe ser mostrada, dejar vacía para ambos"
+
+msgid "control subject entity's relations order"
+msgstr "Controla el orden de relaciones de la entidad sujeto"
+
+msgid "copy"
+msgstr "Copiar"
+
+msgid "core relation indicating a user's groups"
+msgstr ""
+"Relación sistema que indica los grupos a los cuales pertenece un usuario"
+
+msgid ""
+"core relation indicating owners of an entity. This relation implicitly put "
+"the owner into the owners group for the entity"
+msgstr ""
+"Relación sistema que indica el(los) propietario(s) de una entidad. Esta "
+"relación pone de manera implícita al propietario en el grupo de propietarios "
+"de una entidad."
+
+msgid "core relation indicating the original creator of an entity"
+msgstr "Relación sistema que indica el creador de una entidad."
+
+msgid "core relation indicating the type of an entity"
+msgstr "Relación sistema que indica el tipo de entidad."
+
+msgid ""
+"core relation indicating the types (including specialized types) of an entity"
+msgstr ""
+"Relación sistema indicando los tipos (incluídos los tipos padres) de una "
+"entidad"
+
+msgid "could not connect to the SMTP server"
+msgstr "Imposible de conectarse al servidor SMTP"
+
+msgid "create an index for quick search on this attribute"
+msgstr "Crear un índice para acelerar las búsquedas sobre este atributo"
+
+msgid "created on"
+msgstr "creado el"
+
+msgid "created_by"
+msgstr "creado por"
+
+msgid "created_by_object"
+msgstr "ha creado"
+
+msgid "creating Bookmark (Bookmark bookmarked_by CWUser %(linkto)s)"
+msgstr "Creando Favorito"
+
+msgid "creating CWAttribute (CWAttribute relation_type CWRType %(linkto)s)"
+msgstr "Creación del atributo %(linkto)s"
+
+msgid ""
+"creating CWConstraint (CWAttribute %(linkto)s constrained_by CWConstraint)"
+msgstr "Creación condicionada por el atributo %(linkto)s"
+
+msgid ""
+"creating CWConstraint (CWRelation %(linkto)s constrained_by CWConstraint)"
+msgstr "Creación condicionada por la relación %(linkto)s"
+
+msgid "creating CWProperty (CWProperty for_user CWUser %(linkto)s)"
+msgstr "Creación de una propiedad por el usuario %(linkto)s"
+
+msgid "creating CWRelation (CWRelation relation_type CWRType %(linkto)s)"
+msgstr "Creación de la relación %(linkto)s"
+
+msgid ""
+"creating CWSourceHostConfig (CWSourceHostConfig cw_host_config_of CWSource "
+"%(linkto)s)"
+msgstr "creación de una configuración host para la fuente %(linkto)s"
+
+msgid ""
+"creating CWUniqueTogetherConstraint (CWUniqueTogetherConstraint "
+"constraint_of CWEType %(linkto)s)"
+msgstr "creación de una restricción de singularidad en %(linkto)s"
+
+msgid "creating CWUser (CWUser in_group CWGroup %(linkto)s)"
+msgstr "Creación de un usuario para agregar al grupo %(linkto)s"
+
+msgid "creating EmailAddress (CWUser %(linkto)s use_email EmailAddress)"
+msgstr "Creación de una dirección electrónica para el usuario %(linkto)s"
+
+msgid ""
+"creating RQLExpression (CWAttribute %(linkto)s add_permission RQLExpression)"
+msgstr "Creación de una expresión RQL para permitir agregar %(linkto)s"
+
+msgid ""
+"creating RQLExpression (CWAttribute %(linkto)s read_permission RQLExpression)"
+msgstr "creación de una expresión RQL por el derecho de lectura de %(linkto)s"
+
+msgid ""
+"creating RQLExpression (CWAttribute %(linkto)s update_permission "
+"RQLExpression)"
+msgstr ""
+"creación de una expresión RQL por el derecho de actualización de %(linkto)s"
+
+msgid ""
+"creating RQLExpression (CWEType %(linkto)s add_permission RQLExpression)"
+msgstr ""
+"Creación de una expresión RQL para la autorización de agregar %(linkto)s"
+
+msgid ""
+"creating RQLExpression (CWEType %(linkto)s delete_permission RQLExpression)"
+msgstr ""
+"Creación de una expresión RQL para la autorización de eliminar %(linkto)s"
+
+msgid ""
+"creating RQLExpression (CWEType %(linkto)s read_permission RQLExpression)"
+msgstr "Creación de una expresión RQL para permitir leer %(linkto)s"
+
+msgid ""
+"creating RQLExpression (CWEType %(linkto)s update_permission RQLExpression)"
+msgstr "Creación de una expresión RQL para permitir actualizar %(linkto)s"
+
+msgid ""
+"creating RQLExpression (CWRelation %(linkto)s add_permission RQLExpression)"
+msgstr "Creación de una expresión RQL para permitir agregar %(linkto)s"
+
+msgid ""
+"creating RQLExpression (CWRelation %(linkto)s delete_permission "
+"RQLExpression)"
+msgstr "Creación de una expresión RQL para permitir eliminar %(linkto)s"
+
+msgid ""
+"creating RQLExpression (CWRelation %(linkto)s read_permission RQLExpression)"
+msgstr "Creación de una expresión RQL para permitir leer %(linkto)s"
+
+msgid "creating RQLExpression (Transition %(linkto)s condition RQLExpression)"
+msgstr "Creación de una expresión RQL para la transición %(linkto)s"
+
+msgid ""
+"creating RQLExpression (WorkflowTransition %(linkto)s condition "
+"RQLExpression)"
+msgstr "Creación de una expresión RQL para la transición Workflow %(linkto)s"
+
+msgid "creating State (State allowed_transition Transition %(linkto)s)"
+msgstr "Creación de un estado que puede ir hacia la transición %(linkto)s"
+
+msgid "creating State (State state_of Workflow %(linkto)s)"
+msgstr "Creando un Estado del Workflow"
+
+msgid "creating State (Transition %(linkto)s destination_state State)"
+msgstr "Creación de un Estado Destino de la Transición %(linkto)s"
+
+msgid ""
+"creating SubWorkflowExitPoint (WorkflowTransition %(linkto)s "
+"subworkflow_exit SubWorkflowExitPoint)"
+msgstr "creación de un punto de Salida de la Transición Workflow %(linkto)s"
+
+msgid "creating Transition (State %(linkto)s allowed_transition Transition)"
+msgstr "Creación de una transición autorizada desde el Estado %(linkto)s"
+
+msgid "creating Transition (Transition destination_state State %(linkto)s)"
+msgstr "Creación de un transición hacia el Estado %(linkto)s"
+
+msgid "creating Transition (Transition transition_of Workflow %(linkto)s)"
+msgstr "Creación de una Transición Workflow %(linkto)s"
+
+msgid ""
+"creating WorkflowTransition (State %(linkto)s allowed_transition "
+"WorkflowTransition)"
+msgstr ""
+"Creación de una Transición Workflow permitida desde el estado %(linkto)s"
+
+msgid ""
+"creating WorkflowTransition (WorkflowTransition transition_of Workflow "
+"%(linkto)s)"
+msgstr "Creación de una Transición Workflow del Workflow %(linkto)s"
+
+msgid "creation"
+msgstr "Creación"
+
+msgid "creation date"
+msgstr "Fecha de Creación"
+
+msgid "creation time of an entity"
+msgstr "Fecha de creación de una entidad"
+
+msgid "creation_date"
+msgstr "Fecha de Creación"
+
+msgid "cstrtype"
+msgstr "Tipo de restricción"
+
+msgctxt "CWConstraint"
+msgid "cstrtype"
+msgstr "Tipo"
+
+msgid "cstrtype_object"
+msgstr "utilizado por"
+
+msgctxt "CWConstraintType"
+msgid "cstrtype_object"
+msgstr "Tipo de restricciones"
+
+msgid "csv export"
+msgstr "Exportar en CSV"
+
+msgid "csv export (entities)"
+msgstr "Exportar a CSV (entidades)"
+
+msgid "ctxcomponents"
+msgstr "Componentes contextuales"
+
+msgid "ctxcomponents_anonuserlink"
+msgstr "Liga usuario"
+
+msgid "ctxcomponents_anonuserlink_description"
+msgstr ""
+"Muestra un enlace hacia el formulario de conexión para los usuarios "
+"anónimos, o una caja que contiene los enlaces del usuario conectado. "
+
+msgid "ctxcomponents_appliname"
+msgstr "Nombre de la aplicación"
+
+msgid "ctxcomponents_appliname_description"
+msgstr "Muestra el nombre de la aplicación en el encabezado de la página"
+
+msgid "ctxcomponents_bookmarks_box"
+msgstr "Caja de Favoritos"
+
+msgid "ctxcomponents_bookmarks_box_description"
+msgstr "Muestra y permite administrar los favoritos del usuario"
+
+msgid "ctxcomponents_breadcrumbs"
+msgstr "Ruta de Navegación"
+
+msgid "ctxcomponents_breadcrumbs_description"
+msgstr "Muestra la ruta que permite localizar la página actual en el Sistema"
+
+msgid "ctxcomponents_download_box"
+msgstr "Configuración de caja de descargas"
+
+msgid "ctxcomponents_download_box_description"
+msgstr "Caja que contiene los elementos descargados"
+
+msgid "ctxcomponents_edit_box"
+msgstr "Caja de Acciones"
+
+msgid "ctxcomponents_edit_box_description"
+msgstr "Muestra las acciones posibles a ejecutar para los datos seleccionados"
+
+msgid "ctxcomponents_facet.filterbox"
+msgstr "Filtros"
+
+msgid "ctxcomponents_facet.filterbox_description"
+msgstr "Muestra los filtros aplicables a una búsqueda realizada"
+
+msgid "ctxcomponents_logo"
+msgstr "logo"
+
+msgid "ctxcomponents_logo_description"
+msgstr "El logo de la aplicación, en el encabezado de página"
+
+msgid "ctxcomponents_metadata"
+msgstr "Metadatos de la Entidad"
+
+msgid "ctxcomponents_metadata_description"
+msgstr "espacio que incluye los metadatos de la entidad actual"
+
+msgid "ctxcomponents_possible_views_box"
+msgstr "Caja de Vistas Posibles"
+
+msgid "ctxcomponents_possible_views_box_description"
+msgstr "Muestra las vistas posibles a aplicar a los datos seleccionados"
+
+msgid "ctxcomponents_prevnext"
+msgstr "Elemento anterior / siguiente"
+
+msgid "ctxcomponents_prevnext_description"
+msgstr ""
+"Muestra las ligas que permiten pasar de una entidad a otra en las entidades "
+"que implementan la interface \"anterior/siguiente\"."
+
+msgid "ctxcomponents_rss"
+msgstr "Ícono RSS"
+
+msgid "ctxcomponents_rss_description"
+msgstr "Muestra el ícono RSS para vistas RSS"
+
+msgid "ctxcomponents_search_box"
+msgstr "Caja de búsqueda"
+
+msgid "ctxcomponents_search_box_description"
+msgstr ""
+"Permite realizar una búsqueda simple para cualquier tipo de dato en la "
+"aplicación"
+
+msgid "ctxcomponents_startup_views_box"
+msgstr "Caja Vistas de inicio"
+
+msgid "ctxcomponents_startup_views_box_description"
+msgstr "Muestra las vistas de inicio de la aplicación"
+
+msgid "ctxcomponents_userstatus"
+msgstr "estado del usuario"
+
+msgid "ctxcomponents_userstatus_description"
+msgstr "establece el estado del usuario"
+
+msgid "ctxcomponents_wfhistory"
+msgstr "Histórico del workflow."
+
+msgid "ctxcomponents_wfhistory_description"
+msgstr ""
+"Sección que muestra el reporte histórico de las transiciones del workflow. "
+"Aplica solo en entidades con workflow."
+
+msgid "ctxtoolbar"
+msgstr "Barra de herramientas"
+
+msgid "custom_workflow"
+msgstr "Workflow específico"
+
+msgid "custom_workflow_object"
+msgstr "Workflow de"
+
+msgid "cw.groups-management"
+msgstr "grupos"
+
+msgid "cw.users-management"
+msgstr "usuarios"
+
+msgid "cw_for_source"
+msgstr "fuente"
+
+msgctxt "CWSourceSchemaConfig"
+msgid "cw_for_source"
+msgstr "fuente"
+
+msgid "cw_for_source_object"
+msgstr "elemento de mapeo"
+
+msgctxt "CWSource"
+msgid "cw_for_source_object"
+msgstr "elemento de mapeo"
+
+msgid "cw_host_config_of"
+msgstr "configuración del host de"
+
+msgctxt "CWSourceHostConfig"
+msgid "cw_host_config_of"
+msgstr "configuración del host de"
+
+msgid "cw_host_config_of_object"
+msgstr "tiene la configuración del host"
+
+msgctxt "CWSource"
+msgid "cw_host_config_of_object"
+msgstr "tiene la configuración del host"
+
+msgid "cw_import_of"
+msgstr "fuente"
+
+msgctxt "CWDataImport"
+msgid "cw_import_of"
+msgstr "fuente"
+
+msgid "cw_import_of_object"
+msgstr "importación"
+
+msgctxt "CWSource"
+msgid "cw_import_of_object"
+msgstr "importación"
+
+msgid "cw_schema"
+msgstr "esquema"
+
+msgctxt "CWSourceSchemaConfig"
+msgid "cw_schema"
+msgstr "esquema"
+
+msgid "cw_schema_object"
+msgstr "mapeado por"
+
+msgctxt "CWEType"
+msgid "cw_schema_object"
+msgstr "mapeado por"
+
+msgctxt "CWRType"
+msgid "cw_schema_object"
+msgstr "mapeado por"
+
+msgctxt "CWRelation"
+msgid "cw_schema_object"
+msgstr "mapeado por"
+
+msgid "cw_source"
+msgstr "desde la fuente de datos"
+
+msgid "cw_source_object"
+msgstr "entidades"
+
+msgid "cwetype-box"
+msgstr "Vista \"caja\""
+
+msgid "cwetype-description"
+msgstr "Descripción"
+
+msgid "cwetype-permissions"
+msgstr "Permisos"
+
+msgid "cwetype-views"
+msgstr "Vistas"
+
+msgid "cwetype-workflow"
+msgstr "Workflow"
+
+msgid "cwgroup-main"
+msgstr "Descripción"
+
+msgid "cwgroup-permissions"
+msgstr "Permisos"
+
+msgid "cwrtype-description"
+msgstr "Descripción"
+
+msgid "cwrtype-permissions"
+msgstr "Permisos"
+
+msgid "cwsource-imports"
+msgstr "importación"
+
+msgid "cwsource-main"
+msgstr "descripción"
+
+msgid "cwsource-mapping"
+msgstr "mapeo"
+
+msgid "cwuri"
+msgstr "Uri Interna"
+
+msgid "data directory url"
+msgstr "Url del repertorio de datos"
+
+msgid "data model schema"
+msgstr "Esquema del Sistema"
+
+msgid "data sources"
+msgstr "fuente de datos"
+
+msgid "data sources management"
+msgstr "administración de fuentes de datos"
+
+msgid "date"
+msgstr "Fecha"
+
+msgid "deactivate"
+msgstr "Desactivar"
+
+msgid "deactivated"
+msgstr "Desactivado"
+
+msgid "december"
+msgstr "Diciembre"
+
+msgid "default"
+msgstr "Valor por defecto"
+
+msgid "default text format for rich text fields."
+msgstr ""
+"Formato de texto que se utilizará por defecto para los campos de tipo texto"
+
+msgid "default user workflow"
+msgstr "Workflow por defecto de los usuarios"
+
+msgid "default value"
+msgstr "Valor por defecto"
+
+msgid "default value as gziped pickled python object"
+msgstr "valor por defecto, en la forma de objeto python, al usar pickle y gzip"
+
+msgid "default workflow for an entity type"
+msgstr "Workflow por defecto para un tipo de entidad"
+
+msgid "default_workflow"
+msgstr "Workflow por defecto"
+
+msgctxt "CWEType"
+msgid "default_workflow"
+msgstr "Workflow por defecto"
+
+msgid "default_workflow_object"
+msgstr "Workflow por defecto de"
+
+msgctxt "Workflow"
+msgid "default_workflow_object"
+msgstr "Workflow por defecto de"
+
+msgid "defaultval"
+msgstr "Valor por defecto"
+
+msgctxt "CWAttribute"
+msgid "defaultval"
+msgstr "Valor por defecto"
+
+msgid "define a CubicWeb user"
+msgstr "Define un usuario CubicWeb"
+
+msgid "define a CubicWeb users group"
+msgstr "Define un grupo de usuarios CubicWeb"
+
+msgid ""
+"define a final relation: link a final relation type from a non final entity "
+"to a final entity type. used to build the instance schema"
+msgstr ""
+"Define una relación final: liga un tipo de relación final desde una entidad "
+"NO final hacia un tipo de entidad final. Se usa para crear el esquema de la "
+"instancia."
+
+msgid ""
+"define a non final relation: link a non final relation type from a non final "
+"entity to a non final entity type. used to build the instance schema"
+msgstr ""
+"Define una relación NO final: liga un tipo de relación NO final desde una "
+"entidad NO final hacia un tipo de entidad NO final. Se usa para crear el "
+"esquema de la instancia."
+
+msgid "define a relation type, used to build the instance schema"
+msgstr ""
+"Define un tipo de relación, usado para construir el esquema de la instancia."
+
+msgid "define a rql expression used to define permissions"
+msgstr "Expresión RQL utilizada para definir los derechos de acceso"
+
+msgid "define a schema constraint"
+msgstr "Define una condición de esquema"
+
+msgid "define a schema constraint type"
+msgstr "Define un tipo de condición de esquema"
+
+msgid "define a virtual relation type, used to build the instance schema"
+msgstr "Define un tipo de relación virtual, usado para construir el esquema de la instancia."
+
+msgid "define an entity type, used to build the instance schema"
+msgstr ""
+"Define un tipo de entidad, usado para construir el esquema de la instancia."
+
+msgid "define how we get out from a sub-workflow"
+msgstr "Define como salir de un sub-Workflow"
+
+msgid "defines a sql-level multicolumn unique index"
+msgstr "define un índice SQL único a través de varias columnas"
+
+msgid ""
+"defines what's the property is applied for. You must select this first to be "
+"able to set value"
+msgstr ""
+"Define a que se aplica la propiedad . Debe de seleccionar esto antes de "
+"establecer un valor"
+
+msgid "delete"
+msgstr "Eliminar"
+
+msgid "delete this bookmark"
+msgstr "Eliminar este favorito"
+
+msgid "delete this relation"
+msgstr "Eliminar esta relación"
+
+msgid "delete_permission"
+msgstr "Permiso de eliminar"
+
+msgctxt "CWEType"
+msgid "delete_permission"
+msgstr "Permiso de eliminar"
+
+msgctxt "CWRelation"
+msgid "delete_permission"
+msgstr "Permiso de eliminar"
+
+msgid "delete_permission_object"
+msgstr "posee permiso para eliminar"
+
+msgctxt "CWGroup"
+msgid "delete_permission_object"
+msgstr "puede eliminar"
+
+msgctxt "RQLExpression"
+msgid "delete_permission_object"
+msgstr "puede eliminar"
+
+#, python-format
+msgid "deleted %(etype)s #%(eid)s (%(title)s)"
+msgstr "Eliminación de la entidad %(etype)s #%(eid)s (%(title)s)"
+
+#, python-format
+msgid ""
+"deleted relation %(rtype)s from %(frometype)s #%(eidfrom)s to %(toetype)s #"
+"%(eidto)s"
+msgstr ""
+"La relación %(rtype)s de %(frometype)s #%(eidfrom)s a %(toetype)s #%(eidto)s "
+"ha sido suprimida."
+
+msgid "depends on the constraint type"
+msgstr "Depende del tipo de restricción"
+
+msgid "description"
+msgstr "Descripción"
+
+msgctxt "BaseTransition"
+msgid "description"
+msgstr "Descripción"
+
+msgctxt "CWAttribute"
+msgid "description"
+msgstr "Descripción"
+
+msgctxt "CWComputedRType"
+msgid "description"
+msgstr "Descripción"
+
+msgctxt "CWEType"
+msgid "description"
+msgstr "Descripción"
+
+msgctxt "CWRType"
+msgid "description"
+msgstr "Descripción"
+
+msgctxt "CWRelation"
+msgid "description"
+msgstr "Descripción"
+
+msgctxt "State"
+msgid "description"
+msgstr "Descripción"
+
+msgctxt "Transition"
+msgid "description"
+msgstr "Descripción"
+
+msgctxt "Workflow"
+msgid "description"
+msgstr "Descripción"
+
+msgctxt "WorkflowTransition"
+msgid "description"
+msgstr "Descripción"
+
+msgid "description_format"
+msgstr "Formato"
+
+msgctxt "BaseTransition"
+msgid "description_format"
+msgstr "Formato"
+
+msgctxt "CWAttribute"
+msgid "description_format"
+msgstr "Formato"
+
+msgctxt "CWComputedRType"
+msgid "description_format"
+msgstr "Formato"
+
+msgctxt "CWEType"
+msgid "description_format"
+msgstr "Formato"
+
+msgctxt "CWRType"
+msgid "description_format"
+msgstr "Formato"
+
+msgctxt "CWRelation"
+msgid "description_format"
+msgstr "Formato"
+
+msgctxt "State"
+msgid "description_format"
+msgstr "Formato"
+
+msgctxt "Transition"
+msgid "description_format"
+msgstr "Formato"
+
+msgctxt "Workflow"
+msgid "description_format"
+msgstr "Formato"
+
+msgctxt "WorkflowTransition"
+msgid "description_format"
+msgstr "Formato"
+
+msgid "destination state for this transition"
+msgstr "Estados accesibles para esta transición"
+
+msgid "destination state must be in the same workflow as our parent transition"
+msgstr ""
+"El estado de destino debe pertenecer al mismo Workflow que la transición "
+"padre."
+
+msgid "destination state of a transition"
+msgstr "Estado destino de una transición"
+
+msgid ""
+"destination state. No destination state means that transition should go back "
+"to the state from which we've entered the subworkflow."
+msgstr ""
+"Estado destino de la transición. Si el Estado destino no ha sido "
+"especificado, la transición regresará hacia el estado que tenía la entidad "
+"al entrar en el Sub-Workflow."
+
+msgid "destination_state"
+msgstr "Estado destino"
+
+msgctxt "SubWorkflowExitPoint"
+msgid "destination_state"
+msgstr "Estado destino"
+
+msgctxt "Transition"
+msgid "destination_state"
+msgstr "Estado destino"
+
+msgid "destination_state_object"
+msgstr "Destino de"
+
+msgctxt "State"
+msgid "destination_state_object"
+msgstr "Estado final de"
+
+msgid "detach attached file"
+msgstr "soltar el archivo existente"
+
+msgid "display order of the box"
+msgstr "Orden de aparición de la caja"
+
+msgid "display order of the component"
+msgstr "Orden de aparición del componente"
+
+msgid "display order of the facet"
+msgstr "Orden de aparición de la faceta"
+
+msgid "display the box or not"
+msgstr "Mostrar o no la caja"
+
+msgid "display the component or not"
+msgstr "Mostrar o no el componente"
+
+msgid "display the facet or not"
+msgstr "Mostrar o no la faceta"
+
+msgid "download"
+msgstr "Descargar"
+
+#, python-format
+msgid "download %s"
+msgstr "Descargar %s"
+
+msgid "download icon"
+msgstr "ícono de descarga"
+
+msgid "download schema as owl"
+msgstr "Descargar esquema en formato OWL"
+
+msgid "edit bookmarks"
+msgstr "Editar favoritos"
+
+msgid "edit canceled"
+msgstr "Edición cancelada"
+
+msgid "editable-table"
+msgstr "Tabla modificable"
+
+msgid "eid"
+msgstr "eid"
+
+msgid "embedded html"
+msgstr "Html incrustado"
+
+msgid "end_timestamp"
+msgstr "horario final"
+
+msgctxt "CWDataImport"
+msgid "end_timestamp"
+msgstr "horario final"
+
+msgid "entities deleted"
+msgstr "Entidades eliminadas"
+
+msgid "entity and relation types can't be mapped, only attributes or relations"
+msgstr ""
+"los tipos de entidad y relación no pueden ser mapeados, solo los atributos y "
+"las relaciones"
+
+msgid "entity copied"
+msgstr "Entidad copiada"
+
+msgid "entity created"
+msgstr "Entidad creada"
+
+msgid "entity creation"
+msgstr "Creación de entidad"
+
+msgid "entity deleted"
+msgstr "Entidad eliminada"
+
+msgid "entity deletion"
+msgstr "Eliminación de entidad"
+
+msgid "entity edited"
+msgstr "Entidad modificada"
+
+msgid "entity has no workflow set"
+msgstr "La entidad no tiene Workflow"
+
+msgid "entity linked"
+msgstr "Entidad asociada"
+
+msgid "entity type"
+msgstr "Tipo de entidad"
+
+msgid "entity types which may use this workflow"
+msgstr "Tipos de entidades que pueden utilizar este Workflow"
+
+msgid "entity update"
+msgstr "Actualización de la Entidad"
+
+msgid "entityview"
+msgstr "vistas de entidades"
+
+msgid "error"
+msgstr "error"
+
+msgid "error while publishing ReST text"
+msgstr ""
+"Se ha producido un error durante la interpretación del texto en formato ReST"
+
+msgid "exit state must be a subworkflow state"
+msgstr "El estado de salida debe de ser un estado del Sub-Workflow"
+
+msgid "exit_point"
+msgstr "Estado de Salida"
+
+msgid "exit_point_object"
+msgstr "Estado de Salida de"
+
+#, python-format
+msgid "exiting from subworkflow %s"
+msgstr "Salida del subworkflow %s"
+
+msgid "expression"
+msgstr "Expresión"
+
+msgctxt "RQLExpression"
+msgid "expression"
+msgstr "RQL de la expresión"
+
+msgid "exprtype"
+msgstr "Tipo de la expresión"
+
+msgctxt "RQLExpression"
+msgid "exprtype"
+msgstr "Tipo"
+
+msgid "extra_props"
+msgstr "propiedades adicionales"
+
+msgctxt "CWAttribute"
+msgid "extra_props"
+msgstr "propiedades adicionales"
+
+msgid "facet-loading-msg"
+msgstr "procesando, espere por favor"
+
+msgid "facet.filters"
+msgstr "Filtros"
+
+msgid "facetbox"
+msgstr "Caja de facetas"
+
+msgid "facets_created_by-facet"
+msgstr "Faceta \"creada por\""
+
+msgid "facets_created_by-facet_description"
+msgstr "Faceta creada por"
+
+msgid "facets_cw_source-facet"
+msgstr "faceta \"fuente de datos\""
+
+msgid "facets_cw_source-facet_description"
+msgstr "fuente de datos"
+
+msgid "facets_cwfinal-facet"
+msgstr "Faceta \"final\""
+
+msgid "facets_cwfinal-facet_description"
+msgstr "Faceta para las entidades \"finales\""
+
+msgid "facets_datafeed.dataimport.status"
+msgstr "estado de la importación"
+
+msgid "facets_datafeed.dataimport.status_description"
+msgstr "Estado de la importación de datos"
+
+msgid "facets_etype-facet"
+msgstr "Faceta \"es de tipo\""
+
+msgid "facets_etype-facet_description"
+msgstr "Faceta es de tipo"
+
+msgid "facets_has_text-facet"
+msgstr "Faceta \"contiene el texto\""
+
+msgid "facets_has_text-facet_description"
+msgstr "Faceta contiene el texto"
+
+msgid "facets_in_group-facet"
+msgstr "Faceta \"forma parte del grupo\""
+
+msgid "facets_in_group-facet_description"
+msgstr "Faceta en grupo"
+
+msgid "facets_in_state-facet"
+msgstr "Faceta \"en el estado\""
+
+msgid "facets_in_state-facet_description"
+msgstr "Faceta en el estado"
+
+msgid "failed"
+msgstr "fallido"
+
+#, python-format
+msgid "failed to uniquify path (%s, %s)"
+msgstr "No se pudo obtener un dato único (%s, %s)"
+
+msgid "february"
+msgstr "Febrero"
+
+msgid "file tree view"
+msgstr "Arborescencia (archivos)"
+
+msgid "final"
+msgstr "Final"
+
+msgctxt "CWEType"
+msgid "final"
+msgstr "Final"
+
+msgctxt "CWRType"
+msgid "final"
+msgstr "Final"
+
+msgid "first name"
+msgstr "Nombre"
+
+msgid "firstname"
+msgstr "Nombre"
+
+msgctxt "CWUser"
+msgid "firstname"
+msgstr "Nombre"
+
+msgid "foaf"
+msgstr "Amigo de un Amigo, FOAF"
+
+msgid "focus on this selection"
+msgstr "muestre esta selección"
+
+msgid "follow"
+msgstr "Seguir la liga"
+
+#, python-format
+msgid "follow this link for more information on this %s"
+msgstr "Seleccione esta liga para obtener mayor información sobre %s"
+
+msgid "for_user"
+msgstr "Para el usuario"
+
+msgctxt "CWProperty"
+msgid "for_user"
+msgstr "Propiedad del Usuario"
+
+msgid "for_user_object"
+msgstr "Utiliza las propiedades"
+
+msgctxt "CWUser"
+msgid "for_user_object"
+msgstr "Tiene como preferencia"
+
+msgid "formula"
+msgstr "Fórmula"
+
+msgctxt "CWAttribute"
+msgid "formula"
+msgstr "Fórmula"
+
+msgid "friday"
+msgstr "Viernes"
+
+msgid "from"
+msgstr "De"
+
+#, python-format
+msgid "from %(date)s"
+msgstr "de %(date)s"
+
+msgid "from_entity"
+msgstr "De la entidad"
+
+msgctxt "CWAttribute"
+msgid "from_entity"
+msgstr "Atributo de la entidad"
+
+msgctxt "CWRelation"
+msgid "from_entity"
+msgstr "Relación de la entidad"
+
+msgid "from_entity_object"
+msgstr "Relación sujeto"
+
+msgctxt "CWEType"
+msgid "from_entity_object"
+msgstr "Entidad de"
+
+msgid "from_interval_start"
+msgstr "De"
+
+msgid "from_state"
+msgstr "Del Estado"
+
+msgctxt "TrInfo"
+msgid "from_state"
+msgstr "Estado de Inicio"
+
+msgid "from_state_object"
+msgstr "Transiciones desde este estado"
+
+msgctxt "State"
+msgid "from_state_object"
+msgstr "Estado de Inicio de"
+
+msgid "full text or RQL query"
+msgstr "Texto de búsqueda o demanda RQL"
+
+msgid "fulltext_container"
+msgstr "Contenedor de texto indexado"
+
+msgctxt "CWRType"
+msgid "fulltext_container"
+msgstr "Objeto a indexar"
+
+msgid "fulltextindexed"
+msgstr "Indexación de texto"
+
+msgctxt "CWAttribute"
+msgid "fulltextindexed"
+msgstr "Texto indexado"
+
+msgid "gc"
+msgstr "fuga de memoria"
+
+msgid "generic plot"
+msgstr "Gráfica Genérica"
+
+msgid "generic relation to link one entity to another"
+msgstr "Relación genérica para ligar entidades"
+
+msgid ""
+"generic relation to specify that an external entity represent the same "
+"object as a local one: http://www.w3.org/TR/owl-ref/#sameAs-def"
+msgstr ""
+"Relación genérica que indicar que una entidad es idéntica a otro recurso web "
+"(ver http://www.w3.org/TR/owl-ref/#sameAs-def)."
+
+msgid "granted to groups"
+msgstr "Otorgado a los grupos"
+
+#, python-format
+msgid "graphical representation of %(appid)s data model"
+msgstr "Representación gráfica del modelo de datos de %(appid)s"
+
+#, python-format
+msgid ""
+"graphical representation of the %(etype)s entity type from %(appid)s data "
+"model"
+msgstr ""
+"Representación gráfica del modelo de datos para el tipo de entidad %(etype)s "
+"de %(appid)s"
+
+#, python-format
+msgid ""
+"graphical representation of the %(rtype)s relation type from %(appid)s data "
+"model"
+msgstr ""
+"Representación gráfica del modelo de datos para el tipo de relación "
+"%(rtype)s de %(appid)s"
+
+msgid "group in which a user should be to be allowed to pass this transition"
+msgstr "Grupo en el cual el usuario debe estar para lograr la transición"
+
+msgid "groups"
+msgstr "Grupos"
+
+msgid "groups allowed to add entities/relations of this type"
+msgstr "grupos autorizados a agregar entidades/relaciones de este tipo"
+
+msgid "groups allowed to delete entities/relations of this type"
+msgstr "grupos autorizados a eliminar entidades/relaciones de este tipo"
+
+msgid "groups allowed to read entities/relations of this type"
+msgstr "grupos autorizados a leer entidades/relaciones de este tipo"
+
+msgid "groups allowed to update entities/relations of this type"
+msgstr "grupos autorizados a actualizar entidades/relaciones de este tipo"
+
+msgid "groups grant permissions to the user"
+msgstr "Los grupos otorgan los permisos al usuario"
+
+msgid "guests"
+msgstr "Invitados"
+
+msgid "hCalendar"
+msgstr "hCalendar"
+
+msgid "has_text"
+msgstr "Contiene el texto"
+
+msgid "header-center"
+msgstr "header - centro"
+
+msgid "header-left"
+msgstr "encabezado (izquierdo)"
+
+msgid "header-right"
+msgstr "encabezado (derecho)"
+
+msgid "hide filter form"
+msgstr "Esconder el filtro"
+
+msgid ""
+"how to format date and time in the ui (see this page for format "
+"description)"
+msgstr ""
+"Formato de fecha y hora que se utilizará por defecto en la interfaz (mayor información del formato)"
+
+msgid ""
+"how to format date in the ui (see this page for format "
+"description)"
+msgstr ""
+"Formato de fecha que se utilizará por defecto en la interfaz (mayor información del formato)"
+
+msgid "how to format float numbers in the ui"
+msgstr ""
+"Formato de números flotantes que se utilizará por defecto en la interfaz"
+
+msgid ""
+"how to format time in the ui (see this page for format "
+"description)"
+msgstr ""
+"Formato de hora que se utilizará por defecto en la interfaz (mayor información del formato)"
+
+msgid "i18n_bookmark_url_fqs"
+msgstr "Parámetros"
+
+msgid "i18n_bookmark_url_path"
+msgstr "Ruta"
+
+msgid "i18n_login_popup"
+msgstr "Identificarse"
+
+msgid "i18ncard_*"
+msgstr "0..n"
+
+msgid "i18ncard_+"
+msgstr "1..n"
+
+msgid "i18ncard_1"
+msgstr "1"
+
+msgid "i18ncard_?"
+msgstr "0..1"
+
+msgid "i18nprevnext_next"
+msgstr "Siguiente"
+
+msgid "i18nprevnext_previous"
+msgstr "Anterior"
+
+msgid "i18nprevnext_up"
+msgstr "Padre"
+
+msgid "iCalendar"
+msgstr "iCalendar"
+
+msgid "id of main template used to render pages"
+msgstr "ID del template principal"
+
+msgid "identical to"
+msgstr "Idéntico a"
+
+msgid "identical_to"
+msgstr "idéntico a"
+
+msgid "identity"
+msgstr "es idéntico a"
+
+msgid "identity_object"
+msgstr "es idéntico a"
+
+msgid ""
+"if full text content of subject/object entity should be added to other side "
+"entity (the container)."
+msgstr ""
+"Si el texto indexado de la entidad sujeto/objeto debe ser agregado a la "
+"entidad al otro extremo de la relación (el contenedor)."
+
+msgid "image"
+msgstr "Imagen"
+
+msgid "in progress"
+msgstr "en progreso"
+
+msgid "in_group"
+msgstr "En el grupo"
+
+msgctxt "CWUser"
+msgid "in_group"
+msgstr "Forma parte del grupo"
+
+msgid "in_group_object"
+msgstr "Miembros"
+
+msgctxt "CWGroup"
+msgid "in_group_object"
+msgstr "Contiene los usuarios"
+
+msgid "in_state"
+msgstr "Estado"
+
+msgid "in_state_object"
+msgstr "Estado de"
+
+msgid "in_synchronization"
+msgstr "sincronizado"
+
+msgctxt "CWSource"
+msgid "in_synchronization"
+msgstr "sincronizado"
+
+msgid "incontext"
+msgstr "En el contexto"
+
+msgid "incorrect captcha value"
+msgstr "Valor del Captcha incorrecto"
+
+#, python-format
+msgid "incorrect value (%(KEY-value)r) for type \"%(KEY-type)s\""
+msgstr "el valor (%(KEY-value)r) es incorrecto para el tipo \"%(KEY-type)s\""
+
+msgid "index this attribute's value in the plain text index"
+msgstr "Indexar el valor de este atributo en el índice de texto simple"
+
+msgid "indexed"
+msgstr "Indexado"
+
+msgctxt "CWAttribute"
+msgid "indexed"
+msgstr "Indexado"
+
+msgid "indicate the current state of an entity"
+msgstr "Indica el estado actual de una entidad"
+
+msgid ""
+"indicate which state should be used by default when an entity using states "
+"is created"
+msgstr ""
+"Indica cual estado deberá ser utilizado por defecto al crear una entidad"
+
+msgid "indifferent"
+msgstr "indiferente"
+
+msgid "info"
+msgstr "Información del Sistema"
+
+msgid "initial state for this workflow"
+msgstr "Estado inicial para este Workflow"
+
+msgid "initial_state"
+msgstr "Estado inicial"
+
+msgctxt "Workflow"
+msgid "initial_state"
+msgstr "Estado inicial"
+
+msgid "initial_state_object"
+msgstr "Estado inicial de"
+
+msgctxt "State"
+msgid "initial_state_object"
+msgstr "Estado inicial de"
+
+msgid "inlined"
+msgstr "Inlined"
+
+msgctxt "CWRType"
+msgid "inlined"
+msgstr "Inlined"
+
+msgid "instance home"
+msgstr "Repertorio de la Instancia"
+
+msgid "internal entity uri"
+msgstr "Uri Interna"
+
+msgid "internationalizable"
+msgstr "Internacionalizable"
+
+msgctxt "CWAttribute"
+msgid "internationalizable"
+msgstr "Internacionalizable"
+
+#, python-format
+msgid "invalid action %r"
+msgstr "Acción %r inválida"
+
+#, python-format
+msgid "invalid value %(KEY-value)s, it must be one of %(KEY-choices)s"
+msgstr "Valor %(KEY-value)s es incorrecto, seleccione entre %(KEY-choices)s"
+
+msgid "is"
+msgstr "es"
+
+msgid "is object of:"
+msgstr "es objeto de"
+
+msgid "is subject of:"
+msgstr "es sujeto de"
+
+msgid ""
+"is the subject/object entity of the relation composed of the other ? This "
+"implies that when the composite is deleted, composants are also deleted."
+msgstr ""
+"Es la entidad sujeto/objeto de la relación una agregación del otro ? De "
+"ser así, el destruir el composite destruirá de igual manera sus componentes "
+
+msgid "is this attribute's value translatable"
+msgstr "Es el valor de este atributo traducible ?"
+
+msgid "is this relation equivalent in both direction ?"
+msgstr "Es esta relación equivalente en los ambos sentidos ?"
+
+msgid ""
+"is this relation physically inlined? you should know what you're doing if "
+"you are changing this!"
+msgstr ""
+"Es esta relación estilo INLINED en la base de datos ? Usted debe saber lo "
+"que hace si cambia esto !"
+
+msgid "is_instance_of"
+msgstr "es una instancia de"
+
+msgid "is_instance_of_object"
+msgstr "tiene como instancias"
+
+msgid "is_object"
+msgstr "tiene por instancia"
+
+msgid "january"
+msgstr "Enero"
+
+msgid "json-entities-export-view"
+msgstr "Exportación JSON (de entidades)"
+
+msgid "json-export-view"
+msgstr "Exportación JSON"
+
+msgid "july"
+msgstr "Julio"
+
+msgid "june"
+msgstr "Junio"
+
+msgid "language of the user interface"
+msgstr "Idioma que se utilizará por defecto en la interfaz usuario"
+
+msgid "last connection date"
+msgstr "Ultima conexión"
+
+msgid "last login time"
+msgstr "Ultima conexión"
+
+msgid "last name"
+msgstr "Apellido"
+
+msgid "last usage"
+msgstr "Ultimo uso"
+
+msgid "last_login_time"
+msgstr "Ultima fecha de conexión"
+
+msgctxt "CWUser"
+msgid "last_login_time"
+msgstr "Ultima conexión"
+
+msgid "latest import"
+msgstr "importaciones recientes"
+
+msgid "latest modification time of an entity"
+msgstr "Fecha de la última modificación de una entidad "
+
+msgid "latest synchronization time"
+msgstr "fecha de la última sincronización"
+
+msgid "latest update on"
+msgstr "Actualizado el"
+
+msgid "latest_retrieval"
+msgstr "última sincronización"
+
+msgctxt "CWSource"
+msgid "latest_retrieval"
+msgstr "fecha de la última sincronización de la fuente"
+
+msgid "left"
+msgstr "izquierda"
+
+msgid "line"
+msgstr "línea"
+
+msgid ""
+"link a property to the user which want this property customization. Unless "
+"you're a site manager, this relation will be handled automatically."
+msgstr ""
+"Liga una propiedad al usuario que desea esta personalización. Salvo que "
+"usted sea un administrador del sistema, esta relación será administrada de "
+"forma automática."
+
+msgid "link a relation definition to its object entity type"
+msgstr "Liga una definición de relación a su tipo de entidad objeto"
+
+msgid "link a relation definition to its relation type"
+msgstr "Liga una definición de relación a su tipo de relación"
+
+msgid "link a relation definition to its subject entity type"
+msgstr "Liga una definición de relación a su tipo de entidad"
+
+msgid "link a state to one or more workflow"
+msgstr "Liga un estado a uno o más Workflow"
+
+msgid "link a transition information to its object"
+msgstr "Liga una transición de informacion hacia los objetos asociados"
+
+msgid "link a transition to one or more workflow"
+msgstr "Liga una transición a uno o más Workflow"
+
+msgid "link a workflow to one or more entity type"
+msgstr "Liga un Workflow a uno a más tipos de entidad"
+
+msgid "list"
+msgstr "Lista"
+
+msgid "log"
+msgstr "log"
+
+msgctxt "CWDataImport"
+msgid "log"
+msgstr "log"
+
+msgid "log in"
+msgstr "Acceder"
+
+msgid "login"
+msgstr "Usuario"
+
+msgctxt "CWUser"
+msgid "login"
+msgstr "Usuario"
+
+msgid "login / password"
+msgstr "usuario / contraseña"
+
+msgid "login or email"
+msgstr "Usuario o dirección de correo"
+
+msgid "login_action"
+msgstr "Ingresa tus datos"
+
+msgid "logout"
+msgstr "Desconectarse"
+
+#, python-format
+msgid "loop in %(rel)s relation (%(eid)s)"
+msgstr "loop detectado en %(rel)s de la entidad #%(eid)s"
+
+msgid "main informations"
+msgstr "Informaciones Generales"
+
+msgid "main_tab"
+msgstr "descripción"
+
+msgid "mainvars"
+msgstr "Variables principales"
+
+msgctxt "RQLExpression"
+msgid "mainvars"
+msgstr "Variables principales"
+
+msgid "manage"
+msgstr "Administración Sistema"
+
+msgid "manage bookmarks"
+msgstr "Gestión de favoritos"
+
+msgid "manage permissions"
+msgstr "Gestión de permisos"
+
+msgid "managers"
+msgstr "Administradores"
+
+msgid "mandatory relation"
+msgstr "Relación obligatoria"
+
+msgid "march"
+msgstr "Marzo"
+
+msgid "match_host"
+msgstr "para el host"
+
+msgctxt "CWSourceHostConfig"
+msgid "match_host"
+msgstr "para el host"
+
+msgid "maximum number of characters in short description"
+msgstr "Máximo de caracteres en las descripciones cortas"
+
+msgid "maximum number of entities to display in related combo box"
+msgstr "Máximo de entidades a mostrar en las listas dinámicas"
+
+msgid "maximum number of objects displayed by page of results"
+msgstr "Máximo de elementos mostrados por página de resultados"
+
+msgid "maximum number of related entities to display in the primary view"
+msgstr "Máximo de entidades relacionadas a mostrar en la vista primaria"
+
+msgid "may"
+msgstr "Mayo"
+
+msgid "memory leak debugging"
+msgstr "depuración (debugging) de fuga de memoria"
+
+msgid "message"
+msgstr "mensaje"
+
+#, python-format
+msgid "missing parameters for entity %s"
+msgstr "Parámetros faltantes a la entidad %s"
+
+msgid "modification"
+msgstr "modificación"
+
+msgid "modification_date"
+msgstr "Fecha de modificación"
+
+msgid "modify"
+msgstr "Modificar"
+
+msgid "monday"
+msgstr "Lunes"
+
+msgid "more actions"
+msgstr "Más acciones"
+
+msgid "more info about this workflow"
+msgstr "Más información acerca de este workflow"
+
+msgid "multiple edit"
+msgstr "Edición multiple"
+
+msgid "my custom search"
+msgstr "Mi búsqueda personalizada"
+
+msgid "name"
+msgstr "Nombre"
+
+msgctxt "BaseTransition"
+msgid "name"
+msgstr "Nombre"
+
+msgctxt "CWCache"
+msgid "name"
+msgstr "Nombre"
+
+msgctxt "CWComputedRType"
+msgid "name"
+msgstr ""
+
+msgctxt "CWConstraintType"
+msgid "name"
+msgstr "Nombre"
+
+msgctxt "CWEType"
+msgid "name"
+msgstr "Nombre"
+
+msgctxt "CWGroup"
+msgid "name"
+msgstr "Nombre"
+
+msgctxt "CWRType"
+msgid "name"
+msgstr "Nombre"
+
+msgctxt "CWSource"
+msgid "name"
+msgstr "nombre"
+
+msgctxt "CWUniqueTogetherConstraint"
+msgid "name"
+msgstr "nombre"
+
+msgctxt "State"
+msgid "name"
+msgstr "nombre"
+
+msgctxt "Transition"
+msgid "name"
+msgstr "Nombre"
+
+msgctxt "Workflow"
+msgid "name"
+msgstr "Nombre"
+
+msgctxt "WorkflowTransition"
+msgid "name"
+msgstr "Nombre"
+
+msgid "name of the cache"
+msgstr "Nombre del Caché"
+
+msgid ""
+"name of the main variables which should be used in the selection if "
+"necessary (comma separated)"
+msgstr ""
+"Nombre de las variables principales que deberían ser utilizadas en la "
+"selección de ser necesario (separarlas con comas)"
+
+msgid "name of the source"
+msgstr "nombre de la fuente"
+
+msgid "navbottom"
+msgstr "Pie de página"
+
+msgid "navcontentbottom"
+msgstr "Pie de página del contenido principal"
+
+msgid "navcontenttop"
+msgstr "Encabezado"
+
+msgid "navigation"
+msgstr "Navegación"
+
+msgid "navigation.combobox-limit"
+msgstr "ComboBox"
+
+msgid "navigation.page-size"
+msgstr "Paginación"
+
+msgid "navigation.related-limit"
+msgstr "Entidades relacionadas"
+
+msgid "navigation.short-line-size"
+msgstr "Descripción corta"
+
+msgid "navtop"
+msgstr "Encabezado del contenido principal"
+
+msgid "new"
+msgstr "Nuevo"
+
+msgid "next page"
+msgstr "página siguiente"
+
+msgid "next_results"
+msgstr "Siguientes resultados"
+
+msgid "no"
+msgstr "No"
+
+msgid "no content next link"
+msgstr "no hay liga siguiente"
+
+msgid "no content prev link"
+msgstr "no existe liga previa"
+
+msgid "no edited fields specified"
+msgstr "ningún campo por editar especificado"
+
+msgid "no log to display"
+msgstr "no arrojó elementos para mostrar"
+
+msgid "no related entity"
+msgstr "No posee entidad asociada"
+
+msgid "no repository sessions found"
+msgstr "Ninguna sesión encontrada"
+
+msgid "no selected entities"
+msgstr "No hay entidades seleccionadas"
+
+#, python-format
+msgid "no such entity type %s"
+msgstr "El tipo de entidad '%s' no existe"
+
+msgid "no version information"
+msgstr "No existe la información de version"
+
+msgid "no web sessions found"
+msgstr "Ninguna sesión web encontrada"
+
+msgid "normal"
+msgstr "Normal"
+
+msgid "not authorized"
+msgstr "No autorizado"
+
+msgid "not selected"
+msgstr "No seleccionado"
+
+msgid "november"
+msgstr "Noviembre"
+
+msgid "num. users"
+msgstr "Número de Usuarios"
+
+msgid "object"
+msgstr "Objeto"
+
+msgid "object type"
+msgstr "Tipo de Objeto"
+
+msgid "october"
+msgstr "Octubre"
+
+msgid "one month"
+msgstr "Un mes"
+
+msgid "one week"
+msgstr "Una semana"
+
+msgid "oneline"
+msgstr "En una línea"
+
+msgid "only select queries are authorized"
+msgstr "Solo están permitidas consultas de lectura"
+
+msgid "open all"
+msgstr "Abrir todos"
+
+msgid "opened sessions"
+msgstr "Sesiones abiertas"
+
+msgid "opened web sessions"
+msgstr "Sesiones Web abiertas"
+
+msgid "options"
+msgstr "Opciones"
+
+msgctxt "CWSourceSchemaConfig"
+msgid "options"
+msgstr "opciones"
+
+msgid "order"
+msgstr "Orden"
+
+msgid "ordernum"
+msgstr "Orden"
+
+msgctxt "CWAttribute"
+msgid "ordernum"
+msgstr "Número de Orden"
+
+msgctxt "CWRelation"
+msgid "ordernum"
+msgstr "Número de Orden"
+
+msgid "owl"
+msgstr "OWL"
+
+msgid "owlabox"
+msgstr "OWLabox"
+
+msgid "owned_by"
+msgstr "Pertenece a"
+
+msgid "owned_by_object"
+msgstr "Pertenece al objeto"
+
+msgid "owners"
+msgstr "Proprietarios"
+
+msgid "ownerships have been changed"
+msgstr "Derechos de propiedad modificados"
+
+msgid "pageid-not-found"
+msgstr "Página no encontrada."
+
+msgid "parser"
+msgstr "analizador (parser)"
+
+msgctxt "CWSource"
+msgid "parser"
+msgstr "analizador (parser)"
+
+msgid "parser to use to extract entities from content retrieved at given URLs."
+msgstr ""
+"analizador (parser) que sirve para extraer entidades y relaciones del "
+"contenido recuperado de las URLs."
+
+msgid "password"
+msgstr "Contraseña"
+
+msgid "password and confirmation don't match"
+msgstr "Su contraseña y confirmación no concuerdan"
+
+msgid "path"
+msgstr "Ruta"
+
+msgctxt "Bookmark"
+msgid "path"
+msgstr "Ruta"
+
+msgid "permalink to this message"
+msgstr "liga permanente a este mensaje"
+
+msgid "permission"
+msgstr "Permiso"
+
+msgid "permissions"
+msgstr "Permisos"
+
+msgid "pick existing bookmarks"
+msgstr "Seleccionar favoritos existentes"
+
+msgid "pkey"
+msgstr "Clave"
+
+msgctxt "CWProperty"
+msgid "pkey"
+msgstr "Código de la Propiedad"
+
+msgid "please correct errors below"
+msgstr "Por favor corregir los errores señalados en la parte inferior"
+
+msgid "please correct the following errors:"
+msgstr "Por favor corregir los siguientes errores:"
+
+msgid "possible views"
+msgstr "Vistas posibles"
+
+msgid "prefered_form"
+msgstr "Forma preferida"
+
+msgctxt "EmailAddress"
+msgid "prefered_form"
+msgstr "Email principal"
+
+msgid "prefered_form_object"
+msgstr "Formato preferido sobre"
+
+msgctxt "EmailAddress"
+msgid "prefered_form_object"
+msgstr "Email principal de"
+
+msgid "preferences"
+msgstr "Preferencias"
+
+msgid "previous page"
+msgstr "página anterior"
+
+msgid "previous_results"
+msgstr "Resultados Anteriores"
+
+msgid "primary"
+msgstr "Primaria"
+
+msgid "primary_email"
+msgstr "Dirección principal de correo electrónico"
+
+msgctxt "CWUser"
+msgid "primary_email"
+msgstr "Dirección principal de correo electrónico"
+
+msgid "primary_email_object"
+msgstr "Dirección de email principal (objeto)"
+
+msgctxt "EmailAddress"
+msgid "primary_email_object"
+msgstr "Dirección principal de correo electrónico de"
+
+msgid "profile"
+msgstr "perfil"
+
+msgid "rdef-description"
+msgstr "Descripción"
+
+msgid "rdef-permissions"
+msgstr "Permisos"
+
+msgid "rdf export"
+msgstr "Exportación RDF"
+
+msgid "read"
+msgstr "Lectura"
+
+msgid "read_permission"
+msgstr "Permiso de lectura"
+
+msgctxt "CWAttribute"
+msgid "read_permission"
+msgstr "Permiso de Lectura"
+
+msgctxt "CWEType"
+msgid "read_permission"
+msgstr "Permiso de Lectura"
+
+msgctxt "CWRelation"
+msgid "read_permission"
+msgstr "Permiso de Lectura"
+
+msgid "read_permission_object"
+msgstr "Tiene acceso de lectura a"
+
+msgctxt "CWGroup"
+msgid "read_permission_object"
+msgstr "Puede leer"
+
+msgctxt "RQLExpression"
+msgid "read_permission_object"
+msgstr "Puede leer"
+
+msgid "regexp matching host(s) to which this config applies"
+msgstr ""
+"expresión regular de los nombres de hosts a los cuales esta configuración "
+"aplica"
+
+msgid "registry"
+msgstr "Registro"
+
+msgid "related entity has no state"
+msgstr "La entidad relacionada no posee Estado"
+
+msgid "related entity has no workflow set"
+msgstr "La entidad relacionada no posee Workflow definido"
+
+msgid "relation"
+msgstr "relación"
+
+#, python-format
+msgid "relation %(relname)s of %(ent)s"
+msgstr "relación %(relname)s de %(ent)s"
+
+#, python-format
+msgid ""
+"relation %(rtype)s with %(etype)s as %(role)s is supported but no target "
+"type supported"
+msgstr ""
+"la relación %(rtype)s con %(etype)s como %(role)s es aceptada pero ningún "
+"tipo target es aceptado"
+
+#, python-format
+msgid ""
+"relation %(type)s with %(etype)s as %(role)s and target type %(target)s is "
+"mandatory but not supported"
+msgstr ""
+"la relación %(type)s con %(etype)s como %(role)s y tipo objetivo %(target)s "
+"es obligatoria pero no mantenida"
+
+#, python-format
+msgid ""
+"relation %s is supported but none of its definitions matches supported "
+"entities"
+msgstr ""
+"la relación %s es aceptada pero ninguna de sus definiciones corresponden a "
+"los tipos de entidades aceptadas"
+
+msgid "relation add"
+msgstr "Agregar Relación"
+
+msgid "relation removal"
+msgstr "Eliminar Relación"
+
+msgid "relation_type"
+msgstr "Tipo de Relación"
+
+msgctxt "CWAttribute"
+msgid "relation_type"
+msgstr "Tipo de Relación"
+
+msgctxt "CWRelation"
+msgid "relation_type"
+msgstr "Tipo de Relación"
+
+msgid "relation_type_object"
+msgstr "Definición de Relaciones"
+
+msgctxt "CWRType"
+msgid "relation_type_object"
+msgstr "Definición de Relaciones"
+
+msgid "relations"
+msgstr "relaciones"
+
+msgctxt "CWUniqueTogetherConstraint"
+msgid "relations"
+msgstr "relaciones"
+
+msgid "relations deleted"
+msgstr "Relaciones Eliminadas"
+
+msgid "relations_object"
+msgstr "relaciones de"
+
+msgctxt "CWRType"
+msgid "relations_object"
+msgstr "relaciones de"
+
+msgid "relative url of the bookmarked page"
+msgstr "Url relativa de la página"
+
+msgid "remove-inlined-entity-form"
+msgstr "Eliminar"
+
+msgid "require_group"
+msgstr "Requiere el grupo"
+
+msgctxt "BaseTransition"
+msgid "require_group"
+msgstr "Restringida al Grupo"
+
+msgctxt "Transition"
+msgid "require_group"
+msgstr "Restringida al Grupo"
+
+msgctxt "WorkflowTransition"
+msgid "require_group"
+msgstr "Restringida al Grupo"
+
+msgid "require_group_object"
+msgstr "Posee derechos sobre"
+
+msgctxt "CWGroup"
+msgid "require_group_object"
+msgstr "Posee derechos sobre"
+
+msgid "required"
+msgstr "Requerido"
+
+msgid "required attribute"
+msgstr "Atributo requerido"
+
+msgid "required field"
+msgstr "Campo requerido"
+
+msgid "resources usage"
+msgstr "Recursos utilizados"
+
+msgid ""
+"restriction part of a rql query. For entity rql expression, X and U are "
+"predefined respectivly to the current object and to the request user. For "
+"relation rql expression, S, O and U are predefined respectivly to the "
+"current relation'subject, object and to the request user. "
+msgstr ""
+"Parte restrictiva de una consulta RQL. En una expresión ligada a una "
+"entidad, X y U son respectivamente asignadas a la Entidad y el Usuario en "
+"curso.En una expresión ligada a una relación, S, O y U son respectivamente "
+"asignados al Sujeto/Objeto de la relación y al Usuario actual."
+
+msgid "revert changes"
+msgstr "Anular modificación"
+
+msgid "right"
+msgstr "Derecha"
+
+msgid "rql expression allowing to add entities/relations of this type"
+msgstr "Expresión RQL que permite AGREGAR entidades/relaciones de este tipo"
+
+msgid "rql expression allowing to delete entities/relations of this type"
+msgstr "Expresión RQL que permite ELIMINAR entidades/relaciones de este tipo"
+
+msgid "rql expression allowing to read entities/relations of this type"
+msgstr "Expresión RQL que permite LEER entidades/relaciones de este tipo"
+
+msgid "rql expression allowing to update entities/relations of this type"
+msgstr "Expresión RQL que permite ACTUALIZAR entidades/relaciones de este tipo"
+
+msgid "rql expressions"
+msgstr "Expresiones RQL"
+
+msgid "rss export"
+msgstr "Exportación RSS"
+
+msgid "rule"
+msgstr ""
+
+msgctxt "CWComputedRType"
+msgid "rule"
+msgstr ""
+
+msgid "same_as"
+msgstr "Idéntico a"
+
+msgid "sample format"
+msgstr "Ejemplo"
+
+msgid "saturday"
+msgstr "Sábado"
+
+msgid "schema-diagram"
+msgstr "Gráfica"
+
+msgid "schema-entity-types"
+msgstr "Entidades"
+
+msgid "schema-relation-types"
+msgstr "Relaciones"
+
+msgid "search"
+msgstr "Buscar"
+
+msgid "search for association"
+msgstr "Búsqueda por asociación"
+
+msgid "searching for"
+msgstr "Buscando"
+
+msgid "security"
+msgstr "Seguridad"
+
+msgid "see more"
+msgstr "ver más"
+
+msgid "see them all"
+msgstr "Ver todos"
+
+msgid "see_also"
+msgstr "Ver además"
+
+msgid "select"
+msgstr "Seleccionar"
+
+msgid "select a"
+msgstr "Seleccione un"
+
+msgid "select a key first"
+msgstr "Seleccione una clave"
+
+msgid "select a relation"
+msgstr "Seleccione una relación"
+
+msgid "select this entity"
+msgstr "Seleccionar esta entidad"
+
+msgid "selected"
+msgstr "Seleccionado"
+
+msgid "semantic description of this attribute"
+msgstr "Descripción semántica de este atributo"
+
+msgid "semantic description of this entity type"
+msgstr "Descripción semántica de este tipo de entidad"
+
+msgid "semantic description of this relation"
+msgstr "Descripción semántica de esta relación"
+
+msgid "semantic description of this relation type"
+msgstr "Descripción semántica de este tipo de relación"
+
+msgid "semantic description of this state"
+msgstr "Descripción semántica de este estado"
+
+msgid "semantic description of this transition"
+msgstr "Descripcion semántica de esta transición"
+
+msgid "semantic description of this workflow"
+msgstr "Descripcion semántica de este Workflow"
+
+msgid "september"
+msgstr "Septiembre"
+
+msgid "server information"
+msgstr "Información del servidor"
+
+msgid "severity"
+msgstr "severidad"
+
+msgid ""
+"should html fields being edited using fckeditor (a HTML WYSIWYG editor). "
+"You should also select text/html as default text format to actually get "
+"fckeditor."
+msgstr ""
+"Indica si los campos de tipo texto deberán ser editados usando fckeditor "
+"(un\n"
+"editor HTML WYSIWYG). Deberá también elegir text/html\n"
+"como formato de texto por defecto para poder utilizar fckeditor."
+
+#, python-format
+msgid "show %s results"
+msgstr "Mostrar %s resultados"
+
+msgid "show advanced fields"
+msgstr "Mostrar campos avanzados"
+
+msgid "show filter form"
+msgstr "Mostrar el Filtro"
+
+msgid "site configuration"
+msgstr "Configuración Sistema"
+
+msgid "site documentation"
+msgstr "Documentación Sistema"
+
+msgid "site title"
+msgstr "Nombre del Sistema"
+
+msgid "site-wide property can't be set for user"
+msgstr "Una propiedad específica al Sistema no puede ser propia al usuario"
+
+msgid "some later transaction(s) touch entity, undo them first"
+msgstr ""
+"Las transacciones más recientes modificaron esta entidad, anúlelas primero"
+
+msgid "some relations violate a unicity constraint"
+msgstr "algunas relaciones no respetan la restricción de unicidad"
+
+msgid "sorry, the server is unable to handle this query"
+msgstr "Lo sentimos, el servidor no puede manejar esta consulta"
+
+msgid ""
+"source's configuration. One key=value per line, authorized keys depending on "
+"the source's type"
+msgstr ""
+"configuración de fuentes. Una clave=valor por línea, las claves permitidas "
+"dependen del tipo de la fuente."
+
+msgid "sparql xml"
+msgstr "XML Sparql"
+
+msgid "special transition allowing to go through a sub-workflow"
+msgstr "Transición especial que permite ir en un Sub-Workflow"
+
+msgid "specializes"
+msgstr "Deriva de"
+
+msgctxt "CWEType"
+msgid "specializes"
+msgstr "Especializa"
+
+msgid "specializes_object"
+msgstr "Especializado por"
+
+msgctxt "CWEType"
+msgid "specializes_object"
+msgstr "Especializado por"
+
+#, python-format
+msgid "specifying %s is mandatory"
+msgstr "especificar %s es obligatorio"
+
+msgid ""
+"start timestamp of the currently in synchronization, or NULL when no "
+"synchronization in progress."
+msgstr ""
+"horario de inicio de la sincronización en curso, o NULL cuando no existe "
+"sincronización en curso"
+
+msgid "start_timestamp"
+msgstr "horario inicio"
+
+msgctxt "CWDataImport"
+msgid "start_timestamp"
+msgstr "horario inicio"
+
+msgid "startup views"
+msgstr "Vistas de inicio"
+
+msgid "startupview"
+msgstr "Vistas de Inicio"
+
+msgid "state"
+msgstr "Estado"
+
+msgid "state and transition don't belong the the same workflow"
+msgstr "El Estado y la Transición no pertenecen al mismo Workflow"
+
+msgid "state doesn't apply to this entity's type"
+msgstr "Este Estado no aplica a este tipo de Entidad"
+
+msgid "state doesn't belong to entity's current workflow"
+msgstr "El Estado no pertenece al Workflow actual de la Entidad"
+
+msgid "state doesn't belong to entity's workflow"
+msgstr "El Estado no pertenece al Workflow de la Entidad"
+
+msgid ""
+"state doesn't belong to entity's workflow. You may want to set a custom "
+"workflow for this entity first."
+msgstr ""
+"El Estado no pertenece al Workflow Actual de la Entidad. Usted deseaquizás "
+"especificar que esta entidad debe utilizar este Workflow"
+
+msgid "state doesn't belong to this workflow"
+msgstr "El Estado no pertenece a este Workflow"
+
+msgid "state_of"
+msgstr "Estado de"
+
+msgctxt "State"
+msgid "state_of"
+msgstr "Estado de"
+
+msgid "state_of_object"
+msgstr "Tiene por Estado"
+
+msgctxt "Workflow"
+msgid "state_of_object"
+msgstr "Tiene por Estado"
+
+msgid "status"
+msgstr "estado"
+
+msgctxt "CWDataImport"
+msgid "status"
+msgstr "estado"
+
+msgid "status change"
+msgstr "Cambio de Estatus"
+
+msgid "status changed"
+msgstr "Estatus cambiado"
+
+#, python-format
+msgid "status will change from %(st1)s to %(st2)s"
+msgstr "El estatus cambiará de %(st1)s a %(st2)s"
+
+msgid "subject"
+msgstr "Sujeto"
+
+msgid "subject type"
+msgstr "Tipo del sujeto"
+
+msgid "subject/object cardinality"
+msgstr "Cardinalidad Sujeto/Objeto"
+
+msgid "subworkflow"
+msgstr "Sub-Workflow"
+
+msgctxt "WorkflowTransition"
+msgid "subworkflow"
+msgstr "Sub-Workflow"
+
+msgid ""
+"subworkflow isn't a workflow for the same types as the transition's workflow"
+msgstr ""
+"Le Sub-Workflow no se aplica a los mismos tipos que el Workflow de esta "
+"transición"
+
+msgid "subworkflow state"
+msgstr "Estado de Sub-Workflow"
+
+msgid "subworkflow_exit"
+msgstr "Salida del Sub-Workflow"
+
+msgctxt "WorkflowTransition"
+msgid "subworkflow_exit"
+msgstr "Salida del Sub-Workflow"
+
+msgid "subworkflow_exit_object"
+msgstr "Salida Sub-Workflow de"
+
+msgctxt "SubWorkflowExitPoint"
+msgid "subworkflow_exit_object"
+msgstr "Salida Sub-Workflow de"
+
+msgid "subworkflow_object"
+msgstr "Sub-Workflow de"
+
+msgctxt "Workflow"
+msgid "subworkflow_object"
+msgstr "Sub-Workflow de"
+
+msgid "subworkflow_state"
+msgstr "Estado de Sub-Workflow"
+
+msgctxt "SubWorkflowExitPoint"
+msgid "subworkflow_state"
+msgstr "Estado de Sub-Workflow"
+
+msgid "subworkflow_state_object"
+msgstr "Estado de Salida de"
+
+msgctxt "State"
+msgid "subworkflow_state_object"
+msgstr "Estado de Salida de"
+
+msgid "success"
+msgstr "éxito"
+
+msgid "sunday"
+msgstr "Domingo"
+
+msgid "surname"
+msgstr "Apellido"
+
+msgctxt "CWUser"
+msgid "surname"
+msgstr "Apellido"
+
+msgid "symmetric"
+msgstr "Simétrico"
+
+msgctxt "CWRType"
+msgid "symmetric"
+msgstr "Simétrico"
+
+msgid "synchronization-interval must be greater than 1 minute"
+msgstr "synchronization-interval debe ser mayor a 1 minuto"
+
+msgid "table"
+msgstr "Tabla"
+
+msgid "tablefilter"
+msgstr "Tablero de Filtrado"
+
+msgid "text"
+msgstr "Texto"
+
+msgid "text/cubicweb-page-template"
+msgstr "Usar Page Templates"
+
+msgid "text/html"
+msgstr "Usar HTML"
+
+msgid "text/markdown"
+msgstr ""
+
+msgid "text/plain"
+msgstr "Usar Texto simple"
+
+msgid "text/rest"
+msgstr "Texto en REST"
+
+msgid "the URI of the object"
+msgstr "El URI del Objeto"
+
+msgid "the prefered email"
+msgstr "Dirección principal de email"
+
+msgid "the system source has its configuration stored on the file-system"
+msgstr ""
+"el sistema fuente tiene su configuración almacenada en el sistema de archivos"
+
+#, python-format
+msgid "the value \"%s\" is already used, use another one"
+msgstr "El valor \"%s\" ya esta en uso, favor de utilizar otro"
+
+msgid "there is no next page"
+msgstr "no existe página siguiente"
+
+msgid "there is no previous page"
+msgstr "no existe página anterior"
+
+#, python-format
+msgid "there is no transaction #%s"
+msgstr "no existe la transacción #%s"
+
+msgid "this action is not reversible!"
+msgstr "Esta acción es irreversible!."
+
+msgid "this entity is currently owned by"
+msgstr "Esta Entidad es propiedad de"
+
+msgid "this parser doesn't use a mapping"
+msgstr "este analizador (parser) no utiliza mapeo"
+
+msgid "this resource does not exist"
+msgstr "Este recurso no existe"
+
+msgid "this source doesn't use a mapping"
+msgstr "esta fuente no utiliza mapeo"
+
+msgid "thursday"
+msgstr "Jueves"
+
+msgid "timestamp"
+msgstr "Fecha"
+
+msgctxt "CWCache"
+msgid "timestamp"
+msgstr "Válido desde"
+
+msgid "timetable"
+msgstr "Tablero de tiempos"
+
+msgid "title"
+msgstr "Nombre"
+
+msgctxt "Bookmark"
+msgid "title"
+msgstr "Nombre"
+
+msgid "to"
+msgstr "a"
+
+#, python-format
+msgid "to %(date)s"
+msgstr "a %(date)s"
+
+msgid "to associate with"
+msgstr "Para asociar con"
+
+msgid "to_entity"
+msgstr "Hacia la entidad"
+
+msgctxt "CWAttribute"
+msgid "to_entity"
+msgstr "Por la entidad"
+
+msgctxt "CWRelation"
+msgid "to_entity"
+msgstr "Por la entidad"
+
+msgid "to_entity_object"
+msgstr "Objeto de la Relación"
+
+msgctxt "CWEType"
+msgid "to_entity_object"
+msgstr "Objeto de la Relación"
+
+msgid "to_interval_end"
+msgstr "a"
+
+msgid "to_state"
+msgstr "Hacia el Estado"
+
+msgctxt "TrInfo"
+msgid "to_state"
+msgstr "Hacia el Estado"
+
+msgid "to_state_object"
+msgstr "Transición hacia este Estado"
+
+msgctxt "State"
+msgid "to_state_object"
+msgstr "Transición hacia este Estado"
+
+msgid "toggle check boxes"
+msgstr "Cambiar valor"
+
+msgid "tr_count"
+msgstr "n° de transición"
+
+msgctxt "TrInfo"
+msgid "tr_count"
+msgstr "n° de transición"
+
+msgid "transaction undone"
+msgstr "transacción anulada"
+
+#, python-format
+msgid "transition %(tr)s isn't allowed from %(st)s"
+msgstr "La transición %(tr)s no esta permitida desde el Estado %(st)s"
+
+msgid "transition doesn't belong to entity's workflow"
+msgstr "La transición no pertenece al Workflow de la Entidad"
+
+msgid "transition isn't allowed"
+msgstr "La transición no esta permitida"
+
+msgid "transition may not be fired"
+msgstr "La transición no puede ser lanzada"
+
+msgid "transition_of"
+msgstr "Transición de"
+
+msgctxt "BaseTransition"
+msgid "transition_of"
+msgstr "Transición de"
+
+msgctxt "Transition"
+msgid "transition_of"
+msgstr "Transición de"
+
+msgctxt "WorkflowTransition"
+msgid "transition_of"
+msgstr "Transición de"
+
+msgid "transition_of_object"
+msgstr "Utiliza las transiciones"
+
+msgctxt "Workflow"
+msgid "transition_of_object"
+msgstr "Utiliza las transiciones"
+
+msgid "tree view"
+msgstr "Vista Jerárquica"
+
+msgid "tuesday"
+msgstr "Martes"
+
+msgid "type"
+msgstr "Tipo"
+
+msgctxt "BaseTransition"
+msgid "type"
+msgstr "Tipo"
+
+msgctxt "CWSource"
+msgid "type"
+msgstr "tipo"
+
+msgctxt "Transition"
+msgid "type"
+msgstr "Tipo"
+
+msgctxt "WorkflowTransition"
+msgid "type"
+msgstr "Tipo"
+
+msgid "type here a sparql query"
+msgstr "Escriba aquí su consulta en Sparql"
+
+msgid "type of the source"
+msgstr "tipo de la fuente"
+
+msgid "ui"
+msgstr "Interfaz Genérica"
+
+msgid "ui.date-format"
+msgstr "Formato de Fecha"
+
+msgid "ui.datetime-format"
+msgstr "Formato de Fecha y Hora"
+
+msgid "ui.default-text-format"
+msgstr "Formato de texto"
+
+msgid "ui.encoding"
+msgstr "Codificación"
+
+msgid "ui.fckeditor"
+msgstr "Editor de texto FCK"
+
+msgid "ui.float-format"
+msgstr "Números flotantes"
+
+msgid "ui.language"
+msgstr "Lenguaje"
+
+msgid "ui.main-template"
+msgstr "Plantilla Principal"
+
+msgid "ui.site-title"
+msgstr "Nombre del Sistema"
+
+msgid "ui.time-format"
+msgstr "Formato de hora"
+
+msgid "unable to check captcha, please try again"
+msgstr "Imposible de verificar el Captcha, inténtelo otra vez"
+
+msgid "unaccessible"
+msgstr "Inaccesible"
+
+msgid "unauthorized value"
+msgstr "Valor no permitido"
+
+msgid "undefined user"
+msgstr "usuario indefinido"
+
+msgid "undo"
+msgstr "Anular"
+
+msgid "unique identifier used to connect to the application"
+msgstr "Identificador único utilizado para conectarse al Sistema"
+
+msgid "unknown external entity"
+msgstr "Entidad externa desconocida"
+
+#, python-format
+msgid "unknown options %s"
+msgstr "opciones desconocidas: %s"
+
+#, python-format
+msgid "unknown property key %s"
+msgstr "Clave de Propiedad desconocida: %s"
+
+msgid "unknown vocabulary:"
+msgstr "Vocabulario desconocido: "
+
+msgid "unsupported protocol"
+msgstr "protocolo no soportado"
+
+msgid "upassword"
+msgstr "Contraseña"
+
+msgctxt "CWUser"
+msgid "upassword"
+msgstr "Contraseña"
+
+msgid "update"
+msgstr "Modificación"
+
+msgid "update_permission"
+msgstr "Puede ser modificado por"
+
+msgctxt "CWAttribute"
+msgid "update_permission"
+msgstr "Puede ser modificado por"
+
+msgctxt "CWEType"
+msgid "update_permission"
+msgstr "Puede ser modificado por"
+
+msgid "update_permission_object"
+msgstr "Tiene permiso de modificar"
+
+msgctxt "CWGroup"
+msgid "update_permission_object"
+msgstr "Puede modificar"
+
+msgctxt "RQLExpression"
+msgid "update_permission_object"
+msgstr "Puede modificar"
+
+msgid "update_relation"
+msgstr "Modificar"
+
+msgid "updated"
+msgstr "Actualizado"
+
+#, python-format
+msgid "updated %(etype)s #%(eid)s (%(title)s)"
+msgstr "actualización de la entidad %(etype)s #%(eid)s (%(title)s)"
+
+msgid "uri"
+msgstr "URI"
+
+msgctxt "ExternalUri"
+msgid "uri"
+msgstr "URI"
+
+msgid "url"
+msgstr "url"
+
+msgctxt "CWSource"
+msgid "url"
+msgstr "url"
+
+msgid ""
+"use to define a transition from one or multiple states to a destination "
+"states in workflow's definitions. Transition without destination state will "
+"go back to the state from which we arrived to the current state."
+msgstr ""
+"Se utiliza en una definición de procesos para agregar una transición desde "
+"uno o varios estados hacia un estado destino. Una transición sin Estado "
+"destino regresará al Estado anterior del Estado actual"
+
+msgid "use_email"
+msgstr "Correo electrónico"
+
+msgctxt "CWUser"
+msgid "use_email"
+msgstr "Usa el Correo Electrónico"
+
+msgid "use_email_object"
+msgstr "Email utilizado por"
+
+msgctxt "EmailAddress"
+msgid "use_email_object"
+msgstr "Utilizado por"
+
+msgid ""
+"used for cubicweb configuration. Once a property has been created you can't "
+"change the key."
+msgstr ""
+"Se utiliza para la configuración de CubicWeb. Una vez que la propiedad ha "
+"sido creada no puede cambiar la clave"
+
+msgid ""
+"used to associate simple states to an entity type and/or to define workflows"
+msgstr ""
+"Se utiliza para asociar estados simples a un tipo de entidad y/o para "
+"definir Workflows"
+
+msgid "user"
+msgstr "Usuario"
+
+#, python-format
+msgid ""
+"user %s has made the following change(s):\n"
+"\n"
+msgstr ""
+"El usuario %s ha efectuado los siguentes cambios:\n"
+"\n"
+
+msgid "user interface encoding"
+msgstr "Encoding de la interfaz de usuario"
+
+msgid "user preferences"
+msgstr "Preferencias"
+
+msgid "user's email account"
+msgstr "email del usuario"
+
+msgid "users"
+msgstr "Usuarios"
+
+msgid "users and groups"
+msgstr "usuarios y grupos"
+
+msgid "users using this bookmark"
+msgstr "Usuarios utilizando este Favorito"
+
+msgid "validate modifications on selected items"
+msgstr "Valida modificaciones sobre elementos seleccionados"
+
+msgid "validating..."
+msgstr "Validando ..."
+
+msgid "value"
+msgstr "Valor"
+
+msgctxt "CWConstraint"
+msgid "value"
+msgstr "Valor"
+
+msgctxt "CWProperty"
+msgid "value"
+msgstr "Vampr"
+
+#, python-format
+msgid "value %(KEY-value)s must be < %(KEY-boundary)s"
+msgstr ""
+
+#, python-format
+msgid "value %(KEY-value)s must be <= %(KEY-boundary)s"
+msgstr "el valor %(KEY-value)s debe ser <= %(KEY-boundary)s"
+
+#, python-format
+msgid "value %(KEY-value)s must be > %(KEY-boundary)s"
+msgstr ""
+
+#, python-format
+msgid "value %(KEY-value)s must be >= %(KEY-boundary)s"
+msgstr "el valor %(KEY-value)s debe ser >= %(KEY-boundary)s"
+
+msgid "value associated to this key is not editable manually"
+msgstr "El valor asociado a este elemento no es editable manualmente"
+
+#, python-format
+msgid "value should have maximum size of %(KEY-max)s but found %(KEY-size)s"
+msgstr "el valor máximo es %(KEY-max)s y encontramos %(KEY-size)s"
+
+#, python-format
+msgid "value should have minimum size of %(KEY-min)s but found %(KEY-size)s"
+msgstr "el valor mínimo debe ser %(KEY-min)s y encontramos %(KEY-size)s"
+
+msgid "vcard"
+msgstr "vcard"
+
+msgid "versions configuration"
+msgstr "Configuración de Versión"
+
+msgid "view"
+msgstr "Ver"
+
+msgid "view all"
+msgstr "Ver todos"
+
+msgid "view detail for this entity"
+msgstr "Ver a detalle esta entidad"
+
+msgid "view history"
+msgstr "Ver histórico"
+
+msgid "view identifier"
+msgstr "Identificador"
+
+msgid "view title"
+msgstr "Nombre"
+
+msgid "view workflow"
+msgstr "Ver Workflow"
+
+msgid "view_index"
+msgstr "Inicio"
+
+msgid "visible"
+msgstr "Visible"
+
+msgid "warning"
+msgstr "atención"
+
+msgid "we are not yet ready to handle this query"
+msgstr "Aún no podemos manejar este tipo de consulta Sparql"
+
+msgid "wednesday"
+msgstr "Miércoles"
+
+#, python-format
+msgid "welcome %s!"
+msgstr "Bienvenido %s."
+
+msgid "wf_info_for"
+msgstr "Histórico de"
+
+msgid "wf_info_for_object"
+msgstr "Histórico de transiciones"
+
+msgid "wf_tab_info"
+msgstr "Descripción"
+
+msgid "wfgraph"
+msgstr "Gráfica del Workflow"
+
+msgid ""
+"when multiple addresses are equivalent (such as python-projects@logilab.org "
+"and python-projects@lists.logilab.org), set this to indicate which is the "
+"preferred form."
+msgstr ""
+"Cuando varias direcciones email son equivalentes (como python-"
+"projects@logilab.org y python-projects@lists.logilab.org), aquí se indica "
+"cual es la forma preferida."
+
+msgid "workflow"
+msgstr "Workflow"
+
+#, python-format
+msgid "workflow changed to \"%s\""
+msgstr "Workflow cambiado a \"%s\""
+
+msgid "workflow has no initial state"
+msgstr "El Workflow no posee Estado Inicial"
+
+msgid "workflow history item"
+msgstr "Elemento histórico del Workflow"
+
+msgid "workflow isn't a workflow for this type"
+msgstr "El Workflow no se aplica a este Tipo de Entidad"
+
+msgid "workflow to which this state belongs"
+msgstr "Workflow al cual pertenece este estado"
+
+msgid "workflow to which this transition belongs"
+msgstr "Workflow al cual pertenece esta transición"
+
+msgid "workflow_of"
+msgstr "Workflow de"
+
+msgctxt "Workflow"
+msgid "workflow_of"
+msgstr "Workflow de"
+
+msgid "workflow_of_object"
+msgstr "Utiliza el Workflow"
+
+msgctxt "CWEType"
+msgid "workflow_of_object"
+msgstr "Utiliza el Workflow"
+
+#, python-format
+msgid "wrong query parameter line %s"
+msgstr "Parámetro erróneo de consulta línea %s"
+
+msgid "xbel export"
+msgstr "Exportación XBEL"
+
+msgid "xml export"
+msgstr "Exportar XML"
+
+msgid "xml export (entities)"
+msgstr "Exportación XML (entidades)"
+
+msgid "yes"
+msgstr "Sí"
+
+msgid "you have been logged out"
+msgstr "Ha terminado la sesión"
+
+msgid "you should probably delete that property"
+msgstr "probablemente debería suprimir esta propiedad"
+
+#~ msgid "%s relation should not be in mapped"
+#~ msgstr "la relación %s no debería estar mapeada"
+
+#~ msgid "Any"
+#~ msgstr "Cualquiera"
+
+#~ msgid "Browse by category"
+#~ msgstr "Busca por categoría"
+
+#~ msgid "No account? Try public access at %s"
+#~ msgstr "¿No está registrado? Use el acceso público en %s"
+
+#~ msgid "anonymous"
+#~ msgstr "anónimo"
+
+#~ msgid "attribute/relation can't be mapped, only entity and relation types"
+#~ msgstr ""
+#~ "los atributos y las relaciones no pueden ser mapeados, solamente los "
+#~ "tipos de entidad y de relación"
+
+#~ msgid "can't connect to source %s, some data may be missing"
+#~ msgstr "no se puede conectar a la fuente %s, algunos datos pueden faltar"
+
+#~ msgid "can't mix dontcross and maycross options"
+#~ msgstr "no puede mezclar las opciones dontcross y maycross"
+
+#~ msgid "can't mix dontcross and write options"
+#~ msgstr "no puede mezclar las opciones dontcross y write"
+
+#~ msgid "components_etypenavigation"
+#~ msgstr "Filtar por tipo"
+
+#~ msgid "components_etypenavigation_description"
+#~ msgstr "Permite filtrar por tipo de entidad los resultados de una búsqueda"
+
+#~ msgid "error while querying source %s, some data may be missing"
+#~ msgstr ""
+#~ "Un error ha ocurrido al interrogar %s, es posible que los \n"
+#~ "datos visibles se encuentren incompletos"
+
+#~ msgid "inlined relation %(rtype)s of %(etype)s should be supported"
+#~ msgstr ""
+#~ "la relación %(rtype)s del tipo de entidad %(etype)s debe ser aceptada "
+#~ "('inlined')"
+
+#~ msgid "no edited fields specified for entity %s"
+#~ msgstr "Ningún campo editable especificado para la entidad %s"
+
+#~ msgid "timeline"
+#~ msgstr "Escala de Tiempo"
+
+#~ msgid "unknown option(s): %s"
+#~ msgstr "opcion(es) desconocida(s): %s"
+
+#~ msgid "value %(KEY-value)s must be %(KEY-op)s %(KEY-boundary)s"
+#~ msgstr "El valor %(KEY-value)s debe ser %(KEY-op)s %(KEY-boundary)s"
+
+#~ msgid "web sessions without CNX"
+#~ msgstr "sesiones web sin conexión asociada"
+
+#~ msgid "workflow already has a state of that name"
+#~ msgstr "el workflow posee ya un estado con ese nombre"
+
+#~ msgid "workflow already has a transition of that name"
+#~ msgstr "El Workflow posee ya una transición con ese nombre"
+
+#~ msgid "you may want to specify something for %s"
+#~ msgstr "usted desea quizás especificar algo para la relación %s"
+
+#~ msgid ""
+#~ "you should un-inline relation %s which is supported and may be crossed "
+#~ msgstr ""
+#~ "usted debe quitar la puesta en línea de la relación %s que es aceptada y "
+#~ "puede ser cruzada"
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/i18n/fr.po
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/i18n/fr.po Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,4725 @@
+# cubicweb i18n catalog
+# Copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# Logilab
+msgid ""
+msgstr ""
+"Project-Id-Version: cubicweb 2.46.0\n"
+"PO-Revision-Date: 2014-06-24 13:29+0200\n"
+"Last-Translator: Logilab Team \n"
+"Language-Team: fr \n"
+"Language: \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+#, python-format
+msgid ""
+"\n"
+"%(user)s changed status from <%(previous_state)s> to <%(current_state)s> for "
+"entity\n"
+"'%(title)s'\n"
+"\n"
+"%(comment)s\n"
+"\n"
+"url: %(url)s\n"
+msgstr ""
+"\n"
+"%(user)s a changé l'état de <%(previous_state)s> vers <%(current_state)s> "
+"pour l'entité\n"
+"'%(title)s'\n"
+"\n"
+"%(comment)s\n"
+"\n"
+"url: %(url)s\n"
+
+#, python-format
+msgid " from state %(fromstate)s to state %(tostate)s\n"
+msgstr " de l'état %(fromstate)s vers l'état %(tostate)s\n"
+
+msgid " :"
+msgstr " :"
+
+#, python-format
+msgid "\"action\" must be specified in options; allowed values are %s"
+msgstr ""
+"\"action\" doit être specifié dans les options; les valeurs autorisées "
+"sont : %s"
+
+msgid "\"role=subject\" or \"role=object\" must be specified in options"
+msgstr ""
+"\"role=subject\" ou \"role=object\" doit être specifié dans les options"
+
+#, python-format
+msgid "%(KEY-cstr)s constraint failed for value %(KEY-value)r"
+msgstr "la valeur %(KEY-value)r ne satisfait pas la contrainte %(KEY-cstr)s"
+
+#, python-format
+msgid "%(KEY-rtype)s is part of violated unicity constraint"
+msgstr "%(KEY-rtype)s appartient à une contrainte d'unicité transgressée"
+
+#, python-format
+msgid "%(KEY-value)r doesn't match the %(KEY-regexp)r regular expression"
+msgstr ""
+"%(KEY-value)r ne correspond pas à l'expression régulière %(KEY-regexp)r"
+
+#, python-format
+msgid "%(attr)s set to %(newvalue)s"
+msgstr "%(attr)s modifié à %(newvalue)s"
+
+#, python-format
+msgid "%(attr)s updated from %(oldvalue)s to %(newvalue)s"
+msgstr "%(attr)s modifié de %(oldvalue)s à %(newvalue)s"
+
+#, python-format
+msgid "%(etype)s by %(author)s"
+msgstr "%(etype)s par %(author)s"
+
+#, python-format
+msgid "%(firstname)s %(surname)s"
+msgstr "%(firstname)s %(surname)s"
+
+#, python-format
+msgid "%(subject)s %(etype)s #%(eid)s (%(login)s)"
+msgstr "%(subject)s %(etype)s #%(eid)s (%(login)s)"
+
+#, python-format
+msgid "%d days"
+msgstr "%d jours"
+
+#, python-format
+msgid "%d hours"
+msgstr "%d heures"
+
+#, python-format
+msgid "%d minutes"
+msgstr "%d minutes"
+
+#, python-format
+msgid "%d months"
+msgstr "%d mois"
+
+#, python-format
+msgid "%d seconds"
+msgstr "%d secondes"
+
+#, python-format
+msgid "%d weeks"
+msgstr "%d semaines"
+
+#, python-format
+msgid "%d years"
+msgstr "%d années"
+
+#, python-format
+msgid "%s could be supported"
+msgstr "%s pourrait être supporté"
+
+#, python-format
+msgid "%s error report"
+msgstr "%s rapport d'erreur"
+
+#, python-format
+msgid "%s software version of the database"
+msgstr "version logicielle de la base pour %s"
+
+#, python-format
+msgid "%s updated"
+msgstr "%s mis à jour"
+
+#, python-format
+msgid "'%s' action doesn't take any options"
+msgstr "l'action '%s' ne prend pas d'option"
+
+#, python-format
+msgid ""
+"'%s' action for in_state relation should at least have 'linkattr=name' option"
+msgstr ""
+"l'action '%s' pour la relation in_state doit au moins avoir l'option "
+"'linkattr=name'"
+
+#, python-format
+msgid "'%s' action requires 'linkattr' option"
+msgstr "l'action '%s' nécessite une option 'linkattr'"
+
+msgid "(UNEXISTANT EID)"
+msgstr "(EID INTROUVABLE)"
+
+#, python-format
+msgid "(suppressed) entity #%d"
+msgstr "entité #%d (supprimée)"
+
+msgid "**"
+msgstr "0..n 0..n"
+
+msgid "*+"
+msgstr "0..n 1..n"
+
+msgid "*1"
+msgstr "0..n 1"
+
+msgid "*?"
+msgstr "0..n 0..1"
+
+msgid "+*"
+msgstr "1..n 0..n"
+
+msgid "++"
+msgstr "1..n 1..n"
+
+msgid "+1"
+msgstr "1..n 1"
+
+msgid "+?"
+msgstr "1..n 0..1"
+
+msgid "1*"
+msgstr "1 0..n"
+
+msgid "1+"
+msgstr "1 1..n"
+
+msgid "11"
+msgstr "1 1"
+
+msgid "1?"
+msgstr "1 0..1"
+
+#, python-format
+msgid "<%s not specified>"
+msgstr "<%s non spécifié>"
+
+#, python-format
+msgid ""
+"
Ce schéma du modèle de données exclut les méta-données, mais "
+"vous pouvez afficher un schéma complet.
"
+
+msgid ""
+msgstr ""
+
+msgid ""
+msgstr ""
+
+msgid "?*"
+msgstr "0..1 0..n"
+
+msgid "?+"
+msgstr "0..1 1..n"
+
+msgid "?1"
+msgstr "0..1 1"
+
+msgid "??"
+msgstr "0..1 0..1"
+
+msgid "AND"
+msgstr "ET"
+
+msgid "About this site"
+msgstr "À propos de ce site"
+
+#, python-format
+msgid "Added relation : %(entity_from)s %(rtype)s %(entity_to)s"
+msgstr "Relation ajoutée : %(entity_from)s %(rtype)s %(entity_to)s"
+
+msgid "Attributes permissions:"
+msgstr "Permissions des attributs"
+
+# schema pot file, generated on 2009-09-16 16:46:55
+#
+# singular and plural forms for each entity type
+msgid "BaseTransition"
+msgstr "Transition (abstraite)"
+
+msgid "BaseTransition_plural"
+msgstr "Transitions (abstraites)"
+
+msgid "BigInt"
+msgstr "Entier long"
+
+msgid "BigInt_plural"
+msgstr "Entiers longs"
+
+msgid "Bookmark"
+msgstr "Signet"
+
+msgid "Bookmark_plural"
+msgstr "Signets"
+
+msgid "Boolean"
+msgstr "Booléen"
+
+msgid "Boolean_plural"
+msgstr "Booléen"
+
+msgid "BoundConstraint"
+msgstr "contrainte de bornes"
+
+msgid "BoundaryConstraint"
+msgstr "contrainte de bornes"
+
+msgid "Browse by entity type"
+msgstr "Naviguer par type d'entité"
+
+#, python-format
+msgid "By %(user)s on %(dt)s [%(undo_link)s]"
+msgstr "Par %(user)s le %(dt)s [%(undo_link)s] "
+
+msgid "Bytes"
+msgstr "Donnée binaire"
+
+msgid "Bytes_plural"
+msgstr "Données binaires"
+
+msgid "CWAttribute"
+msgstr "Attribut"
+
+msgid "CWAttribute_plural"
+msgstr "Attributs"
+
+msgid "CWCache"
+msgstr "Cache applicatif"
+
+msgid "CWCache_plural"
+msgstr "Caches applicatifs"
+
+msgid "CWComputedRType"
+msgstr "Relation virtuelle"
+
+msgid "CWComputedRType_plural"
+msgstr "Relations virtuelles"
+
+msgid "CWConstraint"
+msgstr "Contrainte"
+
+msgid "CWConstraintType"
+msgstr "Type de contrainte"
+
+msgid "CWConstraintType_plural"
+msgstr "Types de contrainte"
+
+msgid "CWConstraint_plural"
+msgstr "Contraintes"
+
+msgid "CWDataImport"
+msgstr "Import de données"
+
+msgid "CWDataImport_plural"
+msgstr "Imports de données"
+
+msgid "CWEType"
+msgstr "Type d'entité"
+
+msgctxt "inlined:CWRelation.from_entity.subject"
+msgid "CWEType"
+msgstr "Type d'entité"
+
+msgctxt "inlined:CWRelation.to_entity.subject"
+msgid "CWEType"
+msgstr "Type d'entité"
+
+msgid "CWEType_plural"
+msgstr "Types d'entité"
+
+msgid "CWGroup"
+msgstr "Groupe"
+
+msgid "CWGroup_plural"
+msgstr "Groupes"
+
+msgid "CWProperty"
+msgstr "Propriété"
+
+msgid "CWProperty_plural"
+msgstr "Propriétés"
+
+msgid "CWRType"
+msgstr "Type de relation"
+
+msgctxt "inlined:CWRelation.relation_type.subject"
+msgid "CWRType"
+msgstr "Type de relation"
+
+msgid "CWRType_plural"
+msgstr "Types de relation"
+
+msgid "CWRelation"
+msgstr "Relation"
+
+msgid "CWRelation_plural"
+msgstr "Relations"
+
+msgid "CWSource"
+msgstr "Source de données"
+
+msgid "CWSourceHostConfig"
+msgstr "Configuration de source"
+
+msgid "CWSourceHostConfig_plural"
+msgstr "Configurations de source"
+
+msgid "CWSourceSchemaConfig"
+msgstr "Configuration de schéma de source"
+
+msgid "CWSourceSchemaConfig_plural"
+msgstr "Configurations de schéma de source"
+
+msgid "CWSource_plural"
+msgstr "Source de données"
+
+msgid "CWUniqueTogetherConstraint"
+msgstr "Contrainte d'unicité"
+
+msgid "CWUniqueTogetherConstraint_plural"
+msgstr "Contraintes d'unicité"
+
+msgid "CWUser"
+msgstr "Utilisateur"
+
+msgid "CWUser_plural"
+msgstr "Utilisateurs"
+
+#, python-format
+msgid ""
+"Can't restore %(role)s relation %(rtype)s to entity %(eid)s which is already "
+"linked using this relation."
+msgstr ""
+"Ne peut restaurer la relation %(role)s %(rtype)s vers l'entité %(eid)s qui "
+"est déja lié à une autre entité par cette relation."
+
+#, python-format
+msgid ""
+"Can't restore relation %(rtype)s between %(subj)s and %(obj)s, that relation "
+"does not exists anymore in the schema."
+msgstr ""
+"Ne peut restaurer la relation %(rtype)s entre %(subj)s et %(obj)s, cette "
+"relation n'existe plus dans le schéma."
+
+#, python-format
+msgid ""
+"Can't restore relation %(rtype)s, %(role)s entity %(eid)s doesn't exist "
+"anymore."
+msgstr ""
+"Ne peut restaurer la relation %(rtype)s, l'entité %(role)s %(eid)s n'existe "
+"plus."
+
+#, python-format
+msgid ""
+"Can't undo addition of relation %(rtype)s from %(subj)s to %(obj)s, doesn't "
+"exist anymore"
+msgstr ""
+"Ne peut annuler l'ajout de relation %(rtype)s de %(subj)s vers %(obj)s, "
+"cette relation n'existe plus"
+
+#, python-format
+msgid ""
+"Can't undo creation of entity %(eid)s of type %(etype)s, type no more "
+"supported"
+msgstr ""
+"Ne peut annuler la création de l'entité %(eid)s de type %(etype)s, ce type "
+"n'existe plus"
+
+msgid "Click to sort on this column"
+msgstr "Cliquer pour trier sur cette colonne"
+
+msgid ""
+"Configuration of the system source goes to the 'sources' file, not in the "
+"database"
+msgstr ""
+"La configuration de la source système va dans le fichier 'sources' et non "
+"dans la base de données"
+
+#, python-format
+msgid "Created %(etype)s : %(entity)s"
+msgstr "Entité %(etype)s crée : %(entity)s"
+
+msgid "DEBUG"
+msgstr "DEBUG"
+
+msgid "Date"
+msgstr "Date"
+
+msgid "Date_plural"
+msgstr "Dates"
+
+msgid "Datetime"
+msgstr "Date et heure"
+
+msgid "Datetime_plural"
+msgstr "Dates et heures"
+
+msgid "Decimal"
+msgstr "Nombre décimal"
+
+msgid "Decimal_plural"
+msgstr "Nombres décimaux"
+
+#, python-format
+msgid "Delete relation : %(entity_from)s %(rtype)s %(entity_to)s"
+msgstr "Relation supprimée : %(entity_from)s %(rtype)s %(entity_to)s"
+
+#, python-format
+msgid "Deleted %(etype)s : %(entity)s"
+msgstr "Entité %(etype)s supprimée : %(entity)s"
+
+msgid "Detected problems"
+msgstr "Problèmes détectés"
+
+msgid "Do you want to delete the following element(s)?"
+msgstr "Voulez-vous supprimer le(s) élément(s) suivant(s) ?"
+
+msgid "Download schema as OWL"
+msgstr "Télécharger le schéma au format OWL"
+
+msgid "ERROR"
+msgstr "ERREUR"
+
+msgid "EmailAddress"
+msgstr "Adresse électronique"
+
+msgctxt "inlined:CWUser.use_email.subject"
+msgid "EmailAddress"
+msgstr "Adresse électronique"
+
+msgid "EmailAddress_plural"
+msgstr "Adresses électroniques"
+
+msgid "Entities"
+msgstr "entités"
+
+#, python-format
+msgid ""
+"Entity %(eid)s has changed since you started to edit it. Reload the page and "
+"reapply your changes."
+msgstr ""
+"L'entité %(eid)s a été modifiée depuis votre demande d'édition. Veuillez "
+"recharger cette page et réappliquer vos changements."
+
+msgid "Entity and relation supported by this source"
+msgstr "Entités et relations supportées par cette source"
+
+msgid "ExternalUri"
+msgstr "Uri externe"
+
+msgid "ExternalUri_plural"
+msgstr "Uri externes"
+
+msgid "FATAL"
+msgstr "FATAL"
+
+msgid "Float"
+msgstr "Nombre flottant"
+
+msgid "Float_plural"
+msgstr "Nombres flottants"
+
+# schema pot file, generated on 2009-12-03 09:22:35
+#
+# singular and plural forms for each entity type
+msgid "FormatConstraint"
+msgstr "contrainte de format"
+
+msgid "Garbage collection information"
+msgstr "Information sur le ramasse-miette"
+
+msgid "Help"
+msgstr "Aide"
+
+msgid "INFO"
+msgstr "INFO"
+
+msgid "Instance"
+msgstr "Instance"
+
+msgid "Int"
+msgstr "Nombre entier"
+
+msgid "Int_plural"
+msgstr "Nombres entiers"
+
+msgid "Interval"
+msgstr "Durée"
+
+msgid "IntervalBoundConstraint"
+msgstr "contrainte d'intervalle"
+
+msgid "Interval_plural"
+msgstr "Durées"
+
+msgid "Link:"
+msgstr "Lien :"
+
+msgid "Looked up classes"
+msgstr "Classes recherchées"
+
+msgid "Manage"
+msgstr "Administration"
+
+msgid "Manage security"
+msgstr "Gestion de la sécurité"
+
+msgid "Message threshold"
+msgstr "Niveau du message"
+
+msgid "Most referenced classes"
+msgstr "Classes les plus référencées"
+
+msgid "New BaseTransition"
+msgstr "XXX"
+
+msgid "New Bookmark"
+msgstr "Nouveau signet"
+
+msgid "New CWAttribute"
+msgstr "Nouvelle définition de relation finale"
+
+msgid "New CWCache"
+msgstr "Nouveau cache applicatif"
+
+msgid "New CWComputedRType"
+msgstr "Nouvelle relation virtuelle"
+
+msgid "New CWConstraint"
+msgstr "Nouvelle contrainte"
+
+msgid "New CWConstraintType"
+msgstr "Nouveau type de contrainte"
+
+msgid "New CWDataImport"
+msgstr "Nouvel import de données"
+
+msgid "New CWEType"
+msgstr "Nouveau type d'entité"
+
+msgid "New CWGroup"
+msgstr "Nouveau groupe"
+
+msgid "New CWProperty"
+msgstr "Nouvelle propriété"
+
+msgid "New CWRType"
+msgstr "Nouveau type de relation"
+
+msgid "New CWRelation"
+msgstr "Nouvelle définition de relation non finale"
+
+msgid "New CWSource"
+msgstr "Nouvelle source"
+
+msgid "New CWSourceHostConfig"
+msgstr "Nouvelle configuration de source"
+
+msgid "New CWSourceSchemaConfig"
+msgstr "Nouvelle partie de mapping de source"
+
+msgid "New CWUniqueTogetherConstraint"
+msgstr "Nouvelle contrainte unique_together"
+
+msgid "New CWUser"
+msgstr "Nouvel utilisateur"
+
+msgid "New EmailAddress"
+msgstr "Nouvelle adresse électronique"
+
+msgid "New ExternalUri"
+msgstr "Nouvelle Uri externe"
+
+msgid "New RQLExpression"
+msgstr "Nouvelle expression rql"
+
+msgid "New State"
+msgstr "Nouvel état"
+
+msgid "New SubWorkflowExitPoint"
+msgstr "Nouvelle sortie de sous-workflow"
+
+msgid "New TrInfo"
+msgstr "Nouvelle information de transition"
+
+msgid "New Transition"
+msgstr "Nouvelle transition"
+
+msgid "New Workflow"
+msgstr "Nouveau workflow"
+
+msgid "New WorkflowTransition"
+msgstr "Nouvelle transition workflow"
+
+msgid "No result matching query"
+msgstr "Aucun résultat ne correspond à la requête"
+
+msgid "Non exhaustive list of views that may apply to entities of this type"
+msgstr "Liste non exhaustive des vues s'appliquant à ce type d'entité"
+
+msgid "OR"
+msgstr "OU"
+
+msgid "Ownership"
+msgstr "Propriété"
+
+msgid "Parent class:"
+msgstr "Classe parente"
+
+msgid "Password"
+msgstr "Mot de passe"
+
+msgid "Password_plural"
+msgstr "Mots de passe"
+
+msgid "Please note that this is only a shallow copy"
+msgstr "Attention, cela n'effectue qu'une copie de surface"
+
+msgid "Powered by CubicWeb"
+msgstr "Construit avec CubicWeb"
+
+msgid "RQLConstraint"
+msgstr "contrainte rql"
+
+msgid "RQLExpression"
+msgstr "Expression RQL"
+
+msgid "RQLExpression_plural"
+msgstr "Expressions RQL"
+
+msgid "RQLUniqueConstraint"
+msgstr "contrainte rql d'unicité"
+
+msgid "RQLVocabularyConstraint"
+msgstr "contrainte rql de vocabulaire"
+
+msgid "RegexpConstraint"
+msgstr "contrainte expression régulière"
+
+msgid "Registry's content"
+msgstr "Contenu du registre"
+
+msgid "Relations"
+msgstr "Relations"
+
+msgid "Repository"
+msgstr "Entrepôt de données"
+
+#, python-format
+msgid "Schema %s"
+msgstr "Schéma %s"
+
+msgid "Schema's permissions definitions"
+msgstr "Permissions définies dans le schéma"
+
+msgid "Search for"
+msgstr "Rechercher"
+
+msgid "Site information"
+msgstr "Information du site"
+
+msgid "SizeConstraint"
+msgstr "contrainte de taille"
+
+msgid ""
+"Source's configuration for a particular host. One key=value per line, "
+"authorized keys depending on the source's type, overriding values defined on "
+"the source."
+msgstr ""
+"Configuration de la source pour un hôte spécifique. Une clé=valeur par "
+"ligne, les clés autorisées dépendantes du type de source. Les valeurs "
+"surchargent celles définies sur la source."
+
+msgid "Startup views"
+msgstr "Vues de départ"
+
+msgid "State"
+msgstr "État"
+
+msgid "State_plural"
+msgstr "États"
+
+msgid "StaticVocabularyConstraint"
+msgstr "contrainte de vocabulaire"
+
+msgid "String"
+msgstr "Chaîne de caractères"
+
+msgid "String_plural"
+msgstr "Chaînes de caractères"
+
+msgid "Sub-classes:"
+msgstr "Classes filles :"
+
+msgid "SubWorkflowExitPoint"
+msgstr "Sortie de sous-workflow"
+
+msgid "SubWorkflowExitPoint_plural"
+msgstr "Sorties de sous-workflow"
+
+msgid "Submit bug report"
+msgstr "Soumettre un rapport de bug"
+
+msgid "Submit bug report by mail"
+msgstr "Soumettre ce rapport par email"
+
+msgid "TZDatetime"
+msgstr "Date et heure internationale"
+
+msgid "TZDatetime_plural"
+msgstr "Dates et heures internationales"
+
+msgid "TZTime"
+msgstr "Heure internationale"
+
+msgid "TZTime_plural"
+msgstr "Heures internationales"
+
+#, python-format
+msgid "The view %s can not be applied to this query"
+msgstr "La vue %s ne peut être appliquée à cette requête"
+
+#, python-format
+msgid "The view %s could not be found"
+msgstr "La vue %s est introuvable"
+
+msgid "There is no default workflow"
+msgstr "Ce type d'entité n'a pas de workflow par défaut"
+
+msgid "This BaseTransition:"
+msgstr "Cette transition abstraite :"
+
+msgid "This Bookmark:"
+msgstr "Ce signet :"
+
+msgid "This CWAttribute:"
+msgstr "Cette définition de relation finale :"
+
+msgid "This CWCache:"
+msgstr "Ce cache applicatif :"
+
+msgid "This CWComputedRType:"
+msgstr "Cette relation virtuelle :"
+
+msgid "This CWConstraint:"
+msgstr "Cette contrainte :"
+
+msgid "This CWConstraintType:"
+msgstr "Ce type de contrainte :"
+
+msgid "This CWDataImport:"
+msgstr "Cet import de données :"
+
+msgid "This CWEType:"
+msgstr "Ce type d'entité :"
+
+msgid "This CWGroup:"
+msgstr "Ce groupe :"
+
+msgid "This CWProperty:"
+msgstr "Cette propriété :"
+
+msgid "This CWRType:"
+msgstr "Ce type de relation :"
+
+msgid "This CWRelation:"
+msgstr "Cette définition de relation :"
+
+msgid "This CWSource:"
+msgstr "Cette source :"
+
+msgid "This CWSourceHostConfig:"
+msgstr "Cette configuration de source :"
+
+msgid "This CWSourceSchemaConfig:"
+msgstr "Cette partie de mapping de source :"
+
+msgid "This CWUniqueTogetherConstraint:"
+msgstr "Cette contrainte unique_together :"
+
+msgid "This CWUser:"
+msgstr "Cet utilisateur :"
+
+msgid "This EmailAddress:"
+msgstr "Cette adresse électronique :"
+
+msgid "This ExternalUri:"
+msgstr "Cette Uri externe :"
+
+msgid "This RQLExpression:"
+msgstr "Cette expression RQL :"
+
+msgid "This State:"
+msgstr "Cet état :"
+
+msgid "This SubWorkflowExitPoint:"
+msgstr "Cette sortie de sous-workflow :"
+
+msgid "This TrInfo:"
+msgstr "Cette information de transition :"
+
+msgid "This Transition:"
+msgstr "Cette transition :"
+
+msgid "This Workflow:"
+msgstr "Ce workflow :"
+
+msgid "This WorkflowTransition:"
+msgstr "Cette transition workflow :"
+
+msgid ""
+"This action is forbidden. If you think it should be allowed, please contact "
+"the site administrator."
+msgstr ""
+"Cette action est interdite. Si toutefois vous pensez qu'elle devrait être "
+"autorisée, veuillez contacter l'administrateur du site."
+
+msgid "This entity type permissions:"
+msgstr "Permissions pour ce type d'entité"
+
+msgid "Time"
+msgstr "Heure"
+
+msgid "Time_plural"
+msgstr "Heures"
+
+msgid "TrInfo"
+msgstr "Information transition"
+
+msgid "TrInfo_plural"
+msgstr "Information transitions"
+
+msgid "Transition"
+msgstr "Transition"
+
+msgid "Transition_plural"
+msgstr "Transitions"
+
+msgid "URLs from which content will be imported. You can put one url per line"
+msgstr ""
+"URLs depuis lesquelles le contenu sera importé. Vous pouvez mettre une URL "
+"par ligne."
+
+msgid "Undoable actions"
+msgstr "Actions annulables"
+
+msgid "Undoing"
+msgstr "Annuler"
+
+msgid "UniqueConstraint"
+msgstr "contrainte d'unicité"
+
+msgid "Unknown source type"
+msgstr "Type de source inconnu"
+
+msgid "Unreachable objects"
+msgstr "Objets inaccessibles"
+
+#, python-format
+msgid "Updated %(etype)s : %(entity)s"
+msgstr "Entité %(etype)s mise à jour : %(entity)s"
+
+msgid "Used by:"
+msgstr "Utilisé par :"
+
+msgid "Users and groups management"
+msgstr "Gestion des utilisateurs et groupes"
+
+msgid "WARNING"
+msgstr "AVERTISSEMENT"
+
+msgid "Web server"
+msgstr "Serveur web"
+
+msgid "Workflow"
+msgstr "Workflow"
+
+msgid "Workflow history"
+msgstr "Historique des changements d'état"
+
+msgid "WorkflowTransition"
+msgstr "Transition workflow"
+
+msgid "WorkflowTransition_plural"
+msgstr "Transitions workflow"
+
+msgid "Workflow_plural"
+msgstr "Workflows"
+
+msgid ""
+"You can either submit a new file using the browse button above, or choose to "
+"remove already uploaded file by checking the \"detach attached file\" check-"
+"box, or edit file content online with the widget below."
+msgstr ""
+"Vous pouvez soit soumettre un nouveau fichier en utilisant le bouton\n"
+"\"parcourir\" ci-dessus, soit supprimer le fichier déjà présent en\n"
+"cochant la case \"détacher fichier attaché\", soit éditer le contenu\n"
+"du fichier en ligne avec le champ ci-dessous."
+
+msgid ""
+"You can either submit a new file using the browse button above, or edit file "
+"content online with the widget below."
+msgstr ""
+"Vous pouvez soit soumettre un nouveau fichier en utilisant le bouton\n"
+"\"parcourir\" ci-dessus, soit éditer le contenu du fichier en ligne\n"
+"avec le champ ci-dessous."
+
+msgid "You can't change this relation"
+msgstr "Vous ne pouvez pas modifier cette relation"
+
+msgid "You cannot remove the system source"
+msgstr "Vous ne pouvez pas supprimer la source système"
+
+msgid "You cannot rename the system source"
+msgstr "Vous ne pouvez pas renommer la source système"
+
+msgid ""
+"You have no access to this view or it can not be used to display the current "
+"data."
+msgstr ""
+"Vous n'avez pas accès à cette vue ou elle ne peut pas afficher ces données."
+
+msgid ""
+"You're not authorized to access this page. If you think you should, please "
+"contact the site administrator."
+msgstr ""
+"Vous n'êtes pas autorisé à accéder à cette page. Si toutefois vous pensez\n"
+"que c'est une erreur, veuillez contacter l'administrateur du site."
+
+#, python-format
+msgid "[%s supervision] changes summary"
+msgstr "[%s supervision] description des changements"
+
+msgid ""
+"a RQL expression which should return some results, else the transition won't "
+"be available. This query may use X and U variables that will respectivly "
+"represents the current entity and the current user."
+msgstr ""
+"une expression RQL devant retourner des résultats pour que la transition "
+"puisse être passée. Cette expression peut utiliser les variables X et U qui "
+"représentent respectivement l'entité à laquelle on veut appliquer la "
+"transition et l'utilisateur courant."
+
+msgid "a URI representing an object in external data store"
+msgstr "une Uri désignant un objet dans un entrepôt de données externe"
+
+msgid "a float is expected"
+msgstr "un nombre flottant est attendu"
+
+msgid "a number (in seconds) or 20s, 10min, 24h or 4d are expected"
+msgstr "un nombre (en seconde) ou 20s, 10min, 24h ou 4d sont attendus"
+
+msgid ""
+"a simple cache entity characterized by a name and a validity date. The "
+"target application is responsible for updating timestamp when necessary to "
+"invalidate the cache (typically in hooks). Also, checkout the AppObject."
+"get_cache() method."
+msgstr ""
+"un cache simple caractérisé par un nom et une date de validité. C'est\n"
+"le code de l'instance qui est responsable de mettre à jour la date de\n"
+"validité lorsque le cache doit être invalidé (en général dans un hook).\n"
+"Pour récupérer un cache, il faut utiliser la méthode\n"
+"get_cache(cachename)."
+
+msgid "abstract base class for transitions"
+msgstr "classe de base abstraite pour les transitions"
+
+msgid "action menu"
+msgstr "actions"
+
+msgid "action(s) on this selection"
+msgstr "action(s) sur cette sélection"
+
+msgid "actions"
+msgstr "actions"
+
+msgid "activate"
+msgstr "activer"
+
+msgid "activated"
+msgstr "activé"
+
+msgid "add"
+msgstr "ajouter"
+
+msgid "add Bookmark bookmarked_by CWUser object"
+msgstr "signet"
+
+msgid "add CWAttribute add_permission RQLExpression subject"
+msgstr "définir une expression RQL d'ajout"
+
+msgid "add CWAttribute constrained_by CWConstraint subject"
+msgstr "contrainte"
+
+msgid "add CWAttribute read_permission RQLExpression subject"
+msgstr "expression rql de lecture"
+
+msgid "add CWAttribute relation_type CWRType object"
+msgstr "définition d'attribut"
+
+msgid "add CWAttribute update_permission RQLExpression subject"
+msgstr "permission de mise à jour"
+
+msgid "add CWEType add_permission RQLExpression subject"
+msgstr "définir une expression RQL d'ajout"
+
+msgid "add CWEType delete_permission RQLExpression subject"
+msgstr "définir une expression RQL de suppression"
+
+msgid "add CWEType read_permission RQLExpression subject"
+msgstr "définir une expression RQL de lecture"
+
+msgid "add CWEType update_permission RQLExpression subject"
+msgstr "définir une expression RQL de mise à jour"
+
+msgid "add CWProperty for_user CWUser object"
+msgstr "propriété"
+
+msgid "add CWRelation add_permission RQLExpression subject"
+msgstr "expression rql d'ajout"
+
+msgid "add CWRelation constrained_by CWConstraint subject"
+msgstr "contrainte"
+
+msgid "add CWRelation delete_permission RQLExpression subject"
+msgstr "expression rql de suppression"
+
+msgid "add CWRelation read_permission RQLExpression subject"
+msgstr "expression rql de lecture"
+
+msgid "add CWRelation relation_type CWRType object"
+msgstr "définition de relation"
+
+msgid "add CWSourceHostConfig cw_host_config_of CWSource object"
+msgstr "configuration d'hôte"
+
+msgid "add CWUniqueTogetherConstraint constraint_of CWEType object"
+msgstr "contrainte unique_together"
+
+msgid "add CWUser in_group CWGroup object"
+msgstr "utilisateur"
+
+msgid "add CWUser use_email EmailAddress subject"
+msgstr "adresse email"
+
+msgid "add State allowed_transition Transition object"
+msgstr "état en entrée"
+
+msgid "add State allowed_transition Transition subject"
+msgstr "transition en sortie"
+
+msgid "add State allowed_transition WorkflowTransition subject"
+msgstr "transition workflow en sortie"
+
+msgid "add State state_of Workflow object"
+msgstr "état"
+
+msgid "add Transition condition RQLExpression subject"
+msgstr "condition"
+
+msgid "add Transition destination_state State object"
+msgstr "transition en entrée"
+
+msgid "add Transition destination_state State subject"
+msgstr "état de sortie"
+
+msgid "add Transition transition_of Workflow object"
+msgstr "transition"
+
+msgid "add WorkflowTransition condition RQLExpression subject"
+msgstr "condition"
+
+msgid "add WorkflowTransition subworkflow_exit SubWorkflowExitPoint subject"
+msgstr "sortie de sous-workflow"
+
+msgid "add WorkflowTransition transition_of Workflow object"
+msgstr "transition workflow"
+
+msgid "add a BaseTransition"
+msgstr ""
+
+msgid "add a Bookmark"
+msgstr ""
+
+msgid "add a CWAttribute"
+msgstr ""
+
+msgid "add a CWCache"
+msgstr ""
+
+msgid "add a CWComputedRType"
+msgstr ""
+
+msgid "add a CWConstraint"
+msgstr ""
+
+msgid "add a CWConstraintType"
+msgstr ""
+
+msgid "add a CWDataImport"
+msgstr ""
+
+msgid "add a CWEType"
+msgstr ""
+
+msgctxt "inlined:CWRelation.from_entity.subject"
+msgid "add a CWEType"
+msgstr "ajouter un type d'entité sujet"
+
+msgctxt "inlined:CWRelation.to_entity.subject"
+msgid "add a CWEType"
+msgstr "ajouter un type d'entité objet"
+
+msgid "add a CWGroup"
+msgstr ""
+
+msgid "add a CWProperty"
+msgstr ""
+
+msgid "add a CWRType"
+msgstr ""
+
+msgctxt "inlined:CWRelation.relation_type.subject"
+msgid "add a CWRType"
+msgstr "ajouter un type de relation"
+
+msgid "add a CWRelation"
+msgstr ""
+
+msgid "add a CWSource"
+msgstr ""
+
+msgid "add a CWSourceHostConfig"
+msgstr ""
+
+msgid "add a CWSourceSchemaConfig"
+msgstr ""
+
+msgid "add a CWUniqueTogetherConstraint"
+msgstr ""
+
+msgid "add a CWUser"
+msgstr ""
+
+msgid "add a EmailAddress"
+msgstr ""
+
+msgctxt "inlined:CWUser.use_email.subject"
+msgid "add a EmailAddress"
+msgstr "ajouter une adresse électronique"
+
+msgid "add a ExternalUri"
+msgstr ""
+
+msgid "add a RQLExpression"
+msgstr ""
+
+msgid "add a State"
+msgstr ""
+
+msgid "add a SubWorkflowExitPoint"
+msgstr ""
+
+msgid "add a TrInfo"
+msgstr ""
+
+msgid "add a Transition"
+msgstr ""
+
+msgid "add a Workflow"
+msgstr ""
+
+msgid "add a WorkflowTransition"
+msgstr ""
+
+# subject and object forms for each relation type
+# (no object form for final relation types)
+msgid "add_permission"
+msgstr "peut ajouter"
+
+msgctxt "CWAttribute"
+msgid "add_permission"
+msgstr "permission d'ajout"
+
+# subject and object forms for each relation type
+# (no object form for final relation types)
+msgctxt "CWEType"
+msgid "add_permission"
+msgstr "permission d'ajout"
+
+msgctxt "CWRelation"
+msgid "add_permission"
+msgstr "permission d'ajout"
+
+msgid "add_permission_object"
+msgstr "a la permission d'ajouter"
+
+msgctxt "CWGroup"
+msgid "add_permission_object"
+msgstr "a la permission d'ajouter"
+
+msgctxt "RQLExpression"
+msgid "add_permission_object"
+msgstr "a la permission d'ajouter"
+
+msgid "add_relation"
+msgstr "ajouter"
+
+#, python-format
+msgid "added %(etype)s #%(eid)s (%(title)s)"
+msgstr "ajout de l'entité %(etype)s #%(eid)s (%(title)s)"
+
+#, python-format
+msgid ""
+"added relation %(rtype)s from %(frometype)s #%(eidfrom)s to %(toetype)s #"
+"%(eidto)s"
+msgstr ""
+"la relation %(rtype)s de %(frometype)s #%(eidfrom)s vers %(toetype)s #"
+"%(eidto)s a été ajoutée"
+
+msgid "additional type specific properties"
+msgstr "propriétés supplémentaires spécifiques au type"
+
+msgid "addrelated"
+msgstr "ajouter"
+
+msgid "address"
+msgstr "adresse électronique"
+
+msgctxt "EmailAddress"
+msgid "address"
+msgstr "adresse électronique"
+
+msgid "alias"
+msgstr "alias"
+
+msgctxt "EmailAddress"
+msgid "alias"
+msgstr "alias"
+
+msgid "allow to set a specific workflow for an entity"
+msgstr "permet de spécifier un workflow donné pour une entité"
+
+msgid "allowed options depends on the source type"
+msgstr "les options autorisées dépendent du type de la source"
+
+msgid "allowed transitions from this state"
+msgstr "transitions autorisées depuis cet état"
+
+#, python-format
+msgid "allowed values for \"action\" are %s"
+msgstr "les valeurs autorisées pour \"action\" sont %s"
+
+msgid "allowed_transition"
+msgstr "transitions autorisées"
+
+msgctxt "State"
+msgid "allowed_transition"
+msgstr "transitions autorisées"
+
+msgid "allowed_transition_object"
+msgstr "états en entrée"
+
+msgctxt "BaseTransition"
+msgid "allowed_transition_object"
+msgstr "transition autorisée de"
+
+msgctxt "Transition"
+msgid "allowed_transition_object"
+msgstr "transition autorisée de"
+
+msgctxt "WorkflowTransition"
+msgid "allowed_transition_object"
+msgstr "transition autorisée de"
+
+msgid "an electronic mail address associated to a short alias"
+msgstr "une adresse électronique associée à un alias"
+
+msgid "an error occurred"
+msgstr "une erreur est survenue"
+
+msgid "an error occurred while processing your request"
+msgstr "une erreur est survenue pendant le traitement de votre requête"
+
+msgid "an error occurred, the request cannot be fulfilled"
+msgstr "une erreur est survenue, la requête ne peut être complétée"
+
+msgid "an integer is expected"
+msgstr "un nombre entier est attendu"
+
+msgid "and linked"
+msgstr "et liée"
+
+msgid "and/or between different values"
+msgstr "et/ou entre les différentes valeurs"
+
+msgid "anyrsetview"
+msgstr "vues pour tout rset"
+
+msgid "april"
+msgstr "avril"
+
+#, python-format
+msgid "archive for %(author)s"
+msgstr "archive pour l'auteur %(author)s"
+
+#, python-format
+msgid "archive for %(month)s/%(year)s"
+msgstr "archive pour le mois %(month)s/%(year)s"
+
+#, python-format
+msgid "at least one relation %(rtype)s is required on %(etype)s (%(eid)s)"
+msgstr ""
+"l'entité #%(eid)s de type %(etype)s doit nécessairement être reliée à une\n"
+"autre via la relation %(rtype)s"
+
+msgid "attribute"
+msgstr "attribut"
+
+msgid "august"
+msgstr "août"
+
+msgid "authentication failure"
+msgstr "Identifiant ou mot de passe incorrect"
+
+msgid "auto"
+msgstr "automatique"
+
+msgid "autocomputed attribute used to ensure transition coherency"
+msgstr ""
+"attribut calculé automatiquement pour assurer la cohérence de la transition"
+
+msgid "automatic"
+msgstr "automatique"
+
+#, python-format
+msgid "back to pagination (%s results)"
+msgstr "retour à la vue paginée (%s résultats)"
+
+msgid "bad value"
+msgstr "mauvaise valeur"
+
+msgid "badly formatted url"
+msgstr "URL mal formatée"
+
+msgid "base url"
+msgstr "url de base"
+
+msgid "bookmark has been removed"
+msgstr "le signet a été retiré"
+
+msgid "bookmark this page"
+msgstr "poser un signet ici"
+
+msgid "bookmark this search"
+msgstr "mémoriser cette recherche"
+
+msgid "bookmarked_by"
+msgstr "utilisé par"
+
+msgctxt "Bookmark"
+msgid "bookmarked_by"
+msgstr "utilisé par"
+
+msgid "bookmarked_by_object"
+msgstr "utilise le(s) signet(s)"
+
+msgctxt "CWUser"
+msgid "bookmarked_by_object"
+msgstr "utilise le(s) signet(s)"
+
+msgid "bookmarks"
+msgstr "signets"
+
+msgid "bookmarks are used to have user's specific internal links"
+msgstr ""
+"les signets sont utilisés pour gérer des liens internes par utilisateur"
+
+msgid "boxes"
+msgstr "boîtes"
+
+msgid "bug report sent"
+msgstr "rapport d'erreur envoyé"
+
+msgid "button_apply"
+msgstr "appliquer"
+
+msgid "button_cancel"
+msgstr "annuler"
+
+msgid "button_delete"
+msgstr "supprimer"
+
+msgid "button_ok"
+msgstr "valider"
+
+msgid "by"
+msgstr "par"
+
+msgid "by relation"
+msgstr "via la relation"
+
+msgid "by_transition"
+msgstr "transition"
+
+msgctxt "TrInfo"
+msgid "by_transition"
+msgstr "transition"
+
+msgid "by_transition_object"
+msgstr "changement d'états"
+
+msgctxt "BaseTransition"
+msgid "by_transition_object"
+msgstr "a pour information"
+
+msgctxt "Transition"
+msgid "by_transition_object"
+msgstr "a pour information"
+
+msgctxt "WorkflowTransition"
+msgid "by_transition_object"
+msgstr "a pour information"
+
+msgid "calendar"
+msgstr "afficher un calendrier"
+
+msgid "can not resolve entity types:"
+msgstr "impossible d'interpréter les types d'entités :"
+
+msgid "can only have one url"
+msgstr "ne supporte qu'une seule URL"
+
+msgid "can't be changed"
+msgstr "ne peut être modifié"
+
+msgid "can't be deleted"
+msgstr "ne peut être supprimé"
+
+msgid "can't change this attribute"
+msgstr "cet attribut ne peut pas être modifié"
+
+#, python-format
+msgid "can't display data, unexpected error: %s"
+msgstr "impossible d'afficher les données à cause de l'erreur suivante : %s"
+
+msgid "can't have multiple exits on the same state"
+msgstr "ne peut avoir plusieurs sorties sur le même état"
+
+#, python-format
+msgid "can't parse %(value)r (expected %(format)s)"
+msgstr "ne peut analyser %(value)r (format attendu : %(format)s)"
+
+#, python-format
+msgid ""
+"can't restore entity %(eid)s of type %(eschema)s, target of %(rtype)s (eid "
+"%(value)s) does not exist any longer"
+msgstr ""
+"impossible de rétablir l'entité %(eid)s de type %(eschema)s, cible de la "
+"relation %(rtype)s (eid %(value)s) n'existe plus"
+
+#, python-format
+msgid ""
+"can't restore relation %(rtype)s of entity %(eid)s, this relation does not "
+"exist in the schema anymore."
+msgstr ""
+"impossible de rétablir la relation %(rtype)s sur l'entité %(eid)s, cette "
+"relation n'existe plus dans le schéma."
+
+#, python-format
+msgid "can't restore state of entity %s, it has been deleted inbetween"
+msgstr ""
+"impossible de rétablir l'état de l'entité %s, elle a été supprimée entre-"
+"temps"
+
+#, python-format
+msgid ""
+"can't set inlined=True, %(stype)s %(rtype)s %(otype)s has cardinality="
+"%(card)s"
+msgstr ""
+"ne peut mettre 'inlined'=Vrai, %(stype)s %(rtype)s %(otype)s a pour "
+"cardinalité %(card)s"
+
+msgid "cancel"
+msgstr "annuler"
+
+msgid "cancel select"
+msgstr "annuler la sélection"
+
+msgid "cancel this insert"
+msgstr "annuler cette insertion"
+
+msgid "cardinality"
+msgstr "cardinalité"
+
+msgctxt "CWAttribute"
+msgid "cardinality"
+msgstr "cardinalité"
+
+msgctxt "CWRelation"
+msgid "cardinality"
+msgstr "cardinalité"
+
+msgid "category"
+msgstr "catégorie"
+
+#, python-format
+msgid "changed state of %(etype)s #%(eid)s (%(title)s)"
+msgstr "changement de l'état de %(etype)s #%(eid)s (%(title)s)"
+
+msgid "changes applied"
+msgstr "changements appliqués"
+
+msgid "click here to see created entity"
+msgstr "cliquez ici pour voir l'entité créée"
+
+msgid "click here to see edited entity"
+msgstr "cliquez ici pour voir l'entité modifiée"
+
+msgid "click on the box to cancel the deletion"
+msgstr "cliquez dans la zone d'édition pour annuler la suppression"
+
+msgid "click to add a value"
+msgstr "cliquer pour ajouter une valeur"
+
+msgid "click to delete this value"
+msgstr "cliquer pour supprimer cette valeur"
+
+msgid "click to edit this field"
+msgstr "cliquez pour éditer ce champ"
+
+msgid "close all"
+msgstr "tout fermer"
+
+msgid "comment"
+msgstr "commentaire"
+
+msgctxt "TrInfo"
+msgid "comment"
+msgstr "commentaire"
+
+msgid "comment_format"
+msgstr "format"
+
+msgctxt "TrInfo"
+msgid "comment_format"
+msgstr "format"
+
+msgid "components"
+msgstr "composants"
+
+msgid "components_navigation"
+msgstr "navigation par page"
+
+msgid "components_navigation_description"
+msgstr ""
+"composant permettant de présenter sur plusieurs pages les requêtes renvoyant "
+"plus d'un certain nombre de résultats"
+
+msgid "components_rqlinput"
+msgstr "barre rql"
+
+msgid "components_rqlinput_description"
+msgstr "la barre de requête rql, dans l'en-tête de page"
+
+msgid "composite"
+msgstr "composite"
+
+msgctxt "CWRelation"
+msgid "composite"
+msgstr "composite"
+
+msgid "condition"
+msgstr "condition"
+
+msgctxt "BaseTransition"
+msgid "condition"
+msgstr "condition"
+
+msgctxt "Transition"
+msgid "condition"
+msgstr "condition"
+
+msgctxt "WorkflowTransition"
+msgid "condition"
+msgstr "condition"
+
+msgid "condition_object"
+msgstr "condition de"
+
+msgctxt "RQLExpression"
+msgid "condition_object"
+msgstr "condition de"
+
+msgid "conditions"
+msgstr "conditions"
+
+msgid "config"
+msgstr "configuration"
+
+msgctxt "CWSource"
+msgid "config"
+msgstr "configuration"
+
+msgctxt "CWSourceHostConfig"
+msgid "config"
+msgstr "configuration"
+
+msgid "config mode"
+msgstr "mode de configuration"
+
+msgid "config type"
+msgstr "type de configuration"
+
+msgid "confirm password"
+msgstr "confirmer le mot de passe"
+
+msgid "constrained_by"
+msgstr "contraint par"
+
+msgctxt "CWAttribute"
+msgid "constrained_by"
+msgstr "contraint par"
+
+msgctxt "CWRelation"
+msgid "constrained_by"
+msgstr "contraint par"
+
+msgid "constrained_by_object"
+msgstr "contrainte de"
+
+msgctxt "CWConstraint"
+msgid "constrained_by_object"
+msgstr "contrainte de"
+
+msgid "constraint factory"
+msgstr "fabrique de contraintes"
+
+msgid "constraint_of"
+msgstr "contrainte de"
+
+msgctxt "CWUniqueTogetherConstraint"
+msgid "constraint_of"
+msgstr "contrainte de"
+
+msgid "constraint_of_object"
+msgstr "contraint par"
+
+msgctxt "CWEType"
+msgid "constraint_of_object"
+msgstr "contraint par"
+
+msgid "constraints"
+msgstr "contraintes"
+
+msgid "constraints applying on this relation"
+msgstr "contraintes s'appliquant à cette relation"
+
+msgid "content type"
+msgstr "type MIME"
+
+msgid "context"
+msgstr "contexte"
+
+msgid "context where this box should be displayed"
+msgstr "contexte dans lequel la boite devrait être affichée"
+
+msgid "context where this component should be displayed"
+msgstr "contexte où ce composant doit être affiché"
+
+msgid "context where this facet should be displayed, leave empty for both"
+msgstr ""
+"contexte où cette facette doit être affichée. Laissez ce champ vide pour "
+"l'avoir dans les deux."
+
+msgid "control subject entity's relations order"
+msgstr "contrôle l'ordre des relations de l'entité sujet"
+
+msgid "copy"
+msgstr "copier"
+
+msgid "core relation indicating a user's groups"
+msgstr ""
+"relation système indiquant les groupes auxquels appartient l'utilisateur"
+
+msgid ""
+"core relation indicating owners of an entity. This relation implicitly put "
+"the owner into the owners group for the entity"
+msgstr ""
+"relation système indiquant le(s) propriétaire(s) d'une entité. Cette "
+"relation place implicitement les utilisateurs liés dans le groupe des "
+"propriétaires pour cette entité"
+
+msgid "core relation indicating the original creator of an entity"
+msgstr "relation système indiquant le créateur d'une entité."
+
+msgid "core relation indicating the type of an entity"
+msgstr "relation système indiquant le type de l'entité"
+
+msgid ""
+"core relation indicating the types (including specialized types) of an entity"
+msgstr ""
+"relation système indiquant les types (y compris les types parents) d'une "
+"entité"
+
+msgid "could not connect to the SMTP server"
+msgstr "impossible de se connecter au serveur SMTP"
+
+msgid "create an index for quick search on this attribute"
+msgstr "créer un index pour accélérer les recherches sur cet attribut"
+
+msgid "created on"
+msgstr "créé le"
+
+msgid "created_by"
+msgstr "créé par"
+
+msgid "created_by_object"
+msgstr "a créé"
+
+msgid "creating Bookmark (Bookmark bookmarked_by CWUser %(linkto)s)"
+msgstr "création d'un signet pour %(linkto)s"
+
+msgid "creating CWAttribute (CWAttribute relation_type CWRType %(linkto)s)"
+msgstr "création d'un attribut %(linkto)s"
+
+msgid ""
+"creating CWConstraint (CWAttribute %(linkto)s constrained_by CWConstraint)"
+msgstr "création d'une contrainte pour l'attribut %(linkto)s"
+
+msgid ""
+"creating CWConstraint (CWRelation %(linkto)s constrained_by CWConstraint)"
+msgstr "création d'une contrainte pour la relation %(linkto)s"
+
+msgid "creating CWProperty (CWProperty for_user CWUser %(linkto)s)"
+msgstr "création d'une propriété pour l'utilisateur %(linkto)s"
+
+msgid "creating CWRelation (CWRelation relation_type CWRType %(linkto)s)"
+msgstr "création relation %(linkto)s"
+
+msgid ""
+"creating CWSourceHostConfig (CWSourceHostConfig cw_host_config_of CWSource "
+"%(linkto)s)"
+msgstr "création d'une configuration d'hôte pour la source %(linkto)s"
+
+msgid ""
+"creating CWUniqueTogetherConstraint (CWUniqueTogetherConstraint "
+"constraint_of CWEType %(linkto)s)"
+msgstr "création d'une contrainte unique_together sur %(linkto)s"
+
+msgid "creating CWUser (CWUser in_group CWGroup %(linkto)s)"
+msgstr "création d'un utilisateur à rajouter au groupe %(linkto)s"
+
+msgid "creating EmailAddress (CWUser %(linkto)s use_email EmailAddress)"
+msgstr "création d'une adresse électronique pour l'utilisateur %(linkto)s"
+
+msgid ""
+"creating RQLExpression (CWAttribute %(linkto)s add_permission RQLExpression)"
+msgstr "création d'une expression rql pour le droit d'ajout de %(linkto)s"
+
+msgid ""
+"creating RQLExpression (CWAttribute %(linkto)s read_permission RQLExpression)"
+msgstr "création d'une expression rql pour le droit de lecture de %(linkto)s"
+
+msgid ""
+"creating RQLExpression (CWAttribute %(linkto)s update_permission "
+"RQLExpression)"
+msgstr ""
+"création d'une expression rql pour le droit de mise à jour de %(linkto)s"
+
+msgid ""
+"creating RQLExpression (CWEType %(linkto)s add_permission RQLExpression)"
+msgstr "création d'une expression RQL pour la permission d'ajout de %(linkto)s"
+
+msgid ""
+"creating RQLExpression (CWEType %(linkto)s delete_permission RQLExpression)"
+msgstr ""
+"création d'une expression RQL pour la permission de suppression de %(linkto)s"
+
+msgid ""
+"creating RQLExpression (CWEType %(linkto)s read_permission RQLExpression)"
+msgstr "création d'une expression RQL pour la permission de lire %(linkto)s"
+
+msgid ""
+"creating RQLExpression (CWEType %(linkto)s update_permission RQLExpression)"
+msgstr ""
+"création d'une expression RQL pour la permission de mise à jour de %(linkto)s"
+
+msgid ""
+"creating RQLExpression (CWRelation %(linkto)s add_permission RQLExpression)"
+msgstr "création d'une expression rql pour le droit d'ajout de %(linkto)s"
+
+msgid ""
+"creating RQLExpression (CWRelation %(linkto)s delete_permission "
+"RQLExpression)"
+msgstr ""
+"création d'une expression rql pour le droit de suppression de %(linkto)s"
+
+msgid ""
+"creating RQLExpression (CWRelation %(linkto)s read_permission RQLExpression)"
+msgstr "création d'une expression rql pour le droit de lecture de %(linkto)s"
+
+msgid "creating RQLExpression (Transition %(linkto)s condition RQLExpression)"
+msgstr "création d'une expression RQL pour la transition %(linkto)s"
+
+msgid ""
+"creating RQLExpression (WorkflowTransition %(linkto)s condition "
+"RQLExpression)"
+msgstr "création d'une expression RQL pour la transition workflow %(linkto)s"
+
+msgid "creating State (State allowed_transition Transition %(linkto)s)"
+msgstr "création d'un état pouvant aller vers la transition %(linkto)s"
+
+msgid "creating State (State state_of Workflow %(linkto)s)"
+msgstr "création d'un état du workflow %(linkto)s"
+
+msgid "creating State (Transition %(linkto)s destination_state State)"
+msgstr "création d'un état destination de la transition %(linkto)s"
+
+msgid ""
+"creating SubWorkflowExitPoint (WorkflowTransition %(linkto)s "
+"subworkflow_exit SubWorkflowExitPoint)"
+msgstr "création d'un point de sortie de la transition workflow %(linkto)s"
+
+msgid "creating Transition (State %(linkto)s allowed_transition Transition)"
+msgstr "création d'une transition autorisée depuis l'état %(linkto)s"
+
+msgid "creating Transition (Transition destination_state State %(linkto)s)"
+msgstr "création d'une transition vers l'état %(linkto)s"
+
+msgid "creating Transition (Transition transition_of Workflow %(linkto)s)"
+msgstr "création d'une transition du workflow %(linkto)s"
+
+msgid ""
+"creating WorkflowTransition (State %(linkto)s allowed_transition "
+"WorkflowTransition)"
+msgstr "création d'une transition workflow autorisée depuis l'état %(linkto)s"
+
+msgid ""
+"creating WorkflowTransition (WorkflowTransition transition_of Workflow "
+"%(linkto)s)"
+msgstr "création d'une transition workflow du workflow %(linkto)s"
+
+msgid "creation"
+msgstr "création"
+
+msgid "creation date"
+msgstr "date de création"
+
+msgid "creation time of an entity"
+msgstr "date de création d'une entité"
+
+msgid "creation_date"
+msgstr "date de création"
+
+msgid "cstrtype"
+msgstr "type de contrainte"
+
+msgctxt "CWConstraint"
+msgid "cstrtype"
+msgstr "type"
+
+msgid "cstrtype_object"
+msgstr "utilisé par"
+
+msgctxt "CWConstraintType"
+msgid "cstrtype_object"
+msgstr "type des contraintes"
+
+msgid "csv export"
+msgstr "export CSV"
+
+msgid "csv export (entities)"
+msgstr "export CSV (entités)"
+
+msgid "ctxcomponents"
+msgstr "composants contextuels"
+
+msgid "ctxcomponents_anonuserlink"
+msgstr "lien utilisateur"
+
+msgid "ctxcomponents_anonuserlink_description"
+msgstr ""
+"affiche un lien vers le formulaire d'authentification pour les utilisateurs "
+"anonymes, sinon une boite contenant notamment des liens propres à "
+"l'utilisateur connecté"
+
+msgid "ctxcomponents_appliname"
+msgstr "titre de l'application"
+
+msgid "ctxcomponents_appliname_description"
+msgstr "affiche le titre de l'application dans l'en-tête de page"
+
+msgid "ctxcomponents_bookmarks_box"
+msgstr "boîte signets"
+
+msgid "ctxcomponents_bookmarks_box_description"
+msgstr "boîte contenant les signets de l'utilisateur"
+
+msgid "ctxcomponents_breadcrumbs"
+msgstr "fil d'ariane"
+
+msgid "ctxcomponents_breadcrumbs_description"
+msgstr ""
+"affiche un chemin permettant de localiser la page courante dans le site"
+
+msgid "ctxcomponents_download_box"
+msgstr "boîte de téléchargement"
+
+msgid "ctxcomponents_download_box_description"
+msgstr "boîte contenant un lien permettant de télécharger la ressource"
+
+msgid "ctxcomponents_edit_box"
+msgstr "boîte d'actions"
+
+msgid "ctxcomponents_edit_box_description"
+msgstr ""
+"boîte affichant les différentes actions possibles sur les données affichées"
+
+msgid "ctxcomponents_facet.filterbox"
+msgstr "boîte à facettes"
+
+msgid "ctxcomponents_facet.filterbox_description"
+msgstr ""
+"boîte permettant de filtrer parmi les résultats d'une recherche à l'aide de "
+"facettes"
+
+msgid "ctxcomponents_logo"
+msgstr "logo"
+
+msgid "ctxcomponents_logo_description"
+msgstr "le logo de l'application, dans l'en-tête de page"
+
+msgid "ctxcomponents_metadata"
+msgstr "méta-données de l'entité"
+
+msgid "ctxcomponents_metadata_description"
+msgstr ""
+
+msgid "ctxcomponents_possible_views_box"
+msgstr "boîte des vues possibles"
+
+msgid "ctxcomponents_possible_views_box_description"
+msgstr "boîte affichant les vues possibles pour les données courantes"
+
+msgid "ctxcomponents_prevnext"
+msgstr "élément précedent / suivant"
+
+msgid "ctxcomponents_prevnext_description"
+msgstr ""
+"affiche des liens permettant de passer d'une entité à une autre sur les "
+"entités implémentant l'interface \"précédent/suivant\"."
+
+msgid "ctxcomponents_rss"
+msgstr "icône RSS"
+
+msgid "ctxcomponents_rss_description"
+msgstr "l'icône RSS permettant de récupérer la vue RSS des données affichées"
+
+msgid "ctxcomponents_search_box"
+msgstr "boîte de recherche"
+
+msgid "ctxcomponents_search_box_description"
+msgstr "boîte avec un champ de recherche simple"
+
+msgid "ctxcomponents_startup_views_box"
+msgstr "boîte des vues de départs"
+
+msgid "ctxcomponents_startup_views_box_description"
+msgstr "boîte affichant les vues de départs de l'application"
+
+msgid "ctxcomponents_userstatus"
+msgstr "état de l'utilisateur"
+
+msgid "ctxcomponents_userstatus_description"
+msgstr ""
+
+msgid "ctxcomponents_wfhistory"
+msgstr "historique du workflow."
+
+msgid "ctxcomponents_wfhistory_description"
+msgstr ""
+"section affichant l'historique du workflow pour les entités ayant un "
+"workflow."
+
+msgid "ctxtoolbar"
+msgstr "barre d'outils"
+
+msgid "custom_workflow"
+msgstr "workflow spécifique"
+
+msgid "custom_workflow_object"
+msgstr "workflow de"
+
+msgid "cw.groups-management"
+msgstr "groupes"
+
+msgid "cw.users-management"
+msgstr "utilisateurs"
+
+msgid "cw_for_source"
+msgstr "source"
+
+msgctxt "CWSourceSchemaConfig"
+msgid "cw_for_source"
+msgstr "source"
+
+msgid "cw_for_source_object"
+msgstr "élément de mapping"
+
+msgctxt "CWSource"
+msgid "cw_for_source_object"
+msgstr "élément de mapping"
+
+msgid "cw_host_config_of"
+msgstr "configuration d'hôte de"
+
+msgctxt "CWSourceHostConfig"
+msgid "cw_host_config_of"
+msgstr "configuration d'hôte de"
+
+msgid "cw_host_config_of_object"
+msgstr "a pour configuration d'hôte"
+
+msgctxt "CWSource"
+msgid "cw_host_config_of_object"
+msgstr "a pour configuration d'hôte"
+
+msgid "cw_import_of"
+msgstr "source"
+
+msgctxt "CWDataImport"
+msgid "cw_import_of"
+msgstr "source"
+
+msgid "cw_import_of_object"
+msgstr "imports"
+
+msgctxt "CWSource"
+msgid "cw_import_of_object"
+msgstr "imports"
+
+msgid "cw_schema"
+msgstr "schéma"
+
+msgctxt "CWSourceSchemaConfig"
+msgid "cw_schema"
+msgstr "schéma"
+
+msgid "cw_schema_object"
+msgstr "mappé par"
+
+msgctxt "CWEType"
+msgid "cw_schema_object"
+msgstr "mappé par"
+
+msgctxt "CWRType"
+msgid "cw_schema_object"
+msgstr "mappé par"
+
+msgctxt "CWRelation"
+msgid "cw_schema_object"
+msgstr "mappé par"
+
+msgid "cw_source"
+msgstr "source"
+
+msgid "cw_source_object"
+msgstr "entités"
+
+msgid "cwetype-box"
+msgstr "vue \"boîte\""
+
+msgid "cwetype-description"
+msgstr "description"
+
+msgid "cwetype-permissions"
+msgstr "permissions"
+
+msgid "cwetype-views"
+msgstr "vues"
+
+msgid "cwetype-workflow"
+msgstr "workflow"
+
+msgid "cwgroup-main"
+msgstr "description"
+
+msgid "cwgroup-permissions"
+msgstr "permissions"
+
+msgid "cwrtype-description"
+msgstr "description"
+
+msgid "cwrtype-permissions"
+msgstr "permissions"
+
+msgid "cwsource-imports"
+msgstr "imports"
+
+msgid "cwsource-main"
+msgstr "description"
+
+msgid "cwsource-mapping"
+msgstr "mapping"
+
+msgid "cwuri"
+msgstr "uri interne"
+
+msgid "data directory url"
+msgstr "url du répertoire de données"
+
+msgid "data model schema"
+msgstr "schéma du modèle de données"
+
+msgid "data sources"
+msgstr "sources de données"
+
+msgid "data sources management"
+msgstr "gestion des sources de données"
+
+msgid "date"
+msgstr "date"
+
+msgid "deactivate"
+msgstr "désactiver"
+
+msgid "deactivated"
+msgstr "désactivé"
+
+msgid "december"
+msgstr "décembre"
+
+msgid "default"
+msgstr "valeur par défaut"
+
+msgid "default text format for rich text fields."
+msgstr "format de texte par défaut pour les champs textes"
+
+msgid "default user workflow"
+msgstr "workflow par défaut des utilisateurs"
+
+msgid "default value"
+msgstr "valeur par défaut"
+
+msgid "default value as gziped pickled python object"
+msgstr "valeur par défaut, sous forme d'objet python picklé zippé"
+
+msgid "default workflow for an entity type"
+msgstr "workflow par défaut pour un type d'entité"
+
+msgid "default_workflow"
+msgstr "workflow par défaut"
+
+msgctxt "CWEType"
+msgid "default_workflow"
+msgstr "workflow par défaut"
+
+msgid "default_workflow_object"
+msgstr "workflow par défaut de"
+
+msgctxt "Workflow"
+msgid "default_workflow_object"
+msgstr "workflow par défaut de"
+
+msgid "defaultval"
+msgstr "valeur par défaut"
+
+msgctxt "CWAttribute"
+msgid "defaultval"
+msgstr "valeur par défaut"
+
+msgid "define a CubicWeb user"
+msgstr "définit un utilisateur CubicWeb"
+
+msgid "define a CubicWeb users group"
+msgstr "définit un groupe d'utilisateurs CubicWeb"
+
+msgid ""
+"define a final relation: link a final relation type from a non final entity "
+"to a final entity type. used to build the instance schema"
+msgstr ""
+"définit une relation 'attribut', utilisé pour construire le schéma de "
+"l'instance"
+
+msgid ""
+"define a non final relation: link a non final relation type from a non final "
+"entity to a non final entity type. used to build the instance schema"
+msgstr ""
+"définit une relation non finale : lie un type de relation non finale depuis "
+"une entité vers un type d'entité non final. Utilisé pour construire le "
+"schéma de l'instance"
+
+msgid "define a relation type, used to build the instance schema"
+msgstr "définit un type de relation"
+
+msgid "define a rql expression used to define permissions"
+msgstr "définit une expression rql donnant une permission"
+
+msgid "define a schema constraint"
+msgstr "définit une contrainte de schema"
+
+msgid "define a schema constraint type"
+msgstr "définit un type de contrainte de schema"
+
+msgid "define a virtual relation type, used to build the instance schema"
+msgstr "définit une relation virtuelle"
+
+msgid "define an entity type, used to build the instance schema"
+msgstr "définit un type d'entité"
+
+msgid "define how we get out from a sub-workflow"
+msgstr "définit comment sortir d'un sous-workflow"
+
+msgid "defines a sql-level multicolumn unique index"
+msgstr "définit un index SQL unique sur plusieurs colonnes"
+
+msgid ""
+"defines what's the property is applied for. You must select this first to be "
+"able to set value"
+msgstr ""
+"définit à quoi la propriété est appliquée. Vous devez sélectionner cela "
+"avant de pouvoir fixer une valeur"
+
+msgid "delete"
+msgstr "supprimer"
+
+msgid "delete this bookmark"
+msgstr "supprimer ce signet"
+
+msgid "delete this relation"
+msgstr "supprimer cette relation"
+
+msgid "delete_permission"
+msgstr "permission de supprimer"
+
+msgctxt "CWEType"
+msgid "delete_permission"
+msgstr "permission de supprimer"
+
+msgctxt "CWRelation"
+msgid "delete_permission"
+msgstr "permission de supprimer"
+
+msgid "delete_permission_object"
+msgstr "a la permission de supprimer"
+
+msgctxt "CWGroup"
+msgid "delete_permission_object"
+msgstr "peut supprimer"
+
+msgctxt "RQLExpression"
+msgid "delete_permission_object"
+msgstr "peut supprimer"
+
+#, python-format
+msgid "deleted %(etype)s #%(eid)s (%(title)s)"
+msgstr "suppression de l'entité %(etype)s #%(eid)s (%(title)s)"
+
+#, python-format
+msgid ""
+"deleted relation %(rtype)s from %(frometype)s #%(eidfrom)s to %(toetype)s #"
+"%(eidto)s"
+msgstr ""
+"relation %(rtype)s de %(frometype)s #%(eidfrom)s vers %(toetype)s #%(eidto)s "
+"supprimée"
+
+msgid "depends on the constraint type"
+msgstr "dépend du type de contrainte"
+
+msgid "description"
+msgstr "description"
+
+msgctxt "BaseTransition"
+msgid "description"
+msgstr "description"
+
+msgctxt "CWAttribute"
+msgid "description"
+msgstr "description"
+
+msgctxt "CWComputedRType"
+msgid "description"
+msgstr "description"
+
+msgctxt "CWEType"
+msgid "description"
+msgstr "description"
+
+msgctxt "CWRType"
+msgid "description"
+msgstr "description"
+
+msgctxt "CWRelation"
+msgid "description"
+msgstr "description"
+
+msgctxt "State"
+msgid "description"
+msgstr "description"
+
+msgctxt "Transition"
+msgid "description"
+msgstr "description"
+
+msgctxt "Workflow"
+msgid "description"
+msgstr "description"
+
+msgctxt "WorkflowTransition"
+msgid "description"
+msgstr "description"
+
+msgid "description_format"
+msgstr "format"
+
+msgctxt "BaseTransition"
+msgid "description_format"
+msgstr "format"
+
+msgctxt "CWAttribute"
+msgid "description_format"
+msgstr "format"
+
+msgctxt "CWComputedRType"
+msgid "description_format"
+msgstr "format"
+
+msgctxt "CWEType"
+msgid "description_format"
+msgstr "format"
+
+msgctxt "CWRType"
+msgid "description_format"
+msgstr "format"
+
+msgctxt "CWRelation"
+msgid "description_format"
+msgstr "format"
+
+msgctxt "State"
+msgid "description_format"
+msgstr "format"
+
+msgctxt "Transition"
+msgid "description_format"
+msgstr "format"
+
+msgctxt "Workflow"
+msgid "description_format"
+msgstr "format"
+
+msgctxt "WorkflowTransition"
+msgid "description_format"
+msgstr "format"
+
+msgid "destination state for this transition"
+msgstr "états accessibles par cette transition"
+
+msgid "destination state must be in the same workflow as our parent transition"
+msgstr ""
+"l'état de destination doit être dans le même workflow que la transition "
+"parente"
+
+msgid "destination state of a transition"
+msgstr "état d'arrivée d'une transition"
+
+msgid ""
+"destination state. No destination state means that transition should go back "
+"to the state from which we've entered the subworkflow."
+msgstr ""
+"état de destination de la transition. Si aucun état de destination n'est "
+"spécifié, la transition ira vers l'état depuis lequel l'entité est entrée "
+"dans le sous-workflow."
+
+msgid "destination_state"
+msgstr "état de destination"
+
+msgctxt "SubWorkflowExitPoint"
+msgid "destination_state"
+msgstr "état de destination"
+
+msgctxt "Transition"
+msgid "destination_state"
+msgstr "état de destination"
+
+msgid "destination_state_object"
+msgstr "destination de"
+
+msgctxt "State"
+msgid "destination_state_object"
+msgstr "état final de"
+
+msgid "detach attached file"
+msgstr "détacher le fichier existant"
+
+msgid "display order of the box"
+msgstr "ordre d'affichage de la boîte"
+
+msgid "display order of the component"
+msgstr "ordre d'affichage du composant"
+
+msgid "display order of the facet"
+msgstr "ordre d'affichage de la facette"
+
+msgid "display the box or not"
+msgstr "afficher la boîte ou non"
+
+msgid "display the component or not"
+msgstr "afficher le composant ou non"
+
+msgid "display the facet or not"
+msgstr "afficher la facette ou non"
+
+msgid "download"
+msgstr "télécharger"
+
+#, python-format
+msgid "download %s"
+msgstr "télécharger %s"
+
+msgid "download icon"
+msgstr "icône de téléchargement"
+
+msgid "download schema as owl"
+msgstr "télécharger le schéma OWL"
+
+msgid "edit bookmarks"
+msgstr "éditer les signets"
+
+msgid "edit canceled"
+msgstr "édition annulée"
+
+msgid "editable-table"
+msgstr "table éditable"
+
+msgid "eid"
+msgstr "eid"
+
+msgid "embedded html"
+msgstr "HTML contenu"
+
+msgid "end_timestamp"
+msgstr "horodate de fin"
+
+msgctxt "CWDataImport"
+msgid "end_timestamp"
+msgstr "horodate de fin"
+
+msgid "entities deleted"
+msgstr "entités supprimées"
+
+msgid "entity and relation types can't be mapped, only attributes or relations"
+msgstr ""
+"les types d'entités et de relations ne peuvent être mappés, uniquement les "
+"relations"
+
+msgid "entity copied"
+msgstr "entité copiée"
+
+msgid "entity created"
+msgstr "entité créée"
+
+msgid "entity creation"
+msgstr "création d'entité"
+
+msgid "entity deleted"
+msgstr "entité supprimée"
+
+msgid "entity deletion"
+msgstr "suppression d'entité"
+
+msgid "entity edited"
+msgstr "entité éditée"
+
+msgid "entity has no workflow set"
+msgstr "l'entité n'a pas de workflow"
+
+msgid "entity linked"
+msgstr "entité liée"
+
+msgid "entity type"
+msgstr "type d'entité"
+
+msgid "entity types which may use this workflow"
+msgstr "types d'entité pouvant utiliser ce workflow"
+
+msgid "entity update"
+msgstr "mise à jour d'entité"
+
+msgid "entityview"
+msgstr "vues d'entité"
+
+msgid "error"
+msgstr "erreur"
+
+msgid "error while publishing ReST text"
+msgstr ""
+"une erreur s'est produite lors de l'interprétation du texte au format ReST"
+
+msgid "exit state must be a subworkflow state"
+msgstr "l'état de sortie doit être un état du sous-workflow"
+
+msgid "exit_point"
+msgstr "état de sortie"
+
+msgid "exit_point_object"
+msgstr "état de sortie de"
+
+#, python-format
+msgid "exiting from subworkflow %s"
+msgstr "sortie du sous-workflow %s"
+
+msgid "expression"
+msgstr "expression"
+
+msgctxt "RQLExpression"
+msgid "expression"
+msgstr "rql de l'expression"
+
+msgid "exprtype"
+msgstr "type de l'expression"
+
+msgctxt "RQLExpression"
+msgid "exprtype"
+msgstr "type"
+
+msgid "extra_props"
+msgstr ""
+
+msgctxt "CWAttribute"
+msgid "extra_props"
+msgstr "propriétés additionnelles"
+
+msgid "facet-loading-msg"
+msgstr "en cours de traitement, merci de patienter"
+
+msgid "facet.filters"
+msgstr "facettes"
+
+msgid "facetbox"
+msgstr "boîte à facettes"
+
+msgid "facets_created_by-facet"
+msgstr "facette \"créé par\""
+
+msgid "facets_created_by-facet_description"
+msgstr ""
+
+msgid "facets_cw_source-facet"
+msgstr "facette \"source de données\""
+
+msgid "facets_cw_source-facet_description"
+msgstr ""
+
+msgid "facets_cwfinal-facet"
+msgstr "facette \"type d'entité ou de relation final\""
+
+msgid "facets_cwfinal-facet_description"
+msgstr ""
+
+msgid "facets_datafeed.dataimport.status"
+msgstr "état de l'iport"
+
+msgid "facets_datafeed.dataimport.status_description"
+msgstr ""
+
+msgid "facets_etype-facet"
+msgstr "facette \"est de type\""
+
+msgid "facets_etype-facet_description"
+msgstr ""
+
+msgid "facets_has_text-facet"
+msgstr "facette \"contient le texte\""
+
+msgid "facets_has_text-facet_description"
+msgstr ""
+
+msgid "facets_in_group-facet"
+msgstr "facette \"fait partie du groupe\""
+
+msgid "facets_in_group-facet_description"
+msgstr ""
+
+msgid "facets_in_state-facet"
+msgstr "facette \"dans l'état\""
+
+msgid "facets_in_state-facet_description"
+msgstr ""
+
+msgid "failed"
+msgstr "échec"
+
+#, python-format
+msgid "failed to uniquify path (%s, %s)"
+msgstr "ne peut obtenir un nom de fichier unique (%s, %s)"
+
+msgid "february"
+msgstr "février"
+
+msgid "file tree view"
+msgstr "arborescence (fichiers)"
+
+msgid "final"
+msgstr "final"
+
+msgctxt "CWEType"
+msgid "final"
+msgstr "final"
+
+msgctxt "CWRType"
+msgid "final"
+msgstr "final"
+
+msgid "first name"
+msgstr "prénom"
+
+msgid "firstname"
+msgstr "prénom"
+
+msgctxt "CWUser"
+msgid "firstname"
+msgstr "prénom"
+
+msgid "foaf"
+msgstr "foaf"
+
+msgid "focus on this selection"
+msgstr "afficher cette sélection"
+
+msgid "follow"
+msgstr "suivre le lien"
+
+#, python-format
+msgid "follow this link for more information on this %s"
+msgstr "suivez ce lien pour plus d'information sur ce %s"
+
+msgid "for_user"
+msgstr "pour l'utilisateur"
+
+msgctxt "CWProperty"
+msgid "for_user"
+msgstr "propriété de l'utilisateur"
+
+msgid "for_user_object"
+msgstr "utilise les propriétés"
+
+msgctxt "CWUser"
+msgid "for_user_object"
+msgstr "a pour préférence"
+
+msgid "formula"
+msgstr "formule"
+
+msgctxt "CWAttribute"
+msgid "formula"
+msgstr "formule"
+
+msgid "friday"
+msgstr "vendredi"
+
+msgid "from"
+msgstr "de"
+
+#, python-format
+msgid "from %(date)s"
+msgstr "du %(date)s"
+
+msgid "from_entity"
+msgstr "de l'entité"
+
+msgctxt "CWAttribute"
+msgid "from_entity"
+msgstr "attribut de l'entité"
+
+msgctxt "CWRelation"
+msgid "from_entity"
+msgstr "relation de l'entité"
+
+msgid "from_entity_object"
+msgstr "relation sujet"
+
+msgctxt "CWEType"
+msgid "from_entity_object"
+msgstr "entité de"
+
+msgid "from_interval_start"
+msgstr "De"
+
+msgid "from_state"
+msgstr "de l'état"
+
+msgctxt "TrInfo"
+msgid "from_state"
+msgstr "état de départ"
+
+msgid "from_state_object"
+msgstr "transitions depuis cet état"
+
+msgctxt "State"
+msgid "from_state_object"
+msgstr "état de départ de"
+
+msgid "full text or RQL query"
+msgstr "texte à rechercher ou requête RQL"
+
+msgid "fulltext_container"
+msgstr "conteneur du texte indexé"
+
+msgctxt "CWRType"
+msgid "fulltext_container"
+msgstr "objet à indexer"
+
+msgid "fulltextindexed"
+msgstr "indexation du texte"
+
+msgctxt "CWAttribute"
+msgid "fulltextindexed"
+msgstr "texte indexé"
+
+msgid "gc"
+msgstr "fuite mémoire"
+
+msgid "generic plot"
+msgstr "tracé de courbes standard"
+
+msgid "generic relation to link one entity to another"
+msgstr "relation générique pour lier une entité à une autre"
+
+msgid ""
+"generic relation to specify that an external entity represent the same "
+"object as a local one: http://www.w3.org/TR/owl-ref/#sameAs-def"
+msgstr ""
+"relation générique permettant d'indiquer qu'une entité est identique à une "
+"autre ressource web (voir http://www.w3.org/TR/owl-ref/#sameAs-def)."
+
+msgid "granted to groups"
+msgstr "accordée aux groupes"
+
+#, python-format
+msgid "graphical representation of %(appid)s data model"
+msgstr "représentation graphique du modèle de données de %(appid)s"
+
+#, python-format
+msgid ""
+"graphical representation of the %(etype)s entity type from %(appid)s data "
+"model"
+msgstr ""
+"représentation graphique du modèle de données pour le type d'entité "
+"%(etype)s de %(appid)s"
+
+#, python-format
+msgid ""
+"graphical representation of the %(rtype)s relation type from %(appid)s data "
+"model"
+msgstr ""
+"représentation graphique du modèle de données pour le type de relation "
+"%(rtype)s de %(appid)s"
+
+msgid "group in which a user should be to be allowed to pass this transition"
+msgstr ""
+"groupe dans lequel l'utilisateur doit être pour pouvoir passer la transition"
+
+msgid "groups"
+msgstr "groupes"
+
+msgid "groups allowed to add entities/relations of this type"
+msgstr "groupes autorisés à ajouter des entités/relations de ce type"
+
+msgid "groups allowed to delete entities/relations of this type"
+msgstr "groupes autorisés à supprimer des entités/relations de ce type"
+
+msgid "groups allowed to read entities/relations of this type"
+msgstr "groupes autorisés à lire des entités/relations de ce type"
+
+msgid "groups allowed to update entities/relations of this type"
+msgstr "groupes autorisés à mettre à jour des entités/relations de ce type"
+
+msgid "groups grant permissions to the user"
+msgstr "les groupes donnent des permissions à l'utilisateur"
+
+msgid "guests"
+msgstr "invités"
+
+msgid "hCalendar"
+msgstr "hCalendar"
+
+msgid "has_text"
+msgstr "contient le texte"
+
+msgid "header-center"
+msgstr "en-tête (centre)"
+
+msgid "header-left"
+msgstr "en-tête (gauche)"
+
+msgid "header-right"
+msgstr "en-tête (droite)"
+
+msgid "hide filter form"
+msgstr "cacher le filtre"
+
+msgid ""
+"how to format date and time in the ui (see this page for format "
+"description)"
+msgstr ""
+"comment formater l'horodate dans l'interface (description du "
+"format)"
+
+msgid ""
+"how to format date in the ui (see this page for format "
+"description)"
+msgstr ""
+"comment formater la date dans l'interface (description du format)"
+
+msgid "how to format float numbers in the ui"
+msgstr "comment formater les nombres flottants dans l'interface"
+
+msgid ""
+"how to format time in the ui (see this page for format "
+"description)"
+msgstr ""
+"comment formater l'heure dans l'interface (description du format)"
+
+msgid "i18n_bookmark_url_fqs"
+msgstr "paramètres"
+
+msgid "i18n_bookmark_url_path"
+msgstr "chemin"
+
+msgid "i18n_login_popup"
+msgstr "s'identifier"
+
+msgid "i18ncard_*"
+msgstr "0..n"
+
+msgid "i18ncard_+"
+msgstr "1..n"
+
+msgid "i18ncard_1"
+msgstr "1"
+
+msgid "i18ncard_?"
+msgstr "0..1"
+
+msgid "i18nprevnext_next"
+msgstr "suivant"
+
+msgid "i18nprevnext_previous"
+msgstr "précédent"
+
+msgid "i18nprevnext_up"
+msgstr "parent"
+
+msgid "iCalendar"
+msgstr "iCalendar"
+
+msgid "id of main template used to render pages"
+msgstr "id du template principal"
+
+msgid "identical to"
+msgstr "identique à"
+
+msgid "identical_to"
+msgstr "identique à"
+
+msgid "identity"
+msgstr "est identique à"
+
+msgid "identity_object"
+msgstr "est identique à"
+
+msgid ""
+"if full text content of subject/object entity should be added to other side "
+"entity (the container)."
+msgstr ""
+"si le text indexé de l'entité sujet/objet doit être ajouté à l'entité à "
+"l'autre extrémité de la relation (le conteneur)."
+
+msgid "image"
+msgstr "image"
+
+msgid "in progress"
+msgstr "en cours"
+
+msgid "in_group"
+msgstr "dans le groupe"
+
+msgctxt "CWUser"
+msgid "in_group"
+msgstr "fait partie du groupe"
+
+msgid "in_group_object"
+msgstr "membres"
+
+msgctxt "CWGroup"
+msgid "in_group_object"
+msgstr "contient les utilisateurs"
+
+msgid "in_state"
+msgstr "état"
+
+msgid "in_state_object"
+msgstr "état de"
+
+msgid "in_synchronization"
+msgstr "en cours de synchronisation"
+
+msgctxt "CWSource"
+msgid "in_synchronization"
+msgstr "en cours de synchronisation"
+
+msgid "incontext"
+msgstr "dans le contexte"
+
+msgid "incorrect captcha value"
+msgstr "valeur de captcha incorrecte"
+
+#, python-format
+msgid "incorrect value (%(KEY-value)r) for type \"%(KEY-type)s\""
+msgstr "la valeur %(KEY-value)s est incorrecte pour le type \"%(KEY-type)s\""
+
+msgid "index this attribute's value in the plain text index"
+msgstr "indexer la valeur de cet attribut dans l'index plein texte"
+
+msgid "indexed"
+msgstr "index"
+
+msgctxt "CWAttribute"
+msgid "indexed"
+msgstr "indexé"
+
+msgid "indicate the current state of an entity"
+msgstr "indique l'état courant d'une entité"
+
+msgid ""
+"indicate which state should be used by default when an entity using states "
+"is created"
+msgstr ""
+"indique quel état devrait être utilisé par défaut lorsqu'une entité est créée"
+
+msgid "indifferent"
+msgstr "indifférent"
+
+msgid "info"
+msgstr "information"
+
+msgid "initial state for this workflow"
+msgstr "état initial pour ce workflow"
+
+msgid "initial_state"
+msgstr "état initial"
+
+msgctxt "Workflow"
+msgid "initial_state"
+msgstr "état initial"
+
+msgid "initial_state_object"
+msgstr "état initial de"
+
+msgctxt "State"
+msgid "initial_state_object"
+msgstr "état initial de"
+
+msgid "inlined"
+msgstr "mise en ligne"
+
+msgctxt "CWRType"
+msgid "inlined"
+msgstr "mise en ligne"
+
+msgid "instance home"
+msgstr "répertoire de l'instance"
+
+msgid "internal entity uri"
+msgstr "uri interne"
+
+msgid "internationalizable"
+msgstr "internationalisable"
+
+msgctxt "CWAttribute"
+msgid "internationalizable"
+msgstr "internationalisable"
+
+#, python-format
+msgid "invalid action %r"
+msgstr "action %r invalide"
+
+#, python-format
+msgid "invalid value %(KEY-value)s, it must be one of %(KEY-choices)s"
+msgstr ""
+"la valeur %(KEY-value)s est incorrecte, elle doit être parmi %(KEY-choices)s"
+
+msgid "is"
+msgstr "de type"
+
+msgid "is object of:"
+msgstr "est objet de :"
+
+msgid "is subject of:"
+msgstr "est sujet de :"
+
+msgid ""
+"is the subject/object entity of the relation composed of the other ? This "
+"implies that when the composite is deleted, composants are also deleted."
+msgstr ""
+"Est-ce que l'entité sujet/objet de la relation est une agrégation de "
+"l'autre ? Si c'est le cas, détruire le composite détruira ses composants "
+"également"
+
+msgid "is this attribute's value translatable"
+msgstr "est-ce que la valeur de cet attribut est traduisible ?"
+
+msgid "is this relation equivalent in both direction ?"
+msgstr "est-ce que cette relation est équivalente dans les deux sens ?"
+
+msgid ""
+"is this relation physically inlined? you should know what you're doing if "
+"you are changing this!"
+msgstr ""
+"est-ce que cette relation est mise en ligne dans la base de données ? vous "
+"devez savoir ce que vous faites si vous changez cela !"
+
+msgid "is_instance_of"
+msgstr "est une instance de"
+
+msgid "is_instance_of_object"
+msgstr "type de"
+
+msgid "is_object"
+msgstr "a pour instance"
+
+msgid "january"
+msgstr "janvier"
+
+msgid "json-entities-export-view"
+msgstr "export JSON (entités)"
+
+msgid "json-export-view"
+msgstr "export JSON"
+
+msgid "july"
+msgstr "juillet"
+
+msgid "june"
+msgstr "juin"
+
+msgid "language of the user interface"
+msgstr "langue pour l'interface utilisateur"
+
+msgid "last connection date"
+msgstr "dernière date de connexion"
+
+msgid "last login time"
+msgstr "dernière date de connexion"
+
+msgid "last name"
+msgstr "nom"
+
+msgid "last usage"
+msgstr "dernier usage"
+
+msgid "last_login_time"
+msgstr "dernière date de connexion"
+
+msgctxt "CWUser"
+msgid "last_login_time"
+msgstr "dernière date de connexion"
+
+msgid "latest import"
+msgstr "dernier import"
+
+msgid "latest modification time of an entity"
+msgstr "date de dernière modification d'une entité"
+
+msgid "latest synchronization time"
+msgstr "date de la dernière synchronisation"
+
+msgid "latest update on"
+msgstr "dernière mise à jour"
+
+msgid "latest_retrieval"
+msgstr "dernière synchronisation"
+
+msgctxt "CWSource"
+msgid "latest_retrieval"
+msgstr "date de la dernière synchronisation de la source."
+
+msgid "left"
+msgstr "gauche"
+
+msgid "line"
+msgstr "ligne"
+
+msgid ""
+"link a property to the user which want this property customization. Unless "
+"you're a site manager, this relation will be handled automatically."
+msgstr ""
+"lie une propriété à l'utilisateur désirant cette personnalisation. A moins "
+"que vous ne soyez gestionnaire du site, cette relation est gérée "
+"automatiquement."
+
+msgid "link a relation definition to its object entity type"
+msgstr "lie une définition de relation à son type d'entité objet"
+
+msgid "link a relation definition to its relation type"
+msgstr "lie une définition de relation à son type d'entité"
+
+msgid "link a relation definition to its subject entity type"
+msgstr "lie une définition de relation à son type d'entité sujet"
+
+msgid "link a state to one or more workflow"
+msgstr "lie un état à un ou plusieurs workflow"
+
+msgid "link a transition information to its object"
+msgstr "lié une enregistrement de transition vers l'objet associé"
+
+msgid "link a transition to one or more workflow"
+msgstr "lie une transition à un ou plusieurs workflow"
+
+msgid "link a workflow to one or more entity type"
+msgstr "lie un workflow à un ou plusieurs types d'entité"
+
+msgid "list"
+msgstr "liste"
+
+msgid "log"
+msgstr "journal"
+
+msgctxt "CWDataImport"
+msgid "log"
+msgstr "journal"
+
+msgid "log in"
+msgstr "s'identifier"
+
+msgid "login"
+msgstr "identifiant"
+
+msgctxt "CWUser"
+msgid "login"
+msgstr "identifiant"
+
+msgid "login / password"
+msgstr "identifiant / mot de passe"
+
+msgid "login or email"
+msgstr "identifiant ou email"
+
+msgid "login_action"
+msgstr "identifiez-vous"
+
+msgid "logout"
+msgstr "se déconnecter"
+
+#, python-format
+msgid "loop in %(rel)s relation (%(eid)s)"
+msgstr "boucle détectée en parcourant la relation %(rel)s de l'entité #%(eid)s"
+
+msgid "main informations"
+msgstr "Informations générales"
+
+msgid "main_tab"
+msgstr "description"
+
+msgid "mainvars"
+msgstr "variables principales"
+
+msgctxt "RQLExpression"
+msgid "mainvars"
+msgstr "variables principales"
+
+msgid "manage"
+msgstr "gestion du site"
+
+msgid "manage bookmarks"
+msgstr "gérer les signets"
+
+msgid "manage permissions"
+msgstr "gestion des permissions"
+
+msgid "managers"
+msgstr "administrateurs"
+
+msgid "mandatory relation"
+msgstr "relation obligatoire"
+
+msgid "march"
+msgstr "mars"
+
+msgid "match_host"
+msgstr "pour l'hôte"
+
+msgctxt "CWSourceHostConfig"
+msgid "match_host"
+msgstr "pour l'hôte"
+
+msgid "maximum number of characters in short description"
+msgstr "nombre maximum de caractères dans les descriptions courtes"
+
+msgid "maximum number of entities to display in related combo box"
+msgstr "nombre maximum d'entités à afficher dans les listes déroulantes"
+
+msgid "maximum number of objects displayed by page of results"
+msgstr "nombre maximum d'entités affichées par pages"
+
+msgid "maximum number of related entities to display in the primary view"
+msgstr "nombre maximum d'entités liées à afficher dans la vue primaire"
+
+msgid "may"
+msgstr "mai"
+
+msgid "memory leak debugging"
+msgstr "Déboguage des fuites de mémoire"
+
+msgid "message"
+msgstr "message"
+
+#, python-format
+msgid "missing parameters for entity %s"
+msgstr "paramètres manquants pour l'entité %s"
+
+msgid "modification"
+msgstr "modification"
+
+msgid "modification_date"
+msgstr "date de modification"
+
+msgid "modify"
+msgstr "modifier"
+
+msgid "monday"
+msgstr "lundi"
+
+msgid "more actions"
+msgstr "plus d'actions"
+
+msgid "more info about this workflow"
+msgstr "plus d'information sur ce workflow"
+
+msgid "multiple edit"
+msgstr "édition multiple"
+
+msgid "my custom search"
+msgstr "ma recherche personnalisée"
+
+msgid "name"
+msgstr "nom"
+
+msgctxt "BaseTransition"
+msgid "name"
+msgstr "nom"
+
+msgctxt "CWCache"
+msgid "name"
+msgstr "nom"
+
+msgctxt "CWComputedRType"
+msgid "name"
+msgstr "nom"
+
+msgctxt "CWConstraintType"
+msgid "name"
+msgstr "nom"
+
+msgctxt "CWEType"
+msgid "name"
+msgstr "nom"
+
+msgctxt "CWGroup"
+msgid "name"
+msgstr "nom"
+
+msgctxt "CWRType"
+msgid "name"
+msgstr "nom"
+
+msgctxt "CWSource"
+msgid "name"
+msgstr "nom"
+
+msgctxt "CWUniqueTogetherConstraint"
+msgid "name"
+msgstr "nom"
+
+msgctxt "State"
+msgid "name"
+msgstr "nom"
+
+msgctxt "Transition"
+msgid "name"
+msgstr "nom"
+
+msgctxt "Workflow"
+msgid "name"
+msgstr "nom"
+
+msgctxt "WorkflowTransition"
+msgid "name"
+msgstr "nom"
+
+msgid "name of the cache"
+msgstr "nom du cache applicatif"
+
+msgid ""
+"name of the main variables which should be used in the selection if "
+"necessary (comma separated)"
+msgstr ""
+"nom des variables principales qui devrait être utilisées dans la sélection "
+"si nécessaire (les séparer par des virgules)"
+
+msgid "name of the source"
+msgstr "nom de la source"
+
+msgid "navbottom"
+msgstr "bas de page"
+
+msgid "navcontentbottom"
+msgstr "bas de page du contenu principal"
+
+msgid "navcontenttop"
+msgstr "haut de page"
+
+msgid "navigation"
+msgstr "navigation"
+
+msgid "navigation.combobox-limit"
+msgstr "nombre d'entités dans les listes déroulantes"
+
+msgid "navigation.page-size"
+msgstr "nombre de résultats"
+
+msgid "navigation.related-limit"
+msgstr "nombre d'entités dans la vue primaire"
+
+msgid "navigation.short-line-size"
+msgstr "taille des descriptions courtes"
+
+msgid "navtop"
+msgstr "haut de page du contenu principal"
+
+msgid "new"
+msgstr "nouveau"
+
+msgid "next page"
+msgstr "page suivante"
+
+msgid "next_results"
+msgstr "résultats suivants"
+
+msgid "no"
+msgstr "non"
+
+msgid "no content next link"
+msgstr "pas de lien 'suivant'"
+
+msgid "no content prev link"
+msgstr "pas de lien 'précédent'"
+
+msgid "no edited fields specified"
+msgstr "aucun champ à éditer spécifié"
+
+msgid "no log to display"
+msgstr "rien à afficher"
+
+msgid "no related entity"
+msgstr "pas d'entité liée"
+
+msgid "no repository sessions found"
+msgstr "aucune session trouvée"
+
+msgid "no selected entities"
+msgstr "pas d'entité sélectionnée"
+
+#, python-format
+msgid "no such entity type %s"
+msgstr "le type d'entité '%s' n'existe pas"
+
+msgid "no version information"
+msgstr "pas d'information de version"
+
+msgid "no web sessions found"
+msgstr "aucune session trouvée"
+
+msgid "normal"
+msgstr "normal"
+
+msgid "not authorized"
+msgstr "non autorisé"
+
+msgid "not selected"
+msgstr "non sélectionné"
+
+msgid "november"
+msgstr "novembre"
+
+msgid "num. users"
+msgstr "nombre d'utilisateurs"
+
+msgid "object"
+msgstr "objet"
+
+msgid "object type"
+msgstr "type de l'objet"
+
+msgid "october"
+msgstr "octobre"
+
+msgid "one month"
+msgstr "un mois"
+
+msgid "one week"
+msgstr "une semaine"
+
+msgid "oneline"
+msgstr "une ligne"
+
+msgid "only select queries are authorized"
+msgstr "seules les requêtes de sélections sont autorisées"
+
+msgid "open all"
+msgstr "tout ouvrir"
+
+msgid "opened sessions"
+msgstr "sessions ouvertes"
+
+msgid "opened web sessions"
+msgstr "sessions web ouvertes"
+
+msgid "options"
+msgstr "options"
+
+msgctxt "CWSourceSchemaConfig"
+msgid "options"
+msgstr "options"
+
+msgid "order"
+msgstr "ordre"
+
+msgid "ordernum"
+msgstr "ordre"
+
+msgctxt "CWAttribute"
+msgid "ordernum"
+msgstr "numéro d'ordre"
+
+msgctxt "CWRelation"
+msgid "ordernum"
+msgstr "numéro d'ordre"
+
+msgid "owl"
+msgstr "owl"
+
+msgid "owlabox"
+msgstr "owl ABox"
+
+msgid "owned_by"
+msgstr "appartient à"
+
+msgid "owned_by_object"
+msgstr "possède"
+
+msgid "owners"
+msgstr "propriétaires"
+
+msgid "ownerships have been changed"
+msgstr "les droits de propriété ont été modifiés"
+
+msgid "pageid-not-found"
+msgstr ""
+"des données nécessaires semblent expirées, veuillez recharger la page et "
+"recommencer."
+
+msgid "parser"
+msgstr "parseur"
+
+msgctxt "CWSource"
+msgid "parser"
+msgstr "parseur"
+
+msgid "parser to use to extract entities from content retrieved at given URLs."
+msgstr ""
+"parseur à utiliser pour extraire entités et relations du contenu récupéré "
+"aux URLs données"
+
+msgid "password"
+msgstr "mot de passe"
+
+msgid "password and confirmation don't match"
+msgstr "le mot de passe et la confirmation sont différents"
+
+msgid "path"
+msgstr "chemin"
+
+msgctxt "Bookmark"
+msgid "path"
+msgstr "chemin"
+
+msgid "permalink to this message"
+msgstr "lien permanent vers ce message"
+
+msgid "permission"
+msgstr "permission"
+
+msgid "permissions"
+msgstr "permissions"
+
+msgid "pick existing bookmarks"
+msgstr "récupérer des signets existants"
+
+msgid "pkey"
+msgstr "clé"
+
+msgctxt "CWProperty"
+msgid "pkey"
+msgstr "code de la propriété"
+
+msgid "please correct errors below"
+msgstr "veuillez corriger les erreurs ci-dessous"
+
+msgid "please correct the following errors:"
+msgstr "veuillez corriger les erreurs suivantes :"
+
+msgid "possible views"
+msgstr "vues possibles"
+
+msgid "prefered_form"
+msgstr "forme préférée"
+
+msgctxt "EmailAddress"
+msgid "prefered_form"
+msgstr "forme préférée"
+
+msgid "prefered_form_object"
+msgstr "forme préférée à"
+
+msgctxt "EmailAddress"
+msgid "prefered_form_object"
+msgstr "forme préférée de"
+
+msgid "preferences"
+msgstr "préférences"
+
+msgid "previous page"
+msgstr "page précédente"
+
+msgid "previous_results"
+msgstr "résultats précédents"
+
+msgid "primary"
+msgstr "primaire"
+
+msgid "primary_email"
+msgstr "adresse email principale"
+
+msgctxt "CWUser"
+msgid "primary_email"
+msgstr "email principal"
+
+msgid "primary_email_object"
+msgstr "adresse email principale (object)"
+
+msgctxt "EmailAddress"
+msgid "primary_email_object"
+msgstr "adresse principale de"
+
+msgid "profile"
+msgstr "profil"
+
+msgid "rdef-description"
+msgstr "description"
+
+msgid "rdef-permissions"
+msgstr "permissions"
+
+msgid "rdf export"
+msgstr "export RDF"
+
+msgid "read"
+msgstr "lecture"
+
+msgid "read_permission"
+msgstr "permission de lire"
+
+msgctxt "CWAttribute"
+msgid "read_permission"
+msgstr "permission de lire"
+
+msgctxt "CWEType"
+msgid "read_permission"
+msgstr "permission de lire"
+
+msgctxt "CWRelation"
+msgid "read_permission"
+msgstr "permission de lire"
+
+msgid "read_permission_object"
+msgstr "a la permission de lire"
+
+msgctxt "CWGroup"
+msgid "read_permission_object"
+msgstr "peut lire"
+
+msgctxt "RQLExpression"
+msgid "read_permission_object"
+msgstr "peut lire"
+
+msgid "regexp matching host(s) to which this config applies"
+msgstr ""
+"expression régulière des noms d'hôtes auxquels cette configuration s'applique"
+
+msgid "registry"
+msgstr "registre"
+
+msgid "related entity has no state"
+msgstr "l'entité liée n'a pas d'état"
+
+msgid "related entity has no workflow set"
+msgstr "l'entité liée n'a pas de workflow"
+
+msgid "relation"
+msgstr "relation"
+
+#, python-format
+msgid "relation %(relname)s of %(ent)s"
+msgstr "relation %(relname)s de %(ent)s"
+
+#, python-format
+msgid ""
+"relation %(rtype)s with %(etype)s as %(role)s is supported but no target "
+"type supported"
+msgstr ""
+"la relation %(rtype)s avec %(etype)s comme %(role)s est supportée mais aucun "
+"type cible n'est supporté"
+
+#, python-format
+msgid ""
+"relation %(type)s with %(etype)s as %(role)s and target type %(target)s is "
+"mandatory but not supported"
+msgstr ""
+"la relation %(type)s avec %(etype)s comme %(role)s est obligatoire mais non "
+"supportée"
+
+#, python-format
+msgid ""
+"relation %s is supported but none of its definitions matches supported "
+"entities"
+msgstr ""
+"la relation %s est supportée mais aucune de ses définitions ne correspondent "
+"aux types d'entités supportés"
+
+msgid "relation add"
+msgstr "ajout de relation"
+
+msgid "relation removal"
+msgstr "suppression de relation"
+
+msgid "relation_type"
+msgstr "type de relation"
+
+msgctxt "CWAttribute"
+msgid "relation_type"
+msgstr "type de relation"
+
+msgctxt "CWRelation"
+msgid "relation_type"
+msgstr "type de relation"
+
+msgid "relation_type_object"
+msgstr "définition"
+
+msgctxt "CWRType"
+msgid "relation_type_object"
+msgstr "définition"
+
+msgid "relations"
+msgstr "relations"
+
+msgctxt "CWUniqueTogetherConstraint"
+msgid "relations"
+msgstr "relations"
+
+msgid "relations deleted"
+msgstr "relations supprimées"
+
+msgid "relations_object"
+msgstr "relations de"
+
+msgctxt "CWRType"
+msgid "relations_object"
+msgstr "relations de"
+
+msgid "relative url of the bookmarked page"
+msgstr "url relative de la page"
+
+msgid "remove-inlined-entity-form"
+msgstr "supprimer"
+
+msgid "require_group"
+msgstr "nécessite le groupe"
+
+msgctxt "BaseTransition"
+msgid "require_group"
+msgstr "restreinte au groupe"
+
+msgctxt "Transition"
+msgid "require_group"
+msgstr "restreinte au groupe"
+
+msgctxt "WorkflowTransition"
+msgid "require_group"
+msgstr "restreinte au groupe"
+
+msgid "require_group_object"
+msgstr "a les droits"
+
+msgctxt "CWGroup"
+msgid "require_group_object"
+msgstr "a les droits"
+
+msgid "required"
+msgstr "requis"
+
+msgid "required attribute"
+msgstr "attribut requis"
+
+msgid "required field"
+msgstr "champ requis"
+
+msgid "resources usage"
+msgstr "ressources utilisées"
+
+msgid ""
+"restriction part of a rql query. For entity rql expression, X and U are "
+"predefined respectivly to the current object and to the request user. For "
+"relation rql expression, S, O and U are predefined respectivly to the "
+"current relation'subject, object and to the request user. "
+msgstr ""
+"partie restriction de la requête rql. Pour une expression s'appliquant à une "
+"entité, X et U sont respectivement prédéfinis à l'entité et à l'utilisateur "
+"courant. Pour une expression s'appliquant à une relation, S, O et U sont "
+"respectivement prédéfinis au sujet/objet de la relation et à l'utilisateur "
+"courant."
+
+msgid "revert changes"
+msgstr "annuler les changements"
+
+msgid "right"
+msgstr "droite"
+
+msgid "rql expression allowing to add entities/relations of this type"
+msgstr "expression rql autorisant à ajouter des entités/relations de ce type"
+
+msgid "rql expression allowing to delete entities/relations of this type"
+msgstr "expression rql autorisant à supprimer des entités/relations de ce type"
+
+msgid "rql expression allowing to read entities/relations of this type"
+msgstr "expression rql autorisant à lire des entités/relations de ce type"
+
+msgid "rql expression allowing to update entities/relations of this type"
+msgstr ""
+"expression rql autorisant à mettre à jour des entités/relations de ce type"
+
+msgid "rql expressions"
+msgstr "conditions rql"
+
+msgid "rss export"
+msgstr "export RSS"
+
+msgid "rule"
+msgstr "règle"
+
+msgctxt "CWComputedRType"
+msgid "rule"
+msgstr "règle"
+
+msgid "same_as"
+msgstr "identique à"
+
+msgid "sample format"
+msgstr "exemple"
+
+msgid "saturday"
+msgstr "samedi"
+
+msgid "schema-diagram"
+msgstr "diagramme"
+
+msgid "schema-entity-types"
+msgstr "types d'entités"
+
+msgid "schema-relation-types"
+msgstr "types de relations"
+
+msgid "search"
+msgstr "rechercher"
+
+msgid "search for association"
+msgstr "rechercher pour associer"
+
+msgid "searching for"
+msgstr "Recherche de"
+
+msgid "security"
+msgstr "sécurité"
+
+msgid "see more"
+msgstr "voir plus"
+
+msgid "see them all"
+msgstr "les voir toutes"
+
+msgid "see_also"
+msgstr "voir aussi"
+
+msgid "select"
+msgstr "sélectionner"
+
+msgid "select a"
+msgstr "sélectionner un"
+
+msgid "select a key first"
+msgstr "sélectionnez d'abord une clé"
+
+msgid "select a relation"
+msgstr "sélectionner une relation"
+
+msgid "select this entity"
+msgstr "sélectionner cette entité"
+
+msgid "selected"
+msgstr "sélectionné"
+
+msgid "semantic description of this attribute"
+msgstr "description sémantique de cet attribut"
+
+msgid "semantic description of this entity type"
+msgstr "description sémantique de ce type d'entité"
+
+msgid "semantic description of this relation"
+msgstr "description sémantique de cette relation"
+
+msgid "semantic description of this relation type"
+msgstr "description sémantique de ce type de relation"
+
+msgid "semantic description of this state"
+msgstr "description sémantique de cet état"
+
+msgid "semantic description of this transition"
+msgstr "description sémantique de cette transition"
+
+msgid "semantic description of this workflow"
+msgstr "description sémantique de ce workflow"
+
+msgid "september"
+msgstr "septembre"
+
+msgid "server information"
+msgstr "informations serveur"
+
+msgid "severity"
+msgstr "sévérité"
+
+msgid ""
+"should html fields being edited using fckeditor (a HTML WYSIWYG editor). "
+"You should also select text/html as default text format to actually get "
+"fckeditor."
+msgstr ""
+"indique si les champs HTML doivent être édités avec fckeditor (un\n"
+"éditeur HTML WYSIWYG). Il est également conseillé de choisir text/html\n"
+"comme format de texte par défaut pour pouvoir utiliser fckeditor."
+
+#, python-format
+msgid "show %s results"
+msgstr "montrer %s résultats"
+
+msgid "show advanced fields"
+msgstr "montrer les champs avancés"
+
+msgid "show filter form"
+msgstr "afficher le filtre"
+
+msgid "site configuration"
+msgstr "configuration du site"
+
+msgid "site documentation"
+msgstr "documentation du site"
+
+msgid "site title"
+msgstr "titre du site"
+
+msgid "site-wide property can't be set for user"
+msgstr "une propriété spécifique au site ne peut être propre à un utilisateur"
+
+msgid "some later transaction(s) touch entity, undo them first"
+msgstr ""
+"des transactions plus récentes modifient cette entité, annulez-les d'abord"
+
+msgid "some relations violate a unicity constraint"
+msgstr "certaines relations transgressent une contrainte d'unicité"
+
+msgid "sorry, the server is unable to handle this query"
+msgstr "désolé, le serveur ne peut traiter cette requête"
+
+msgid ""
+"source's configuration. One key=value per line, authorized keys depending on "
+"the source's type"
+msgstr ""
+"Configuration de la source. Une clé=valeur par ligne, les clés autorisées "
+"dépendant du type de source. Les valeurs surchargent celles définies sur la "
+"source."
+
+msgid "sparql xml"
+msgstr "XML Sparql"
+
+msgid "special transition allowing to go through a sub-workflow"
+msgstr "transition spéciale permettant d'aller dans un sous-workflow"
+
+msgid "specializes"
+msgstr "dérive de"
+
+msgctxt "CWEType"
+msgid "specializes"
+msgstr "spécialise"
+
+msgid "specializes_object"
+msgstr "parent de"
+
+msgctxt "CWEType"
+msgid "specializes_object"
+msgstr "parent de"
+
+#, python-format
+msgid "specifying %s is mandatory"
+msgstr "spécifier %s est obligatoire"
+
+msgid ""
+"start timestamp of the currently in synchronization, or NULL when no "
+"synchronization in progress."
+msgstr ""
+"horodate de départ de la synchronisation en cours, ou NULL s'il n'y en a pas."
+
+msgid "start_timestamp"
+msgstr "horodate de début"
+
+msgctxt "CWDataImport"
+msgid "start_timestamp"
+msgstr "horodate de début"
+
+msgid "startup views"
+msgstr "vues de départ"
+
+msgid "startupview"
+msgstr "vues de départ"
+
+msgid "state"
+msgstr "état"
+
+msgid "state and transition don't belong the the same workflow"
+msgstr "l'état et la transition n'appartiennent pas au même workflow"
+
+msgid "state doesn't apply to this entity's type"
+msgstr "cet état ne s'applique pas à ce type d'entité"
+
+msgid "state doesn't belong to entity's current workflow"
+msgstr "l'état n'appartient pas au workflow courant de l'entité"
+
+msgid "state doesn't belong to entity's workflow"
+msgstr "l'état n'appartient pas au workflow de l'entité"
+
+msgid ""
+"state doesn't belong to entity's workflow. You may want to set a custom "
+"workflow for this entity first."
+msgstr ""
+"l'état n'appartient pas au workflow courant de l'entité. Vous désirez peut-"
+"être spécifier que cette entité doit utiliser ce workflow."
+
+msgid "state doesn't belong to this workflow"
+msgstr "l'état n'appartient pas à ce workflow"
+
+msgid "state_of"
+msgstr "état de"
+
+msgctxt "State"
+msgid "state_of"
+msgstr "état de"
+
+msgid "state_of_object"
+msgstr "a pour état"
+
+msgctxt "Workflow"
+msgid "state_of_object"
+msgstr "contient les états"
+
+msgid "status"
+msgstr "état"
+
+msgctxt "CWDataImport"
+msgid "status"
+msgstr "état"
+
+msgid "status change"
+msgstr "changer l'état"
+
+msgid "status changed"
+msgstr "changement d'état"
+
+#, python-format
+msgid "status will change from %(st1)s to %(st2)s"
+msgstr "l'entité passera de l'état %(st1)s à l'état %(st2)s"
+
+msgid "subject"
+msgstr "sujet"
+
+msgid "subject type"
+msgstr "type du sujet"
+
+msgid "subject/object cardinality"
+msgstr "cardinalité sujet/objet"
+
+msgid "subworkflow"
+msgstr "sous-workflow"
+
+msgctxt "WorkflowTransition"
+msgid "subworkflow"
+msgstr "sous-workflow"
+
+msgid ""
+"subworkflow isn't a workflow for the same types as the transition's workflow"
+msgstr ""
+"le sous-workflow ne s'applique pas aux mêmes types que le workflow de cette "
+"transition"
+
+msgid "subworkflow state"
+msgstr "état de sous-workflow"
+
+msgid "subworkflow_exit"
+msgstr "sortie de sous-workflow"
+
+msgctxt "WorkflowTransition"
+msgid "subworkflow_exit"
+msgstr "sortie du sous-workflow"
+
+msgid "subworkflow_exit_object"
+msgstr "états de sortie"
+
+msgctxt "SubWorkflowExitPoint"
+msgid "subworkflow_exit_object"
+msgstr "états de sortie"
+
+msgid "subworkflow_object"
+msgstr "utilisé par la transition"
+
+msgctxt "Workflow"
+msgid "subworkflow_object"
+msgstr "sous workflow de"
+
+msgid "subworkflow_state"
+msgstr "état du sous-workflow"
+
+msgctxt "SubWorkflowExitPoint"
+msgid "subworkflow_state"
+msgstr "état"
+
+msgid "subworkflow_state_object"
+msgstr "état de sortie de"
+
+msgctxt "State"
+msgid "subworkflow_state_object"
+msgstr "état de sortie de"
+
+msgid "success"
+msgstr "succès"
+
+msgid "sunday"
+msgstr "dimanche"
+
+msgid "surname"
+msgstr "nom"
+
+msgctxt "CWUser"
+msgid "surname"
+msgstr "nom de famille"
+
+msgid "symmetric"
+msgstr "symétrique"
+
+msgctxt "CWRType"
+msgid "symmetric"
+msgstr "symétrique"
+
+msgid "synchronization-interval must be greater than 1 minute"
+msgstr "synchronization-interval doit être supérieur à 1 minute"
+
+msgid "table"
+msgstr "table"
+
+msgid "tablefilter"
+msgstr "filtre de tableau"
+
+msgid "text"
+msgstr "texte"
+
+msgid "text/cubicweb-page-template"
+msgstr "contenu dynamique"
+
+msgid "text/html"
+msgstr "html"
+
+msgid "text/markdown"
+msgstr "texte au format markdown"
+
+msgid "text/plain"
+msgstr "texte pur"
+
+msgid "text/rest"
+msgstr "ReST"
+
+msgid "the URI of the object"
+msgstr "l'URI de l'objet"
+
+msgid "the prefered email"
+msgstr "l'adresse électronique principale"
+
+msgid "the system source has its configuration stored on the file-system"
+msgstr "la source système a sa configuration stockée sur le système de fichier"
+
+#, python-format
+msgid "the value \"%s\" is already used, use another one"
+msgstr "la valeur \"%s\" est déjà utilisée, veuillez utiliser une autre valeur"
+
+msgid "there is no next page"
+msgstr "Il n'y a pas de page suivante"
+
+msgid "there is no previous page"
+msgstr "Il n'y a pas de page précédente"
+
+#, python-format
+msgid "there is no transaction #%s"
+msgstr "Il n'y a pas de transaction #%s"
+
+msgid "this action is not reversible!"
+msgstr ""
+"Attention ! Cette opération va détruire les données de façon irréversible."
+
+msgid "this entity is currently owned by"
+msgstr "cette entité appartient à"
+
+msgid "this parser doesn't use a mapping"
+msgstr "ce parseur n'utilise pas de mapping"
+
+msgid "this resource does not exist"
+msgstr "cette ressource est introuvable"
+
+msgid "this source doesn't use a mapping"
+msgstr "cette source n'utilise pas de mapping"
+
+msgid "thursday"
+msgstr "jeudi"
+
+msgid "timestamp"
+msgstr "date"
+
+msgctxt "CWCache"
+msgid "timestamp"
+msgstr "valide depuis"
+
+msgid "timetable"
+msgstr "emploi du temps"
+
+msgid "title"
+msgstr "titre"
+
+msgctxt "Bookmark"
+msgid "title"
+msgstr "libellé"
+
+msgid "to"
+msgstr "à"
+
+#, python-format
+msgid "to %(date)s"
+msgstr "au %(date)s"
+
+msgid "to associate with"
+msgstr "pour associer à"
+
+msgid "to_entity"
+msgstr "vers l'entité"
+
+msgctxt "CWAttribute"
+msgid "to_entity"
+msgstr "pour l'entité"
+
+msgctxt "CWRelation"
+msgid "to_entity"
+msgstr "pour l'entité"
+
+msgid "to_entity_object"
+msgstr "objet de la relation"
+
+msgctxt "CWEType"
+msgid "to_entity_object"
+msgstr "objet de la relation"
+
+msgid "to_interval_end"
+msgstr "à"
+
+msgid "to_state"
+msgstr "vers l'état"
+
+msgctxt "TrInfo"
+msgid "to_state"
+msgstr "état de destination"
+
+msgid "to_state_object"
+msgstr "transitions vers cet état"
+
+msgctxt "State"
+msgid "to_state_object"
+msgstr "transition vers cet état"
+
+msgid "toggle check boxes"
+msgstr "afficher/masquer les cases à cocher"
+
+msgid "tr_count"
+msgstr "n° de transition"
+
+msgctxt "TrInfo"
+msgid "tr_count"
+msgstr "n° de transition"
+
+msgid "transaction undone"
+msgstr "transaction annulée"
+
+#, python-format
+msgid "transition %(tr)s isn't allowed from %(st)s"
+msgstr "la transition %(tr)s n'est pas autorisée depuis l'état %(st)s"
+
+msgid "transition doesn't belong to entity's workflow"
+msgstr "la transition n'appartient pas au workflow de l'entité"
+
+msgid "transition isn't allowed"
+msgstr "la transition n'est pas autorisée"
+
+msgid "transition may not be fired"
+msgstr "la transition ne peut-être déclenchée"
+
+msgid "transition_of"
+msgstr "transition de"
+
+msgctxt "BaseTransition"
+msgid "transition_of"
+msgstr "transition de"
+
+msgctxt "Transition"
+msgid "transition_of"
+msgstr "transition de"
+
+msgctxt "WorkflowTransition"
+msgid "transition_of"
+msgstr "transition de"
+
+msgid "transition_of_object"
+msgstr "a pour transition"
+
+msgctxt "Workflow"
+msgid "transition_of_object"
+msgstr "a pour transition"
+
+msgid "tree view"
+msgstr "arborescence"
+
+msgid "tuesday"
+msgstr "mardi"
+
+msgid "type"
+msgstr "type"
+
+msgctxt "BaseTransition"
+msgid "type"
+msgstr "type"
+
+msgctxt "CWSource"
+msgid "type"
+msgstr "type"
+
+msgctxt "Transition"
+msgid "type"
+msgstr "type"
+
+msgctxt "WorkflowTransition"
+msgid "type"
+msgstr "type"
+
+msgid "type here a sparql query"
+msgstr "Tapez une requête sparql"
+
+msgid "type of the source"
+msgstr "type de la source"
+
+msgid "ui"
+msgstr "propriétés génériques de l'interface"
+
+msgid "ui.date-format"
+msgstr "format de date"
+
+msgid "ui.datetime-format"
+msgstr "format de date et de l'heure"
+
+msgid "ui.default-text-format"
+msgstr "format de texte"
+
+msgid "ui.encoding"
+msgstr "encodage"
+
+msgid "ui.fckeditor"
+msgstr "éditeur du contenu"
+
+msgid "ui.float-format"
+msgstr "format des flottants"
+
+msgid "ui.language"
+msgstr "langue"
+
+msgid "ui.main-template"
+msgstr "gabarit principal"
+
+msgid "ui.site-title"
+msgstr "titre du site"
+
+msgid "ui.time-format"
+msgstr "format de l'heure"
+
+msgid "unable to check captcha, please try again"
+msgstr "impossible de vérifier le captcha, veuillez réessayer"
+
+msgid "unaccessible"
+msgstr "inaccessible"
+
+msgid "unauthorized value"
+msgstr "valeur non autorisée"
+
+msgid "undefined user"
+msgstr "utilisateur inconnu"
+
+msgid "undo"
+msgstr "annuler"
+
+msgid "unique identifier used to connect to the application"
+msgstr "identifiant unique utilisé pour se connecter à l'application"
+
+msgid "unknown external entity"
+msgstr "entité (externe) introuvable"
+
+#, python-format
+msgid "unknown options %s"
+msgstr "options inconnues : %s"
+
+#, python-format
+msgid "unknown property key %s"
+msgstr "clé de propriété inconnue : %s"
+
+msgid "unknown vocabulary:"
+msgstr "vocabulaire inconnu : "
+
+msgid "unsupported protocol"
+msgstr "protocole non supporté"
+
+msgid "upassword"
+msgstr "mot de passe"
+
+msgctxt "CWUser"
+msgid "upassword"
+msgstr "mot de passe"
+
+msgid "update"
+msgstr "modification"
+
+msgid "update_permission"
+msgstr "permission de modification"
+
+msgctxt "CWAttribute"
+msgid "update_permission"
+msgstr "permission de modifier"
+
+msgctxt "CWEType"
+msgid "update_permission"
+msgstr "permission de modifier"
+
+msgid "update_permission_object"
+msgstr "a la permission de modifier"
+
+msgctxt "CWGroup"
+msgid "update_permission_object"
+msgstr "peut modifier"
+
+msgctxt "RQLExpression"
+msgid "update_permission_object"
+msgstr "peut modifier"
+
+msgid "update_relation"
+msgstr "modifier"
+
+msgid "updated"
+msgstr "mis à jour"
+
+#, python-format
+msgid "updated %(etype)s #%(eid)s (%(title)s)"
+msgstr "modification de l'entité %(etype)s #%(eid)s (%(title)s)"
+
+msgid "uri"
+msgstr "uri"
+
+msgctxt "ExternalUri"
+msgid "uri"
+msgstr "uri"
+
+msgid "url"
+msgstr "url"
+
+msgctxt "CWSource"
+msgid "url"
+msgstr "url"
+
+msgid ""
+"use to define a transition from one or multiple states to a destination "
+"states in workflow's definitions. Transition without destination state will "
+"go back to the state from which we arrived to the current state."
+msgstr ""
+"utilisé dans une définition de processus pour ajouter une transition depuis "
+"un ou plusieurs états vers un état de destination. Une transition sans état "
+"de destination retournera à l'état précédent l'état courant."
+
+msgid "use_email"
+msgstr "adresse électronique"
+
+msgctxt "CWUser"
+msgid "use_email"
+msgstr "utilise l'adresse électronique"
+
+msgid "use_email_object"
+msgstr "adresse utilisée par"
+
+msgctxt "EmailAddress"
+msgid "use_email_object"
+msgstr "utilisée par"
+
+msgid ""
+"used for cubicweb configuration. Once a property has been created you can't "
+"change the key."
+msgstr ""
+"utilisé pour la configuration de l'application. Une fois qu'une propriété a "
+"été créée, vous ne pouvez plus changer la clé associée"
+
+msgid ""
+"used to associate simple states to an entity type and/or to define workflows"
+msgstr "associe les états à un type d'entité pour définir un workflow"
+
+msgid "user"
+msgstr "utilisateur"
+
+#, python-format
+msgid ""
+"user %s has made the following change(s):\n"
+"\n"
+msgstr ""
+"l'utilisateur %s a effectué le(s) changement(s) suivant(s):\n"
+"\n"
+
+msgid "user interface encoding"
+msgstr "encodage utilisé dans l'interface utilisateur"
+
+msgid "user preferences"
+msgstr "préférences utilisateur"
+
+msgid "user's email account"
+msgstr "email de l'utilisateur"
+
+msgid "users"
+msgstr "utilisateurs"
+
+msgid "users and groups"
+msgstr "utilisateurs et groupes"
+
+msgid "users using this bookmark"
+msgstr "utilisateurs utilisant ce signet"
+
+msgid "validate modifications on selected items"
+msgstr "valider les modifications apportées aux éléments sélectionnés"
+
+msgid "validating..."
+msgstr "chargement en cours ..."
+
+msgid "value"
+msgstr "valeur"
+
+msgctxt "CWConstraint"
+msgid "value"
+msgstr "contrainte"
+
+msgctxt "CWProperty"
+msgid "value"
+msgstr "valeur"
+
+#, python-format
+msgid "value %(KEY-value)s must be < %(KEY-boundary)s"
+msgstr "la valeur %(KEY-value)s doit être strictement inférieure à %(KEY-boundary)s"
+
+#, python-format
+msgid "value %(KEY-value)s must be <= %(KEY-boundary)s"
+msgstr ""
+"la valeur %(KEY-value)s doit être inférieure ou égale à %(KEY-boundary)s"
+
+#, python-format
+msgid "value %(KEY-value)s must be > %(KEY-boundary)s"
+msgstr "la valeur %(KEY-value)s doit être strictement supérieure à %(KEY-boundary)s"
+
+#, python-format
+msgid "value %(KEY-value)s must be >= %(KEY-boundary)s"
+msgstr ""
+"la valeur %(KEY-value)s doit être supérieure ou égale à %(KEY-boundary)s"
+
+msgid "value associated to this key is not editable manually"
+msgstr "la valeur associée à cette clé n'est pas éditable manuellement"
+
+#, python-format
+msgid "value should have maximum size of %(KEY-max)s but found %(KEY-size)s"
+msgstr ""
+"la taille maximum est %(KEY-max)s mais cette valeur est de taille "
+"%(KEY-size)s"
+
+#, python-format
+msgid "value should have minimum size of %(KEY-min)s but found %(KEY-size)s"
+msgstr ""
+"la taille minimum est %(KEY-min)s mais cette valeur est de taille "
+"%(KEY-size)s"
+
+msgid "vcard"
+msgstr "vcard"
+
+msgid "versions configuration"
+msgstr "configuration de version"
+
+msgid "view"
+msgstr "voir"
+
+msgid "view all"
+msgstr "voir tous"
+
+msgid "view detail for this entity"
+msgstr "voir les détails de cette entité"
+
+msgid "view history"
+msgstr "voir l'historique"
+
+msgid "view identifier"
+msgstr "identifiant"
+
+msgid "view title"
+msgstr "titre"
+
+msgid "view workflow"
+msgstr "voir les états possibles"
+
+msgid "view_index"
+msgstr "accueil"
+
+msgid "visible"
+msgstr "visible"
+
+msgid "warning"
+msgstr "attention"
+
+msgid "we are not yet ready to handle this query"
+msgstr ""
+"nous ne sommes pas capable de gérer ce type de requête sparql pour le moment"
+
+msgid "wednesday"
+msgstr "mercredi"
+
+#, python-format
+msgid "welcome %s!"
+msgstr "bienvenue %s !"
+
+msgid "wf_info_for"
+msgstr "historique de"
+
+msgid "wf_info_for_object"
+msgstr "historique des transitions"
+
+msgid "wf_tab_info"
+msgstr "description"
+
+msgid "wfgraph"
+msgstr "image du workflow"
+
+msgid ""
+"when multiple addresses are equivalent (such as python-projects@logilab.org "
+"and python-projects@lists.logilab.org), set this to indicate which is the "
+"preferred form."
+msgstr ""
+"quand plusieurs adresses sont équivalentes (comme python-projects@logilab."
+"org et python-projects@lists.logilab.org), indique laquelle est la forme "
+"préférentielle."
+
+msgid "workflow"
+msgstr "workflow"
+
+#, python-format
+msgid "workflow changed to \"%s\""
+msgstr "workflow changé à \"%s\""
+
+msgid "workflow has no initial state"
+msgstr "le workflow n'a pas d'état initial"
+
+msgid "workflow history item"
+msgstr "entrée de l'historique de workflow"
+
+msgid "workflow isn't a workflow for this type"
+msgstr "le workflow ne s'applique pas à ce type d'entité"
+
+msgid "workflow to which this state belongs"
+msgstr "workflow auquel cet état appartient"
+
+msgid "workflow to which this transition belongs"
+msgstr "workflow auquel cette transition appartient"
+
+msgid "workflow_of"
+msgstr "workflow de"
+
+msgctxt "Workflow"
+msgid "workflow_of"
+msgstr "workflow de"
+
+msgid "workflow_of_object"
+msgstr "a pour workflow"
+
+msgctxt "CWEType"
+msgid "workflow_of_object"
+msgstr "a pour workflow"
+
+#, python-format
+msgid "wrong query parameter line %s"
+msgstr "mauvais paramètre de requête ligne %s"
+
+msgid "xbel export"
+msgstr "export XBEL"
+
+msgid "xml export"
+msgstr "export XML"
+
+msgid "xml export (entities)"
+msgstr "export XML (entités)"
+
+msgid "yes"
+msgstr "oui"
+
+msgid "you have been logged out"
+msgstr "vous avez été déconnecté"
+
+msgid "you should probably delete that property"
+msgstr "vous devriez probablement supprimer cette propriété"
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/mail.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/mail.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,154 @@
+# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""Common utilies to format / send emails."""
+
+__docformat__ = "restructuredtext en"
+
+from base64 import b64encode, b64decode
+from time import time
+from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
+from email.mime.image import MIMEImage
+from email.header import Header
+from email.utils import formatdate
+from socket import gethostname
+
+from six import PY2, PY3, text_type
+
+
+def header(ustring):
+ if PY3:
+ return Header(ustring, 'utf-8')
+ return Header(ustring.encode('UTF-8'), 'UTF-8')
+
+def addrheader(uaddr, uname=None):
+ # even if an email address should be ascii, encode it using utf8 since
+ # automatic tests may generate non ascii email address
+ if PY2:
+ addr = uaddr.encode('UTF-8')
+ else:
+ addr = uaddr
+ if uname:
+ val = '%s <%s>' % (header(uname).encode(), addr)
+ else:
+ val = addr
+ assert isinstance(val, str) # bytes in py2, ascii-encoded unicode in py3
+ return val
+
+
+def construct_message_id(appid, eid, withtimestamp=True):
+ if withtimestamp:
+ addrpart = 'eid=%s×tamp=%.10f' % (eid, time())
+ else:
+ addrpart = 'eid=%s' % eid
+ # we don't want any equal sign nor trailing newlines
+ leftpart = b64encode(addrpart.encode('ascii'), b'.-').decode('ascii').rstrip().rstrip('=')
+ return '<%s@%s.%s>' % (leftpart, appid, gethostname())
+
+
+def parse_message_id(msgid, appid):
+ if msgid[0] == '<':
+ msgid = msgid[1:]
+ if msgid[-1] == '>':
+ msgid = msgid[:-1]
+ try:
+ values, qualif = msgid.split('@')
+ padding = len(values) % 4
+ values = b64decode(str(values + '='*padding), '.-').decode('ascii')
+ values = dict(v.split('=') for v in values.split('&'))
+ fromappid, host = qualif.split('.', 1)
+ except Exception:
+ return None
+ if appid != fromappid or host != gethostname():
+ return None
+ return values
+
+
+def format_mail(uinfo, to_addrs, content, subject="",
+ cc_addrs=(), msgid=None, references=(), config=None):
+ """Sends an Email to 'e_addr' with content 'content', and subject 'subject'
+
+ to_addrs and cc_addrs are expected to be a list of email address without
+ name
+ """
+ assert isinstance(content, text_type), repr(content)
+ msg = MIMEText(content.encode('UTF-8'), 'plain', 'UTF-8')
+ # safety: keep only the first newline
+ try:
+ subject = subject.splitlines()[0]
+ msg['Subject'] = header(subject)
+ except IndexError:
+ pass # no subject
+ if uinfo.get('email'):
+ email = uinfo['email']
+ elif config and config['sender-addr']:
+ email = text_type(config['sender-addr'])
+ else:
+ email = u''
+ if uinfo.get('name'):
+ name = uinfo['name']
+ elif config and config['sender-name']:
+ name = text_type(config['sender-name'])
+ else:
+ name = u''
+ msg['From'] = addrheader(email, name)
+ if config and config['sender-addr'] and config['sender-addr'] != email:
+ appaddr = addrheader(config['sender-addr'], config['sender-name'])
+ msg['Reply-to'] = '%s, %s' % (msg['From'], appaddr)
+ elif email:
+ msg['Reply-to'] = msg['From']
+ if config is not None:
+ msg['X-CW'] = config.appid
+ unique_addrs = lambda addrs: sorted(set(addr for addr in addrs if addr is not None))
+ msg['To'] = ', '.join(addrheader(addr) for addr in unique_addrs(to_addrs))
+ if cc_addrs:
+ msg['Cc'] = ', '.join(addrheader(addr) for addr in unique_addrs(cc_addrs))
+ if msgid:
+ msg['Message-id'] = msgid
+ if references:
+ msg['References'] = ', '.join(references)
+ msg['Date'] = formatdate()
+ return msg
+
+
+class HtmlEmail(MIMEMultipart):
+
+ def __init__(self, subject, textcontent, htmlcontent,
+ sendermail=None, sendername=None, recipients=None, ccrecipients=None):
+ MIMEMultipart.__init__(self, 'related')
+ self['Subject'] = header(subject)
+ self.preamble = 'This is a multi-part message in MIME format.'
+ # Attach alternative text message
+ alternative = MIMEMultipart('alternative')
+ self.attach(alternative)
+ msgtext = MIMEText(textcontent.encode('UTF-8'), 'plain', 'UTF-8')
+ alternative.attach(msgtext)
+ # Attach html message
+ msghtml = MIMEText(htmlcontent.encode('UTF-8'), 'html', 'UTF-8')
+ alternative.attach(msghtml)
+ if sendermail or sendername:
+ self['From'] = addrheader(sendermail, sendername)
+ if recipients:
+ self['To'] = ', '.join(addrheader(addr) for addr in recipients if addr is not None)
+ if ccrecipients:
+ self['Cc'] = ', '.join(addrheader(addr) for addr in ccrecipients if addr is not None)
+
+ def attach_image(self, data, htmlId):
+ image = MIMEImage(data)
+ image.add_header('Content-ID', '<%s>' % htmlId)
+ self.attach(image)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/md5crypt.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/md5crypt.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,121 @@
+# md5crypt.py
+#
+# 0423.2000 by michal wallace http://www.sabren.com/
+# based on perl's Crypt::PasswdMD5 by Luis Munoz (lem@cantv.net)
+# based on /usr/src/libcrypt/crypt.c from FreeBSD 2.2.5-RELEASE
+#
+# MANY THANKS TO
+#
+# Carey Evans - http://home.clear.net.nz/pages/c.evans/
+# Dennis Marti - http://users.starpower.net/marti1/
+#
+# For the patches that got this thing working!
+#
+# modification by logilab:
+# * remove usage of the string module
+# * don't include the magic string in the output string
+# for true crypt.crypt compatibility
+# * use hashlib module instead of md5
+#########################################################
+"""md5crypt.py - Provides interoperable MD5-based crypt() function
+
+SYNOPSIS
+
+        import md5crypt
+
+ cryptedpassword = md5crypt.md5crypt(password, salt);
+
+DESCRIPTION
+
+unix_md5_crypt() provides a crypt()-compatible interface to the
+rather new MD5-based crypt() function found in modern operating systems.
+It's based on the implementation found on FreeBSD 2.2.[56]-RELEASE and
+contains the following license in it:
+
+ "THE BEER-WARE LICENSE" (Revision 42):
+   <phk@login.dknet.dk> wrote this file.  As long as you retain this notice you
+ can do whatever you want with this stuff. If we meet some day, and you think
+ this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
+"""
+
+MAGIC = b'$1$' # Magic string
+ITOA64 = b"./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+
+from hashlib import md5 # pylint: disable=E0611
+
+from six import text_type, indexbytes
+from six.moves import range
+
+
+def to64 (v, n):
+ ret = bytearray()
+ while (n - 1 >= 0):
+ n = n - 1
+ ret.append(ITOA64[v & 0x3f])
+ v = v >> 6
+ return ret
+
+def crypt(pw, salt):
+ if isinstance(pw, text_type):
+ pw = pw.encode('utf-8')
+ if isinstance(salt, text_type):
+ salt = salt.encode('ascii')
+ # Take care of the magic string if present
+ if salt.startswith(MAGIC):
+ salt = salt[len(MAGIC):]
+ # salt can have up to 8 characters:
+ salt = salt.split(b'$', 1)[0]
+ salt = salt[:8]
+ ctx = pw + MAGIC + salt
+ final = md5(pw + salt + pw).digest()
+ for pl in range(len(pw), 0, -16):
+ if pl > 16:
+ ctx = ctx + final[:16]
+ else:
+ ctx = ctx + final[:pl]
+ # Now the 'weird' xform (??)
+ i = len(pw)
+ while i:
+ if i & 1:
+ ctx = ctx + b'\0' #if ($i & 1) { $ctx->add(pack("C", 0)); }
+ else:
+ ctx = ctx + pw[0]
+ i = i >> 1
+ final = md5(ctx).digest()
+ # The following is supposed to make
+ # things run slower.
+ # my question: WTF???
+ for i in range(1000):
+ ctx1 = b''
+ if i & 1:
+ ctx1 = ctx1 + pw
+ else:
+ ctx1 = ctx1 + final[:16]
+ if i % 3:
+ ctx1 = ctx1 + salt
+ if i % 7:
+ ctx1 = ctx1 + pw
+ if i & 1:
+ ctx1 = ctx1 + final[:16]
+ else:
+ ctx1 = ctx1 + pw
+ final = md5(ctx1).digest()
+ # Final xform
+ passwd = b''
+ passwd += to64((indexbytes(final, 0) << 16)
+ |(indexbytes(final, 6) << 8)
+ |(indexbytes(final, 12)),4)
+ passwd += to64((indexbytes(final, 1) << 16)
+ |(indexbytes(final, 7) << 8)
+ |(indexbytes(final, 13)), 4)
+ passwd += to64((indexbytes(final, 2) << 16)
+ |(indexbytes(final, 8) << 8)
+ |(indexbytes(final, 14)), 4)
+ passwd += to64((indexbytes(final, 3) << 16)
+ |(indexbytes(final, 9) << 8)
+ |(indexbytes(final, 15)), 4)
+ passwd += to64((indexbytes(final, 4) << 16)
+ |(indexbytes(final, 10) << 8)
+ |(indexbytes(final, 5)), 4)
+ passwd += to64((indexbytes(final, 11)), 2)
+ return passwd
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/migration.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/migration.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,553 @@
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""utilities for instances migration"""
+from __future__ import print_function
+
+__docformat__ = "restructuredtext en"
+
+import sys
+import os
+import logging
+import tempfile
+from os.path import exists, join, basename, splitext
+from itertools import chain
+from warnings import warn
+
+from six import string_types
+
+from logilab.common import IGNORED_EXTENSIONS
+from logilab.common.decorators import cached
+from logilab.common.configuration import REQUIRED, read_old_config
+from logilab.common.shellutils import ASK
+from logilab.common.changelog import Version
+from logilab.common.deprecation import deprecated
+
+from cubicweb import ConfigurationError, ExecutionError
+from cubicweb.cwconfig import CubicWebConfiguration as cwcfg
+from cubicweb.toolsutils import show_diffs
+
def filter_scripts(config, directory, fromversion, toversion, quiet=True):
    """return a list of paths of migration files to consider to upgrade
    from a version to a greater one

    Scripts are named ``<version>_<mode>.<ext>``; a script is retained when
    its mode is accepted by `config` and fromversion < version <= toversion.
    """
    from logilab.common.changelog import Version # doesn't work with appengine
    assert fromversion
    assert toversion
    assert isinstance(fromversion, tuple), fromversion.__class__
    assert isinstance(toversion, tuple), toversion.__class__
    assert fromversion <= toversion, (fromversion, toversion)
    if not exists(directory):
        if not quiet:
            print(directory, "doesn't exists, no migration path")
        return []
    if fromversion == toversion:
        return []
    scripts = []
    for filename in os.listdir(directory):
        if filename.endswith(IGNORED_EXTENSIONS):
            continue
        filepath = join(directory, filename)
        try:
            versionstr, mode = filename.split('_', 1)
        except ValueError:
            # not a migration script name
            continue
        mode = mode.split('.', 1)[0]
        if not config.accept_mode(mode):
            continue
        try:
            scriptversion = Version(versionstr)
        except ValueError:
            continue
        if fromversion < scriptversion <= toversion:
            scripts.append((scriptversion, filepath))
    # be sure scripts are executed in order
    return sorted(scripts)
+
+
def execscript_confirm(scriptpath):
    """asks for confirmation before executing a script and provides the
    ability to show the script's content

    Return True to run the script, False to skip it; 'abort' exits the
    whole process.
    """
    while True:
        answer = ASK.ask('Execute %r ?' % scriptpath,
                         ('Y','n','show','abort'), 'Y')
        if answer == 'abort':
            raise SystemExit(1)
        if answer == 'n':
            return False
        if answer == 'show':
            with open(scriptpath) as stream:
                scriptcontent = stream.read()
            print()
            print(scriptcontent)
            print()
        else:
            return True
+
def yes(*args, **kwargs):
    # unconditional positive answer, used to bypass interactive confirmations
    return True
+
+
class MigrationHelper(object):
    """class holding CubicWeb Migration Actions used by migration scripts

    Commands are implemented as ``cmd_<name>`` methods; thanks to
    :meth:`__getattribute__` they are also reachable as plain ``<name>``
    attributes, wrapped so that execution goes through interactive
    confirmation (see :meth:`interact`).
    """

    def __init__(self, config, interactive=True, verbosity=1):
        self.config = config
        if config:
            # no config on shell to a remote instance
            self.config.init_log(logthreshold=logging.ERROR)
        # 0: no confirmation, 1: only main commands confirmed, 2 ask for everything
        self.verbosity = verbosity
        # when True, commands exposed in the script context are wrapped with
        # interactive confirmation (see _create_context)
        self.need_wrap = True
        if not interactive or not verbosity:
            self.confirm = yes
            self.execscript_confirm = yes
        else:
            self.execscript_confirm = execscript_confirm
        # (kind, ...) tuples recorded by the cmd_option_* commands, replayed
        # by rewrite_configuration
        self._option_changes = []
        self.__context = {'confirm': self.confirm,
                          'config': self.config,
                          'interactive_mode': interactive,
                          }
        # per-script execution contexts (see cmd_process_script/update_context)
        self._context_stack = []

    def __getattribute__(self, name):
        # expose each cmd_<name> command under the bare <name>, wrapped so
        # that calling it goes through interactive confirmation
        try:
            return object.__getattribute__(self, name)
        except AttributeError:
            cmd = 'cmd_%s' % name
            # search self.__class__ to avoid infinite recursion
            if hasattr(self.__class__, cmd):
                meth = getattr(self, cmd)
                return lambda *args, **kwargs: self.interact(args, kwargs,
                                                             meth=meth)
            raise
        raise AttributeError(name)  # NOTE(review): unreachable dead code

    def migrate(self, vcconf, toupgrade, options):
        """upgrade the given set of cubes

        `cubes` is an ordered list of 3-uple:
        (cube, fromversion, toversion)
        """
        if options.fs_only:
            # monkey path configuration.accept_mode so database mode (e.g. Any)
            # won't be accepted
            orig_accept_mode = self.config.accept_mode
            def accept_mode(mode):
                if mode == 'Any':
                    return False
                return orig_accept_mode(mode)
            self.config.accept_mode = accept_mode
        # may be an iterator
        toupgrade = tuple(toupgrade)
        vmap = dict( (cube, (fromver, tover)) for cube, fromver, tover in toupgrade)
        ctx = self.__context
        ctx['versions_map'] = vmap
        # the cubicweb bootstrap script runs first, and only when database
        # mode is accepted
        if self.config.accept_mode('Any') and 'cubicweb' in vmap:
            migrdir = self.config.migration_scripts_dir()
            self.cmd_process_script(join(migrdir, 'bootstrapmigration_repository.py'))
        for cube, fromversion, toversion in toupgrade:
            if cube == 'cubicweb':
                migrdir = self.config.migration_scripts_dir()
            else:
                migrdir = self.config.cube_migration_scripts_dir(cube)
            scripts = filter_scripts(self.config, migrdir, fromversion, toversion)
            if scripts:
                prevversion = None
                for version, script in scripts:
                    # take care to X.Y.Z_Any.py / X.Y.Z_common.py: we've to call
                    # cube_upgraded once all script of X.Y.Z have been executed
                    if prevversion is not None and version != prevversion:
                        self.cube_upgraded(cube, prevversion)
                    prevversion = version
                    self.cmd_process_script(script)
                self.cube_upgraded(cube, toversion)
            else:
                self.cube_upgraded(cube, toversion)

    def cube_upgraded(self, cube, version):
        # hook called once every script of `version` has run; no-op here,
        # meant to be overridden in subclasses
        pass

    def shutdown(self):
        # hook for subclasses needing cleanup when migration is done
        pass

    def interact(self, args, kwargs, meth):
        """execute the given method according to user's confirmation"""
        msg = 'Execute command: %s(%s) ?' % (
            meth.__name__[4:],
            ', '.join([repr(arg) for arg in args] +
                      ['%s=%r' % (n,v) for n,v in kwargs.items()]))
        # an explicit ask_confirm=False keyword bypasses the question
        if 'ask_confirm' in kwargs:
            ask_confirm = kwargs.pop('ask_confirm')
        else:
            ask_confirm = True
        if not ask_confirm or self.confirm(msg):
            return meth(*args, **kwargs)

    def confirm(self, question, # pylint: disable=E0202
                shell=True, abort=True, retry=False, pdb=False, default='y'):
        """ask for confirmation and return true on positive answer

        if `retry` is true the r[etry] answer may return 2

        'abort' raises SystemExit; 'shell' and 'pdb' drop the user into an
        interactive session then ask again.
        """
        possibleanswers = ['y', 'n']
        if abort:
            possibleanswers.append('abort')
        if pdb:
            possibleanswers.append('pdb')
        if shell:
            possibleanswers.append('shell')
        if retry:
            possibleanswers.append('retry')
        try:
            answer = ASK.ask(question, possibleanswers, default)
        except (EOFError, KeyboardInterrupt):
            # Ctrl-D / Ctrl-C are treated as abort
            answer = 'abort'
        if answer == 'n':
            return False
        if answer == 'retry':
            return 2
        if answer == 'abort':
            raise SystemExit(1)
        if answer == 'shell':
            self.interactive_shell()
            return self.confirm(question, shell, abort, retry, pdb, default)
        if answer == 'pdb':
            import pdb
            pdb.set_trace()
            return self.confirm(question, shell, abort, retry, pdb, default)
        return True

    def interactive_shell(self):
        # disable confirmation and wrapping while the user drives directly
        self.confirm = yes
        self.need_wrap = False
        # avoid '_' to be added to builtins by sys.display_hook
        def do_not_add___to_builtins(obj):
            if obj is not None:
                print(repr(obj))
        sys.displayhook = do_not_add___to_builtins
        local_ctx = self._create_context()
        try:
            import readline
            from cubicweb.toolsutils import CWShellCompleter
        except ImportError:
            # readline not available
            pass
        else:
            rql_completer = CWShellCompleter(local_ctx)
            readline.set_completer(rql_completer.complete)
            readline.parse_and_bind('tab: complete')
            home_key = 'HOME'
            if sys.platform == 'win32':
                home_key = 'USERPROFILE'
            histfile = os.path.join(os.environ[home_key], ".cwshell_history")
            try:
                readline.read_history_file(histfile)
            except IOError:
                pass
        from code import interact
        banner = """entering the migration python shell
just type migration commands or arbitrary python code and type ENTER to execute it
type "exit" or Ctrl-D to quit the shell and resume operation"""
        # give custom readfunc to avoid http://bugs.python.org/issue1288615
        # NOTE(review): ``unicode``/``raw_input`` are Python 2 only -- confirm
        # this code path is not exercised under Python 3
        def unicode_raw_input(prompt):
            return unicode(raw_input(prompt), sys.stdin.encoding)
        interact(banner, readfunc=unicode_raw_input, local=local_ctx)
        try:
            readline.write_history_file(histfile)
        except IOError:
            pass
        # delete instance's confirm attribute to avoid questions
        del self.confirm
        self.need_wrap = True

    @cached
    def _create_context(self):
        """return a dictionary to use as migration script execution context"""
        context = self.__context
        for attr in dir(self):
            if attr.startswith('cmd_'):
                if self.need_wrap:
                    # bare name resolves through __getattribute__, hence the
                    # confirmation wrapper
                    context[attr[4:]] = getattr(self, attr[4:])
                else:
                    context[attr[4:]] = getattr(self, attr)
        return context

    def update_context(self, key, value):
        # propagate a context variable to every stacked script context
        for context in self._context_stack:
            context[key] = value
        self.__context[key] = value

    def cmd_process_script(self, migrscript, funcname=None, *args, **kwargs):
        """execute a migration script in interactive mode

        Display the migration script path, ask for confirmation and execute it
        if confirmed

        Allowed input file formats for migration scripts:
        - `python` (.py)
        - `sql` (.sql)
        - `doctest` (.txt or .rst)

        .. warning:: sql migration scripts are not available in web-only instance

        You can pass script parameters with using double dash (--) in the
        command line

        Context environment can have these variables defined:
        - __name__ : will be determine by funcname parameter
        - __file__ : is the name of the script if it exists
        - __args__ : script arguments coming from command-line

        :param migrscript: name of the script
        :param funcname: defines __name__ inside the shell (or use __main__)
        :params args: optional arguments for funcname
        :keyword scriptargs: optional arguments of the script
        """
        ftypes = {'python': ('.py',),
                  'doctest': ('.txt', '.rst'),
                  'sql': ('.sql',)}
        # sql migration scripts are not available in web-only instance
        if not hasattr(self, "session"):
            ftypes.pop('sql')
        migrscript = os.path.normpath(migrscript)
        # determine the script mode from its extension
        for (script_mode, ftype) in ftypes.items():
            if migrscript.endswith(ftype):
                break
        else:
            ftypes = ', '.join(chain(*ftypes.values()))
            msg = 'ignoring %s, not a valid script extension (%s)'
            raise ExecutionError(msg % (migrscript, ftypes))
        if not self.execscript_confirm(migrscript):
            return
        scriptlocals = self._create_context().copy()
        scriptlocals.update({'__file__': migrscript,
                             '__args__': kwargs.pop("scriptargs", [])})
        self._context_stack.append(scriptlocals)
        if script_mode == 'python':
            if funcname is None:
                pyname = '__main__'
            else:
                pyname = splitext(basename(migrscript))[0]
            scriptlocals['__name__'] = pyname
            with open(migrscript, 'rb') as fobj:
                fcontent = fobj.read()
            try:
                code = compile(fcontent, migrscript, 'exec')
            except SyntaxError:
                # try without print_function
                code = compile(fcontent, migrscript, 'exec', 0, True)
                warn('[3.22] script %r should be updated to work with print_function'
                     % migrscript, DeprecationWarning)
            exec(code, scriptlocals)
            if funcname is not None:
                try:
                    func = scriptlocals[funcname]
                    self.info('found %s in locals', funcname)
                    assert callable(func), '%s (%s) is not callable' % (func, funcname)
                except KeyError:
                    self.critical('no %s in script %s', funcname, migrscript)
                    return None
                return func(*args, **kwargs)
        elif script_mode == 'sql':
            from cubicweb.server.sqlutils import sqlexec
            sqlexec(open(migrscript).read(), self.session.system_sql)
            self.commit()
        else: # script_mode == 'doctest'
            import doctest
            return doctest.testfile(migrscript, module_relative=False,
                                    optionflags=doctest.ELLIPSIS,
                                    # verbose mode when user input is expected
                                    verbose=self.verbosity==2,
                                    report=True,
                                    encoding='utf-8',
                                    globs=scriptlocals)
        # NOTE(review): the returns in the python/doctest branches above skip
        # this pop, leaving the script context on the stack -- confirm whether
        # this leak is intended
        self._context_stack.pop()

    def cmd_option_renamed(self, oldname, newname):
        """a configuration option has been renamed"""
        self._option_changes.append(('renamed', oldname, newname))

    def cmd_option_group_changed(self, option, oldgroup, newgroup):
        """a configuration option has been moved in another group"""
        self._option_changes.append(('moved', option, oldgroup, newgroup))

    def cmd_option_added(self, optname):
        """a configuration option has been added"""
        self._option_changes.append(('added', optname))

    def cmd_option_removed(self, optname):
        """a configuration option has been removed"""
        # can safely be ignored
        #self._option_changes.append(('removed', optname))

    def cmd_option_type_changed(self, optname, oldtype, newvalue):
        """a configuration option's type has changed"""
        self._option_changes.append(('typechanged', optname, oldtype, newvalue))

    def cmd_add_cubes(self, cubes):
        """modify the list of used cubes in the in-memory config
        returns newly inserted cubes, including dependencies
        """
        if isinstance(cubes, string_types):
            cubes = (cubes,)
        origcubes = self.config.cubes()
        newcubes = [p for p in self.config.expand_cubes(cubes)
                    if not p in origcubes]
        if newcubes:
            self.config.add_cubes(newcubes)
        return newcubes

    @deprecated('[3.20] use drop_cube() instead of remove_cube()')
    def cmd_remove_cube(self, cube, removedeps=False):
        # backward-compatibility alias for cmd_drop_cube
        return self.cmd_drop_cube(cube, removedeps)

    def cmd_drop_cube(self, cube, removedeps=False):
        """remove `cube` (and its dependencies when `removedeps`) from the
        in-memory configuration; return the list of actually removed cubes
        """
        if removedeps:
            toremove = self.config.expand_cubes([cube])
        else:
            toremove = (cube,)
        origcubes = self.config._cubes
        basecubes = [c for c in origcubes if not c in toremove]
        # don't fake-add any new ones, or we won't be able to really-add them later
        self.config._cubes = tuple(cube for cube in self.config.expand_cubes(basecubes)
                                   if cube in origcubes)
        removed = [p for p in origcubes if not p in self.config._cubes]
        if not cube in removed and cube in origcubes:
            # the cube came back through expand_cubes: something still needs it
            raise ConfigurationError("can't remove cube %s, "
                                     "used as a dependency" % cube)
        return removed

    def rewrite_configuration(self):
        # regenerate the main configuration file, replaying recorded option
        # changes and prompting for newly required options
        configfile = self.config.main_config_file()
        if self._option_changes:
            read_old_config(self.config, self._option_changes, configfile)
        fd, newconfig = tempfile.mkstemp()
        for optdescr in self._option_changes:
            if optdescr[0] == 'added':
                optdict = self.config.get_option_def(optdescr[1])
                if optdict.get('default') is REQUIRED:
                    self.config.input_option(optdescr[1], optdict)
        self.config.generate_config(open(newconfig, 'w'))
        show_diffs(configfile, newconfig, askconfirm=self.confirm is not yes)
        os.close(fd)
        if exists(newconfig):
            os.unlink(newconfig)

    # these are overridden by set_log_methods below
    # only defining here to prevent pylint from complaining
    info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
+
from logging import getLogger
from cubicweb import set_log_methods
# plug real info/warning/error/... logging methods onto MigrationHelper
set_log_methods(MigrationHelper, getLogger('cubicweb.migration'))
+
+
def version_strictly_lower(a, b):
    """Return True when version string *a* is strictly lower than *b*.

    ``None`` sorts lowest: a ``None`` *a* is lower than anything, and
    nothing is strictly lower than a ``None`` *b*.
    """
    if a is None:
        return True
    if b is None:
        return False
    left = Version(a) if a else a
    right = Version(b) if b else b
    return left < right
+
def max_version(a, b):
    """Return, as a string, the greater of the two version strings *a* and *b*."""
    va = Version(a)
    vb = Version(b)
    return str(max(va, vb))
+
class ConfigurationProblem(object):
    """Each cube has its own list of dependencies on other cubes/versions.

    The ConfigurationProblem is used to record the loaded cubes, then to detect
    inconsistencies in their dependencies.

    See configuration management on wikipedia for litterature.
    """

    def __init__(self, config):
        self.config = config
        # cube name -> installed version; cubicweb itself is always recorded
        self.cubes = {'cubicweb': cwcfg.cubicweb_version()}

    def add_cube(self, name, version):
        """record cube `name` as loaded at `version`"""
        self.cubes[name] = version

    def solve(self):
        """check recorded cubes against each other's dependencies

        fills:
        * ``self.warnings`` with messages about badly formatted constraints,
        * ``self.errors`` with ('add'|'update', cube, version, source)
          actions needed to satisfy the constraints.
        """
        self.warnings = []
        self.errors = []
        self.dependencies = {}
        self.reverse_dependencies = {}
        self.constraints = {}
        # read dependencies
        for cube in self.cubes:
            if cube == 'cubicweb': continue
            self.dependencies[cube] = dict(self.config.cube_dependencies(cube))
            self.dependencies[cube]['cubicweb'] = self.config.cube_depends_cubicweb_version(cube)
        # compute reverse dependencies
        for cube, dependencies in self.dependencies.items():
            for name, constraint in dependencies.items():
                self.reverse_dependencies.setdefault(name,set())
                if constraint:
                    try:
                        oper, version = constraint.split()
                        self.reverse_dependencies[name].add( (oper, version, cube) )
                    except Exception:
                        self.warnings.append(
                            'cube %s depends on %s but constraint badly '
                            'formatted: %s' % (cube, name, constraint))
                else:
                    self.reverse_dependencies[name].add( (None, None, cube) )
        # check consistency
        for cube, versions in sorted(self.reverse_dependencies.items()):
            oper, version, source = None, None, None
            # simplify constraints
            if versions:
                for constraint in versions:
                    op, ver, src = constraint
                    if oper is None:
                        oper = op
                        version = ver
                        source = src
                    elif op == '>=' and oper == '>=':
                        # keep the highest lower bound
                        if version_strictly_lower(version, ver):
                            version = ver
                            source = src
                    elif op is None:
                        # fixed: was ``op == None`` (PEP 8: identity test with None)
                        continue
                    else:
                        print('unable to handle %s in %s, set to `%s %s` '
                              'but currently up to `%s %s`' %
                              (cube, source, oper, version, op, ver))
            # "solve" constraint satisfaction problem
            if cube not in self.cubes:
                self.errors.append( ('add', cube, version, source) )
            elif versions:
                lower_strict = version_strictly_lower(self.cubes[cube], version)
                if oper in ('>=','=','=='):
                    if lower_strict:
                        self.errors.append( ('update', cube, version, source) )
                elif oper is None:
                    pass # no constraint on version
                else:
                    print('unknown operator', oper)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/cmp_schema.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/cmp_schema.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,24 @@
+"""This module compare the Schema on the file system to the one in the database"""
+
+from cStringIO import StringIO
+from cubicweb.web.schemaviewer import SchemaViewer
+from logilab.common.ureports import TextWriter
+import difflib
+
+viewer = SchemaViewer()
+layout_db = viewer.visit_schema(schema, display_relations=True)
+layout_fs = viewer.visit_schema(fsschema, display_relations=True)
+writer = TextWriter()
+stream_db = StringIO()
+stream_fs = StringIO()
+writer.format(layout_db, stream=stream_db)
+writer.format(layout_fs, stream=stream_fs)
+
+stream_db.seek(0)
+stream_fs.seek(0)
+db = stream_db.getvalue().splitlines()
+fs = stream_fs.getvalue().splitlines()
+open('db_schema.txt', 'w').write(stream_db.getvalue())
+open('fs_schema.txt', 'w').write(stream_fs.getvalue())
+#for diff in difflib.ndiff(fs, db):
+# print diff
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/cwdesklets/gfx/bg.png
Binary file cubicweb/misc/cwdesklets/gfx/bg.png has changed
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/cwdesklets/gfx/border-left.png
Binary file cubicweb/misc/cwdesklets/gfx/border-left.png has changed
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/cwdesklets/gfx/logo_cw.png
Binary file cubicweb/misc/cwdesklets/gfx/logo_cw.png has changed
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/cwdesklets/gfx/rss.png
Binary file cubicweb/misc/cwdesklets/gfx/rss.png has changed
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/cwdesklets/rql_query.display
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/cwdesklets/rql_query.display Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,22 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/cwdesklets/rqlsensor/__init__.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/cwdesklets/rqlsensor/__init__.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,118 @@
+# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
import webbrowser
# refresh the module state for this long-running desklet process
# NOTE(review): the ``reload`` builtin is Python 2 only -- confirm this
# desklet is not expected to run under Python 3
reload(webbrowser)

from sensor.Sensor import Sensor
from utils import datatypes, i18n

from cubicweb.dbapi import connect

# default translator; replaced by a real gettext translator in RQLSensor.__init__
_ = str
+
class RQLSensor(Sensor):
    """gDesklets sensor running an RQL query against a CubicWeb instance and
    displaying one output line per result row.
    """

    def __init__(self, *args):
        # install the desklet's translator as the module-level ``_``
        global _; _ = i18n.Translator("rql-desklet")
        Sensor.__init__(self)
        # define configuration
        self._set_config_type("appid", datatypes.TYPE_STRING, "")
        self._set_config_type("user", datatypes.TYPE_STRING, "")
        self._set_config_type("passwd", datatypes.TYPE_SECRET_STRING, "")
        self._set_config_type("rql", datatypes.TYPE_STRING, "")
        self._set_config_type("url", datatypes.TYPE_STRING, "")
        self._set_config_type("delay", datatypes.TYPE_STRING, "600")
        # default timer
        self._add_timer(20, self.__update)

    def get_configurator(self):
        # build the desklet's configuration panel
        configurator = self._new_configurator()
        configurator.set_name(_("RQL"))
        configurator.add_title(_("CubicWeb source settings"))
        configurator.add_entry(_("ID",), "appid", _("The application id of this source"))
        configurator.add_entry(_("User",), "user", _("The user to connect to this source"))
        configurator.add_entry(_("Password",), "passwd", _("The user's password to connect to this source"))
        configurator.add_entry(_("URL",), "url", _("The url of the web interface for this source"))
        configurator.add_entry(_("RQL",), "rql", _("The rql query"))
        configurator.add_entry(_("Update interval",), "delay", _("Delay in seconds between updates"))
        return configurator


    def call_action(self, action, path, args=[]):
        # mouse-event callback; *path* ends with the result line index
        # NOTE(review): mutable default for ``args`` -- harmless here since
        # it is never mutated, but worth confirming against the Sensor API
        index = path[-1]
        output = self._new_output()
        if action=="enter-line":
            # change background
            output.set('resultbg[%s]' % index, 'yellow')
        elif action=="leave-line":
            # change background
            output.set('resultbg[%s]' % index, 'black')
        elif action=="click-line":
            # open url
            output.set('resultbg[%s]' % index, 'black')
            webbrowser.open(self._urls[index])
        self._send_output(output)

    def __get_connection(self):
        # lazily open, then cache, a dbapi connection to the instance
        try:
            return self._v_cnx
        except AttributeError:
            appid, user, passwd = self._get_config("appid"), self._get_config("user"), self._get_config("passwd")
            cnx = connect(database=appid, login=user, password=passwd)
            self._v_cnx = cnx
            return cnx

    def __run_query(self, output):
        # execute the configured RQL query and fill *output* with one line
        # per result row; remember one url per row for click-line actions
        base = self._get_config('url')
        rql = self._get_config('rql')
        cnx = self.__get_connection()
        cursor = cnx.cursor()
        try:
            rset = cursor.execute(rql)
        except Exception:
            # drop the cached connection so the next update reconnects
            del self._v_cnx
            raise
        self._urls = []
        output.set('layout', 'vertical, 14')
        output.set('length', rset.rowcount)
        i = 0
        for line in rset:
            # first column is expected to be the entity eid: used for the
            # url, not displayed
            output.set('result[%s]' % i, ', '.join([str(v) for v in line[1:]]))
            output.set('resultbg[%s]' % i, 'black')
            try:
                self._urls.append(base % 'Any X WHERE X eid %s' % line[0])
            except Exception:
                self._urls.append('')
            i += 1

    def __update(self):
        # timer callback: refresh the display, then re-arm the timer using
        # the configured delay (seconds -> milliseconds)
        output = self._new_output()
        try:
            self.__run_query(output)
        except Exception as ex:
            import traceback
            traceback.print_exc()
            output.set('layout', 'vertical, 10')
            output.set('length', 1)
            output.set('result[0]', str(ex))
        self._send_output(output)
        self._add_timer(int(self._get_config('delay'))*1000, self.__update)
+
+
def new_sensor(args):
    # gDesklets entry point: instantiate the sensor with the desklet arguments
    return RQLSensor(*args)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/cwdesklets/web_query.display
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/cwdesklets/web_query.display Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,39 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/cwfs/A_FAIRE
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/cwfs/A_FAIRE Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,14 @@
+TACHES
+======
+
-- écrire objet stocke/manipule les données

-- extraire tests de chaîne de caractère

* utiliser sqlite

* écrire fonction prend chemin en argument et renvoie contenu

* extraire tests (chaîne de caractère) de spec

* utiliser yams pour schéma
\ No newline at end of file
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/cwfs/cwfs-spec.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/cwfs/cwfs-spec.txt Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,180 @@
+=======================
+ Specification cubicwebfs
+=======================
+
+Remarque: cubicwebfs c'est le siamois de yamsfs
+en fait c'est un yamsfs avec une interrogation
+de base RQL
+
+Modle
+-------
+
+Description du modle;
+::
+ societe
+ nom
+ ville
+
+ affaire
+ ref
+
+ document
+ annee
+ mois
+ jour
+ type {RAP,CLI,OFR,FCT}
+ fichier
+
+document concerne affaire
+affaire concerne societe
+
+Contenu de la base exemple
+---------------------------
+
+societe | nom | ville |
+ | CETIAD | Dijon |
+ | EDF R&D | Clamart |
+ | Logilab | Paris |
+
+affaire | ref | concerne |
+ | CTIA01 | CETIAD |
+ | EDFR01 | EDF R&D |
+ | EDFR02 | EDF R&D |
+
+document | annee | mois | jour | type | concerne | fichier |
+ | 2004 | 09 | 06 | PRE | CTIA01 | depodoc/2004/09/CTIA01-040906-PRE-1-01.pdf |
+ | 2005 | 02 | 01 | CLI | EDFR01 | depodoc/2005/02/EDFR01-050201-CLI-1-01.pdf |
+ | 2005 | 03 | 22 | OFR | EDFR01 | depodoc/2005/02/EDFR01-050322-OFR-1-01.pdf |
+
+
+Exemples de chemins/recherches
+-------------------------------
+
+Cherche documents de mars 2005;
+::
+ /document/annee/2005/mois/03/
+
+
+Dont le contenu successif serait;
+
+Test::
+
+ $ ls /document
+ annee/ mois/ jour/ type/
+ affaire/ concerne/ CTIA01-040906-PRE-1-01.pdf
+ EDFR01-050201-CLI-1-01.pdf EDFR01-050322-OFR-1-01.pdf
+
+ $ ls /document/annee/
+ 2004/ 2005/
+
+ $ ls /document/annee/2005/
+ mois/ jour/ type/ affaire/
+ concerne/ EDFR01-050201-CLI-1-01.pdf EDFR01-050322-OFR-1-01.pdf
+
+ $ ls /document/annee/2005/mois/
+ 02/ 03/
+
+ $ ls /document/annee/2005/mois/03/
+ jour/ type/ affaire/ concerne/
+ EDFR01-050322-OFR-1-01.pdf
+
+
+Question: est-ce que fichier/ ne va pas nous manquer ?
+
+
+Cherche documents relatifs CTIA01;
+::
+ /affaire/ref/CTIA01/document/
+
+Dont le contenu des rpertoires successifs serait:
+
+Test::
+
+ $ ls /affaire/
+ ref/ societe/ concerne/ document/
+ concerne_par/ CTIA01 EDFR01 EDFR02
+
+ $ ls /affaire/ref/
+ CTIA01/ EDFR01/ EDFR02/
+
+ $ ls /affaire/ref/CTIA01/
+ societe/ concerne/ document/ concerne_par/
+
+ $ ls /affaire/ref/CTIA01/document/
+ annee/ mois/ jour/ type/
+ CTIA01-040906-PRE-1-01.pdf
+
+
+Cherche documents des affaires qui concernent CETIAD;
+::
+ /societe/nom/CETIAD/affaire/document/
+
+Dont le contenu des rpertoires successifs serait;
+
+Test::
+
+ $ ls /societe/
+ nom/ ville/ affaire/ concerne_par/
+ CETIAD EDF R&D Logilab
+
+ $ ls /societe/nom/
+ CETIAD EDF R&D Logilab
+
+ $ ls /societe/nom/CETIAD/
+ ville/ affaire/ concerne_par/ CETIAD Logilab
+
+ $ ls /societe/nom/CETIAD/affaire/
+ ref/ societe/ concerne/ document/
+ concerne_par/ CTIA01
+
+ $ ls /societe/nom/CETIAD/affaire/document/
+ annee/ mois/ jour/ type/
+ affaire/ concerne/ CTIA01-040906-PRE-1-01.pdf
+
+
+En particulier, pour la recherche ci-dessus on ne peut pas crire;
+::
+ /document/affaire/concerne/societe/CETIAD/
+
+La logique est que si on est dans un rpertoire document, il faut
+qu'il contienne des documents.
+
+Cherche documents de 2002 qui concernent des affaires
+qui concernent CETIAD;
+::
+ /societe/CETIAD/affaire/document/annee/2002/
+
+Question: est-ce que les relations doivent tre des composants
+du chemin ?
+Question : si les relations ne font pas partie du chemin, il faudrait
+pouvoir faire des recherches en utilisant des relations anonymes (ce
+qui est impossible en RQL par exemple);
+::
+ /document/affaire/... s'il existe plusieurs relations entre
+ les entits document et affaire, on ne peut pas s'en sortir
+
+Question: que va-t-il se passer pour des chemins du type;
+::
+ /affaire/CTIA*/document/
+
+Nicolas: mon avis on a rien faire, car c'est le shell qui
+s'en occupe. De la mme faon, le systme de fichier n'a pas
+ se proccuper de ~/ et les programmes reoivent pas le "qqch*"
+en argument, mais directement la liste.
+
+Attention: si jamais l'arborescence est sans fond, les
+commandes rcursives vont prendre du temps...
+
+Attention: dans un premier temps, un systme de fichiers en
+lecture seule est satisfaisant. on verra ensuite pour l'dition.
+pour l'dition, on peut s'inspirer du external editor de zope
+et avoir un format d'change XML entre le serveur et l'diteur.
+
+Le cas suivant est dbile, faut-il l'interdire ?
+::
+ /document/affaire/societe/concerne_par/affaire/concerne_par/document
+
+
+NB: manque dtail d'un cas comme /document/annee/2005/concerne/affaire/
+
+
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/cwfs/cwfs.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/cwfs/cwfs.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,175 @@
+# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+"""
class Schema:
    """Tiny read-only wrapper around a schema description mapping.

    The wrapped mapping associates an entity type name with a couple
    ``(attributes, relations)`` where ``attributes`` is a sequence of
    attribute names and ``relations`` a list of
    ``(relation name, target entity)`` couples.
    """

    def __init__(self, schema):
        self._schema = schema

    def get_attrs(self, entity):
        """Return the attribute names of *entity*."""
        attrs, _relations = self._schema[entity]
        return attrs

    def get_relations(self, entity):
        """Return the (relation, target entity) couples of *entity*."""
        _attrs, relations = self._schema[entity]
        return relations

    def get_attr_index(self, entity, attr):
        """Return the position of *attr* among *entity*'s attributes."""
        attrs, _relations = self._schema[entity]
        return list(attrs).index(attr)
+
# toy schema used by the cubicwebfs prototype:
# entity type -> (attribute names, (relation, target entity) couples)
SCHEMA = Schema({'societe': ( ('nom','ville'),
                              [('concerne_par','affaire'),
                               ] ),
                 'affaire': ( ('ref',),
                              [('concerne','societe'),
                               ('concerne_par', 'document')
                               ] ),
                 'document':( ('fichier', 'annee','mois','jour','type'),
                              [('concerne','affaire'),
                               ] ),
                 })
+
+
+
# toy dataset matching SCHEMA: entity type -> list of attribute value tuples
DATA = { 'societe': [ ('CETIAD', 'Dijon'),
                      ('EDF_R&D', 'Clamart'),
                      ('Logilab', 'Paris'),
                      ],
         'affaire': [ ('CTIA01', 'CETIAD'),
                      ('EDFR01', 'EDF_R&D'),
                      ('EDFR02', 'EDF_R&D'),
                      ],
         'document':[ ('CTIA01-040906-PRE-1-01.pdf','2004','09','06','PRE','CTIA01'),
                      ('EDFR01-050201-CLI-1-01.pdf','2005','02','01','CLI','EDFR01'),
                      ('EDFR01-050322-OFR-1-01.pdf','2005','03','22','OFR','EDFR01'),
                      ],
         }

def get_data(entity, where=()):
    """Yield the rows of *entity* satisfying every restriction in *where*.

    :param entity: entity type name, a key of ``DATA``
    :param where: iterable of ``(column index, expected value)`` couples;
        the default is now an immutable empty tuple instead of the original
        mutable ``[]`` (same behaviour, safer idiom)
    """
    for value in DATA[entity]:
        for index, val in where:
            if value[index] != val:
                break
        else:
            # all restrictions matched (or there were none)
            yield value
+
class PathParser :
    """Parse a cubicwebfs path and compute the corresponding directory listing.

    State built while parsing:
    * ``_entity``: current entity type (first path component),
    * ``_attr``: attribute name left pending when the path ends on an attribute,
    * ``_restrictions``: (entity, attribute, value) filters collected on the way.
    """

    def __init__(self, schema, path) :
        self.schema = schema
        self.path = path
        # non-empty path components
        self._components = iter([comp for comp in self.path.split('/') if comp])
        self._entity = None
        self._attr = None
        # NOTE(review): _rel is only ever reset to None below -- relation
        # components do not seem to be handled by this prototype
        self._rel = None
        self._restrictions = []

    def parse(self) :
        # the first component is always the entity type
        self._entity = next(self._components)
        try:
            self.process_entity()
        except StopIteration :
            # path exhausted; the accumulated state describes its last component
            pass

    def process_entity(self) :
        # consume one attribute component (and optionally its value), then recurse
        _next = next(self._components)
        if _next in self.schema.get_attrs(self._entity) :
            self._attr = _next
            # a StopIteration on the next line leaves _attr set: the path
            # ends on an attribute and get_list() will enumerate its values
            _next = next(self._components)
            self._restrictions.append( (self._entity, self._attr, _next) )
            self._attr = None
            self._rel = None
        self.process_entity()

    def get_list(self) :
        if self._rel :
            # unreachable with the current process_entity (see NOTE above)
            return
        elif self._attr :
            # path ends on an attribute: yield the attribute values of the
            # rows matching the recorded restrictions
            where = []
            for e,a,v in self._restrictions :
                # NOTE(review): assumes every restriction applies to
                # self._entity's columns -- confirm for multi-entity paths
                i = self.schema.get_attr_index(e, a)
                where.append( (i,v) )
            i = self.schema.get_attr_index(self._entity, self._attr)
            for values in get_data(self._entity,where) :
                yield values[i]+'/'
        else :
            # path ends on an entity: yield its unused attributes, its rows
            # and its relations/targets
            attr_restrict = [a for e,a,v in self._restrictions]
            for attr in self.schema.get_attrs(self._entity) :
                if attr not in attr_restrict :
                    yield attr+'/'
            for data in DATA[self._entity]:
                yield data[0]
            for nom, entity in self.schema.get_relations(self._entity) :
                yield nom+'/'
                yield entity+'/'
+ yield entity+'/'
+
def ls(path):
    """Return the directory entries obtained by parsing *path* against SCHEMA."""
    parser = PathParser(SCHEMA, path)
    parser.parse()
    return list(parser.get_list())
+
+
class SytPathParser:
    """Translate a cubicwebfs-like path into an RQL query string.

    Variables are drawn from the alphabet in order; the selected variable
    is the one bound to the last path component.
    """

    def __init__(self, schema, path):
        self.schema = schema
        self.path = path
        # non-empty path components, consumed one by one
        self._components = iter([comp for comp in self.path.split('/') if comp])
        self._e_type = None
        self._restrictions = []
        self._alphabet = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')

    def parse(self):
        """Consume the whole path and return the resulting RQL query."""
        self._var = self._alphabet.pop(0)
        self._e_type = next(self._components)
        self._restrictions.append('%s is %s' % (self._var, self._e_type.capitalize()))
        try:
            self.process_entity()
        except StopIteration:
            # the path has been fully consumed
            pass
        return 'Any %s WHERE %s' % (self._var, ', '.join(self._restrictions))

    def process_entity(self):
        """Handle one attribute or relation component, then recurse."""
        token = next(self._components)
        if token in self.schema.get_attrs(self._e_type):
            attr = token
            try:
                value = next(self._components)
            except StopIteration:
                # trailing attribute: select it through a fresh variable
                new_var = self._alphabet.pop(0)
                self._restrictions.append('%s %s %s' % (self._var, attr, new_var))
                self._var = new_var
                raise
            else:
                self._restrictions.append('%s %s %s' % (self._var, attr, value))
        elif token in [r for r, e in self.schema.get_relations(self._e_type)]:
            rel = token
            rel_var = self._alphabet.pop(0)
            self._restrictions.append('%s %s %s' % (self._var, rel, rel_var))
            self._var = rel_var
            try:
                etype = next(self._components)
            except StopIteration:
                raise
            else:
                self._restrictions.append('%s is %s' % (rel_var, etype.capitalize()))
        self.process_entity()
+
+
def to_rql(path):
    """Translate a cubicwebfs *path* into the equivalent RQL query string."""
    parser = SytPathParser(SCHEMA, path)
    return parser.parse()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/cwfs/cwfs_test.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/cwfs/cwfs_test.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,66 @@
+# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+"""
+from logilab.common.testlib import TestCase, unittest_main
+
+import cubicwebfs
+import sre
+
+def spec_parser(filename) :
+ """
+ extract tests from specification
+ """
+ sections = []
+ buffer = ""
+ in_section = False
+ for line in open(filename) :
+ if line.startswith('Test::'):
+ in_section = True
+ buffer = ""
+ elif in_section :
+ if line.startswith(" ") or not line.strip() :
+ buffer += line.lstrip()
+ else :
+ sections.append(buffer)
+ in_section = False
+ tests = []
+ for section in sections :
+ subsections = [t for t in section.strip().split('$ ls') if t]
+ for subsection in subsections :
+ path, results = subsection.splitlines()[0], subsection.splitlines()[1:]
+ path = path.strip()
+ items = set([i for i in sre.split('[\t\n]', '\n'.join(results)) if i])
+ tests.append((path, items))
+ return tests
+
+tests = spec_parser("cubicwebfs-spec.txt")
+
+class monTC(TestCase) :
+ pass
+
+for index, (path, results) in enumerate(tests) :
+ def f(self, p=path, r=results) :
+ res = set(cubicwebfs.ls(p))
+ self.assertEqual(r, res) #, 'en trop %s\nmanque %s' % (r-results,results-r))
+ f.__doc__ = "%s %s"%(index,path)
+ setattr(monTC,'test_%s'%index,f)
+
+if __name__ == '__main__':
+ unittest_main()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/cwzope/cwzope.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/cwzope/cwzope.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,50 @@
+# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+"""
+from AccessControl import getSecurityManager
+
+from cubicweb.dbapi import connect, Connection, Cursor
+from cubicweb.common.utils import ResultSet, ResultSetIterator, ResultSetRow, Entity
+
+Connection.__allow_access_to_unprotected_subobjects__ = 1
+Cursor.__allow_access_to_unprotected_subobjects__ = 1
+ResultSet.__allow_access_to_unprotected_subobjects__ = 1
+ResultSetIterator.__allow_access_to_unprotected_subobjects__ = 1
+ResultSetRow.__allow_access_to_unprotected_subobjects__ = 1
+Entity.__allow_access_to_unprotected_subobjects__ = 1
+
+CNX_CACHE = {}
+
+def get_connection(context, user=None, password=None,
+ host=None, database=None, group='cubicweb'):
+ """get a connection on an cubicweb server"""
+ request = context.REQUEST
+ zope_user = getSecurityManager().getUser()
+ if user is None:
+ user = zope_user.getId()
+ key = (user, host, database)
+ try:
+ return CNX_CACHE[key]
+ except KeyError:
+ if password is None:
+ password = zope_user._getPassword()
+ cnx = connect(user, password, host, database, group)
+ CNX_CACHE[key] = cnx
+ return cnx
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.10.0_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.10.0_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,37 @@
+from six import text_type
+
+from cubicweb.server.session import hooks_control
+
+for uri, cfg in config.read_sources_file().items():
+ if uri in ('system', 'admin'):
+ continue
+ repo.sources_by_uri[uri] = repo.get_source(cfg['adapter'], uri, cfg.copy())
+
+add_entity_type('CWSource')
+add_relation_definition('CWSource', 'cw_source', 'CWSource')
+add_entity_type('CWSourceHostConfig')
+
+with hooks_control(session, session.HOOKS_ALLOW_ALL, 'cw.sources'):
+ create_entity('CWSource', type=u'native', name=u'system')
+commit()
+
+sql('INSERT INTO cw_source_relation(eid_from,eid_to) '
+ 'SELECT e.eid,s.cw_eid FROM entities as e, cw_CWSource as s '
+ 'WHERE s.cw_name=e.type')
+commit()
+
+for uri, cfg in config.read_sources_file().items():
+ if uri in ('system', 'admin'):
+ continue
+ repo.sources_by_uri.pop(uri)
+ config = u'\n'.join('%s=%s' % (key, value) for key, value in cfg.items()
+ if key != 'adapter' and value is not None)
+ create_entity('CWSource', name=text_type(uri), type=text_type(cfg['adapter']),
+ config=config)
+commit()
+
+# rename cwprops for boxes/contentnavigation
+for x in rql('Any X,XK WHERE X pkey XK, '
+ 'X pkey ~= "boxes.%" OR '
+ 'X pkey ~= "contentnavigation.%"').entities():
+ x.cw_set(pkey=u'ctxcomponents.' + x.pkey.split('.', 1)[1])
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.10.0_common.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.10.0_common.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1 @@
+option_group_changed('cleanup-session-time', 'web', 'main')
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.10.4_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.10.4_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,8 @@
+for eschema in schema.entities():
+ if not (eschema.final or 'cw_source' in eschema.subjrels):
+ add_relation_definition(eschema.type, 'cw_source', 'CWSource', ask_confirm=False)
+
+sql('INSERT INTO cw_source_relation(eid_from, eid_to) '
+ 'SELECT e.eid,s.cw_eid FROM entities as e, cw_CWSource as s '
+ 'WHERE s.cw_name=e.source AND NOT EXISTS(SELECT 1 FROM cw_source_relation WHERE eid_from=e.eid AND eid_to=s.cw_eid)')
+commit()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.10.5_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.10.5_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,6 @@
+sync_schema_props_perms('CWSourceHostConfig', syncperms=False)
+
+sql('INSERT INTO cw_source_relation(eid_from, eid_to) '
+ 'SELECT e.eid,s.cw_eid FROM entities as e, cw_CWSource as s '
+ 'WHERE s.cw_name=e.source AND NOT EXISTS(SELECT 1 FROM cw_source_relation WHERE eid_from=e.eid AND eid_to=s.cw_eid)')
+commit()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.10.7_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.10.7_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,2 @@
+add_attribute('TrInfo', 'tr_count')
+sync_schema_props_perms('TrInfo')
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.10.8_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.10.8_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1 @@
+sync_schema_props_perms('CWSource', syncprops=False)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.10.9_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.10.9_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,32 @@
+import sys
+
+if confirm('fix some corrupted entities noticed on several instances?'):
+ rql('DELETE CWConstraint X WHERE NOT E constrained_by X')
+ rql('SET X is_instance_of Y WHERE X is Y, NOT X is_instance_of Y')
+ commit()
+
+if confirm('fix existing cwuri?'):
+ from logilab.common.shellutils import progress
+ from cubicweb.server.session import hooks_control
+ rset = rql('Any X, XC WHERE X cwuri XC, X cwuri ~= "%/eid/%"')
+ title = "%i entities to fix" % len(rset)
+ nbops = rset.rowcount
+ enabled = interactive_mode
+ with progress(title=title, nbops=nbops, size=30, enabled=enabled) as pb:
+ for i, row in enumerate(rset):
+ with hooks_control(session, session.HOOKS_DENY_ALL, 'integrity'):
+ data = {'eid': row[0], 'cwuri': row[1].replace(u'/eid', u'')}
+ rql('SET X cwuri %(cwuri)s WHERE X eid %(eid)s', data)
+ if not i % 100: # commit every 100 entities to limit memory consumption
+ pb.text = "%i committed" % i
+ commit(ask_confirm=False)
+ pb.update()
+ commit(ask_confirm=False)
+
+try:
+ from cubicweb import devtools
+ option_group_changed('anonymous-user', 'main', 'web')
+ option_group_changed('anonymous-password', 'main', 'web')
+except ImportError:
+ # cubicweb-dev unavailable, nothing needed
+ pass
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.11.0_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.11.0_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,11 @@
+from datetime import datetime
+
+for rtype in ('cw_support', 'cw_dont_cross', 'cw_may_cross'):
+ drop_relation_type(rtype)
+
+add_entity_type('CWSourceSchemaConfig')
+
+if not 'url' in schema['CWSource'].subjrels:
+ add_attribute('CWSource', 'url')
+ add_attribute('CWSource', 'parser')
+ add_attribute('CWSource', 'latest_retrieval')
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.12.9_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.12.9_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1 @@
+sync_schema_props_perms('cw_source')
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.13.0_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.13.0_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,3 @@
+sync_schema_props_perms('cw_source', syncprops=False)
+if schema['BigInt'].eid is None:
+ add_entity_type('BigInt')
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.13.3_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.13.3_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,2 @@
+drop_relation_definition('CWSourceSchemaConfig', 'cw_schema', 'CWAttribute')
+sync_schema_props_perms('cw_schema')
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.13.6_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.13.6_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1 @@
+sync_schema_props_perms('CWSourceSchemaConfig')
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.13.8_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.13.8_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,5 @@
+change_attribute_type('CWCache', 'timestamp', 'TZDatetime')
+change_attribute_type('CWUser', 'last_login_time', 'TZDatetime')
+change_attribute_type('CWSource', 'latest_retrieval', 'TZDatetime')
+drop_attribute('CWSource', 'synchronizing')
+add_attribute('CWSource', 'in_synchronization')
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.14.0_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.14.0_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,15 @@
+from __future__ import print_function
+
+config['rql-cache-size'] = config['rql-cache-size'] * 10
+
+add_entity_type('CWDataImport')
+
+from cubicweb.schema import CONSTRAINTS, guess_rrqlexpr_mainvars
+for rqlcstr in rql('Any X,XT,XV WHERE X is CWConstraint, X cstrtype XT, X value XV,'
+ 'X cstrtype XT, XT name IN ("RQLUniqueConstraint","RQLConstraint","RQLVocabularyConstraint"),'
+ 'NOT X value ~= ";%"').entities():
+ expression = rqlcstr.value
+ mainvars = guess_rrqlexpr_mainvars(expression)
+ yamscstr = CONSTRAINTS[rqlcstr.type](expression, mainvars)
+ rqlcstr.cw_set(value=yamscstr.serialize())
+ print('updated', rqlcstr.type, rqlcstr.value.strip())
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.14.7_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.14.7_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,4 @@
+# migrate default format for TrInfo `comment_format` attribute
+sync_schema_props_perms('TrInfo')
+
+commit()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.15.0_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.15.0_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,10 @@
+sync_schema_props_perms('EmailAddress')
+
+for source in rql('CWSource X WHERE X type "ldapuser"').entities():
+ config = source.dictconfig
+ host = config.pop('host', u'ldap')
+ protocol = config.pop('protocol', u'ldap')
+ source.cw_set(url=u'%s://%s' % (protocol, host))
+ source.update_config(skip_unknown=True, **config)
+
+commit()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.15.0_common.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.15.0_common.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,7 @@
+import ConfigParser
+try:
+ undo_actions = config.cfgfile_parser.get('MAIN', 'undo-support', False)
+except ConfigParser.NoOptionError:
+ pass # this conf. file was probably already migrated
+else:
+ config.global_set_option('undo-enabled', bool(undo_actions))
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.15.4_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.15.4_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,13 @@
+from __future__ import print_function
+
+from logilab.common.shellutils import generate_password
+from cubicweb.server.utils import crypt_password
+
+for user in rql('CWUser U WHERE U cw_source S, S name "system", U upassword P, U login L').entities():
+ salt = user.upassword.getvalue()
+ if crypt_password('', salt) == salt:
+ passwd = generate_password()
+ print('setting random password for user %s' % user.login)
+ user.set_attributes(upassword=passwd)
+
+commit()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.15.9_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.15.9_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,2 @@
+sync_schema_props_perms(('State', 'state_of', 'Workflow'), commit=False)
+sync_schema_props_perms(('State', 'name', 'String'))
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.16.1_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.16.1_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,2 @@
+sync_schema_props_perms(('State', 'state_of', 'Workflow'), commit=False)
+sync_schema_props_perms(('State', 'name', 'String'))
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.17.0_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.17.0_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1 @@
+add_attribute('CWAttribute', 'extra_props')
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.17.11_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.17.11_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,7 @@
+for table, column in [
+ ('transactions', 'tx_time'),
+ ('tx_entity_actions', 'tx_uuid'),
+ ('tx_relation_actions', 'tx_uuid')]:
+ repo.system_source.create_index(session, table, column)
+
+commit()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.18.2_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.18.2_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,2 @@
+install_custom_sql_scripts()
+commit()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.18.4_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.18.4_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,2 @@
+sync_schema_props_perms('CWSource')
+sync_schema_props_perms('CWSourceHostConfig')
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.19.0_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.19.0_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,3 @@
+sql('DROP TABLE "deleted_entities"')
+
+commit()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.20.0_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.20.0_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,6 @@
+sync_schema_props_perms('state_of')
+sync_schema_props_perms('transition_of')
+sync_schema_props_perms('State')
+sync_schema_props_perms('BaseTransition')
+sync_schema_props_perms('Transition')
+sync_schema_props_perms('WorkflowTransition')
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.20.7_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.20.7_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,2 @@
+if repo.system_source.dbdriver == 'postgres':
+ install_custom_sql_scripts()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.20.8_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.20.8_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1 @@
+sync_schema_props_perms('cwuri')
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.21.0_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.21.0_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,174 @@
+from __future__ import print_function
+
+from cubicweb.schema import PURE_VIRTUAL_RTYPES
+from cubicweb.server.schema2sql import rschema_has_table
+
+
+def add_foreign_keys():
+ source = repo.system_source
+ if not source.dbhelper.alter_column_support:
+ return
+ for rschema in schema.relations():
+ if rschema.inlined:
+ add_foreign_keys_inlined(rschema)
+ elif rschema_has_table(rschema, skip_relations=PURE_VIRTUAL_RTYPES):
+ add_foreign_keys_relation(rschema)
+ for eschema in schema.entities():
+ if eschema.final:
+ continue
+ add_foreign_key_etype(eschema)
+
+
+def add_foreign_keys_relation(rschema):
+ args = {'r': rschema.type}
+ count = sql('SELECT COUNT(*) FROM ('
+ ' SELECT eid_from FROM %(r)s_relation'
+ ' UNION'
+ ' SELECT eid_to FROM %(r)s_relation'
+ ' EXCEPT'
+ ' SELECT eid FROM entities) AS eids' % args,
+ ask_confirm=False)[0][0]
+ if count:
+ print('%s references %d unknown entities, deleting' % (rschema, count))
+ sql('DELETE FROM %(r)s_relation '
+ 'WHERE eid_from IN (SELECT eid_from FROM %(r)s_relation EXCEPT SELECT eid FROM entities)' % args)
+ sql('DELETE FROM %(r)s_relation '
+ 'WHERE eid_to IN (SELECT eid_to FROM %(r)s_relation EXCEPT SELECT eid FROM entities)' % args)
+
+ args['from_fk'] = '%(r)s_relation_eid_from_fkey' % args
+ args['to_fk'] = '%(r)s_relation_eid_to_fkey' % args
+ args['table'] = '%(r)s_relation' % args
+ if repo.system_source.dbdriver == 'postgres':
+ sql('ALTER TABLE %(table)s DROP CONSTRAINT IF EXISTS %(from_fk)s' % args,
+ ask_confirm=False)
+ sql('ALTER TABLE %(table)s DROP CONSTRAINT IF EXISTS %(to_fk)s' % args,
+ ask_confirm=False)
+ elif repo.system_source.dbdriver.startswith('sqlserver'):
+ sql("IF OBJECT_ID('%(from_fk)s', 'F') IS NOT NULL "
+ "ALTER TABLE %(table)s DROP CONSTRAINT %(from_fk)s" % args,
+ ask_confirm=False)
+ sql("IF OBJECT_ID('%(to_fk)s', 'F') IS NOT NULL "
+ "ALTER TABLE %(table)s DROP CONSTRAINT %(to_fk)s" % args,
+ ask_confirm=False)
+ sql('ALTER TABLE %(table)s ADD CONSTRAINT %(from_fk)s '
+ 'FOREIGN KEY (eid_from) REFERENCES entities (eid)' % args,
+ ask_confirm=False)
+ sql('ALTER TABLE %(table)s ADD CONSTRAINT %(to_fk)s '
+ 'FOREIGN KEY (eid_to) REFERENCES entities (eid)' % args,
+ ask_confirm=False)
+
+
+def add_foreign_keys_inlined(rschema):
+ for eschema in rschema.subjects():
+ args = {'e': eschema.type, 'r': rschema.type}
+ args['c'] = 'cw_%(e)s_cw_%(r)s_fkey' % args
+
+ if eschema.rdef(rschema).cardinality[0] == '1':
+ broken_eids = sql('SELECT cw_eid FROM cw_%(e)s WHERE cw_%(r)s IS NULL' % args,
+ ask_confirm=False)
+ if broken_eids:
+ print('Required relation %(e)s.%(r)s missing' % args)
+ args['eids'] = ', '.join(str(eid) for eid, in broken_eids)
+ rql('DELETE %(e)s X WHERE X eid IN (%(eids)s)' % args)
+ broken_eids = sql('SELECT cw_eid FROM cw_%(e)s WHERE cw_%(r)s IN (SELECT cw_%(r)s FROM cw_%(e)s '
+ 'EXCEPT SELECT eid FROM entities)' % args,
+ ask_confirm=False)
+ if broken_eids:
+ print('Required relation %(e)s.%(r)s references unknown objects, deleting subject entities' % args)
+ args['eids'] = ', '.join(str(eid) for eid, in broken_eids)
+ rql('DELETE %(e)s X WHERE X eid IN (%(eids)s)' % args)
+ else:
+ if sql('SELECT COUNT(*) FROM ('
+ ' SELECT cw_%(r)s FROM cw_%(e)s WHERE cw_%(r)s IS NOT NULL'
+ ' EXCEPT'
+ ' SELECT eid FROM entities) AS eids' % args,
+ ask_confirm=False)[0][0]:
+ print('%(e)s.%(r)s references unknown entities, deleting relation' % args)
+ sql('UPDATE cw_%(e)s SET cw_%(r)s = NULL WHERE cw_%(r)s IS NOT NULL AND cw_%(r)s IN '
+ '(SELECT cw_%(r)s FROM cw_%(e)s EXCEPT SELECT eid FROM entities)' % args)
+
+ if repo.system_source.dbdriver == 'postgres':
+ sql('ALTER TABLE cw_%(e)s DROP CONSTRAINT IF EXISTS %(c)s' % args,
+ ask_confirm=False)
+ elif repo.system_source.dbdriver.startswith('sqlserver'):
+ sql("IF OBJECT_ID('%(c)s', 'F') IS NOT NULL "
+ "ALTER TABLE cw_%(e)s DROP CONSTRAINT %(c)s" % args,
+ ask_confirm=False)
+ sql('ALTER TABLE cw_%(e)s ADD CONSTRAINT %(c)s '
+ 'FOREIGN KEY (cw_%(r)s) references entities(eid)' % args,
+ ask_confirm=False)
+
+
+def add_foreign_key_etype(eschema):
+ args = {'e': eschema.type}
+ if sql('SELECT COUNT(*) FROM ('
+ ' SELECT cw_eid FROM cw_%(e)s'
+ ' EXCEPT'
+ ' SELECT eid FROM entities) AS eids' % args,
+ ask_confirm=False)[0][0]:
+ print('%(e)s has nonexistent entities, deleting' % args)
+ sql('DELETE FROM cw_%(e)s WHERE cw_eid IN '
+ '(SELECT cw_eid FROM cw_%(e)s EXCEPT SELECT eid FROM entities)' % args)
+ args['c'] = 'cw_%(e)s_cw_eid_fkey' % args
+ if repo.system_source.dbdriver == 'postgres':
+ sql('ALTER TABLE cw_%(e)s DROP CONSTRAINT IF EXISTS %(c)s' % args,
+ ask_confirm=False)
+ elif repo.system_source.dbdriver.startswith('sqlserver'):
+ sql("IF OBJECT_ID('%(c)s', 'F') IS NOT NULL "
+ "ALTER TABLE cw_%(e)s DROP CONSTRAINT %(c)s" % args,
+ ask_confirm=False)
+ sql('ALTER TABLE cw_%(e)s ADD CONSTRAINT %(c)s '
+ 'FOREIGN KEY (cw_eid) REFERENCES entities (eid)' % args,
+ ask_confirm=False)
+
+
+add_foreign_keys()
+
+cu = session.cnxset.cu
+helper = repo.system_source.dbhelper
+
+helper.drop_index(cu, 'entities', 'extid', False)
+# don't use create_index because it doesn't work for columns that may be NULL
+# on sqlserver
+for query in helper.sqls_create_multicol_unique_index('entities', ['extid']):
+ cu.execute(query)
+
+if 'moved_entities' not in helper.list_tables(cu):
+ sql('''
+ CREATE TABLE moved_entities (
+ eid INTEGER PRIMARY KEY NOT NULL,
+ extid VARCHAR(256) UNIQUE
+ )
+ ''')
+
+moved_entities = sql('SELECT -eid, extid FROM entities WHERE eid < 0',
+ ask_confirm=False)
+if moved_entities:
+ cu.executemany('INSERT INTO moved_entities (eid, extid) VALUES (%s, %s)',
+ moved_entities)
+ sql('DELETE FROM entities WHERE eid < 0')
+
+commit()
+
+sync_schema_props_perms('CWEType')
+
+sync_schema_props_perms('cwuri')
+
+from cubicweb.server.schema2sql import check_constraint
+
+for cwconstraint in rql('Any C WHERE R constrained_by C').entities():
+ cwrdef = cwconstraint.reverse_constrained_by[0]
+ rdef = cwrdef.yams_schema()
+ cstr = rdef.constraint_by_eid(cwconstraint.eid)
+ if cstr.type() not in ('BoundaryConstraint', 'IntervalBoundConstraint', 'StaticVocabularyConstraint'):
+ continue
+ cstrname, check = check_constraint(rdef.subject, rdef.object, rdef.rtype.type,
+ cstr, helper, prefix='cw_')
+ args = {'e': rdef.subject.type, 'c': cstrname, 'v': check}
+ if repo.system_source.dbdriver == 'postgres':
+ sql('ALTER TABLE cw_%(e)s DROP CONSTRAINT IF EXISTS %(c)s' % args)
+ elif repo.system_source.dbdriver.startswith('sqlserver'):
+ sql("IF OBJECT_ID('%(c)s', 'C') IS NOT NULL "
+ "ALTER TABLE cw_%(e)s DROP CONSTRAINT %(c)s" % args)
+ sql('ALTER TABLE cw_%(e)s ADD CONSTRAINT %(c)s CHECK(%(v)s)' % args)
+commit()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.21.1_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.21.1_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,4 @@
+# re-read ComputedRelation permissions from schema.py now that we're
+# able to serialize them
+for computedrtype in schema.iter_computed_relations():
+ sync_schema_props_perms(computedrtype.type)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.21.2_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.21.2_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,7 @@
+sync_schema_props_perms('cwuri')
+
+helper = repo.system_source.dbhelper
+cu = session.cnxset.cu
+helper.set_null_allowed(cu, 'moved_entities', 'extid', 'VARCHAR(256)', False)
+
+commit()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.22.0_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.22.0_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,21 @@
+if confirm('use Europe/Paris as timezone?'):
+ timezone = 'Europe/Paris'
+else:
+ import pytz
+ while True:
+ timezone = raw_input('enter your timezone')
+ if timezone in pytz.common_timezones:
+ break
+
+dbdriver = repo.system_source.dbdriver
+if dbdriver == 'postgres':
+ sql("SET TIME ZONE '%s'" % timezone)
+
+for entity in schema.entities():
+ if entity.final:
+ continue
+ change_attribute_type(entity.type, 'creation_date', 'TZDatetime', ask_confirm=False)
+ change_attribute_type(entity.type, 'modification_date', 'TZDatetime', ask_confirm=False)
+
+if dbdriver == 'postgres':
+ sql("SET TIME ZONE UTC")
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.3.5_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.3.5_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1 @@
+raise NotImplementedError("Cannot migrate such an old version. Use intermediate CubicWeb version (try 3.16.x)")
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.4.0_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.4.0_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1 @@
+raise NotImplementedError("Cannot migrate such an old version. Use intermediate CubicWeb version (try 3.16.x)")
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.4.0_common.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.4.0_common.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1 @@
+raise NotImplementedError("Cannot migrate such an old version. Use intermediate CubicWeb version (try 3.16.x)")
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.4.3_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.4.3_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1 @@
+raise NotImplementedError("Cannot migrate such an old version. Use intermediate CubicWeb version (try 3.16.x)")
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.5.0_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.5.0_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1 @@
+raise NotImplementedError("Cannot migrate such an old version. Use intermediate CubicWeb version (try 3.16.x)")
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.5.10_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.5.10_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1 @@
+raise NotImplementedError("Cannot migrate such an old version. Use intermediate CubicWeb version (try 3.16.x)")
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.5.3_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.5.3_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1 @@
+raise NotImplementedError("Cannot migrate such an old version. Use intermediate CubicWeb version (try 3.16.x)")
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.6.1_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.6.1_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1 @@
+raise NotImplementedError("Cannot migrate such an old version. Use intermediate CubicWeb version (try 3.16.x)")
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.7.0_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.7.0_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,40 @@
+typemap = repo.system_source.dbhelper.TYPE_MAPPING
+sqls = """
+CREATE TABLE transactions (
+ tx_uuid CHAR(32) PRIMARY KEY NOT NULL,
+ tx_user INTEGER NOT NULL,
+ tx_time %s NOT NULL
+);;
+CREATE INDEX transactions_tx_user_idx ON transactions(tx_user);;
+
+CREATE TABLE tx_entity_actions (
+ tx_uuid CHAR(32) REFERENCES transactions(tx_uuid) ON DELETE CASCADE,
+ txa_action CHAR(1) NOT NULL,
+ txa_public %s NOT NULL,
+ txa_order INTEGER,
+ eid INTEGER NOT NULL,
+ etype VARCHAR(64) NOT NULL,
+ changes %s
+);;
+CREATE INDEX tx_entity_actions_txa_action_idx ON tx_entity_actions(txa_action);;
+CREATE INDEX tx_entity_actions_txa_public_idx ON tx_entity_actions(txa_public);;
+CREATE INDEX tx_entity_actions_eid_idx ON tx_entity_actions(eid);;
+CREATE INDEX tx_entity_actions_etype_idx ON tx_entity_actions(etype);;
+
+CREATE TABLE tx_relation_actions (
+ tx_uuid CHAR(32) REFERENCES transactions(tx_uuid) ON DELETE CASCADE,
+ txa_action CHAR(1) NOT NULL,
+ txa_public %s NOT NULL,
+ txa_order INTEGER,
+ eid_from INTEGER NOT NULL,
+ eid_to INTEGER NOT NULL,
+ rtype VARCHAR(256) NOT NULL
+);;
+CREATE INDEX tx_relation_actions_txa_action_idx ON tx_relation_actions(txa_action);;
+CREATE INDEX tx_relation_actions_txa_public_idx ON tx_relation_actions(txa_public);;
+CREATE INDEX tx_relation_actions_eid_from_idx ON tx_relation_actions(eid_from);;
+CREATE INDEX tx_relation_actions_eid_to_idx ON tx_relation_actions(eid_to)
+""" % (typemap['Datetime'],
+ typemap['Boolean'], typemap['Bytes'], typemap['Boolean'])
+for statement in sqls.split(';;'):
+ sql(statement)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.7.2_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.7.2_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,2 @@
+sql('DROP FUNCTION IF EXISTS _fsopen(bytea)')
+sql('DROP FUNCTION IF EXISTS fspath(bigint, text, text)')
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.7.4_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.7.4_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1 @@
+sync_schema_props_perms('TrInfo', syncprops=False)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.7.5_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.7.5_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,4 @@
+if versions_map['cubicweb'][0] == (3, 7, 4):
+ config['http-session-time'] *= 60
+ config['cleanup-session-time'] *= 60
+ config['cleanup-anonymous-session-time'] *= 60
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.8.1_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.8.1_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,2 @@
+rql('SET X name "BoundaryConstraint" '
+ 'WHERE X is CWConstraintType, X name "BoundConstraint"')
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.8.3_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.8.3_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,3 @@
+if 'same_as' in schema:
+ sync_schema_props_perms('same_as', syncperms=False)
+sync_schema_props_perms('Bookmark', syncperms=False)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.8.3_common.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.8.3_common.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,4 @@
+option_group_changed('port', 'main', 'web')
+option_group_changed('query-log-file', 'main', 'web')
+option_group_changed('profile', 'main', 'web')
+option_group_changed('max-post-length', 'main', 'web')
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.8.5_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.8.5_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,61 @@
+from __future__ import print_function
+
+def migrate_varchar_to_nvarchar():
+ dbdriver = config.system_source_config['db-driver']
+ if dbdriver != "sqlserver2005":
+ return
+
+ introspection_sql = """\
+SELECT table_schema, table_name, column_name, is_nullable, character_maximum_length
+FROM information_schema.columns
+WHERE data_type = 'VARCHAR' and table_name <> 'SYSDIAGRAMS'
+"""
+ has_index_sql = """\
+SELECT i.name AS index_name,
+ i.type_desc,
+ i.is_unique,
+ i.is_unique_constraint
+FROM sys.indexes AS i, sys.index_columns as j, sys.columns as k
+WHERE is_hypothetical = 0 AND i.index_id <> 0
+AND i.object_id = j.object_id
+AND i.index_id = j.index_id
+AND i.object_id = OBJECT_ID('%(table)s')
+AND k.name = '%(col)s'
+AND k.object_id=i.object_id
+AND j.column_id = k.column_id;"""
+
+ generated_statements = []
+ for schema, table, column, is_nullable, length in sql(introspection_sql, ask_confirm=False):
+ qualified_table = '[%s].[%s]' % (schema, table)
+ rset = sql(has_index_sql % {'table': qualified_table, 'col':column},
+ ask_confirm = False)
+ drops = []
+ creates = []
+ for idx_name, idx_type, idx_unique, is_unique_constraint in rset:
+ if is_unique_constraint:
+ drops.append('ALTER TABLE %s DROP CONSTRAINT %s' % (qualified_table, idx_name))
+ creates.append('ALTER TABLE %s ADD CONSTRAINT %s UNIQUE (%s)' % (qualified_table, idx_name, column))
+ else:
+ drops.append('DROP INDEX %s ON %s' % (idx_name, qualified_table))
+ if idx_unique:
+ unique = 'UNIQUE'
+ else:
+ unique = ''
+ creates.append('CREATE %s %s INDEX %s ON %s(%s)' % (unique, idx_type, idx_name, qualified_table, column))
+
+ if length == -1:
+ length = 'max'
+ if is_nullable == 'YES':
+ not_null = 'NULL'
+ else:
+ not_null = 'NOT NULL'
+ alter_sql = 'ALTER TABLE %s ALTER COLUMN %s NVARCHAR(%s) %s' % (qualified_table, column, length, not_null)
+ generated_statements+= drops + [alter_sql] + creates
+
+
+ for statement in generated_statements:
+ print(statement)
+ sql(statement, ask_confirm=False)
+ commit()
+
+migrate_varchar_to_nvarchar()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.9.0_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.9.0_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,3 @@
+if repo.system_source.dbdriver == 'postgres':
+ sql('ALTER TABLE appears ADD COLUMN weight float')
+ sql('UPDATE appears SET weight=1.0 ')
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/3.9.5_Any.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/3.9.5_Any.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,4 @@
+if not rql('CWConstraintType X WHERE X name "RQLUniqueConstraint"',
+ ask_confirm=False):
+ rql('INSERT CWConstraintType X: X name "RQLUniqueConstraint"',
+ ask_confirm=False)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/bootstrapmigration_repository.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/bootstrapmigration_repository.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,459 @@
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""always executed before all others in server migration
+
+it should only include low level schema changes
+"""
+from __future__ import print_function
+
+from six import text_type
+
+from cubicweb import ConfigurationError
+from cubicweb.server.session import hooks_control
+from cubicweb.server import schemaserial as ss
+
+applcubicwebversion, cubicwebversion = versions_map['cubicweb']
+
+def _add_relation_definition_no_perms(subjtype, rtype, objtype):
+ rschema = fsschema.rschema(rtype)
+ rdef = rschema.rdefs[(subjtype, objtype)]
+ rdef.rtype = schema.rschema(rtype)
+ rdef.subject = schema.eschema(subjtype)
+ rdef.object = schema.eschema(objtype)
+ ss.execschemarql(rql, rdef, ss.rdef2rql(rdef, CSTRMAP, groupmap=None))
+ commit(ask_confirm=False)
+
+def replace_eid_sequence_with_eid_numrange(session):
+ dbh = session.repo.system_source.dbhelper
+ cursor = session.cnxset.cu
+ try:
+ cursor.execute(dbh.sql_sequence_current_state('entities_id_seq'))
+ lasteid = cursor.fetchone()[0]
+ except: # programming error, already migrated
+ return
+
+ cursor.execute(dbh.sql_drop_sequence('entities_id_seq'))
+ cursor.execute(dbh.sql_create_numrange('entities_id_seq'))
+ cursor.execute(dbh.sql_restart_numrange('entities_id_seq', initial_value=lasteid))
+ session.commit()
+
+if applcubicwebversion <= (3, 13, 0) and cubicwebversion >= (3, 13, 1):
+ sql('ALTER TABLE entities ADD asource VARCHAR(64)')
+ sql('UPDATE entities SET asource=cw_name '
+ 'FROM cw_CWSource, cw_source_relation '
+ 'WHERE entities.eid=cw_source_relation.eid_from AND cw_source_relation.eid_to=cw_CWSource.cw_eid')
+ commit()
+
+if applcubicwebversion <= (3, 14, 4) and cubicwebversion >= (3, 14, 4):
+ from cubicweb.server import schema2sql as y2sql
+ dbhelper = repo.system_source.dbhelper
+ rdefdef = schema['CWSource'].rdef('name')
+ attrtype = y2sql.type_from_constraints(dbhelper, rdefdef.object, rdefdef.constraints).split()[0]
+ cursor = session.cnxset.cu
+ sql('UPDATE entities SET asource = source WHERE asource is NULL')
+ dbhelper.change_col_type(cursor, 'entities', 'asource', attrtype, False)
+ dbhelper.change_col_type(cursor, 'entities', 'source', attrtype, False)
+
+ # we now have a functional asource column, start using the normal eid_type_source method
+ if repo.system_source.eid_type_source == repo.system_source.eid_type_source_pre_131:
+ del repo.system_source.eid_type_source
+
+if applcubicwebversion < (3, 19, 0) and cubicwebversion >= (3, 19, 0):
+ try:
+ # need explicit drop of the indexes on some database systems (sqlserver)
+ sql(repo.system_source.dbhelper.sql_drop_index('entities', 'mtime'))
+ sql('ALTER TABLE "entities" DROP COLUMN "mtime"')
+ sql('ALTER TABLE "entities" DROP COLUMN "source"')
+ except: # programming error, already migrated
+ print("Failed to drop mtime or source database columns")
+ print("'entities' table of the database has probably been already updated")
+
+ commit()
+
+ replace_eid_sequence_with_eid_numrange(session)
+
+if applcubicwebversion < (3, 20, 0) and cubicwebversion >= (3, 20, 0):
+ ss._IGNORED_PROPS.append('formula')
+ add_attribute('CWAttribute', 'formula', commit=False)
+ ss._IGNORED_PROPS.remove('formula')
+ commit()
+ add_entity_type('CWComputedRType')
+ commit()
+
+if schema['TZDatetime'].eid is None:
+ add_entity_type('TZDatetime', auto=False)
+if schema['TZTime'].eid is None:
+ add_entity_type('TZTime', auto=False)
+
+if applcubicwebversion < (3, 18, 0) and cubicwebversion >= (3, 18, 0):
+ driver = config.system_source_config['db-driver']
+ if not (driver == 'postgres' or driver.startswith('sqlserver')):
+ import sys
+ print('This migration is not supported for backends other than sqlserver or postgres (yet).', file=sys.stderr)
+ sys.exit(1)
+
+ add_relation_definition('CWAttribute', 'add_permission', 'CWGroup')
+ add_relation_definition('CWAttribute', 'add_permission', 'RQLExpression')
+
+ # a bad defaultval in 3.13.8 schema was fixed in 3.13.9, but the migration was missed
+ rql('SET ATTR defaultval NULL WHERE ATTR from_entity E, E name "CWSource", ATTR relation_type T, T name "in_synchronization"')
+
+ # the migration gets confused when we change rdefs out from under it. So
+ # explicitly remove this size constraint so it doesn't stick around and break
+ # things later.
+ rdefeid = schema['defaultval'].rdefs.values()[0].eid
+ rql('DELETE CWConstraint C WHERE C cstrtype T, T name "SizeConstraint", R constrained_by C, R eid %(eid)s', {'eid': rdefeid})
+
+ sync_schema_props_perms('defaultval')
+
+ def convert_defaultval(cwattr, default):
+ from decimal import Decimal
+ import yams
+ from cubicweb import Binary
+ if default is None:
+ return
+ if isinstance(default, Binary):
+ # partially migrated instance, try to be idempotent
+ return default
+ atype = cwattr.to_entity[0].name
+ if atype == 'Boolean':
+ # boolean attributes with default=False were stored as ''
+ assert default in ('True', 'False', ''), repr(default)
+ default = default == 'True'
+ elif atype in ('Int', 'BigInt'):
+ default = int(default)
+ elif atype == 'Float':
+ default = float(default)
+ elif atype == 'Decimal':
+ default = Decimal(default)
+ elif atype in ('Date', 'Datetime', 'TZDatetime', 'Time'):
+ try:
+ # handle NOW and TODAY, keep them stored as strings
+ yams.KEYWORD_MAP[atype][default.upper()]
+ default = default.upper()
+ except KeyError:
+ # otherwise get an actual date or datetime
+ default = yams.DATE_FACTORY_MAP[atype](default)
+ else:
+ assert atype == 'String', atype
+ default = text_type(default)
+ return Binary.zpickle(default)
+
+ dbh = repo.system_source.dbhelper
+
+
+ sql('ALTER TABLE cw_cwattribute ADD new_defaultval %s' % dbh.TYPE_MAPPING['Bytes'])
+
+ for cwattr in rql('CWAttribute X').entities():
+ olddefault = cwattr.defaultval
+ if olddefault is not None:
+ req = "UPDATE cw_cwattribute SET new_defaultval = %(val)s WHERE cw_eid = %(eid)s"
+ args = {'val': dbh.binary_value(convert_defaultval(cwattr, olddefault).getvalue()), 'eid': cwattr.eid}
+ sql(req, args, ask_confirm=False)
+
+ sql('ALTER TABLE cw_cwattribute DROP COLUMN cw_defaultval')
+ if driver == 'postgres':
+ sql('ALTER TABLE cw_cwattribute RENAME COLUMN new_defaultval TO cw_defaultval')
+ else: # sqlserver
+ sql("sp_rename 'cw_cwattribute.new_defaultval', 'cw_defaultval', 'COLUMN'")
+
+
+ # Set object type to "Bytes" for CWAttribute's "defaultval" attribute
+ rql('SET X to_entity B WHERE X is CWAttribute, X from_entity Y, Y name "CWAttribute", '
+ 'X relation_type Z, Z name "defaultval", B name "Bytes", NOT X to_entity B')
+
+ oldrdef = schema['CWAttribute'].rdef('defaultval')
+ import yams.buildobjs as ybo
+ newrdef = ybo.RelationDefinition('CWAttribute', 'defaultval', 'Bytes')
+ newrdef.eid = oldrdef.eid
+ schema.add_relation_def(newrdef)
+ schema.del_relation_def('CWAttribute', 'defaultval', 'String')
+
+ commit()
+
+ sync_schema_props_perms('defaultval')
+
+ for rschema in schema.relations():
+ if rschema.symmetric:
+ subjects = set(repr(e.type) for e in rschema.subjects())
+ objects = set(repr(e.type) for e in rschema.objects())
+ assert subjects == objects
+ martians = set(str(eid) for eid, in sql('SELECT eid_to FROM %s_relation, entities WHERE eid_to = eid AND type NOT IN (%s)' %
+ (rschema.type, ','.join(subjects))))
+ martians |= set(str(eid) for eid, in sql('SELECT eid_from FROM %s_relation, entities WHERE eid_from = eid AND type NOT IN (%s)' %
+ (rschema.type, ','.join(subjects))))
+ if martians:
+ martians = ','.join(martians)
+ print('deleting broken relations %s for eids %s' % (rschema.type, martians))
+ sql('DELETE FROM %s_relation WHERE eid_from IN (%s) OR eid_to IN (%s)' % (rschema.type, martians, martians))
+ with session.deny_all_hooks_but():
+ rql('SET X %(r)s Y WHERE Y %(r)s X, NOT X %(r)s Y' % {'r': rschema.type})
+ commit()
+
+
+ # multi columns unique constraints regeneration
+ from cubicweb.server import schemaserial
+
+ # syncschema hooks would try to remove indices but
+ # 1) we already do that below
+ # 2) the hook expects the CWUniqueTogetherConstraint.name attribute that hasn't
+ # yet been added
+ with session.allow_all_hooks_but('syncschema'):
+ rql('DELETE CWUniqueTogetherConstraint C')
+ commit()
+ add_attribute('CWUniqueTogetherConstraint', 'name')
+
+ # low-level wipe code for postgres & sqlserver, plain sql ...
+ if driver == 'postgres':
+ for indexname, in sql('select indexname from pg_indexes'):
+ if indexname.startswith('unique_'):
+ print('dropping index', indexname)
+ sql('DROP INDEX %s' % indexname)
+ commit()
+ elif driver.startswith('sqlserver'):
+ for viewname, in sql('select name from sys.views'):
+ if viewname.startswith('utv_'):
+ print('dropping view (index should be cascade-deleted)', viewname)
+ sql('DROP VIEW %s' % viewname)
+ commit()
+
+ # recreate the constraints, hook will lead to low-level recreation
+ for eschema in sorted(schema.entities()):
+ if eschema._unique_together:
+ print('recreate unique indexes for', eschema)
+ rql_args = schemaserial.uniquetogether2rqls(eschema)
+ for rql, args in rql_args:
+ args['x'] = eschema.eid
+ session.execute(rql, args)
+ commit()
+
+ # all attributes perms have to be refreshed ...
+ for rschema in sorted(schema.relations()):
+ if rschema.final:
+ if rschema.type in fsschema:
+ print('sync perms for', rschema.type)
+ sync_schema_props_perms(rschema.type, syncprops=False, ask_confirm=False, commit=False)
+ else:
+ print('WARNING: attribute %s missing from fs schema' % rschema.type)
+ commit()
+
+if applcubicwebversion < (3, 17, 0) and cubicwebversion >= (3, 17, 0):
+ try:
+ add_cube('sioc', update_database=False)
+ except ConfigurationError:
+ if not confirm('In cubicweb 3.17 sioc views have been moved to the sioc '
+ 'cube, which is not installed. Continue anyway?'):
+ raise
+ try:
+ add_cube('embed', update_database=False)
+ except ConfigurationError:
+ if not confirm('In cubicweb 3.17 embedding views have been moved to the embed '
+ 'cube, which is not installed. Continue anyway?'):
+ raise
+ try:
+ add_cube('geocoding', update_database=False)
+ except ConfigurationError:
+ if not confirm('In cubicweb 3.17 geocoding views have been moved to the geocoding '
+ 'cube, which is not installed. Continue anyway?'):
+ raise
+
+
+if applcubicwebversion <= (3, 14, 0) and cubicwebversion >= (3, 14, 0):
+ if 'require_permission' in schema and not 'localperms'in repo.config.cubes():
+ from cubicweb import ExecutionError
+ try:
+ add_cube('localperms', update_database=False)
+ except ConfigurationError:
+ raise ExecutionError('In cubicweb 3.14, CWPermission and related stuff '
+ 'has been moved to cube localperms. Install it first.')
+
+
+if applcubicwebversion == (3, 6, 0) and cubicwebversion >= (3, 6, 0):
+ CSTRMAP = dict(rql('Any T, X WHERE X is CWConstraintType, X name T',
+ ask_confirm=False))
+ _add_relation_definition_no_perms('CWAttribute', 'update_permission', 'CWGroup')
+ _add_relation_definition_no_perms('CWAttribute', 'update_permission', 'RQLExpression')
+ rql('SET X update_permission Y WHERE X is CWAttribute, X add_permission Y')
+ drop_relation_definition('CWAttribute', 'delete_permission', 'CWGroup')
+ drop_relation_definition('CWAttribute', 'delete_permission', 'RQLExpression')
+
+elif applcubicwebversion < (3, 6, 0) and cubicwebversion >= (3, 6, 0):
+ CSTRMAP = dict(rql('Any T, X WHERE X is CWConstraintType, X name T',
+ ask_confirm=False))
+ session.set_cnxset()
+ permsdict = ss.deserialize_ertype_permissions(session)
+
+ with hooks_control(session, session.HOOKS_ALLOW_ALL, 'integrity'):
+ for rschema in repo.schema.relations():
+ rpermsdict = permsdict.get(rschema.eid, {})
+ for rdef in rschema.rdefs.values():
+ for action in rdef.ACTIONS:
+ actperms = []
+ for something in rpermsdict.get(action == 'update' and 'add' or action, ()):
+ if isinstance(something, tuple):
+ actperms.append(rdef.rql_expression(*something))
+ else: # group name
+ actperms.append(something)
+ rdef.set_action_permissions(action, actperms)
+ for action in ('read', 'add', 'delete'):
+ _add_relation_definition_no_perms('CWRelation', '%s_permission' % action, 'CWGroup')
+ _add_relation_definition_no_perms('CWRelation', '%s_permission' % action, 'RQLExpression')
+ for action in ('read', 'update'):
+ _add_relation_definition_no_perms('CWAttribute', '%s_permission' % action, 'CWGroup')
+ _add_relation_definition_no_perms('CWAttribute', '%s_permission' % action, 'RQLExpression')
+ for action in ('read', 'add', 'delete'):
+ rql('SET X %s_permission Y WHERE X is CWRelation, '
+ 'RT %s_permission Y, X relation_type RT, Y is CWGroup' % (action, action))
+ rql('INSERT RQLExpression Y: Y exprtype YET, Y mainvars YMV, Y expression YEX, '
+ 'X %s_permission Y WHERE X is CWRelation, '
+ 'X relation_type RT, RT %s_permission Y2, Y2 exprtype YET, '
+ 'Y2 mainvars YMV, Y2 expression YEX' % (action, action))
+ rql('SET X read_permission Y WHERE X is CWAttribute, '
+ 'RT read_permission Y, X relation_type RT, Y is CWGroup')
+ rql('INSERT RQLExpression Y: Y exprtype YET, Y mainvars YMV, Y expression YEX, '
+ 'X read_permission Y WHERE X is CWAttribute, '
+ 'X relation_type RT, RT read_permission Y2, Y2 exprtype YET, '
+ 'Y2 mainvars YMV, Y2 expression YEX')
+ rql('SET X update_permission Y WHERE X is CWAttribute, '
+ 'RT add_permission Y, X relation_type RT, Y is CWGroup')
+ rql('INSERT RQLExpression Y: Y exprtype YET, Y mainvars YMV, Y expression YEX, '
+ 'X update_permission Y WHERE X is CWAttribute, '
+ 'X relation_type RT, RT add_permission Y2, Y2 exprtype YET, '
+ 'Y2 mainvars YMV, Y2 expression YEX')
+ for action in ('read', 'add', 'delete'):
+ drop_relation_definition('CWRType', '%s_permission' % action, 'CWGroup', commit=False)
+ drop_relation_definition('CWRType', '%s_permission' % action, 'RQLExpression')
+ sync_schema_props_perms('read_permission', syncperms=False) # fix read_permission cardinality
+
+if applcubicwebversion < (3, 9, 6) and cubicwebversion >= (3, 9, 6) and not 'CWUniqueTogetherConstraint' in schema:
+ add_entity_type('CWUniqueTogetherConstraint')
+
+if not ('CWUniqueTogetherConstraint', 'CWRType') in schema['relations'].rdefs:
+ add_relation_definition('CWUniqueTogetherConstraint', 'relations', 'CWRType')
+ rql('SET C relations RT WHERE C relations RDEF, RDEF relation_type RT')
+ commit()
+ drop_relation_definition('CWUniqueTogetherConstraint', 'relations', 'CWAttribute')
+ drop_relation_definition('CWUniqueTogetherConstraint', 'relations', 'CWRelation')
+
+
+if applcubicwebversion < (3, 4, 0) and cubicwebversion >= (3, 4, 0):
+
+ with hooks_control(session, session.HOOKS_ALLOW_ALL, 'integrity'):
+ session.set_shared_data('do-not-insert-cwuri', True)
+ add_relation_type('cwuri')
+ base_url = session.base_url()
+ for eid, in rql('Any X', ask_confirm=False):
+ type, source, extid = session.describe(eid)
+ if source == 'system':
+ rql('SET X cwuri %(u)s WHERE X eid %(x)s',
+ {'x': eid, 'u': u'%s%s' % (base_url, eid)})
+ isession.commit()
+ session.set_shared_data('do-not-insert-cwuri', False)
+
+if applcubicwebversion < (3, 5, 0) and cubicwebversion >= (3, 5, 0):
+ # check that migration is not doomed
+ rset = rql('Any X,Y WHERE X transition_of E, Y transition_of E, '
+ 'X name N, Y name N, NOT X identity Y',
+ ask_confirm=False)
+ if rset:
+ from logilab.common.shellutils import ASK
+ if not ASK.confirm('Migration will fail because of transitions with the same name. '
+ 'Continue anyway ?'):
+ import sys
+ sys.exit(1)
+ # proceed with migration
+ add_entity_type('Workflow')
+ add_entity_type('BaseTransition')
+ add_entity_type('WorkflowTransition')
+ add_entity_type('SubWorkflowExitPoint')
+ # drop explicit 'State allowed_transition Transition' since it should be
+ # inferred due to yams inheritance. However we've to disable the schema
+ # sync hook first to avoid to destroy existing data...
+ try:
+ from cubicweb.hooks import syncschema
+ repo.vreg.unregister(syncschema.AfterDelRelationTypeHook)
+ try:
+ drop_relation_definition('State', 'allowed_transition', 'Transition')
+ finally:
+ repo.vreg.register(syncschema.AfterDelRelationTypeHook)
+ except ImportError: # syncschema is in CW >= 3.6 only
+ from cubicweb.server.schemahooks import after_del_relation_type
+ repo.hm.unregister_hook(after_del_relation_type,
+ 'after_delete_relation', 'relation_type')
+ try:
+ drop_relation_definition('State', 'allowed_transition', 'Transition')
+ finally:
+ repo.hm.register_hook(after_del_relation_type,
+ 'after_delete_relation', 'relation_type')
+ schema.rebuild_infered_relations() # need to be explicitly called once everything is in place
+
+ for et in rql('DISTINCT Any ET,ETN WHERE S state_of ET, ET name ETN',
+ ask_confirm=False).entities():
+ wf = add_workflow(u'default %s workflow' % et.name, et.name,
+ ask_confirm=False)
+ rql('SET S state_of WF WHERE S state_of ET, ET eid %(et)s, WF eid %(wf)s',
+ {'et': et.eid, 'wf': wf.eid}, 'et', ask_confirm=False)
+ rql('SET T transition_of WF WHERE T transition_of ET, ET eid %(et)s, WF eid %(wf)s',
+ {'et': et.eid, 'wf': wf.eid}, 'et', ask_confirm=False)
+ rql('SET WF initial_state S WHERE ET initial_state S, ET eid %(et)s, WF eid %(wf)s',
+ {'et': et.eid, 'wf': wf.eid}, 'et', ask_confirm=False)
+
+
+ rql('DELETE TrInfo TI WHERE NOT TI from_state S')
+ rql('SET TI by_transition T WHERE TI from_state FS, TI to_state TS, '
+ 'FS allowed_transition T, T destination_state TS')
+ commit()
+
+ drop_relation_definition('State', 'state_of', 'CWEType')
+ drop_relation_definition('Transition', 'transition_of', 'CWEType')
+ drop_relation_definition('CWEType', 'initial_state', 'State')
+
+ sync_schema_props_perms()
+
+if applcubicwebversion < (3, 2, 2) and cubicwebversion >= (3, 2, 1):
+ from base64 import b64encode
+ for eid, extid in sql('SELECT eid, extid FROM entities '
+ 'WHERE extid is NOT NULL',
+ ask_confirm=False):
+ sql('UPDATE entities SET extid=%(extid)s WHERE eid=%(eid)s',
+ {'extid': b64encode(extid), 'eid': eid}, ask_confirm=False)
+ commit()
+
+if applcubicwebversion < (3, 2, 0) and cubicwebversion >= (3, 2, 0):
+ add_cube('card', update_database=False)
+
+
+if applcubicwebversion < (3, 21, 1) and cubicwebversion >= (3, 21, 1):
+ add_relation_definition('CWComputedRType', 'read_permission', 'CWGroup')
+ add_relation_definition('CWComputedRType', 'read_permission', 'RQLExpression')
+
+
+def sync_constraint_types():
+ """Make sure the repository knows about all constraint types defined in the code"""
+ from cubicweb.schema import CONSTRAINTS
+ repo_constraints = set(row[0] for row in rql('Any N WHERE X is CWConstraintType, X name N'))
+
+ for cstrtype in set(CONSTRAINTS) - repo_constraints:
+ if cstrtype == 'BoundConstraint':
+ # was renamed to BoundaryConstraint, we don't need the old name
+ continue
+ rql('INSERT CWConstraintType X: X name %(name)s', {'name': cstrtype})
+
+ commit()
+
+sync_constraint_types()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/migration/postcreate.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/migration/postcreate.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,77 @@
+# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""cubicweb post creation script, set user's workflow"""
+from __future__ import print_function
+
+from six import text_type
+
+from cubicweb import _
+
+
+# insert versions
+create_entity('CWProperty', pkey=u'system.version.cubicweb',
+ value=text_type(config.cubicweb_version()))
+for cube in config.cubes():
+ create_entity('CWProperty', pkey=u'system.version.%s' % cube.lower(),
+ value=text_type(config.cube_version(cube)))
+
+# some entities have been added before schema entities, fix the 'is' and
+# 'is_instance_of' relations
+for rtype in ('is', 'is_instance_of'):
+ sql('INSERT INTO %s_relation '
+ 'SELECT X.eid, ET.cw_eid FROM entities as X, cw_CWEType as ET '
+ 'WHERE X.type=ET.cw_name AND NOT EXISTS('
+ ' SELECT 1 from %s_relation '
+ ' WHERE eid_from=X.eid AND eid_to=ET.cw_eid)' % (rtype, rtype))
+
+# user workflow
+userwf = add_workflow(_('default user workflow'), 'CWUser')
+activated = userwf.add_state(_('activated'), initial=True)
+deactivated = userwf.add_state(_('deactivated'))
+userwf.add_transition(_('deactivate'), (activated,), deactivated,
+ requiredgroups=(u'managers',))
+userwf.add_transition(_('activate'), (deactivated,), activated,
+ requiredgroups=(u'managers',))
+
+# create anonymous user if all-in-one config and anonymous user has been specified
+if hasattr(config, 'anonymous_user'):
+ anonlogin, anonpwd = config.anonymous_user()
+ if anonlogin == session.user.login:
+ print('you are using a manager account as anonymous user.')
+ print('Hopefully this is not a production instance...')
+ elif anonlogin:
+ from cubicweb.server import create_user
+ create_user(session, text_type(anonlogin), anonpwd, u'guests')
+
+# need this since we already have at least one user in the database (the default admin)
+for user in rql('Any X WHERE X is CWUser').entities():
+ rql('SET X in_state S WHERE X eid %(x)s, S eid %(s)s',
+ {'x': user.eid, 's': activated.eid})
+
+# on interactive mode, ask for level 0 persistent options
+if interactive_mode:
+ cfg = config.persistent_options_configuration()
+ cfg.input_config(inputlevel=0)
+ for section, options in cfg.options_by_section():
+ for optname, optdict, value in options:
+ key = u'%s.%s' % (section, optname)
+ default = cfg.option_default(optname, optdict)
+ # only record values differing from default
+ if value != default:
+ rql('INSERT CWProperty X: X pkey %(k)s, X value %(v)s',
+ {'k': key, 'v': value})
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/scripts/chpasswd.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/scripts/chpasswd.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,48 @@
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+
+import sys
+import getpass
+
+from cubicweb import Binary
+from cubicweb.server.utils import crypt_password
+
+
+if __args__:
+ login = __args__.pop()
+else:
+ login = raw_input("login? ")
+
+rset = rql('Any U WHERE U is CWUser, U login %(login)s', {'login': login})
+
+if len(rset) != 1:
+ sys.exit("user '%s' does not exist!" % login)
+
+pass1 = getpass.getpass(prompt='Enter new password? ')
+pass2 = getpass.getpass(prompt='Confirm? ')
+
+if pass1 != pass2:
+ sys.exit("passwords don't match!")
+
+crypted = crypt_password(pass1)
+
+cwuser = rset.get_entity(0,0)
+cwuser.cw_set(upassword=Binary(crypted))
+commit()
+
+print("password updated.")
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/scripts/cwuser_ldap2system.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/scripts/cwuser_ldap2system.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,42 @@
+from __future__ import print_function
+
+import base64
+from cubicweb.server.utils import crypt_password
+
+dbdriver = config.system_source_config['db-driver']
+from logilab.database import get_db_helper
+dbhelper = get_db_helper(driver)
+
+insert = ('INSERT INTO cw_cwuser (cw_creation_date,'
+ ' cw_eid,'
+ ' cw_modification_date,'
+ ' cw_login,'
+ ' cw_firstname,'
+ ' cw_surname,'
+ ' cw_last_login_time,'
+ ' cw_upassword,'
+ ' cw_cwuri) '
+ "VALUES (%(mtime)s, %(eid)s, %(mtime)s, %(login)s, "
+ " %(firstname)s, %(surname)s, %(mtime)s, %(pwd)s, 'foo');")
+update = "UPDATE entities SET source='system' WHERE eid=%(eid)s;"
+rset = sql("SELECT eid,type,source,extid,mtime FROM entities WHERE source!='system'", ask_confirm=False)
+for eid, type, source, extid, mtime in rset:
+ if type != 'CWUser':
+ print("don't know what to do with entity type", type)
+ continue
+ if not source.lower().startswith('ldap'):
+ print("don't know what to do with source type", source)
+ continue
+ extid = base64.decodestring(extid)
+ ldapinfos = [x.strip().split('=') for x in extid.split(',')]
+ login = ldapinfos[0][1]
+ firstname = login.capitalize()
+ surname = login.capitalize()
+ args = dict(eid=eid, type=type, source=source, login=login,
+ firstname=firstname, surname=surname, mtime=mtime,
+ pwd=dbhelper.binary_value(crypt_password('toto')))
+ print(args)
+ sql(insert, args)
+ sql(update, args)
+
+commit()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/scripts/detect_cycle.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/scripts/detect_cycle.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,16 @@
+from __future__ import print_function
+
+try:
+ rtype, = __args__
+except ValueError:
+ print('USAGE: cubicweb-ctl shell detect_cycle.py -- <relation type>')
+ print()
+
+graph = {}
+for fromeid, toeid in rql('Any X,Y WHERE X %s Y' % rtype):
+ graph.setdefault(fromeid, []).append(toeid)
+
+from logilab.common.graph import get_cycles
+
+for cycle in get_cycles(graph):
+ print('cycle', '->'.join(str(n) for n in cycle))
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/scripts/ldap_change_base_dn.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/scripts/ldap_change_base_dn.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,26 @@
+from __future__ import print_function
+
+from base64 import b64decode, b64encode
+try:
+ uri, newdn = __args__
+except ValueError:
+ print('USAGE: cubicweb-ctl shell ldap_change_base_dn.py -- <ldap source uri> <new dn>')
+ print()
+ print('you should not have updated your sources file yet')
+
+olddn = repo.sources_by_uri[uri].config['user-base-dn']
+
+assert olddn != newdn
+
+raw_input("Ensure you've stopped the instance, type enter when done.")
+
+for eid, extid in sql("SELECT eid, extid FROM entities WHERE source='%s'" % uri):
+ olduserdn = b64decode(extid)
+ newuserdn = olduserdn.replace(olddn, newdn)
+ if newuserdn != olduserdn:
+ print(olduserdn, '->', newuserdn)
+ sql("UPDATE entities SET extid='%s' WHERE eid=%s" % (b64encode(newuserdn), eid))
+
+commit()
+
+print('you can now update the sources file to the new dn and restart the instance')
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/scripts/ldapuser2ldapfeed.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/scripts/ldapuser2ldapfeed.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,98 @@
+"""turn a pyro source into a datafeed source
+
+Once this script is run, execute c-c db-check to cleanup relation tables.
+"""
+from __future__ import print_function
+
+import sys
+from collections import defaultdict
+from logilab.common.shellutils import generate_password
+
+try:
+ source_name, = __args__
+ source = repo.sources_by_uri[source_name]
+except ValueError:
+ print('you should specify the source name as script argument (i.e. after --'
+ ' on the command line)')
+ sys.exit(1)
+except KeyError:
+ print('%s is not an active source' % source_name)
+ sys.exit(1)
+
+# check source is reachable before doing anything
+if not source.get_connection().cnx:
+ print('%s is not reachable. Fix this before running this script' % source_name)
+ sys.exit(1)
+
+raw_input('Ensure you have shutdown all instances of this application before continuing.'
+ ' Type enter when ready.')
+
+system_source = repo.system_source
+
+from datetime import datetime
+from cubicweb.server.edition import EditedEntity
+
+
+print('******************** backport entity content ***************************')
+
+todelete = defaultdict(list)
+extids = set()
+duplicates = []
+for entity in rql('Any X WHERE X cw_source S, S eid %(s)s', {'s': source.eid}).entities():
+ etype = entity.cw_etype
+ if not source.support_entity(etype):
+ print("source doesn't support %s, delete %s" % (etype, entity.eid))
+ todelete[etype].append(entity)
+ continue
+ try:
+ entity.complete()
+ except Exception:
+ print('%s %s much probably deleted, delete it (extid %s)' % (
+ etype, entity.eid, entity.cw_metainformation()['extid']))
+ todelete[etype].append(entity)
+ continue
+ print('get back', etype, entity.eid)
+ entity.cw_edited = EditedEntity(entity, **entity.cw_attr_cache)
+ if not entity.creation_date:
+ entity.cw_edited['creation_date'] = datetime.utcnow()
+ if not entity.modification_date:
+ entity.cw_edited['modification_date'] = datetime.utcnow()
+ if not entity.upassword:
+ entity.cw_edited['upassword'] = generate_password()
+ extid = entity.cw_metainformation()['extid']
+ if not entity.cwuri:
+ entity.cw_edited['cwuri'] = '%s/?dn=%s' % (
+ source.urls[0], extid.decode('utf-8', 'ignore'))
+ print(entity.cw_edited)
+ if extid in extids:
+ duplicates.append(extid)
+ continue
+ extids.add(extid)
+ system_source.add_entity(session, entity)
+ sql("UPDATE entities SET source='system' "
+ "WHERE eid=%(eid)s", {'eid': entity.eid})
+
+# only cleanup entities table, remaining stuff should be cleaned by a c-c
+# db-check to be run after this script
+if duplicates:
+ print('found %s duplicate entries' % len(duplicates))
+ from pprint import pprint
+ pprint(duplicates)
+
+print(len(todelete), 'entities will be deleted')
+for etype, entities in todelete.items():
+ print('deleting', etype, [e.login for e in entities])
+ system_source.delete_info_multi(session, entities, source_name)
+
+
+
+source_ent = rql('CWSource S WHERE S eid %(s)s', {'s': source.eid}).get_entity(0, 0)
+source_ent.cw_set(type=u"ldapfeed", parser=u"ldapfeed")
+
+
+if raw_input('Commit?') in 'yY':
+ print('committing')
+ commit()
+else:
+ rollback()
+ print('rolled back')
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/scripts/pyroforge2datafeed.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/scripts/pyroforge2datafeed.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,134 @@
+"""turn a pyro source into a datafeed source
+
+Once this script is run, execute c-c db-check to cleanup relation tables.
+"""
+from __future__ import print_function
+
+import sys
+
+try:
+ source_name, = __args__
+ source = repo.sources_by_uri[source_name]
+except ValueError:
+ print('you should specify the source name as script argument (i.e. after --'
+ ' on the command line)')
+ sys.exit(1)
+except KeyError:
+ print('%s is not an active source' % source_name)
+ sys.exit(1)
+
+# check source is reachable before doing anything
+try:
+ source.get_connection()._repo
+except AttributeError:
+ print('%s is not reachable. Fix this before running this script' % source_name)
+ sys.exit(1)
+
+raw_input('Ensure you have shutdown all instances of this application before continuing.'
+ ' Type enter when ready.')
+
+system_source = repo.system_source
+
+from base64 import b64encode
+from cubicweb.server.edition import EditedEntity
+
+DONT_GET_BACK_ETYPES = set(( # XXX edit as desired
+ 'State',
+ 'RecipeStep', 'RecipeStepInput', 'RecipeStepOutput',
+ 'RecipeTransition', 'RecipeTransitionCondition',
+ 'NarvalConditionExpression', 'Recipe',
+ # XXX TestConfig
+ ))
+
+
+print('******************** backport entity content ***************************')
+
+from cubicweb.server import debugged
+todelete = {}
+host = source.config['base-url'].split('://')[1]
+for entity in rql('Any X WHERE X cw_source S, S eid %(s)s', {'s': source.eid}).entities():
+ etype = entity.cw_etype
+ if not source.support_entity(etype):
+ print("source doesn't support %s, delete %s" % (etype, entity.eid))
+ elif etype in DONT_GET_BACK_ETYPES:
+ print('ignore %s, delete %s' % (etype, entity.eid))
+ else:
+ try:
+ entity.complete()
+ if not host in entity.cwuri:
+ print('SKIP foreign entity', entity.cwuri, source.config['base-url'])
+ continue
+ except Exception:
+ print('%s %s much probably deleted, delete it (extid %s)' % (
+ etype, entity.eid, entity.cw_metainformation()['extid']))
+ else:
+ print('get back', etype, entity.eid)
+ entity.cw_edited = EditedEntity(entity, **entity.cw_attr_cache)
+ system_source.add_entity(session, entity)
+ sql("UPDATE entities SET asource=%(asource)s, source='system', extid=%(extid)s "
+ "WHERE eid=%(eid)s", {'asource': source_name,
+ 'extid': b64encode(entity.cwuri),
+ 'eid': entity.eid})
+ continue
+ todelete.setdefault(etype, []).append(entity)
+
+# only cleanup entities table, remaining stuff should be cleaned by a c-c
+# db-check to be run after this script
+for entities in todelete.values():
+ system_source.delete_info_multi(session, entities, source_name)
+
+
+print('******************** backport mapping **********************************')
+session.disable_hook_categories('cw.sources')
+mapping = []
+for mappart in rql('Any X,SCH WHERE X cw_schema SCH, X cw_for_source S, S eid %(s)s',
+ {'s': source.eid}).entities():
+ schemaent = mappart.cw_schema[0]
+ if schemaent.cw_etype != 'CWEType':
+ assert schemaent.cw_etype == 'CWRType'
+ sch = schema._eid_index[schemaent.eid]
+ for rdef in sch.rdefs.values():
+ if not source.support_entity(rdef.subject) \
+ or not source.support_entity(rdef.object):
+ continue
+ if rdef.subject in DONT_GET_BACK_ETYPES \
+ and rdef.object in DONT_GET_BACK_ETYPES:
+ print('dont map', rdef)
+ continue
+ if rdef.subject in DONT_GET_BACK_ETYPES:
+ options = u'action=link\nlinkattr=name'
+ roles = 'object',
+ elif rdef.object in DONT_GET_BACK_ETYPES:
+ options = u'action=link\nlinkattr=name'
+ roles = 'subject',
+ else:
+ options = u'action=copy'
+ if rdef.rtype in ('use_environment',):
+ roles = 'object',
+ else:
+ roles = 'subject',
+ print('map', rdef, options, roles)
+ for role in roles:
+ mapping.append( (
+ (str(rdef.subject), str(rdef.rtype), str(rdef.object)),
+ options + '\nrole=%s' % role) )
+ mappart.cw_delete()
+
+source_ent = rql('CWSource S WHERE S eid %(s)s', {'s': source.eid}).get_entity(0, 0)
+source_ent.init_mapping(mapping)
+
+# change source properties
+config = u'''synchronize=yes
+synchronization-interval=10min
+delete-entities=no
+'''
+rql('SET X type "datafeed", X parser "cw.entityxml", X url %(url)s, X config %(config)s '
+ 'WHERE X eid %(x)s',
+ {'x': source.eid, 'config': config,
+ 'url': source.config['base-url']+'/project'})
+
+
+commit()
+
+from cubes.apycot import recipes
+recipes.create_quick_recipe(session)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/scripts/repair_file_1-9_migration.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/scripts/repair_file_1-9_migration.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,52 @@
+"""execute this script if you've migration to file >= 1.9.0 with cubicweb <= 3.9.2
+
+FYI, this migration occurred :
+* on our intranet on July 07 2010
+* on our extranet on July 16 2010
+"""
+from __future__ import print_function
+
+try:
+ backupinstance, = __args__
+except ValueError:
+    print('USAGE: cubicweb-ctl shell <instance> repair_file_1-9_migration.py -- <backup instance id>')
+ print()
+ print('you should restored the backup on a new instance, accessible through pyro')
+
+from cubicweb import cwconfig, dbapi
+from cubicweb.server.session import hooks_control
+
+defaultadmin = repo.config.default_admin_config
+backupcfg = cwconfig.instance_configuration(backupinstance)
+backupcfg.repairing = True
+backuprepo, backupcnx = dbapi.in_memory_repo_cnx(backupcfg, defaultadmin['login'],
+ password=defaultadmin['password'],
+ host='localhost')
+backupcu = backupcnx.cursor()
+
+with hooks_control(session, session.HOOKS_DENY_ALL):
+ rql('SET X is Y WHERE X is File, Y name "File", NOT X is Y')
+ rql('SET X is_instance_of Y WHERE X is File, Y name "File", NOT X is_instance_of Y')
+ for rtype, in backupcu.execute('DISTINCT Any RTN WHERE X relation_type RT, RT name RTN,'
+ 'X from_entity Y, Y name "Image", X is CWRelation, '
+ 'EXISTS(XX is CWRelation, XX relation_type RT, '
+ 'XX from_entity YY, YY name "File")'):
+ if rtype in ('is', 'is_instance_of'):
+ continue
+ print(rtype)
+ for feid, xeid in backupcu.execute('Any F,X WHERE F %s X, F is IN (File,Image)' % rtype):
+ print('restoring relation %s between file %s and %s' % (rtype, feid, xeid), end=' ')
+ print(rql('SET F %s X WHERE F eid %%(f)s, X eid %%(x)s, NOT F %s X' % (rtype, rtype),
+ {'f': feid, 'x': xeid}))
+
+ for rtype, in backupcu.execute('DISTINCT Any RTN WHERE X relation_type RT, RT name RTN,'
+ 'X to_entity Y, Y name "Image", X is CWRelation, '
+ 'EXISTS(XX is CWRelation, XX relation_type RT, '
+ 'XX to_entity YY, YY name "File")'):
+ print(rtype)
+ for feid, xeid in backupcu.execute('Any F,X WHERE X %s F, F is IN (File,Image)' % rtype):
+ print('restoring relation %s between %s and file %s' % (rtype, xeid, feid), end=' ')
+ print(rql('SET X %s F WHERE F eid %%(f)s, X eid %%(x)s, NOT X %s F' % (rtype, rtype),
+ {'f': feid, 'x': xeid}))
+
+commit()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/misc/scripts/repair_splitbrain_ldapuser_source.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/misc/scripts/repair_splitbrain_ldapuser_source.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,109 @@
+"""
+CAUTION: READ THIS CAREFULLY
+
+Sometimes it happens that ldap (specifically ldapuser type) source
+yield "ghost" users. The reasons may vary (server upgrade while some
+instances are still running & syncing with the ldap source, unmanaged
+updates to the upstream ldap, etc.).
+
+This script was written and refined enough times that we are confident
+in that it does something reasonnable (at least it did for the
+target application).
+
+However you should really REALLY understand what it does before
+deciding to apply it for you. And then ADAPT it tou your needs.
+
+"""
+from __future__ import print_function
+
+import base64
+from collections import defaultdict
+
+from cubicweb.server.session import hooks_control
+
+try:
+ source_name, = __args__
+ source = repo.sources_by_uri[source_name]
+except ValueError:
+ print('you should specify the source name as script argument (i.e. after --'
+ ' on the command line)')
+ sys.exit(1)
+except KeyError:
+ print('%s is not an active source' % source_name)
+ sys.exit(1)
+
+# check source is reachable before doing anything
+if not source.get_connection().cnx:
+ print('%s is not reachable. Fix this before running this script' % source_name)
+ sys.exit(1)
+
+def find_dupes():
+ # XXX this retrieves entities from a source name "ldap"
+ # you will want to adjust
+ rset = sql("SELECT eid, extid FROM entities WHERE source='%s'" % source_name)
+ extid2eids = defaultdict(list)
+ for eid, extid in rset:
+ extid2eids[extid].append(eid)
+ return dict((base64.b64decode(extid).lower(), eids)
+ for extid, eids in extid2eids.items()
+ if len(eids) > 1)
+
+def merge_dupes(dupes, docommit=False):
+ gone_eids = []
+ CWUser = schema['CWUser']
+ for extid, eids in dupes.items():
+ newest = eids.pop() # we merge everything on the newest
+ print('merging ghosts of', extid, 'into', newest)
+ # now we merge pairwise into the newest
+ for old in eids:
+ subst = {'old': old, 'new': newest}
+ print(' merging', old)
+ gone_eids.append(old)
+ for rschema in CWUser.subject_relations():
+ if rschema.final or rschema == 'identity':
+ continue
+ if CWUser.rdef(rschema, 'subject').composite == 'subject':
+ # old 'composite' property is wiped ...
+ # think about email addresses, excel preferences
+ for eschema in rschema.objects():
+ rql('DELETE %s X WHERE U %s X, U eid %%(old)s' % (eschema, rschema), subst)
+ else:
+ # relink the new user to its old relations
+ rql('SET NU %s X WHERE NU eid %%(new)s, NOT NU %s X, OU %s X, OU eid %%(old)s' %
+ (rschema, rschema, rschema), subst)
+ # delete the old relations
+ rql('DELETE U %s X WHERE U eid %%(old)s' % rschema, subst)
+ # same thing ...
+ for rschema in CWUser.object_relations():
+ if rschema.final or rschema == 'identity':
+ continue
+ rql('SET X %s NU WHERE NU eid %%(new)s, NOT X %s NU, X %s OU, OU eid %%(old)s' %
+ (rschema, rschema, rschema), subst)
+ rql('DELETE X %s U WHERE U eid %%(old)s' % rschema, subst)
+ if not docommit:
+ rollback()
+ return
+ commit() # XXX flushing operations is wanted rather than really committing
+ print('clean up entities table')
+ sql('DELETE FROM entities WHERE eid IN (%s)' % (', '.join(str(x) for x in gone_eids)))
+ commit()
+
+def main():
+ dupes = find_dupes()
+ if not dupes:
+ print('No duplicate user')
+ return
+
+ print('Found %s duplicate user instances' % len(dupes))
+
+ while True:
+ print('Fix or dry-run? (f/d) ... or Ctrl-C to break out')
+ answer = raw_input('> ')
+ if answer.lower() not in 'fd':
+ continue
+ print('Please STOP THE APPLICATION INSTANCES (service or interactive), and press Return when done.')
+ raw_input('')
+ with hooks_control(session, session.HOOKS_DENY_ALL):
+ merge_dupes(dupes, docommit=answer=='f')
+
+main()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/mttransforms.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/mttransforms.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,121 @@
+# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""mime type transformation engine for cubicweb, based on mtconverter"""
+
+__docformat__ = "restructuredtext en"
+
+from logilab import mtconverter
+
+from logilab.mtconverter.engine import TransformEngine
+from logilab.mtconverter.transform import Transform
+from logilab.mtconverter import (register_base_transforms,
+ register_pil_transforms,
+ register_pygments_transforms)
+
+from cubicweb.utils import UStringIO
+from cubicweb.uilib import rest_publish, markdown_publish, html_publish
+
+HTML_MIMETYPES = ('text/html', 'text/xhtml', 'application/xhtml+xml')
+
+# CubicWeb specific transformations
+
+class rest_to_html(Transform):
+ inputs = ('text/rest', 'text/x-rst')
+ output = 'text/html'
+ def _convert(self, trdata):
+ return rest_publish(trdata.appobject, trdata.decode())
+
+class markdown_to_html(Transform):
+ inputs = ('text/markdown', 'text/x-markdown')
+ output = 'text/html'
+ def _convert(self, trdata):
+ return markdown_publish(trdata.appobject, trdata.decode())
+
+class html_to_html(Transform):
+ inputs = HTML_MIMETYPES
+ output = 'text/html'
+ def _convert(self, trdata):
+ return html_publish(trdata.appobject, trdata.data)
+
+
+# Instantiate and configure the transformation engine
+
+mtconverter.UNICODE_POLICY = 'replace'
+
+ENGINE = TransformEngine()
+ENGINE.add_transform(rest_to_html())
+ENGINE.add_transform(markdown_to_html())
+ENGINE.add_transform(html_to_html())
+
+try:
+ from cubicweb.ext.tal import CubicWebContext, compile_template
+except ImportError:
+ HAS_TAL = False
+ from cubicweb import schema
+ schema.NEED_PERM_FORMATS.remove('text/cubicweb-page-template')
+
+else:
+ HAS_TAL = True
+
+ class ept_to_html(Transform):
+ inputs = ('text/cubicweb-page-template',)
+ output = 'text/html'
+ output_encoding = 'utf-8'
+ def _convert(self, trdata):
+ context = CubicWebContext()
+ appobject = trdata.appobject
+ context.update({'self': appobject, 'rset': appobject.cw_rset,
+ 'req': appobject._cw,
+ '_' : appobject._cw._,
+ 'user': appobject._cw.user})
+ output = UStringIO()
+ template = compile_template(trdata.encode(self.output_encoding))
+ template.expand(context, output)
+ return output.getvalue()
+
+ ENGINE.add_transform(ept_to_html())
+
+if register_pil_transforms(ENGINE, verb=False):
+ HAS_PIL_TRANSFORMS = True
+else:
+ HAS_PIL_TRANSFORMS = False
+
+try:
+ from logilab.mtconverter.transforms import pygmentstransforms
+ for mt in ('text/plain',) + HTML_MIMETYPES:
+ try:
+ pygmentstransforms.mimetypes.remove(mt)
+ except ValueError:
+ continue
+ register_pygments_transforms(ENGINE, verb=False)
+
+ def patch_convert(cls):
+ def _convert(self, trdata, origconvert=cls._convert):
+ add_css = getattr(trdata.appobject._cw, 'add_css', None)
+ if add_css is not None:
+ # session has no add_css, only http request
+ add_css('pygments.css')
+ return origconvert(self, trdata)
+ cls._convert = _convert
+ patch_convert(pygmentstransforms.PygmentsHTMLTransform)
+
+ HAS_PYGMENTS_TRANSFORMS = True
+except ImportError:
+ HAS_PYGMENTS_TRANSFORMS = False
+
+register_base_transforms(ENGINE, verb=False)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/multipart.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/multipart.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,416 @@
+# -*- coding: utf-8 -*-
+'''
+Parser for multipart/form-data
+==============================
+
+This module provides a parser for the multipart/form-data format. It can read
+from a file, a socket or a WSGI environment. The parser can be used to replace
+cgi.FieldStorage (without the bugs) and works with Python 2.5+ and 3.x (2to3).
+
+Licence (MIT)
+-------------
+
+ Copyright (c) 2010, Marcel Hellkamp.
+ Inspired by the Werkzeug library: http://werkzeug.pocoo.org/
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
+
+'''
+
+__author__ = 'Marcel Hellkamp'
+__version__ = '0.1'
+__license__ = 'MIT'
+
+from tempfile import TemporaryFile
+from wsgiref.headers import Headers
+import re, sys
+try:
+ from io import BytesIO
+except ImportError: # pragma: no cover (fallback for Python 2.5)
+ from StringIO import StringIO as BytesIO
+
+from six import PY3, text_type
+from six.moves.urllib.parse import parse_qs
+
+##############################################################################
+################################ Helper & Misc ################################
+##############################################################################
+# Some of these were copied from bottle: http://bottle.paws.de/
+
+try:
+ from collections import MutableMapping as DictMixin
+except ImportError: # pragma: no cover (fallback for Python 2.5)
+ from UserDict import DictMixin
+
+class MultiDict(DictMixin):
+ """ A dict that remembers old values for each key """
+ def __init__(self, *a, **k):
+ self.dict = dict()
+ for k, v in dict(*a, **k).items():
+ self[k] = v
+
+ def __len__(self): return len(self.dict)
+ def __iter__(self): return iter(self.dict)
+ def __contains__(self, key): return key in self.dict
+ def __delitem__(self, key): del self.dict[key]
+ def keys(self): return self.dict.keys()
+ def __getitem__(self, key): return self.get(key, KeyError, -1)
+ def __setitem__(self, key, value): self.append(key, value)
+
+ def append(self, key, value): self.dict.setdefault(key, []).append(value)
+ def replace(self, key, value): self.dict[key] = [value]
+ def getall(self, key): return self.dict.get(key) or []
+
+ def get(self, key, default=None, index=-1):
+ if key not in self.dict and default != KeyError:
+ return [default][index]
+ return self.dict[key][index]
+
+ def iterallitems(self):
+ for key, values in self.dict.items():
+ for value in values:
+ yield key, value
+
+def tob(data, enc='utf8'): # Convert strings to bytes (py2 and py3)
+ return data.encode(enc) if isinstance(data, text_type) else data
+
+def copy_file(stream, target, maxread=-1, buffer_size=2*16):
+ ''' Read from :stream and write to :target until :maxread or EOF. '''
+ size, read = 0, stream.read
+ while 1:
+ to_read = buffer_size if maxread < 0 else min(buffer_size, maxread-size)
+ part = read(to_read)
+ if not part: return size
+ target.write(part)
+ size += len(part)
+
+##############################################################################
+################################ Header Parser ################################
+##############################################################################
+
+_special = re.escape('()<>@,;:\\"/[]?={} \t')
+_re_special = re.compile('[%s]' % _special)
+_qstr = '"(?:\\\\.|[^"])*"' # Quoted string
+_value = '(?:[^%s]+|%s)' % (_special, _qstr) # Save or quoted string
+_option = '(?:;|^)\s*([^%s]+)\s*=\s*(%s)' % (_special, _value)
+_re_option = re.compile(_option) # key=value part of an Content-Type like header
+
+def header_quote(val):
+ if not _re_special.search(val):
+ return val
+ return '"' + val.replace('\\','\\\\').replace('"','\\"') + '"'
+
+def header_unquote(val, filename=False):
+ if val[0] == val[-1] == '"':
+ val = val[1:-1]
+ if val[1:3] == ':\\' or val[:2] == '\\\\':
+ val = val.split('\\')[-1] # fix ie6 bug: full path --> filename
+ return val.replace('\\\\','\\').replace('\\"','"')
+ return val
+
+def parse_options_header(header, options=None):
+ if ';' not in header:
+ return header.lower().strip(), {}
+ ctype, tail = header.split(';', 1)
+ options = options or {}
+ for match in _re_option.finditer(tail):
+ key = match.group(1).lower()
+ value = header_unquote(match.group(2), key=='filename')
+ options[key] = value
+ return ctype, options
+
+##############################################################################
+################################## Multipart ##################################
+##############################################################################
+
+
+class MultipartError(ValueError): pass
+
+
+class MultipartParser(object):
+
+ def __init__(self, stream, boundary, content_length=-1,
+ disk_limit=2**30, mem_limit=2**20, memfile_limit=2**18,
+ buffer_size=2**16, charset='latin1'):
+ ''' Parse a multipart/form-data byte stream. This object is an iterator
+ over the parts of the message.
+
+ :param stream: A file-like stream. Must implement ``.read(size)``.
+ :param boundary: The multipart boundary as a byte string.
+ :param content_length: The maximum number of bytes to read.
+ '''
+ self.stream, self.boundary = stream, boundary
+ self.content_length = content_length
+ self.disk_limit = disk_limit
+ self.memfile_limit = memfile_limit
+ self.mem_limit = min(mem_limit, self.disk_limit)
+ self.buffer_size = min(buffer_size, self.mem_limit)
+ self.charset = charset
+ if self.buffer_size - 6 < len(boundary): # "--boundary--\r\n"
+ raise MultipartError('Boundary does not fit into buffer_size.')
+ self._done = []
+ self._part_iter = None
+
+ def __iter__(self):
+ ''' Iterate over the parts of the multipart message. '''
+ if not self._part_iter:
+ self._part_iter = self._iterparse()
+ for part in self._done:
+ yield part
+ for part in self._part_iter:
+ self._done.append(part)
+ yield part
+
+ def parts(self):
+ ''' Returns a list with all parts of the multipart message. '''
+ return list(iter(self))
+
+ def get(self, name, default=None):
+ ''' Return the first part with that name or a default value (None). '''
+ for part in self:
+ if name == part.name:
+ return part
+ return default
+
+ def get_all(self, name):
+ ''' Return a list of parts with that name. '''
+ return [p for p in self if p.name == name]
+
+ def _lineiter(self):
+ ''' Iterate over a binary file-like object line by line. Each line is
+ returned as a (line, line_ending) tuple. If the line does not fit
+ into self.buffer_size, line_ending is empty and the rest of the line
+ is returned with the next iteration.
+ '''
+ read = self.stream.read
+ maxread, maxbuf = self.content_length, self.buffer_size
+ _bcrnl = tob('\r\n')
+ _bcr = _bcrnl[:1]
+ _bnl = _bcrnl[1:]
+ _bempty = _bcrnl[:0] # b'rn'[:0] -> b''
+ buffer = _bempty # buffer for the last (partial) line
+ while 1:
+ data = read(maxbuf if maxread < 0 else min(maxbuf, maxread))
+ maxread -= len(data)
+ lines = (buffer+data).splitlines(True)
+ len_first_line = len(lines[0])
+ # be sure that the first line does not become too big
+ if len_first_line > self.buffer_size:
+ # at the same time don't split a '\r\n' accidentally
+ if (len_first_line == self.buffer_size+1 and
+ lines[0].endswith(_bcrnl)):
+ splitpos = self.buffer_size - 1
+ else:
+ splitpos = self.buffer_size
+ lines[:1] = [lines[0][:splitpos],
+ lines[0][splitpos:]]
+ if data:
+ buffer = lines[-1]
+ lines = lines[:-1]
+ for line in lines:
+ if line.endswith(_bcrnl): yield line[:-2], _bcrnl
+ elif line.endswith(_bnl): yield line[:-1], _bnl
+ elif line.endswith(_bcr): yield line[:-1], _bcr
+ else: yield line, _bempty
+ if not data:
+ break
+
+ def _iterparse(self):
+ lines, line = self._lineiter(), ''
+ separator = tob('--') + tob(self.boundary)
+ terminator = tob('--') + tob(self.boundary) + tob('--')
+ # Consume first boundary. Ignore leading blank lines
+ for line, nl in lines:
+ if line: break
+ if line != separator:
+ raise MultipartError("Stream does not start with boundary")
+ # For each part in stream...
+ mem_used, disk_used = 0, 0 # Track used resources to prevent DoS
+ is_tail = False # True if the last line was incomplete (cutted)
+ opts = {'buffer_size': self.buffer_size,
+ 'memfile_limit': self.memfile_limit,
+ 'charset': self.charset}
+ part = MultipartPart(**opts)
+ for line, nl in lines:
+ if line == terminator and not is_tail:
+ part.file.seek(0)
+ yield part
+ break
+ elif line == separator and not is_tail:
+ if part.is_buffered(): mem_used += part.size
+ else: disk_used += part.size
+ part.file.seek(0)
+ yield part
+ part = MultipartPart(**opts)
+ else:
+ is_tail = not nl # The next line continues this one
+ part.feed(line, nl)
+ if part.is_buffered():
+ if part.size + mem_used > self.mem_limit:
+ raise MultipartError("Memory limit reached.")
+ elif part.size + disk_used > self.disk_limit:
+ raise MultipartError("Disk limit reached.")
+ if line != terminator:
+ raise MultipartError("Unexpected end of multipart stream.")
+
+
+class MultipartPart(object):
+
+ def __init__(self, buffer_size=2**16, memfile_limit=2**18, charset='latin1'):
+ self.headerlist = []
+ self.headers = None
+ self.file = False
+ self.size = 0
+ self._buf = tob('')
+ self.disposition, self.name, self.filename = None, None, None
+ self.content_type, self.charset = None, charset
+ self.memfile_limit = memfile_limit
+ self.buffer_size = buffer_size
+
+ def feed(self, line, nl=''):
+ if self.file:
+ return self.write_body(line, nl)
+ return self.write_header(line, nl)
+
+ def write_header(self, line, nl):
+ line = line.decode(self.charset or 'latin1')
+ if not nl: raise MultipartError('Unexpected end of line in header.')
+ if not line.strip(): # blank line -> end of header segment
+ self.finish_header()
+ elif line[0] in ' \t' and self.headerlist:
+ name, value = self.headerlist.pop()
+ self.headerlist.append((name, value+line.strip()))
+ else:
+ if ':' not in line:
+ raise MultipartError("Syntax error in header: No colon.")
+ name, value = line.split(':', 1)
+ self.headerlist.append((name.strip(), value.strip()))
+
+ def write_body(self, line, nl):
+ if not line and not nl: return # This does not even flush the buffer
+ self.size += len(line) + len(self._buf)
+ self.file.write(self._buf + line)
+ self._buf = nl
+ if self.content_length > 0 and self.size > self.content_length:
+ raise MultipartError('Size of body exceeds Content-Length header.')
+ if self.size > self.memfile_limit and isinstance(self.file, BytesIO):
+ # TODO: What about non-file uploads that exceed the memfile_limit?
+ self.file, old = TemporaryFile(mode='w+b'), self.file
+ old.seek(0)
+ copy_file(old, self.file, self.size, self.buffer_size)
+
+ def finish_header(self):
+ self.file = BytesIO()
+ self.headers = Headers(self.headerlist)
+ cdis = self.headers.get('Content-Disposition','')
+ ctype = self.headers.get('Content-Type','')
+ clen = self.headers.get('Content-Length','-1')
+ if not cdis:
+ raise MultipartError('Content-Disposition header is missing.')
+ self.disposition, self.options = parse_options_header(cdis)
+ self.name = self.options.get('name')
+ self.filename = self.options.get('filename')
+ self.content_type, options = parse_options_header(ctype)
+ self.charset = options.get('charset') or self.charset
+ self.content_length = int(self.headers.get('Content-Length','-1'))
+
+ def is_buffered(self):
+ ''' Return true if the data is fully buffered in memory.'''
+ return isinstance(self.file, BytesIO)
+
+ @property
+ def value(self):
+ ''' Data decoded with the specified charset '''
+ pos = self.file.tell()
+ self.file.seek(0)
+ val = self.file.read()
+ self.file.seek(pos)
+ return val.decode(self.charset)
+
+ def save_as(self, path):
+ fp = open(path, 'wb')
+ pos = self.file.tell()
+ try:
+ self.file.seek(0)
+ size = copy_file(self.file, fp)
+ finally:
+ self.file.seek(pos)
+ return size
+
+##############################################################################
+#################################### WSGI ####################################
+##############################################################################
+
+def parse_form_data(environ, charset='utf8', strict=False, **kw):
+ ''' Parse form data from an environ dict and return a (forms, files) tuple.
+ Both tuple values are dictionaries with the form-field name as a key
+ (unicode) and lists as values (multiple values per key are possible).
+ The forms-dictionary contains form-field values as unicode strings.
+ The files-dictionary contains :class:`MultipartPart` instances, either
+ because the form-field was a file-upload or the value is to big to fit
+ into memory limits.
+
+ :param environ: An WSGI environment dict.
+ :param charset: The charset to use if unsure. (default: utf8)
+ :param strict: If True, raise :exc:`MultipartError` on any parsing
+ errors. These are silently ignored by default.
+ '''
+
+ forms, files = MultiDict(), MultiDict()
+ try:
+ if environ.get('REQUEST_METHOD','GET').upper() not in ('POST', 'PUT'):
+ raise MultipartError("Request method other than POST or PUT.")
+ content_length = int(environ.get('CONTENT_LENGTH', '-1'))
+ content_type = environ.get('CONTENT_TYPE', '')
+ if not content_type:
+ raise MultipartError("Missing Content-Type header.")
+ content_type, options = parse_options_header(content_type)
+ stream = environ.get('wsgi.input') or BytesIO()
+ kw['charset'] = charset = options.get('charset', charset)
+ if content_type == 'multipart/form-data':
+ boundary = options.get('boundary','')
+ if not boundary:
+ raise MultipartError("No boundary for multipart/form-data.")
+ for part in MultipartParser(stream, boundary, content_length, **kw):
+ if part.filename or not part.is_buffered():
+ files[part.name] = part
+ else: # TODO: Big form-fields are in the files dict. really?
+ forms[part.name] = part.value
+ elif content_type in ('application/x-www-form-urlencoded',
+ 'application/x-url-encoded'):
+ mem_limit = kw.get('mem_limit', 2**20)
+ if content_length > mem_limit:
+ raise MultipartError("Request too big. Increase MAXMEM.")
+ data = stream.read(mem_limit)
+ if stream.read(1): # These is more that does not fit mem_limit
+ raise MultipartError("Request too big. Increase MAXMEM.")
+ if PY3:
+ data = data.decode('ascii')
+ data = parse_qs(data, keep_blank_values=True)
+ for key, values in data.items():
+ for value in values:
+ if PY3:
+ forms[key] = value
+ else:
+ forms[key.decode(charset)] = value.decode(charset)
+ else:
+ raise MultipartError("Unsupported content type.")
+ except MultipartError:
+ if strict: raise
+ return forms, files
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/predicates.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/predicates.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1421 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see .
+"""Predicate classes
+"""
+
+__docformat__ = "restructuredtext en"
+
+import logging
+from warnings import warn
+from operator import eq
+
+from six import string_types, integer_types
+from six.moves import range
+
+from logilab.common.deprecation import deprecated
+from logilab.common.registry import Predicate, objectify_predicate, yes
+
+from yams.schema import BASE_TYPES, role_name
+from rql.nodes import Function
+
+from cubicweb import (Unauthorized, NoSelectableObject, NotAnEntity,
+ CW_EVENT_MANAGER, role)
+from cubicweb.uilib import eid_param
+from cubicweb.schema import split_expression
+
+yes = deprecated('[3.15] import yes() from use logilab.common.registry')(yes)
+
+
+# abstract predicates / mixin helpers ###########################################
+
+class PartialPredicateMixIn(object):
+    """convenience mix-in for predicates that will look into the containing
+    class to find missing information.
+
+    cf. `cubicweb.web.action.LinkToEntityAction` for instance
+    """
+    def __call__(self, cls, *args, **kwargs):
+        # fetch configuration missing at initialization time from the
+        # selected class (see subclasses' complete() implementations)
+        self.complete(cls)
+        return super(PartialPredicateMixIn, self).__call__(cls, *args, **kwargs)
+
+
+class EClassPredicate(Predicate):
+    """abstract class for predicates working on *entity class(es)* specified
+    explicitly or found in the result set.
+
+    Here are entity lookup / scoring rules:
+
+    * if `entity` is specified, return score for this entity's class
+
+    * elif `rset`, `select` and `filtered_variable` are specified, return score
+      for the possible classes for variable in the given rql :class:`Select`
+      node
+
+    * elif `rset` and `row` are specified, return score for the class of the
+      entity found in the specified cell, using column specified by `col` or 0
+
+    * elif `rset` is specified return score for each entity class found in the
+      column specified by the `col` argument or in column 0 if not
+      specified
+
+    When there are several classes to be evaluated, return the sum of scores for
+    each entity class unless:
+
+      - `mode` == 'all' (the default) and some entity class is scored
+        to 0, in which case 0 is returned
+
+      - `mode` == 'any', in which case the first non-zero score is
+        returned
+
+      - `accept_none` is False and some cell in the column has a None value
+        (this may occur with outer join)
+    """
+    def __init__(self, once_is_enough=None, accept_none=True, mode='all'):
+        # `once_is_enough` is the deprecated spelling of mode='any'
+        if once_is_enough is not None:
+            warn("[3.14] once_is_enough is deprecated, use mode='any'",
+                 DeprecationWarning, stacklevel=2)
+            if once_is_enough:
+                mode = 'any'
+        assert mode in ('any', 'all'), 'bad mode %s' % mode
+        self.once_is_enough = mode == 'any'
+        self.accept_none = accept_none
+
+    def __call__(self, cls, req, rset=None, row=None, col=0, entity=None,
+                 select=None, filtered_variable=None,
+                 accept_none=None,
+                 **kwargs):
+        if entity is not None:
+            return self.score_class(entity.__class__, req)
+        if not rset:
+            return 0
+        if select is not None and filtered_variable is not None:
+            # etypes computed from the solutions of the rql Select node
+            etypes = set(sol[filtered_variable.name] for sol in select.solutions)
+        elif row is None:
+            # no row given: consider every entity type found in the column
+            if accept_none is None:
+                accept_none = self.accept_none
+            if not accept_none and \
+                   any(row[col] is None for row in rset):
+                return 0
+            etypes = rset.column_types(col)
+        else:
+            etype = rset.description[row][col]
+            # may have None in rset.description on outer join
+            if etype is None or rset.rows[row][col] is None:
+                return 0
+            etypes = (etype,)
+        score = 0
+        for etype in etypes:
+            escore = self.score(cls, req, etype)
+            if not escore and not self.once_is_enough:
+                return 0
+            elif self.once_is_enough:
+                return escore
+            score += escore
+        return score
+
+    def score(self, cls, req, etype):
+        # final (base) types such as String or Int never match
+        if etype in BASE_TYPES:
+            return 0
+        return self.score_class(req.vreg['etypes'].etype_class(etype), req)
+
+    def score_class(self, eclass, req):
+        # to be implemented by concrete subclasses
+        raise NotImplementedError()
+
+
+class EntityPredicate(EClassPredicate):
+    """abstract class for predicates working on *entity instance(s)* specified
+    explicitly or found in the result set.
+
+    Here are entity lookup / scoring rules:
+
+    * if `entity` is specified, return score for this entity
+
+    * elif `row` is specified, return score for the entity found in the
+      specified cell, using column specified by `col` or 0
+
+    * else return the sum of scores for each entity found in the column
+      specified by the `col` argument or in column 0 if not specified,
+      unless:
+
+      - `mode` == 'all' (the default) and some entity class is scored
+        to 0, in which case 0 is returned
+
+      - `mode` == 'any', in which case the first non-zero score is
+        returned
+
+      - `accept_none` is False and some cell in the column has a None value
+        (this may occur with outer join)
+
+    .. Note::
+       using :class:`EntityPredicate` or :class:`EClassPredicate` as base predicate
+       class impacts performance, since when no entity or row is specified the
+       latter works on every different *entity class* found in the result set,
+       while the former works on each *entity* (eg each row of the result set),
+       which may be much more costly.
+    """
+
+    def __call__(self, cls, req, rset=None, row=None, col=0, accept_none=None,
+                 entity=None, **kwargs):
+        if not rset and entity is None:
+            return 0
+        score = 0
+        if entity is not None:
+            score = self.score_entity(entity)
+        elif row is None:
+            col = col or 0
+            if accept_none is None:
+                accept_none = self.accept_none
+            for row, rowvalue in enumerate(rset.rows):
+                if rowvalue[col] is None: # outer join
+                    if not accept_none:
+                        return 0
+                    continue
+                escore = self.score(req, rset, row, col)
+                if not escore and not self.once_is_enough:
+                    return 0
+                elif self.once_is_enough:
+                    return escore
+                score += escore
+        else:
+            col = col or 0
+            etype = rset.description[row][col]
+            if etype is not None: # outer join
+                score = self.score(req, rset, row, col)
+        return score
+
+    def score(self, req, rset, row, col):
+        # NotAnEntity is raised for final values (e.g. strings) in the cell
+        try:
+            return self.score_entity(rset.get_entity(row, col))
+        except NotAnEntity:
+            return 0
+
+    def score_entity(self, entity):
+        # to be implemented by concrete subclasses
+        raise NotImplementedError()
+
+
+class ExpectedValuePredicate(Predicate):
+    """Take a list of expected values as initializer argument and store them
+    into the :attr:`expected` set attribute. You may also give a set as single
+    argument, which will then be referenced as set of expected values,
+    allowing modifications to the given set to be considered.
+
+    You should implement one of :meth:`_values_set(cls, req, **kwargs)` or
+    :meth:`_get_value(cls, req, **kwargs)` method which should respectively
+    return the set of values or the unique possible value for the given context.
+
+    You may also specify a `mode` behaviour as argument, as explained below.
+
+    Returned score is:
+
+    - 0 if `mode` == 'all' (the default) and at least one expected
+      values isn't found
+
+    - 0 if `mode` == 'any' and none of the expected values is found
+
+    - else the number of matching values
+
+    Notice `mode` = 'any' with a single expected value has no effect at all.
+    """
+    def __init__(self, *expected, **kwargs):
+        assert expected, self
+        # a single set/dict argument is kept by reference (not copied), so
+        # later modifications to it are taken into account
+        if len(expected) == 1 and isinstance(expected[0], (set, dict)):
+            self.expected = expected[0]
+        else:
+            self.expected = frozenset(expected)
+        mode = kwargs.pop('mode', 'all')
+        assert mode in ('any', 'all'), 'bad mode %s' % mode
+        self.once_is_enough = mode == 'any'
+        assert not kwargs, 'unexpected arguments %s' % kwargs
+
+    def __str__(self):
+        return '%s(%s)' % (self.__class__.__name__,
+                           ','.join(sorted(str(s) for s in self.expected)))
+
+    def __call__(self, cls, req, **kwargs):
+        values = self._values_set(cls, req, **kwargs)
+        if isinstance(values, dict):
+            if isinstance(self.expected, dict):
+                # count keys whose value matches the expected value (or is
+                # contained in it when the expected value is a collection)
+                matching = 0
+                for key, expected_value in self.expected.items():
+                    if key in values:
+                        if (isinstance(expected_value, (list, tuple, frozenset, set))
+                            and values[key] in expected_value):
+                            matching += 1
+                        elif values[key] == expected_value:
+                            matching += 1
+            if isinstance(self.expected, (set, frozenset)):
+                # expected is a plain set: match against the dict's keys
+                values = frozenset(values)
+                matching = len(values & self.expected)
+        else:
+            matching = len(values & self.expected)
+        if self.once_is_enough:
+            return matching
+        if matching == len(self.expected):
+            return matching
+        return 0
+
+    def _values_set(self, cls, req, **kwargs):
+        # default implementation: singleton set built from _get_value
+        return frozenset( (self._get_value(cls, req, **kwargs),) )
+
+    def _get_value(self, cls, req, **kwargs):
+        raise NotImplementedError()
+
+
+# bare predicates ##############################################################
+
+class match_kwargs(ExpectedValuePredicate):
+    """Return non-zero score if parameter names specified as initializer
+    arguments are specified in the input context.
+
+
+    Return a score corresponding to the number of expected parameters.
+
+    When multiple parameters are expected, all of them should be found in
+    the input context unless `mode` keyword argument is given to 'any',
+    in which case a single matching parameter is enough.
+    """
+
+    def _values_set(self, cls, req, **kwargs):
+        # the input context's keyword arguments are the candidate values
+        return kwargs
+
+
+class appobject_selectable(Predicate):
+    """Return 1 if another appobject is selectable using the same input context.
+
+    Initializer arguments:
+
+    * `registry`, a registry name
+
+    * `regids`, object identifiers in this registry, one of them should be
+      selectable.
+    """
+    # score returned when one of the target appobjects is selectable
+    selectable_score = 1
+    def __init__(self, registry, *regids):
+        self.registry = registry
+        self.regids = regids
+
+    def __call__(self, cls, req, **kwargs):
+        # stop at the first selectable appobject
+        for regid in self.regids:
+            if req.vreg[self.registry].select_or_none(regid, req, **kwargs) is not None:
+                return self.selectable_score
+        return 0
+
+
+class adaptable(appobject_selectable):
+    """Return 1 if another appobject is selectable using the same input context.
+
+    Initializer arguments:
+
+    * `regids`, adapter identifiers (e.g. interface names) to which the context
+      (usually entities) should be adaptable. One of them should be selectable
+      when multiple identifiers are given.
+    """
+    def __init__(self, *regids):
+        super(adaptable, self).__init__('adapters', *regids)
+
+    def __call__(self, cls, req, **kwargs):
+        kwargs.setdefault('accept_none', False)
+        score = super(adaptable, self).__call__(cls, req, **kwargs)
+        if score == 0 and kwargs.get('rset') and len(kwargs['rset']) > 1 and not 'row' in kwargs:
+            # on rset containing several entity types, each row may be
+            # individually adaptable, while the whole rset won't be if the
+            # same adapter can't be used for each type
+            for row in range(len(kwargs['rset'])):
+                kwargs.setdefault('col', 0)
+                _score = super(adaptable, self).__call__(cls, req, row=row, **kwargs)
+                if not _score:
+                    return 0
+                # adjust score per row as expected by default adjust_score
+                # implementation
+                score += self.adjust_score(_score)
+        else:
+            score = self.adjust_score(score)
+        return score
+
+    @staticmethod
+    def adjust_score(score):
+        # being adaptable to an interface should take precedence over
+        # is_instance('Any'), but not over explicit
+        # is_instance('SomeEntityType'), and, for **a single entity**:
+        # * is_instance('Any') score is 1
+        # * is_instance('SomeEntityType') score is at least 2
+        if score >= 2:
+            return score - 0.5
+        if score == 1:
+            return score + 0.5
+        return score
+
+
+class configuration_values(Predicate):
+    """Return 1 if the instance has an option set to a given value(s) in its
+    configuration file.
+    """
+    # XXX this predicate could be evaluated on startup
+    def __init__(self, key, values):
+        self._key = key
+        # accept a single value or a sequence of acceptable values
+        if not isinstance(values, (tuple, list)):
+            values = (values,)
+        self._values = frozenset(values)
+
+    def __call__(self, cls, req, **kwargs):
+        try:
+            # configuration doesn't change at runtime: compute the score once
+            # and cache it on the predicate instance
+            return self._score
+        except AttributeError:
+            if req is None:
+                # no request available (e.g. server-side hook context)
+                config = kwargs['repo'].config
+            else:
+                config = req.vreg.config
+            self._score = config[self._key] in self._values
+        return self._score
+
+
+# rset predicates ##############################################################
+
+@objectify_predicate
+def none_rset(cls, req, rset=None, **kwargs):
+    """Return 1 if the result set is None (eg usually not specified)."""
+    if rset is None:
+        return 1
+    return 0
+
+
+# XXX == ~ none_rset
+@objectify_predicate
+def any_rset(cls, req, rset=None, **kwargs):
+    """Return 1 for any result set, whatever the number of rows in it, even 0."""
+    if rset is not None:
+        return 1
+    return 0
+
+
+@objectify_predicate
+def nonempty_rset(cls, req, rset=None, **kwargs):
+    """Return 1 for result set containing one or more rows."""
+    if rset:
+        return 1
+    return 0
+
+
+# XXX == ~ nonempty_rset
+@objectify_predicate
+def empty_rset(cls, req, rset=None, **kwargs):
+    """Return 1 for result set which doesn't contain any row."""
+    # note: rset must be present (not None) but have zero rows
+    if rset is not None and len(rset) == 0:
+        return 1
+    return 0
+
+
+# XXX == multi_lines_rset(1)
+@objectify_predicate
+def one_line_rset(cls, req, rset=None, row=None, **kwargs):
+    """Return 1 if the result set is of size 1, or greater but a specific row in
+    the result set is specified ('row' argument).
+    """
+    # a single explicit entity in the context also counts as "one line"
+    if rset is None and 'entity' in kwargs:
+        return 1
+    if rset is not None and (row is not None or len(rset) == 1):
+        return 1
+    return 0
+
+
+class multi_lines_rset(Predicate):
+    """Return 1 if the operator expression matches between `num` elements
+    in the result set and the `expected` value if defined.
+
+    By default, multi_lines_rset(expected) matches equality expression:
+    `nb` row(s) in result set equals to expected value
+    But, you can perform richer comparisons by overriding default operator:
+    multi_lines_rset(expected, operator.gt)
+
+    If `expected` is None, return 1 if the result set contains *at least*
+    two rows.
+    If rset is None, return 0.
+    """
+    def __init__(self, expected=None, operator=eq):
+        self.expected = expected
+        self.operator = operator
+
+    def match_expected(self, num):
+        # with no expected value, "multi" simply means more than one
+        if self.expected is None:
+            return num > 1
+        return self.operator(num, self.expected)
+
+    def __call__(self, cls, req, rset=None, **kwargs):
+        return int(rset is not None and self.match_expected(len(rset)))
+
+
+class multi_columns_rset(multi_lines_rset):
+    """If `nb` is specified, return 1 if the result set has exactly `nb` column
+    per row. Else (`nb` is None), return 1 if the result set contains *at least*
+    two columns per row. Return 0 for empty result set.
+    """
+
+    def __call__(self, cls, req, rset=None, **kwargs):
+        # 'or 0' since we *must not* return None. Also don't use rset.rows so
+        # this selector will work if rset is a simple list of list.
+        return rset and self.match_expected(len(rset[0])) or 0
+
+
+class paginated_rset(Predicate):
+    """Return 1 or more for result set with more rows than one or more page
+    size. You can specify expected number of pages to the initializer (default
+    to one), and you'll get that number of pages as score if the result set is
+    big enough.
+
+    Page size is searched in (respecting order):
+    * a `page_size` argument
+    * a `page_size` form parameters
+    * the `navigation.page-size` property (see :ref:`PersistentProperties`)
+    """
+    def __init__(self, nbpages=1):
+        assert nbpages > 0
+        self.nbpages = nbpages
+
+    def __call__(self, cls, req, rset=None, **kwargs):
+        if rset is None:
+            return 0
+        page_size = kwargs.get('page_size')
+        if page_size is None:
+            page_size = req.form.get('page_size')
+            if page_size is not None:
+                try:
+                    page_size = int(page_size)
+                except ValueError:
+                    # ignore unparsable form value and fall back to property
+                    page_size = None
+        if page_size is None:
+            page_size = req.property_value('navigation.page-size')
+        # not enough rows to fill the requested number of pages
+        if len(rset) <= (page_size*self.nbpages):
+            return 0
+        return self.nbpages
+
+
+@objectify_predicate
+def sorted_rset(cls, req, rset=None, **kwargs):
+    """Return 1 for sorted result set (e.g. from an RQL query containing an
+    ORDERBY clause), with exception that it will return 0 if the rset is
+    'ORDERBY FTIRANK(VAR)' (eg sorted by rank value of the has_text index).
+    """
+    if rset is None:
+        return 0
+    selects = rset.syntax_tree().children
+    # reject unions, unsorted queries and full-text-rank ordering
+    if (len(selects) > 1 or
+        not selects[0].orderby or
+        (isinstance(selects[0].orderby[0].term, Function) and
+         selects[0].orderby[0].term.name == 'FTIRANK')
+       ):
+        return 0
+    # score 2 so it wins over predicates returning a plain 1
+    return 2
+
+
+# XXX == multi_etypes_rset(1)
+@objectify_predicate
+def one_etype_rset(cls, req, rset=None, col=0, **kwargs):
+    """Return 1 if the result set contains entities which are all of the same
+    type in the column specified by the `col` argument of the input context, or
+    in column 0.
+    """
+    if rset is None:
+        return 0
+    if len(rset.column_types(col)) != 1:
+        return 0
+    return 1
+
+
+class multi_etypes_rset(multi_lines_rset):
+    """If `nb` is specified, return 1 if the result set contains `nb` different
+    types of entities in the column specified by the `col` argument of the input
+    context, or in column 0. If `nb` is None, return 1 if the result set contains
+    *at least* two different types of entities.
+    """
+
+    def __call__(self, cls, req, rset=None, col=0, **kwargs):
+        # 'or 0' since we *must not* return None
+        return rset and self.match_expected(len(rset.column_types(col))) or 0
+
+
+@objectify_predicate
+def logged_user_in_rset(cls, req, rset=None, row=None, col=0, **kwargs):
+    """Return positive score if the result set at the specified row / col
+    contains the eid of the logged user.
+    """
+    if rset is None:
+        return 0
+    # comparison yields a bool, scored as 0 or 1
+    return req.user.eid == rset[row or 0][col]
+
+
+# entity predicates #############################################################
+
+class composite_etype(Predicate):
+    """Return 1 for composite entities.
+
+    A composite entity has an etype for which at least one relation
+    definition points in its direction with the
+    composite='subject'/'object' notation.
+    """
+
+    def __call__(self, cls, req, **kwargs):
+        # only scores when an explicit entity is in the input context
+        entity = kwargs.pop('entity', None)
+        if entity is None:
+            return 0
+        return entity.e_schema.is_composite
+
+
+
+class non_final_entity(EClassPredicate):
+    """Return 1 for entity of a non final entity type(s). Remember, "final"
+    entity types are String, Int, etc... This is equivalent to
+    `is_instance('Any')` but more optimized.
+
+    See :class:`~cubicweb.predicates.EClassPredicate` documentation for entity
+    class lookup / score rules according to the input context.
+    """
+    def score(self, cls, req, etype):
+        # skip the etype -> eclass resolution done by the base class
+        if etype in BASE_TYPES:
+            return 0
+        return 1
+
+    def score_class(self, eclass, req):
+        return 1 # necessarily true if we're there
+
+
+
+def _reset_is_instance_cache(vreg):
+    """Drop the is_instance class-proximity cache (see is_instance.score_class)."""
+    vreg._is_instance_predicate_cache = {}
+
+# keep the cache coherent across registry reloads
+CW_EVENT_MANAGER.bind('before-registry-reset', _reset_is_instance_cache)
+
+class is_instance(EClassPredicate):
+    """Return non-zero score for entity that is an instance of the one of given
+    type(s). If multiple arguments are given, matching one of them is enough.
+
+    Entity types should be given as string, the corresponding class will be
+    fetched from the registry at selection time.
+
+    See :class:`~cubicweb.predicates.EClassPredicate` documentation for entity
+    class lookup / score rules according to the input context.
+
+    .. note:: the score will reflect class proximity so the most specific object
+              will be selected.
+    """
+
+    def __init__(self, *expected_etypes, **kwargs):
+        super(is_instance, self).__init__(**kwargs)
+        self.expected_etypes = expected_etypes
+        for etype in self.expected_etypes:
+            assert isinstance(etype, string_types), etype
+
+    def __str__(self):
+        return '%s(%s)' % (self.__class__.__name__,
+                           ','.join(str(s) for s in self.expected_etypes))
+
+    def score_class(self, eclass, req):
+        # cache on vreg to avoid reloading issues
+        try:
+            cache = req.vreg._is_instance_predicate_cache
+        except AttributeError:
+            # XXX 'before-registry-reset' not called for db-api connections
+            cache = req.vreg._is_instance_predicate_cache = {}
+        try:
+            expected_eclasses = cache[self]
+        except KeyError:
+            # turn list of entity types as string into a list of
+            # (entity class, parent classes)
+            etypesreg = req.vreg['etypes']
+            expected_eclasses = cache[self] = []
+            for etype in self.expected_etypes:
+                try:
+                    expected_eclasses.append(etypesreg.etype_class(etype))
+                except KeyError:
+                    continue # entity type not in the schema
+        # NB: `any` here is the registry's wildcard ('Any') class, shadowing
+        # the builtin in this scope
+        parents, any = req.vreg['etypes'].parent_classes(eclass.__regid__)
+        score = 0
+        for expectedcls in expected_eclasses:
+            # adjust score according to class proximity
+            if expectedcls is eclass:
+                score += len(parents) + 4
+            elif expectedcls is any: # Any
+                score += 1
+            else:
+                for index, basecls in enumerate(reversed(parents)):
+                    if expectedcls is basecls:
+                        score += index + 3
+                        break
+        return score
+
+
+class score_entity(EntityPredicate):
+    """Return score according to an arbitrary function given as argument which
+    will be called with input content entity as argument.
+
+    This is a very useful predicate that will usually interest you since it
+    allows a lot of things without having to write a specific predicate.
+
+    The function can return arbitrary value which will be casted to an integer
+    value at the end.
+
+    See :class:`~cubicweb.predicates.EntityPredicate` documentation for entity
+    lookup / score rules according to the input context.
+    """
+    def __init__(self, scorefunc, once_is_enough=None, mode='all'):
+        super(score_entity, self).__init__(mode=mode, once_is_enough=once_is_enough)
+        # wrap the user function so non-integer truthy results score 1
+        def intscore(*args, **kwargs):
+            score = scorefunc(*args, **kwargs)
+            if not score:
+                return 0
+            if isinstance(score, integer_types):
+                return score
+            return 1
+        # override the method with the wrapped scoring function
+        self.score_entity = intscore
+
+
+class has_mimetype(EntityPredicate):
+    """Return 1 if the entity adapt to IDownloadable and has the given MIME type.
+
+    You can give 'image/' to match any image for instance, or 'image/png' to match
+    only PNG images.
+    """
+    def __init__(self, mimetype, once_is_enough=None, mode='all'):
+        super(has_mimetype, self).__init__(mode=mode, once_is_enough=once_is_enough)
+        self.mimetype = mimetype
+
+    def score_entity(self, entity):
+        idownloadable = entity.cw_adapt_to('IDownloadable')
+        if idownloadable is None:
+            return 0
+        mt = idownloadable.download_content_type()
+        # prefix match, so e.g. 'image/' matches any image MIME type
+        if not (mt and mt.startswith(self.mimetype)):
+            return 0
+        return 1
+
+
+class relation_possible(EntityPredicate):
+    """Return 1 for entity that supports the relation, provided that the
+    request's user may do some `action` on it (see below).
+
+    The relation is specified by the following initializer arguments:
+
+    * `rtype`, the name of the relation
+
+    * `role`, the role of the entity in the relation, either 'subject' or
+      'object', default to 'subject'
+
+    * `target_etype`, optional name of an entity type that should be supported
+      at the other end of the relation
+
+    * `action`, a relation schema action (e.g. one of 'read', 'add', 'delete',
+      default to 'read') which must be granted to the user, else a 0 score will
+      be returned. Give None if you don't want any permission checking.
+
+    * `strict`, boolean (default to False) telling what to do when the user has
+      not globally the permission for the action (eg the action is not granted
+      to one of the user's groups)
+
+      - when strict is False, if there are some local role defined for this
+        action (e.g. using rql expressions), then the permission will be
+        considered as granted
+
+      - when strict is True, then the permission will be actually checked for
+        each entity
+
+    Setting `strict` to True impacts performance for large result set since
+    you'll then get the :class:`~cubicweb.predicates.EntityPredicate` behaviour
+    while otherwise you get the :class:`~cubicweb.predicates.EClassPredicate`'s
+    one. See those classes documentation for entity lookup / score rules
+    according to the input context.
+    """
+
+    def __init__(self, rtype, role='subject', target_etype=None,
+                 action='read', strict=False, **kwargs):
+        super(relation_possible, self).__init__(**kwargs)
+        self.rtype = rtype
+        self.role = role
+        self.target_etype = target_etype
+        self.action = action
+        self.strict = strict
+
+    # hack hack hack
+    def __call__(self, cls, req, **kwargs):
+        # hack hack hack
+        # dispatch to the per-entity or per-class machinery depending on
+        # `strict` (see the class docstring for the performance trade-off)
+        if self.strict:
+            return EntityPredicate.__call__(self, cls, req, **kwargs)
+        return EClassPredicate.__call__(self, cls, req, **kwargs)
+
+    def score(self, *args):
+        # same dispatch as __call__: strict mode scores each entity
+        if self.strict:
+            return EntityPredicate.score(self, *args)
+        return EClassPredicate.score(self, *args)
+
+    def _get_rschema(self, eclass):
+        # return the relation schema for self.rtype as seen from eclass in
+        # self.role, or None when the class doesn't support the relation
+        eschema = eclass.e_schema
+        try:
+            if self.role == 'object':
+                return eschema.objrels[self.rtype]
+            else:
+                return eschema.subjrels[self.rtype]
+        except KeyError:
+            return None
+
+    def score_class(self, eclass, req):
+        rschema = self._get_rschema(eclass)
+        if rschema is None:
+            return 0 # relation not supported
+        eschema = eclass.e_schema
+        if self.target_etype is not None:
+            try:
+                rdef = rschema.role_rdef(eschema, self.target_etype, self.role)
+            except KeyError:
+                return 0
+            if self.action and not rdef.may_have_permission(self.action, req):
+                return 0
+            # the target entity type must also be readable
+            teschema = req.vreg.schema.eschema(self.target_etype)
+            if not teschema.may_have_permission('read', req):
+                return 0
+        elif self.action:
+            return rschema.may_have_permission(self.action, req, eschema, self.role)
+        return 1
+
+    def score_entity(self, entity):
+        rschema = self._get_rschema(entity)
+        if rschema is None:
+            return 0 # relation not supported
+        if self.action:
+            if self.target_etype is not None:
+                # narrow down to the precise relation definition
+                try:
+                    rschema = rschema.role_rdef(entity.e_schema,
+                                                self.target_etype, self.role)
+                except KeyError:
+                    return 0
+            if self.role == 'subject':
+                if not rschema.has_perm(entity._cw, self.action, fromeid=entity.eid):
+                    return 0
+            elif not rschema.has_perm(entity._cw, self.action, toeid=entity.eid):
+                return 0
+        if self.target_etype is not None:
+            req = entity._cw
+            teschema = req.vreg.schema.eschema(self.target_etype)
+            if not teschema.may_have_permission('read', req):
+                return 0
+        return 1
+
+
+class partial_relation_possible(PartialPredicateMixIn, relation_possible):
+    """Same as :class:~`cubicweb.predicates.relation_possible`, but will look for
+    attributes of the selected class to get information which is otherwise
+    expected by the initializer, except for `action` and `strict` which are kept
+    as initializer arguments.
+
+    This is useful to predefine predicate of an abstract class designed to be
+    customized.
+    """
+    def __init__(self, action='read', **kwargs):
+        # rtype / role / target_etype are filled in later by complete()
+        super(partial_relation_possible, self).__init__(None, None, None,
+                                                        action, **kwargs)
+
+    def complete(self, cls):
+        # called at selection time with the candidate class
+        self.rtype = cls.rtype
+        self.role = role(cls)
+        self.target_etype = getattr(cls, 'target_etype', None)
+
+
+class has_related_entities(EntityPredicate):
+    """Return 1 if entity support the specified relation and has some linked
+    entities by this relation, optionally filtered according to the specified
+    target type.
+
+    The relation is specified by the following initializer arguments:
+
+    * `rtype`, the name of the relation
+
+    * `role`, the role of the entity in the relation, either 'subject' or
+      'object', default to 'subject'.
+
+    * `target_etype`, optional name of an entity type that should be found
+      at the other end of the relation
+
+    See :class:`~cubicweb.predicates.EntityPredicate` documentation for entity
+    lookup / score rules according to the input context.
+    """
+    def __init__(self, rtype, role='subject', target_etype=None, **kwargs):
+        super(has_related_entities, self).__init__(**kwargs)
+        self.rtype = rtype
+        self.role = role
+        self.target_etype = target_etype
+
+    def score_entity(self, entity):
+        # first ensure the relation is possible at all for this entity class
+        relpossel = relation_possible(self.rtype, self.role, self.target_etype)
+        if not relpossel.score_class(entity.__class__, entity._cw):
+            return 0
+        rset = entity.related(self.rtype, self.role)
+        if self.target_etype:
+            # only count related entities of the expected target type
+            return any(r for r in rset.description if r[0] == self.target_etype)
+        return rset and 1 or 0
+
+
+class partial_has_related_entities(PartialPredicateMixIn, has_related_entities):
+    """Same as :class:~`cubicweb.predicates.has_related_entities`, but will look
+    for attributes of the selected class to get information which is otherwise
+    expected by the initializer.
+
+    This is useful to predefine predicate of an abstract class designed to be
+    customized.
+    """
+    def __init__(self, **kwargs):
+        # rtype / role / target_etype are filled in later by complete()
+        super(partial_has_related_entities, self).__init__(None, None, None,
+                                                           **kwargs)
+
+    def complete(self, cls):
+        # called at selection time with the candidate class
+        self.rtype = cls.rtype
+        self.role = role(cls)
+        self.target_etype = getattr(cls, 'target_etype', None)
+
+
+class has_permission(EntityPredicate):
+    """Return non-zero score if request's user has the permission to do the
+    requested action on the entity. `action` is an entity schema action (eg one
+    of 'read', 'add', 'delete', 'update').
+
+    Here are entity lookup / scoring rules:
+
+    * if `entity` is specified, check permission is granted for this entity
+
+    * elif `row` is specified, check permission is granted for the entity found
+      in the specified cell
+
+    * else check permission is granted for each entity found in the column
+      specified by the `col` argument or in column 0
+    """
+    def __init__(self, action):
+        self.action = action
+
+    # don't use EntityPredicate.__call__ but this optimized implementation to
+    # avoid considering each entity when it's not necessary
+    def __call__(self, cls, req, rset=None, row=None, col=0, entity=None, **kwargs):
+        if entity is not None:
+            return self.score_entity(entity)
+        if rset is None:
+            return 0
+        if row is None:
+            # first pass at entity-type level: cheap group-based check
+            score = 0
+            need_local_check = []
+            geteschema = req.vreg.schema.eschema
+            user = req.user
+            action = self.action
+            for etype in rset.column_types(0):
+                if etype in BASE_TYPES:
+                    return 0
+                eschema = geteschema(etype)
+                if not user.matching_groups(eschema.get_groups(action)):
+                    if eschema.has_local_role(action):
+                        # have to check local roles
+                        need_local_check.append(eschema)
+                        continue
+                    else:
+                        # even a local role won't be enough
+                        return 0
+                score += 1
+            if need_local_check:
+                # check local role for entities of necessary types
+                for i, row in enumerate(rset):
+                    if not rset.description[i][col] in need_local_check:
+                        continue
+                    # micro-optimisation instead of calling self.score(req,
+                    # rset, i, col): rset may be large
+                    if not rset.get_entity(i, col).cw_has_perm(action):
+                        return 0
+                score += 1
+            return score
+        return self.score(req, rset, row, col)
+
+    def score_entity(self, entity):
+        if entity.cw_has_perm(self.action):
+            return 1
+        return 0
+
+
+class has_add_permission(EClassPredicate):
+    """Return 1 if request's user has the add permission on entity type
+    specified in the `etype` initializer argument, or according to entity found
+    in the input content if not specified.
+
+    It also checks that the entity type is not a strict subobject (e.g. may only
+    be used as a component of another entity).
+
+    See :class:`~cubicweb.predicates.EClassPredicate` documentation for entity
+    class lookup / score rules according to the input context when `etype` is
+    not specified.
+    """
+    def __init__(self, etype=None, **kwargs):
+        super(has_add_permission, self).__init__(**kwargs)
+        self.etype = etype
+
+    def __call__(self, cls, req, **kwargs):
+        # with an explicit etype, bypass the input-context lookup entirely
+        if self.etype is None:
+            return super(has_add_permission, self).__call__(cls, req, **kwargs)
+        return self.score(cls, req, self.etype)
+
+    def score_class(self, eclass, req):
+        eschema = eclass.e_schema
+        # final types and strict subobjects can never be added standalone
+        if eschema.final or eschema.is_subobject(strict=True) \
+               or not eschema.has_perm(req, 'add'):
+            return 0
+        return 1
+
+
+class rql_condition(EntityPredicate):
+    """Return non-zero score if arbitrary rql specified in `expression`
+    initializer argument return some results for entity found in the input
+    context. Returned score is the number of items returned by the rql
+    condition.
+
+    `expression` is expected to be a string containing an rql expression, which
+    must use 'X' variable to represent the context entity and may use 'U' to
+    represent the request's user.
+
+    .. warning::
+        If simply testing value of some attribute/relation of context entity (X),
+        you should rather use the :class:`score_entity` predicate which will
+        benefit from the ORM's request entities cache.
+
+    See :class:`~cubicweb.predicates.EntityPredicate` documentation for entity
+    lookup / score rules according to the input context.
+    """
+    def __init__(self, expression, once_is_enough=None, mode='all', user_condition=False):
+        super(rql_condition, self).__init__(mode=mode, once_is_enough=once_is_enough)
+        self.user_condition = user_condition
+        # build the full COUNT query once; %% escapes produce literal
+        # %(x)s / %(u)s substitution arguments in the final rql string
+        if user_condition:
+            rql = 'Any COUNT(U) WHERE U eid %%(u)s, %s' % expression
+        elif 'U' in frozenset(split_expression(expression)):
+            rql = 'Any COUNT(X) WHERE X eid %%(x)s, U eid %%(u)s, %s' % expression
+        else:
+            rql = 'Any COUNT(X) WHERE X eid %%(x)s, %s' % expression
+        self.rql = rql
+
+    def __str__(self):
+        return '%s(%r)' % (self.__class__.__name__, self.rql)
+
+    def __call__(self, cls, req, **kwargs):
+        if self.user_condition:
+            # user-only condition doesn't need an entity from the context
+            try:
+                return req.execute(self.rql, {'u': req.user.eid})[0][0]
+            except Unauthorized:
+                return 0
+        else:
+            return super(rql_condition, self).__call__(cls, req, **kwargs)
+
+    def _score(self, req, eid):
+        # an Unauthorized query simply scores 0 instead of propagating
+        try:
+            return req.execute(self.rql, {'x': eid, 'u': req.user.eid})[0][0]
+        except Unauthorized:
+            return 0
+
+    def score(self, req, rset, row, col):
+        return self._score(req, rset[row][col])
+
+    def score_entity(self, entity):
+        return self._score(entity._cw, entity.eid)
+
+
+# workflow predicates ###########################################################
+
class is_in_state(score_entity):
    """Return 1 if the entity is in one of the states given as argument list.

    Prefer this over a hand-written :class:`score_entity` predicate to avoid
    some gotchas:

    * possible views gives a fake entity with no state
    * repository side checking of the current state must go through the latest
      tr info of the workflow adapter

    In debug mode this predicate may raise :exc:`ValueError` for unknown state
    names (only checked on entities without a custom workflow).

    :rtype: int
    """
    def __init__(self, *expected):
        assert expected, self
        self.expected = frozenset(expected)

        def score(entity, expected=self.expected):
            adapted = entity.cw_adapt_to('IWorkflowable')
            if entity._cw.vreg.config.debugmode:
                # state name validation, debug mode only (time consuming);
                # it can only be done for the generic etype workflow since a
                # custom workflow (for the current entity) may have changed
                # the expected transition list
                if not entity.custom_workflow:
                    self._validate(adapted)
            return self._score(adapted)

        super(is_in_state, self).__init__(score)

    def _score(self, adapted):
        trinfo = adapted.latest_trinfo()
        # no tr info: the entity is probably still in its initial state
        statename = adapted.state if trinfo is None else trinfo.new_state.name
        return statename in self.expected

    def _validate(self, adapted):
        wf = adapted.current_workflow
        valid = [n.name for n in wf.reverse_state_of]
        unknown = sorted(self.expected.difference(valid))
        if unknown:
            raise ValueError("%s: unknown state(s): %s"
                             % (wf.name, ",".join(unknown)))

    def __str__(self):
        return '%s(%s)' % (self.__class__.__name__,
                           ','.join(str(s) for s in self.expected))
+
+
def on_fire_transition(etype, tr_names, from_state_name=None):
    """Return a predicate scoring 1 when an entity of type `etype` goes
    through a transition whose name is in `tr_names`.

    Use this predicate on an 'after_add_entity' hook, since it actually looks
    for the addition of `TrInfo` entities. In the hook, `self.entity` will
    thus reference the matching `TrInfo` entity, giving access to all the
    transition details (the entity the transition applies to, its original
    state, transition, destination state, user...).

    See :class:`cubicweb.entities.wfobjs.TrInfo` for more information.
    """
    if from_state_name is not None:
        warn("on_fire_transition's from_state_name argument is unused", DeprecationWarning)
    if isinstance(tr_names, string_types):
        tr_names = set((tr_names,))

    def match_etype_and_transition(trinfo):
        # beware: trinfo.transition is None when change_state was called
        if not (trinfo.transition and trinfo.transition.name in tr_names):
            return 0
        # is_instance() first two arguments are 'cls' (unused, so None is
        # fine) and the request/session
        return is_instance(etype)(None, trinfo._cw, entity=trinfo.for_entity)

    return is_instance('TrInfo') & score_entity(match_etype_and_transition)
+
+
class match_transition(ExpectedValuePredicate):
    """Return 1 when a `transition` argument is found in the input context and
    its `.name` attribute matches one of the expected names given to the
    initializer.

    Expected to be used to customise the status change form in the web ui.
    """
    def __call__(self, cls, req, transition=None, **kwargs):
        # XXX check this is a transition that apply to the object?
        if transition is None:
            # fall back on the transition eid posted in the form, if any
            treid = req.form.get('treid', None)
            if treid:
                transition = req.entity_from_eid(treid)
        if transition is None:
            return 0
        return 1 if getattr(transition, 'name', None) in self.expected else 0
+
+
+# logged user predicates ########################################################
+
@objectify_predicate
def no_cnx(cls, req, **kwargs):
    """Return 1 if the web session has no connection set, which occurs when
    anonymous access is not allowed and the user isn't authenticated.
    """
    return 0 if req.cnx else 1
+
+
@objectify_predicate
def authenticated_user(cls, req, **kwargs):
    """Return 1 if the user is authenticated (i.e. not the anonymous user).
    """
    return 0 if req.session.anonymous_session else 1
+
+
@objectify_predicate
def anonymous_user(cls, req, **kwargs):
    """Return 1 if the user is not authenticated (i.e. is the anonymous user).
    """
    return 1 if req.session.anonymous_session else 0
+
+
class match_user_groups(ExpectedValuePredicate):
    """Return a non-zero score if the request's user is in at least one of the
    groups given as initializer argument. The score is the number of such
    groups the user belongs to.

    If the special 'owners' group is given and `rset` is specified in the
    input context:

    * if `row` is specified check that the entity at the given `row`/`col`
      (default to 0) is owned by the user

    * else check that all entities in `col` (default to 0) are owned by the
      user
    """

    def __call__(self, cls, req, rset=None, row=None, col=0, **kwargs):
        # repo session instances have no 'cnx' attribute, hence default True
        if not getattr(req, 'cnx', True):
            return 0
        user = req.user
        if user is None:
            return int('guests' in self.expected)
        score = user.matching_groups(self.expected)
        if score or 'owners' not in self.expected or not rset:
            return score
        if row is None:
            # all entities in the column must be owned by the user
            return all(user.owns(line[col]) for line in rset)
        return 1 if user.owns(rset[row][col]) else 0
+
+# Web request predicates ########################################################
+
+# XXX deprecate
@objectify_predicate
def primary_view(cls, req, view=None, **kwargs):
    """Return 1 if:

    * *no view is specified* in the input context

    * a view is specified and its `.is_primary()` method returns True

    Usually used by contextual components that should only appear on an
    entity's primary view.
    """
    if view is None:
        return 1
    return 1 if view.is_primary() else 0
+
+
@objectify_predicate
def contextual(cls, req, view=None, **kwargs):
    """Return 1 if the view's contextual property is true"""
    return 1 if view is not None and view.contextual else 0
+
+
class match_view(ExpectedValuePredicate):
    """Return 1 when a view is specified in the input context and its registry
    id is among the expected view ids given to the initializer.
    """
    def __call__(self, cls, req, view=None, **kwargs):
        if view is not None and view.__regid__ in self.expected:
            return 1
        return 0
+
+
class match_context(ExpectedValuePredicate):
    # scores 1 when the `context` string from the input context is expected
    def __call__(self, cls, req, context=None, **kwargs):
        return 1 if context in self.expected else 0
+
+
+# XXX deprecate
@objectify_predicate
def match_context_prop(cls, req, context=None, **kwargs):
    """Return 1 if:

    * no `context` is specified in the input context (take care to confusion,
      here `context` refers to a string given as an argument to the input
      context...)

    * the specified `context` matches the context property value for the
      appobject using this predicate

    * the appobject's context property value is None

    Usually used by contextual components that want to appear in a
    configurable place.
    """
    if context is None:
        return 1
    propkey = '%s.%s.context' % (cls.__registry__, cls.__regid__)
    propval = req.property_value(propkey)
    return 0 if propval and context != propval else 1
+
+
class match_search_state(ExpectedValuePredicate):
    """Return 1 if the current request search state is one of the expected
    states given to the initializer.

    Known search states are 'normal' and 'linksearch' (eg searching for an
    object to create a relation with another).

    Usually used by actions that should appear or not according to the ui
    search state.
    """

    def __call__(self, cls, req, **kwargs):
        try:
            state = req.search_state[0]
        except AttributeError:
            # request class doesn't care about search state: accept it
            return 1
        return 1 if state in self.expected else 0
+
+
class match_form_params(ExpectedValuePredicate):
    """Return a non-zero score if the parameter names given as initializer
    arguments are found among the request's form parameters.

    The score is the number of expected parameters.

    When several parameters are expected, all of them must be present in the
    input context, unless the `mode` keyword argument is 'any', in which case
    a single matching parameter is enough.
    """

    def __init__(self, *expected, **kwargs):
        """override default __init__ to allow either named or positional
        parameters.
        """
        if kwargs and expected:
            raise ValueError("match_form_params() can't be called with both "
                             "positional and named arguments")
        if not expected:
            super(match_form_params, self).__init__(kwargs)
            return
        if len(expected) == 1 and not isinstance(expected[0], string_types):
            raise ValueError("match_form_params() positional arguments "
                             "must be strings")
        super(match_form_params, self).__init__(*expected)

    def _values_set(self, cls, req, **kwargs):
        return req.form
+
+
class match_http_method(ExpectedValuePredicate):
    """Return a non-zero score if the request's HTTP method (GET, POST, ...)
    is one of the methods given as initializer arguments.
    """

    def __call__(self, cls, req, **kwargs):
        if req.http_method() in self.expected:
            return 1
        return 0
+
+
class match_edited_type(ExpectedValuePredicate):
    """return non-zero if main edited entity type is the one specified as
    initializer argument, or is among initializer arguments if `mode` == 'any'.
    """

    def _values_set(self, cls, req, **kwargs):
        form = req.form
        try:
            # the '__maineid' parameter holds the eid of the main edited
            # entity; its '__type:<eid>' companion holds its type
            return frozenset((form['__type:%s' % form['__maineid']],))
        except KeyError:
            return frozenset()
+
+
class match_form_id(ExpectedValuePredicate):
    """return non-zero if request form identifier is the one specified as
    initializer argument, or is among initializer arguments if `mode` == 'any'.
    """

    def _values_set(self, cls, req, **kwargs):
        try:
            form_id = req.form['__form_id']
        except KeyError:
            return frozenset()
        return frozenset((form_id,))
+
+
class specified_etype_implements(is_instance):
    """Return a non-zero score if the entity type specified by an 'etype' key
    searched in (by priority) input context kwargs and request form parameters
    matches a known entity type (case insensitively), and its associated
    entity class is of one of the type(s) given to the initializer. If
    multiple arguments are given, matching one of them is enough.

    .. note:: as with :class:`~cubicweb.predicates.is_instance`, entity types
              should be given as string and the score will reflect class
              proximity so the most specific object will be selected.

    Usually used by views holding entity creation forms (since there is no
    result set to work on).
    """

    def __call__(self, cls, req, **kwargs):
        if 'etype' in kwargs:
            # etype given explicitly: let any error propagate
            etype = kwargs['etype']
        else:
            try:
                etype = req.form['etype']
            except KeyError:
                return 0
            # etype comes from the form: check it is a known type and
            # normalize its casing in place
            try:
                etype = req.vreg.case_insensitive_etypes[etype.lower()]
                req.form['etype'] = etype
            except KeyError:
                return 0
        score = self.score_class(req.vreg['etypes'].etype_class(etype), req)
        if not score:
            return 0
        eschema = req.vreg.schema.eschema(etype)
        return score if eschema.may_have_permission('add', req) else 0
+
+
class attribute_edited(EntityPredicate):
    """Scores when the specified attribute has been edited. This is useful for
    selection of forms by the edit controller.

    The initial use case is on a form, in conjunction with match_transition,
    which will not score at edit time::

      is_instance('Version') & (match_transition('ready') |
                                attribute_edited('publication_date'))
    """
    def __init__(self, attribute, once_is_enough=None, mode='all'):
        super(attribute_edited, self).__init__(mode=mode, once_is_enough=once_is_enough)
        self._attribute = attribute

    def score_entity(self, entity):
        # the edit controller posts one form parameter per edited attribute
        param = eid_param(role_name(self._attribute, 'subject'), entity.eid)
        return param in entity._cw.form
+
+
+# Other predicates ##############################################################
+
class match_exception(ExpectedValuePredicate):
    """Return 1 if the exception given as `exc` in the input context is an
    instance of one of the classes given on instantiation of this predicate.
    """
    def __init__(self, *expected):
        assert expected, self
        # isinstance() wants a tuple, not the set built by the parent class
        self.expected = expected

    def __call__(self, cls, req, exc=None, **kwargs):
        if exc is None:
            return 0
        return 1 if isinstance(exc, self.expected) else 0
+
+
@objectify_predicate
def debug_mode(cls, req, rset=None, **kwargs):
    """Return 1 if running in debug mode."""
    return 1 if req.vreg.config.debugmode else 0
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/pylintext.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/pylintext.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,50 @@
+"""https://pastebin.logilab.fr/show/860/"""
+
+from astroid import MANAGER, InferenceError, nodes, scoped_nodes
+from astroid.builder import AstroidBuilder
+
def turn_function_to_class(node):
    """turn a Function node into a Class node (in-place)

    Used so that functions decorated with objectify_predicate are seen by
    pylint as the classes they become at runtime.
    """
    node.__class__ = scoped_nodes.Class
    node.bases = ()
    # remove return nodes so that we don't get warned about 'return outside
    # function' by pylint; materialize the generator first since removing
    # nodes mutates the tree being traversed
    for rnode in list(node.nodes_of_class(nodes.Return)):
        rnode.parent.body.remove(rnode)
    # that seems to be enough :)
+
+
def cubicweb_transform(module):
    """astroid transform teaching pylint about CubicWeb's dynamic constructs."""
    # handle objectify_predicate decorator (and its former name until bw compat
    # is kept). Only look at module level functions, should be enough.
    for assnodes in module.locals.values():
        for node in assnodes:
            if isinstance(node, scoped_nodes.Function) and node.decorators:
                for decorator in node.decorators.nodes:
                    try:
                        for infered in decorator.infer():
                            if infered.name in ('objectify_predicate', 'objectify_selector'):
                                turn_function_to_class(node)
                                break
                        else:
                            # no matching decorator inferred: try next one
                            continue
                        # a decorator matched: stop looking at this function
                        break
                    except InferenceError:
                        continue
    # add yams base types into 'yams.buildobjs', astng doesn't grasp globals()
    # magic in there
    if module.name == 'yams.buildobjs':
        from yams import BASE_TYPES
        for etype in BASE_TYPES:
            module.locals[etype] = [scoped_nodes.Class(etype, None)]
    # add data() to uiprops module
    if module.name.split('.')[-1] == 'uiprops':
        fake = AstroidBuilder(MANAGER).string_build('''
def data(string):
    return u''
''')
        module.locals['data'] = fake.locals['data']
+
def register(linter):
    """called when loaded by pylint --load-plugins: register the module
    transform (the `linter` argument itself is unused)"""
    MANAGER.register_transform(nodes.Module, cubicweb_transform)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/pytestconf.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/pytestconf.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,48 @@
+# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""pytest configuration file: we need this to properly remove resources
+cached on test classes, at least until we have proper support for teardown_class
+"""
+import sys
+from os.path import split, splitext
+from logilab.common.pytest import PyTester
+
class CustomPyTester(PyTester):
    """PyTester cleaning repository resources cached on test classes after
    each test file run.
    """
    def testfile(self, filename, batchmode=False):
        try:
            return super(CustomPyTester, self).testfile(filename, batchmode)
        finally:
            modname = splitext(split(filename)[1])[0]
            module = sys.modules.get(modname)
            if module is None:
                # error during test module import: nothing to clean
                return
            for obj in vars(module).values():
                if getattr(obj, '__module__', None) == modname:
                    clean_repo_test_cls(obj)
+
def clean_repo_test_cls(cls):
    """shutdown and drop repository-related resources cached on a test class"""
    if 'repo' in cls.__dict__:
        repo = cls.repo
        if not repo.shutting_down:
            repo.shutdown()
        del cls.repo
    for name in ('cnx', 'config', '_config', 'vreg', 'schema'):
        if name in cls.__dict__:
            delattr(cls, name)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/repoapi.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/repoapi.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,71 @@
+# copyright 2013-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""Official API to access the content of a repository
+"""
+from warnings import warn
+
+from six import add_metaclass
+
+from logilab.common.deprecation import class_deprecated
+
+from cubicweb.utils import parse_repo_uri
+from cubicweb import AuthenticationError
+from cubicweb.server.session import Connection
+
+
+### public API ######################################################
+
def get_repository(uri=None, config=None, vreg=None):
    """get a repository for the given config/vregistry (in case we're loading
    the repository for a client, eg web server, configuration).

    `config` is mandatory; `uri` is deprecated and ignored.
    """
    if uri is not None:
        # fix: emit a proper DeprecationWarning (plain warn() defaulted to
        # UserWarning) and point at the caller
        warn('[3.22] get_repository only wants a config',
             DeprecationWarning, stacklevel=2)

    assert config is not None, 'get_repository(config=config)'
    return config.repository(vreg)
+
def connect(repo, login, **kwargs):
    """Take credentials and return the associated Connection.

    raise AuthenticationError if the credentials are invalid."""
    session = repo._get_session(repo.connect(login, **kwargs))
    # XXX autoclose_session should probably be handled on the session
    # directly; something to consider once we have a proper server side
    # Connection.
    return Connection(session)
+
def anonymous_cnx(repo):
    """return a Connection for the anonymous user.

    raises an AuthenticationError if anonymous usage is not allowed
    """
    get_anon_info = getattr(repo.config, 'anonymous_user', lambda: None)
    credentials = get_anon_info()
    if credentials is None:
        # no anonymous user configured
        raise AuthenticationError('anonymous access is not authorized')
    anon_login, anon_password = credentials
    # use vreg's repository cache
    return connect(repo, anon_login, password=anon_password)
+
+
@add_metaclass(class_deprecated)
class ClientConnection(Connection):
    # deprecated alias kept for backward compatibility; using it emits the
    # warning below thanks to the class_deprecated metaclass
    __deprecation_warning__ = '[3.20] %(cls)s is deprecated, use Connection instead'
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/req.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/req.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,508 @@
+# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""Base class for request/session"""
+
+__docformat__ = "restructuredtext en"
+
+from warnings import warn
+from datetime import time, datetime, timedelta
+
+from six import PY2, PY3, text_type
+from six.moves.urllib.parse import parse_qs, parse_qsl, quote as urlquote, unquote as urlunquote, urlsplit, urlunsplit
+
+from logilab.common.decorators import cached
+from logilab.common.deprecation import deprecated
+from logilab.common.date import ustrftime, strptime, todate, todatetime
+
+from rql.utils import rqlvar_maker
+
+from cubicweb import (Unauthorized, NoSelectableObject, NoResultError,
+ MultipleResultsError, uilib)
+from cubicweb.rset import ResultSet
+
# minimal delay between two lookups of the same cache in get_cache()
ONESECOND = timedelta(0, 1, 0)
# module-level registry of named Cache instances, shared by all sessions
CACHE_REGISTRY = {}
+
class FindEntityError(Exception):
    """raised when find_one_entity() cannot return exactly one entity"""
+
class Cache(dict):
    """dict-based cache remembering its creation and latest lookup times"""
    def __init__(self):
        super(Cache, self).__init__()
        now = datetime.now()
        # both timestamps start out equal; get_cache() refreshes
        # latest_cache_lookup and resets cache_creation_date on invalidation
        self.cache_creation_date = now
        self.latest_cache_lookup = now
+
+
+class RequestSessionBase(object):
+ """base class containing stuff shared by server session and web request
+
+ request/session is the main resources accessor, mainly through it's vreg
+ attribute:
+
+ :attribute vreg: the instance's registry
+ :attribute vreg.schema: the instance's schema
+ :attribute vreg.config: the instance's configuration
+ """
+ is_request = True # False for repository session
+
+ def __init__(self, vreg):
+ self.vreg = vreg
+ try:
+ encoding = vreg.property_value('ui.encoding')
+ except Exception: # no vreg or property not registered
+ encoding = 'utf-8'
+ self.encoding = encoding
+ # cache result of execution for (rql expr / eids),
+ # should be emptied on commit/rollback of the server session / web
+ # connection
+ self.user = None
+ self.local_perm_cache = {}
+ self._ = text_type
+
+ def _set_user(self, orig_user):
+ """set the user for this req_session_base
+
+ A special method is needed to ensure the linked user is linked to the
+ connection too.
+ """
+ rset = self.eid_rset(orig_user.eid, 'CWUser')
+ user_cls = self.vreg['etypes'].etype_class('CWUser')
+ user = user_cls(self, rset, row=0, groups=orig_user.groups,
+ properties=orig_user.properties)
+ user.cw_attr_cache['login'] = orig_user.login # cache login
+ self.user = user
+ self.set_entity_cache(user)
+ self.set_language(user.prefered_language())
+
+
+ def set_language(self, lang):
+ """install i18n configuration for `lang` translation.
+
+ Raises :exc:`KeyError` if translation doesn't exist.
+ """
+ self.lang = lang
+ gettext, pgettext = self.vreg.config.translations[lang]
+ # use _cw.__ to translate a message without registering it to the catalog
+ self._ = self.__ = gettext
+ self.pgettext = pgettext
+
    def get_option_value(self, option):
        # abstract: concrete request/session classes must return the
        # configuration value for `option`
        raise NotImplementedError
+
+ def property_value(self, key):
+ """return value of the property with the given key, giving priority to
+ user specific value if any, else using site value
+ """
+ if self.user:
+ val = self.user.property_value(key)
+ if val is not None:
+ return val
+ return self.vreg.property_value(key)
+
+ def etype_rset(self, etype, size=1):
+ """return a fake result set for a particular entity type"""
+ rset = ResultSet([('A',)]*size, '%s X' % etype,
+ description=[(etype,)]*size)
+ def get_entity(row, col=0, etype=etype, req=self, rset=rset):
+ return req.vreg['etypes'].etype_class(etype)(req, rset, row, col)
+ rset.get_entity = get_entity
+ rset.req = self
+ return rset
+
+ def eid_rset(self, eid, etype=None):
+ """return a result set for the given eid without doing actual query
+ (we have the eid, we can suppose it exists and user has access to the
+ entity)
+ """
+ eid = int(eid)
+ if etype is None:
+ etype = self.entity_metas(eid)['type']
+ rset = ResultSet([(eid,)], 'Any X WHERE X eid %(x)s', {'x': eid},
+ [(etype,)])
+ rset.req = self
+ return rset
+
+ def empty_rset(self):
+ """ return a guaranteed empty result """
+ rset = ResultSet([], 'Any X WHERE X eid -1')
+ rset.req = self
+ return rset
+
+ def entity_from_eid(self, eid, etype=None):
+ """return an entity instance for the given eid. No query is done"""
+ try:
+ return self.entity_cache(eid)
+ except KeyError:
+ rset = self.eid_rset(eid, etype)
+ entity = rset.get_entity(0, 0)
+ self.set_entity_cache(entity)
+ return entity
+
    def entity_cache(self, eid):
        # base implementation has no cache, hence always a miss; subclasses
        # maintaining an entity cache override this to return the cached
        # entity for `eid`
        raise KeyError
+
    def set_entity_cache(self, entity):
        # no-op in the base implementation; subclasses maintaining an entity
        # cache override this
        pass
+
+ def create_entity(self, etype, **kwargs):
+ """add a new entity of the given type
+
+ Example (in a shell session):
+
+ >>> c = create_entity('Company', name=u'Logilab')
+ >>> create_entity('Person', firstname=u'John', surname=u'Doe',
+ ... works_for=c)
+
+ """
+ cls = self.vreg['etypes'].etype_class(etype)
+ return cls.cw_instantiate(self.execute, **kwargs)
+
+ @deprecated('[3.18] use find(etype, **kwargs).entities()')
+ def find_entities(self, etype, **kwargs):
+ """find entities of the given type and attribute values.
+
+ >>> users = find_entities('CWGroup', name=u'users')
+ >>> groups = find_entities('CWGroup')
+ """
+ return self.find(etype, **kwargs).entities()
+
+ @deprecated('[3.18] use find(etype, **kwargs).one()')
+ def find_one_entity(self, etype, **kwargs):
+ """find one entity of the given type and attribute values.
+ raise :exc:`FindEntityError` if can not return one and only one entity.
+
+ >>> users = find_one_entity('CWGroup', name=u'users')
+ >>> groups = find_one_entity('CWGroup')
+ Exception()
+ """
+ try:
+ return self.find(etype, **kwargs).one()
+ except (NoResultError, MultipleResultsError) as e:
+ raise FindEntityError("%s: (%s, %s)" % (str(e), etype, kwargs))
+
+ def find(self, etype, **kwargs):
+ """find entities of the given type and attribute values.
+
+ :returns: A :class:`ResultSet`
+
+ >>> users = find('CWGroup', name=u"users").one()
+ >>> groups = find('CWGroup').entities()
+ """
+ parts = ['Any X WHERE X is %s' % etype]
+ varmaker = rqlvar_maker(defined='X')
+ eschema = self.vreg.schema.eschema(etype)
+ for attr, value in kwargs.items():
+ if isinstance(value, list) or isinstance(value, tuple):
+ raise NotImplementedError("List of values are not supported")
+ if hasattr(value, 'eid'):
+ kwargs[attr] = value.eid
+ if attr.startswith('reverse_'):
+ attr = attr[8:]
+ assert attr in eschema.objrels, \
+ '%s not in %s object relations' % (attr, eschema)
+ parts.append(
+ '%(varname)s %(attr)s X, '
+ '%(varname)s eid %%(reverse_%(attr)s)s'
+ % {'attr': attr, 'varname': next(varmaker)})
+ else:
+ assert attr in eschema.subjrels, \
+ '%s not in %s subject relations' % (attr, eschema)
+ parts.append('X %(attr)s %%(%(attr)s)s' % {'attr': attr})
+
+ rql = ', '.join(parts)
+
+ return self.execute(rql, kwargs)
+
+ def ensure_ro_rql(self, rql):
+ """raise an exception if the given rql is not a select query"""
+ first = rql.split(None, 1)[0].lower()
+ if first in ('insert', 'set', 'delete'):
+ raise Unauthorized(self._('only select queries are authorized'))
+
+ def get_cache(self, cachename):
+ """cachename should be dotted names as in :
+
+ - cubicweb.mycache
+ - cubes.blog.mycache
+ - etc.
+ """
+ warn.warning('[3.19] .get_cache will disappear soon. '
+ 'Distributed caching mechanisms are being introduced instead.'
+ 'Other caching mechanism can be used more reliably '
+ 'to the same effect.',
+ DeprecationWarning)
+ if cachename in CACHE_REGISTRY:
+ cache = CACHE_REGISTRY[cachename]
+ else:
+ cache = CACHE_REGISTRY[cachename] = Cache()
+ _now = datetime.now()
+ if _now > cache.latest_cache_lookup + ONESECOND:
+ ecache = self.execute(
+ 'Any C,T WHERE C is CWCache, C name %(name)s, C timestamp T',
+ {'name':cachename}).get_entity(0,0)
+ cache.latest_cache_lookup = _now
+ if not ecache.valid(cache.cache_creation_date):
+ cache.clear()
+ cache.cache_creation_date = _now
+ return cache
+
+ # url generation methods ##################################################
+
    def build_url(self, *args, **kwargs):
        """return an absolute URL using params dictionary key/values as URL
        parameters. Values are automatically URL quoted, and the
        publishing method to use may be specified or will be guessed.

        if ``__secure__`` argument is True, the request will try to build a
        https url.

        raises :exc:`ValueError` if None is found in arguments
        """
        # use *args since we don't want first argument to be "anonymous" to
        # avoid potential clash with kwargs
        method = None
        if args:
            assert len(args) == 1, 'only 0 or 1 non-named-argument expected'
            method = args[0]
        if method is None:
            method = 'view'
        # XXX I (adim) think that if method is passed explicitly, we should
        # not try to process it and directly call req.build_url()
        base_url = kwargs.pop('base_url', None)
        if base_url is None:
            secure = kwargs.pop('__secure__', None)
            base_url = self.base_url(secure=secure)
        if '_restpath' in kwargs:
            # _restpath is only expected with the default 'view' method
            assert method == 'view', repr(method)
            path = kwargs.pop('_restpath')
        else:
            path = method
        if not kwargs:
            return u'%s%s' % (base_url, path)
        return u'%s%s?%s' % (base_url, path, self.build_url_params(**kwargs))
+
+ def build_url_params(self, **kwargs):
+ """return encoded params to incorporate them in a URL"""
+ args = []
+ for param, values in kwargs.items():
+ if not isinstance(values, (list, tuple)):
+ values = (values,)
+ for value in values:
+ assert value is not None
+ args.append(u'%s=%s' % (param, self.url_quote(value)))
+ return '&'.join(args)
+
+ def url_quote(self, value, safe=''):
+ """urllib.quote is not unicode safe, use this method to do the
+ necessary encoding / decoding. Also it's designed to quote each
+ part of a url path and so the '/' character will be encoded as well.
+ """
+ if PY2 and isinstance(value, unicode):
+ quoted = urlquote(value.encode(self.encoding), safe=safe)
+ return unicode(quoted, self.encoding)
+ return urlquote(str(value), safe=safe)
+
+ def url_unquote(self, quoted):
+ """returns a unicode unquoted string
+
+ decoding is based on `self.encoding` which is the encoding
+ used in `url_quote`
+ """
+ if PY3:
+ return urlunquote(quoted)
+ if isinstance(quoted, unicode):
+ quoted = quoted.encode(self.encoding)
+ try:
+ return unicode(urlunquote(quoted), self.encoding)
+        except UnicodeDecodeError: # might occur on manually typed URLs
+ return unicode(urlunquote(quoted), 'iso-8859-1')
+
+ def url_parse_qsl(self, querystring):
+ """return a list of (key, val) found in the url quoted query string"""
+ if PY3:
+ for key, val in parse_qsl(querystring):
+ yield key, val
+ return
+ if isinstance(querystring, unicode):
+ querystring = querystring.encode(self.encoding)
+ for key, val in parse_qsl(querystring):
+ try:
+ yield unicode(key, self.encoding), unicode(val, self.encoding)
+            except UnicodeDecodeError: # might occur on manually typed URLs
+ yield unicode(key, 'iso-8859-1'), unicode(val, 'iso-8859-1')
+
+
+ def rebuild_url(self, url, **newparams):
+ """return the given url with newparams inserted. If any new params
+ is already specified in the url, it's overriden by the new value
+
+ newparams may only be mono-valued.
+ """
+ if PY2 and isinstance(url, unicode):
+ url = url.encode(self.encoding)
+ schema, netloc, path, query, fragment = urlsplit(url)
+ query = parse_qs(query)
+ # sort for testing predictability
+ for key, val in sorted(newparams.items()):
+ query[key] = (self.url_quote(val),)
+ query = '&'.join(u'%s=%s' % (param, value)
+ for param, values in sorted(query.items())
+ for value in values)
+ return urlunsplit((schema, netloc, path, query, fragment))
+
+ # bound user related methods ###############################################
+
+ @cached
+ def user_data(self):
+ """returns a dictionary with this user's information.
+
+ The keys are :
+
+ login
+ The user login
+
+ name
+ The user name, returned by user.name()
+
+ email
+ The user principal email
+
+ """
+ userinfo = {}
+ user = self.user
+ userinfo['login'] = user.login
+ userinfo['name'] = user.name()
+ userinfo['email'] = user.cw_adapt_to('IEmailable').get_email()
+ return userinfo
+
+ # formating methods #######################################################
+
+ def view(self, __vid, rset=None, __fallback_oid=None, __registry='views',
+ initargs=None, w=None, **kwargs):
+ """Select object with the given id (`__oid`) then render it. If the
+ object isn't selectable, try to select fallback object if
+ `__fallback_oid` is specified.
+
+ If specified `initargs` is expected to be a dictionary containing
+ arguments that should be given to selection (hence to object's __init__
+ as well), but not to render(). Other arbitrary keyword arguments will be
+ given to selection *and* to render(), and so should be handled by
+ object's call or cell_call method..
+ """
+ if initargs is None:
+ initargs = kwargs
+ else:
+ initargs.update(kwargs)
+ try:
+ view = self.vreg[__registry].select(__vid, self, rset=rset, **initargs)
+ except NoSelectableObject:
+ if __fallback_oid is None:
+ raise
+ view = self.vreg[__registry].select(__fallback_oid, self,
+ rset=rset, **initargs)
+ return view.render(w=w, **kwargs)
+
+ def printable_value(self, attrtype, value, props=None, displaytime=True,
+ formatters=uilib.PRINTERS):
+        """return a displayable value (i.e. unicode string)"""
+ if value is None:
+ return u''
+ try:
+ as_string = formatters[attrtype]
+ except KeyError:
+ self.error('given bad attrtype %s', attrtype)
+ return unicode(value)
+ return as_string(value, self, props, displaytime)
+
+ def format_date(self, date, date_format=None, time=False):
+ """return a string for a date time according to instance's
+ configuration
+ """
+ if date is not None:
+ if date_format is None:
+ if time:
+ date_format = self.property_value('ui.datetime-format')
+ else:
+ date_format = self.property_value('ui.date-format')
+ return ustrftime(date, date_format)
+ return u''
+
+ def format_time(self, time):
+ """return a string for a time according to instance's
+ configuration
+ """
+ if time is not None:
+ return ustrftime(time, self.property_value('ui.time-format'))
+ return u''
+
+ def format_float(self, num):
+ """return a string for floating point number according to instance's
+ configuration
+ """
+ if num is not None:
+ return self.property_value('ui.float-format') % num
+ return u''
+
+ def parse_datetime(self, value, etype='Datetime'):
+ """get a datetime or time from a string (according to etype)
+ Datetime formatted as Date are accepted
+ """
+ assert etype in ('Datetime', 'Date', 'Time'), etype
+ # XXX raise proper validation error
+ if etype == 'Datetime':
+ format = self.property_value('ui.datetime-format')
+ try:
+ return todatetime(strptime(value, format))
+ except ValueError:
+ pass
+ elif etype == 'Time':
+ format = self.property_value('ui.time-format')
+ try:
+ # (adim) I can't find a way to parse a Time with a custom format
+ date = strptime(value, format) # this returns a DateTime
+ return time(date.hour, date.minute, date.second)
+ except ValueError:
+ raise ValueError(self._('can\'t parse %(value)r (expected %(format)s)')
+ % {'value': value, 'format': format})
+ try:
+ format = self.property_value('ui.date-format')
+ dt = strptime(value, format)
+ if etype == 'Datetime':
+ return todatetime(dt)
+ return todate(dt)
+ except ValueError:
+ raise ValueError(self._('can\'t parse %(value)r (expected %(format)s)')
+ % {'value': value, 'format': format})
+
+ def _base_url(self, secure=None):
+ if secure:
+ return self.vreg.config.get('https-url') or self.vreg.config['base-url']
+ return self.vreg.config['base-url']
+
+ def base_url(self, secure=None):
+ """return the root url of the instance
+ """
+ url = self._base_url(secure=secure)
+ return url if url is None else url.rstrip('/') + '/'
+
+ # abstract methods to override according to the web front-end #############
+
+ def describe(self, eid, asdict=False):
+        """return a tuple (type, sourceuri, extid) for the entity with the given id"""
+ raise NotImplementedError
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/rqlrewrite.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/rqlrewrite.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,933 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""RQL rewriting utilities : insert rql expression snippets into rql syntax
+tree.
+
+This is used for instance for read security checking in the repository.
+"""
+__docformat__ = "restructuredtext en"
+
+from six import text_type, string_types
+
+from rql import nodes as n, stmts, TypeResolverException
+from rql.utils import common_parent
+
+from yams import BadSchemaDefinition
+
+from logilab.common import tempattr
+from logilab.common.graph import has_path
+
+from cubicweb import Unauthorized
+from cubicweb.schema import RRQLExpression
+
+def cleanup_solutions(rqlst, solutions):
+ for sol in solutions:
+ for vname in list(sol):
+ if not (vname in rqlst.defined_vars or vname in rqlst.aliases):
+ del sol[vname]
+
+
+def add_types_restriction(schema, rqlst, newroot=None, solutions=None):
+ if newroot is None:
+ assert solutions is None
+ if hasattr(rqlst, '_types_restr_added'):
+ return
+ solutions = rqlst.solutions
+ newroot = rqlst
+ rqlst._types_restr_added = True
+ else:
+ assert solutions is not None
+ rqlst = rqlst.stmt
+ eschema = schema.eschema
+ allpossibletypes = {}
+ for solution in solutions:
+ for varname, etype in solution.items():
+ # XXX not considering aliases by design, right ?
+ if varname not in newroot.defined_vars or eschema(etype).final:
+ continue
+ allpossibletypes.setdefault(varname, set()).add(etype)
+ # XXX could be factorized with add_etypes_restriction from rql 0.31
+ for varname in sorted(allpossibletypes):
+ var = newroot.defined_vars[varname]
+ stinfo = var.stinfo
+ if stinfo.get('uidrel') is not None:
+ continue # eid specified, no need for additional type specification
+ try:
+ typerel = rqlst.defined_vars[varname].stinfo.get('typerel')
+ except KeyError:
+ assert varname in rqlst.aliases
+ continue
+ if newroot is rqlst and typerel is not None:
+ mytyperel = typerel
+ else:
+ for vref in var.references():
+ rel = vref.relation()
+ if rel and rel.is_types_restriction():
+ mytyperel = rel
+ break
+ else:
+ mytyperel = None
+ possibletypes = allpossibletypes[varname]
+ if mytyperel is not None:
+ if mytyperel.r_type == 'is_instance_of':
+ # turn is_instance_of relation into a is relation since we've
+ # all possible solutions and don't want to bother with
+ # potential is_instance_of incompatibility
+ mytyperel.r_type = 'is'
+ if len(possibletypes) > 1:
+ node = n.Function('IN')
+ for etype in sorted(possibletypes):
+ node.append(n.Constant(etype, 'etype'))
+ else:
+ etype = next(iter(possibletypes))
+ node = n.Constant(etype, 'etype')
+ comp = mytyperel.children[1]
+ comp.replace(comp.children[0], node)
+ else:
+ # variable has already some strict types restriction. new
+ # possible types can only be a subset of existing ones, so only
+ # remove no more possible types
+ for cst in mytyperel.get_nodes(n.Constant):
+ if not cst.value in possibletypes:
+ cst.parent.remove(cst)
+ else:
+ # we have to add types restriction
+ if stinfo.get('scope') is not None:
+ rel = var.scope.add_type_restriction(var, possibletypes)
+ else:
+ # tree is not annotated yet, no scope set so add the restriction
+ # to the root
+ rel = newroot.add_type_restriction(var, possibletypes)
+ stinfo['typerel'] = rel
+ stinfo['possibletypes'] = possibletypes
+
+
+def remove_solutions(origsolutions, solutions, defined):
+ """when a rqlst has been generated from another by introducing security
+ assertions, this method returns solutions which are contained in orig
+ solutions
+ """
+ newsolutions = []
+ for origsol in origsolutions:
+ for newsol in solutions[:]:
+ for var, etype in origsol.items():
+ try:
+ if newsol[var] != etype:
+ try:
+ defined[var].stinfo['possibletypes'].remove(newsol[var])
+ except KeyError:
+ pass
+ break
+ except KeyError:
+ # variable has been rewritten
+ continue
+ else:
+ newsolutions.append(newsol)
+ solutions.remove(newsol)
+ return newsolutions
+
+
+def _add_noinvariant(noinvariant, restricted, select, nbtrees):
+ # a variable can actually be invariant if it has not been restricted for
+ # security reason or if security assertion hasn't modified the possible
+ # solutions for the query
+ for vname in restricted:
+ try:
+ var = select.defined_vars[vname]
+ except KeyError:
+ # this is an alias
+ continue
+ if nbtrees != 1 or len(var.stinfo['possibletypes']) != 1:
+ noinvariant.add(var)
+
+
+def _expand_selection(terms, selected, aliases, select, newselect):
+ for term in terms:
+ for vref in term.iget_nodes(n.VariableRef):
+ if not vref.name in selected:
+ select.append_selected(vref)
+ colalias = newselect.get_variable(vref.name, len(aliases))
+ aliases.append(n.VariableRef(colalias))
+ selected.add(vref.name)
+
+def _has_multiple_cardinality(etypes, rdef, ttypes_func, cardindex):
+    """return True if any relation definition from entity types (`etypes`) to
+    target types returned by the `ttypes_func` function has multiple (+ or *)
+    cardinality.
+ """
+ for etype in etypes:
+ for ttype in ttypes_func(etype):
+ if rdef(etype, ttype).cardinality[cardindex] in '+*':
+ return True
+ return False
+
+def _compatible_relation(relations, stmt, sniprel):
+ """Search among given rql relation nodes if there is one 'compatible' with the
+ snippet relation, and return it if any, else None.
+
+ A relation is compatible if it:
+ * belongs to the currently processed statement,
+    * isn't negated (i.e. direct parent is a NOT node)
+ * isn't optional (outer join) or similarly as the snippet relation
+ """
+ for rel in relations:
+ # don't share if relation's scope is not the current statement
+ if rel.scope is not stmt:
+ continue
+        # don't share negated relations
+ if rel.neged(strict=True):
+ continue
+ # don't share optional relation, unless the snippet relation is
+ # similarly optional
+ if rel.optional and rel.optional != sniprel.optional:
+ continue
+ return rel
+ return None
+
+
+def iter_relations(stinfo):
+ # this is a function so that test may return relation in a predictable order
+ return stinfo['relations'] - stinfo['rhsrelations']
+
+
+class Unsupported(Exception):
+ """raised when an rql expression can't be inserted in some rql query
+    because it creates an unresolvable query (eg no solutions found)
+ """
+
+class VariableFromSubQuery(Exception):
+ """flow control exception to indicate that a variable is coming from a
+ subquery, and let parent act accordingly
+ """
+ def __init__(self, variable):
+ self.variable = variable
+
+
+class RQLRewriter(object):
+ """Insert some rql snippets into another rql syntax tree, for security /
+ relation vocabulary. This implies that it should only restrict results of
+ the original query, not generate new ones. Hence, inserted snippets are
+ inserted under an EXISTS node.
+
+ This class *isn't thread safe*.
+ """
+
+ def __init__(self, session):
+ self.session = session
+ vreg = session.vreg
+ self.schema = vreg.schema
+ self.annotate = vreg.rqlhelper.annotate
+ self._compute_solutions = vreg.solutions
+
+ def compute_solutions(self):
+ self.annotate(self.select)
+ try:
+ self._compute_solutions(self.session, self.select, self.kwargs)
+ except TypeResolverException:
+ raise Unsupported(str(self.select))
+ if len(self.select.solutions) < len(self.solutions):
+ raise Unsupported()
+
+ def insert_local_checks(self, select, kwargs,
+ localchecks, restricted, noinvariant):
+ """
+ select: the rql syntax tree Select node
+ kwargs: query arguments
+
+ localchecks: {(('Var name', (rqlexpr1, rqlexpr2)),
+ ('Var name1', (rqlexpr1, rqlexpr23))): [solution]}
+
+ (see querier._check_permissions docstring for more information)
+
+ restricted: set of variable names to which an rql expression has to be
+ applied
+
+ noinvariant: set of variable names that can't be considered has
+        invariant due to security reason (will be filled by this method)
+ """
+ nbtrees = len(localchecks)
+ myunion = union = select.parent
+ # transform in subquery when len(localchecks)>1 and groups
+ if nbtrees > 1 and (select.orderby or select.groupby or
+ select.having or select.has_aggregat or
+ select.distinct or
+ select.limit or select.offset):
+ newselect = stmts.Select()
+ # only select variables in subqueries
+ origselection = select.selection
+ select.select_only_variables()
+ select.has_aggregat = False
+ # create subquery first so correct node are used on copy
+ # (eg ColumnAlias instead of Variable)
+ aliases = [n.VariableRef(newselect.get_variable(vref.name, i))
+ for i, vref in enumerate(select.selection)]
+ selected = set(vref.name for vref in aliases)
+ # now copy original selection and groups
+ for term in origselection:
+ newselect.append_selected(term.copy(newselect))
+ if select.orderby:
+ sortterms = []
+ for sortterm in select.orderby:
+ sortterms.append(sortterm.copy(newselect))
+ for fnode in sortterm.get_nodes(n.Function):
+ if fnode.name == 'FTIRANK':
+ # we've to fetch the has_text relation as well
+ var = fnode.children[0].variable
+ rel = next(iter(var.stinfo['ftirels']))
+ assert not rel.ored(), 'unsupported'
+ newselect.add_restriction(rel.copy(newselect))
+ # remove relation from the orig select and
+ # cleanup variable stinfo
+ rel.parent.remove(rel)
+ var.stinfo['ftirels'].remove(rel)
+ var.stinfo['relations'].remove(rel)
+ # XXX not properly re-annotated after security insertion?
+ newvar = newselect.get_variable(var.name)
+ newvar.stinfo.setdefault('ftirels', set()).add(rel)
+ newvar.stinfo.setdefault('relations', set()).add(rel)
+ newselect.set_orderby(sortterms)
+ _expand_selection(select.orderby, selected, aliases, select, newselect)
+ select.orderby = () # XXX dereference?
+ if select.groupby:
+ newselect.set_groupby([g.copy(newselect) for g in select.groupby])
+ _expand_selection(select.groupby, selected, aliases, select, newselect)
+ select.groupby = () # XXX dereference?
+ if select.having:
+ newselect.set_having([g.copy(newselect) for g in select.having])
+ _expand_selection(select.having, selected, aliases, select, newselect)
+ select.having = () # XXX dereference?
+ if select.limit:
+ newselect.limit = select.limit
+ select.limit = None
+ if select.offset:
+ newselect.offset = select.offset
+ select.offset = 0
+ myunion = stmts.Union()
+ newselect.set_with([n.SubQuery(aliases, myunion)], check=False)
+ newselect.distinct = select.distinct
+ solutions = [sol.copy() for sol in select.solutions]
+ cleanup_solutions(newselect, solutions)
+ newselect.set_possible_types(solutions)
+ # if some solutions doesn't need rewriting, insert original
+ # select as first union subquery
+ if () in localchecks:
+ myunion.append(select)
+ # we're done, replace original select by the new select with
+ # subqueries (more added in the loop below)
+ union.replace(select, newselect)
+ elif not () in localchecks:
+ union.remove(select)
+ for lcheckdef, lchecksolutions in localchecks.items():
+ if not lcheckdef:
+ continue
+ myrqlst = select.copy(solutions=lchecksolutions)
+ myunion.append(myrqlst)
+ # in-place rewrite + annotation / simplification
+ lcheckdef = [({var: 'X'}, rqlexprs) for var, rqlexprs in lcheckdef]
+ self.rewrite(myrqlst, lcheckdef, kwargs)
+ _add_noinvariant(noinvariant, restricted, myrqlst, nbtrees)
+ if () in localchecks:
+ select.set_possible_types(localchecks[()])
+ add_types_restriction(self.schema, select)
+ _add_noinvariant(noinvariant, restricted, select, nbtrees)
+ self.annotate(union)
+
+ def rewrite(self, select, snippets, kwargs, existingvars=None):
+ """
+ snippets: (varmap, list of rql expression)
+ with varmap a *dict* {select var: snippet var}
+ """
+ self.select = select
+ # remove_solutions used below require a copy
+ self.solutions = solutions = select.solutions[:]
+ self.kwargs = kwargs
+ self.u_varname = None
+ self.removing_ambiguity = False
+ self.exists_snippet = {}
+ self.pending_keys = []
+ self.existingvars = existingvars
+ # we have to annotate the rqlst before inserting snippets, even though
+ # we'll have to redo it later
+ self.annotate(select)
+ self.insert_snippets(snippets)
+ if not self.exists_snippet and self.u_varname:
+            # U has been inserted then cancelled, cleanup
+ select.undefine_variable(select.defined_vars[self.u_varname])
+ # clean solutions according to initial solutions
+ newsolutions = remove_solutions(solutions, select.solutions,
+ select.defined_vars)
+ assert len(newsolutions) >= len(solutions), (
+ 'rewritten rql %s has lost some solutions, there is probably '
+ 'something wrong in your schema permission (for instance using a '
+ 'RQLExpression which inserts a relation which doesn\'t exist in '
+ 'the schema)\nOrig solutions: %s\nnew solutions: %s' % (
+ select, solutions, newsolutions))
+ if len(newsolutions) > len(solutions):
+ newsolutions = self.remove_ambiguities(snippets, newsolutions)
+ assert newsolutions
+ select.solutions = newsolutions
+ add_types_restriction(self.schema, select)
+
+ def insert_snippets(self, snippets, varexistsmap=None):
+ self.rewritten = {}
+ for varmap, rqlexprs in snippets:
+ if isinstance(varmap, dict):
+ varmap = tuple(sorted(varmap.items()))
+ else:
+ assert isinstance(varmap, tuple), varmap
+ if varexistsmap is not None and not varmap in varexistsmap:
+ continue
+ self.insert_varmap_snippets(varmap, rqlexprs, varexistsmap)
+
+ def init_from_varmap(self, varmap, varexistsmap=None):
+ self.varmap = varmap
+ self.revvarmap = {}
+ self.varinfos = []
+ for i, (selectvar, snippetvar) in enumerate(varmap):
+ assert snippetvar in 'SOX'
+ self.revvarmap[snippetvar] = (selectvar, i)
+ vi = {}
+ self.varinfos.append(vi)
+ try:
+ vi['const'] = int(selectvar)
+ vi['rhs_rels'] = vi['lhs_rels'] = {}
+ except ValueError:
+ try:
+ vi['stinfo'] = sti = self.select.defined_vars[selectvar].stinfo
+ except KeyError:
+ vi['stinfo'] = sti = self._subquery_variable(selectvar)
+ if varexistsmap is None:
+ # build an index for quick access to relations
+ vi['rhs_rels'] = {}
+ for rel in sti.get('rhsrelations', []):
+ vi['rhs_rels'].setdefault(rel.r_type, []).append(rel)
+ vi['lhs_rels'] = {}
+ for rel in sti.get('relations', []):
+ if not rel in sti.get('rhsrelations', []):
+ vi['lhs_rels'].setdefault(rel.r_type, []).append(rel)
+ else:
+ vi['rhs_rels'] = vi['lhs_rels'] = {}
+
+ def _subquery_variable(self, selectvar):
+ raise VariableFromSubQuery(selectvar)
+
+ def insert_varmap_snippets(self, varmap, rqlexprs, varexistsmap):
+ try:
+ self.init_from_varmap(varmap, varexistsmap)
+ except VariableFromSubQuery as ex:
+ # variable may have been moved to a newly inserted subquery
+ # we should insert snippet in that subquery
+ subquery = self.select.aliases[ex.variable].query
+ assert len(subquery.children) == 1, subquery
+ subselect = subquery.children[0]
+ RQLRewriter(self.session).rewrite(subselect, [(varmap, rqlexprs)],
+ self.kwargs)
+ return
+ self._insert_scope = None
+ previous = None
+ inserted = False
+ for rqlexpr in rqlexprs:
+ self.current_expr = rqlexpr
+ if varexistsmap is None:
+ try:
+ new = self.insert_snippet(varmap, rqlexpr.snippet_rqlst, previous)
+ except Unsupported:
+ continue
+ inserted = True
+ if new is not None and self._insert_scope is None:
+ self.exists_snippet[rqlexpr] = new
+ previous = previous or new
+ else:
+ # called to reintroduce snippet due to ambiguity creation,
+ # so skip snippets which are not introducing this ambiguity
+ exists = varexistsmap[varmap]
+ if self.exists_snippet.get(rqlexpr) is exists:
+ self.insert_snippet(varmap, rqlexpr.snippet_rqlst, exists)
+ if varexistsmap is None and not inserted:
+ # no rql expression found matching rql solutions. User has no access right
+ raise Unauthorized() # XXX may also be because of bad constraints in schema definition
+
+ def insert_snippet(self, varmap, snippetrqlst, previous=None):
+ new = snippetrqlst.where.accept(self)
+ existing = self.existingvars
+ self.existingvars = None
+ try:
+ return self._insert_snippet(varmap, previous, new)
+ finally:
+ self.existingvars = existing
+
+ def _inserted_root(self, new):
+ if not isinstance(new, (n.Exists, n.Not)):
+ new = n.Exists(new)
+ return new
+
+ def _insert_snippet(self, varmap, previous, new):
+        """insert `new` snippet into the syntax tree, which has been rewritten
+ using `varmap`. In cases where an action is protected by several rql
+ expresssion, `previous` will be the first rql expression which has been
+ inserted, and so should be ORed with the following expressions.
+ """
+ if new is not None:
+ if self._insert_scope is None:
+ insert_scope = None
+ for vi in self.varinfos:
+ scope = vi.get('stinfo', {}).get('scope', self.select)
+ if insert_scope is None:
+ insert_scope = scope
+ else:
+ insert_scope = common_parent(scope, insert_scope)
+ else:
+ insert_scope = self._insert_scope
+ if self._insert_scope is None and any(vi.get('stinfo', {}).get('optrelations')
+ for vi in self.varinfos):
+ assert previous is None
+ self._insert_scope, new = self.snippet_subquery(varmap, new)
+ self.insert_pending()
+ #self._insert_scope = None
+ return new
+ new = self._inserted_root(new)
+ if previous is None:
+ insert_scope.add_restriction(new)
+ else:
+ grandpa = previous.parent
+ or_ = n.Or(previous, new)
+ grandpa.replace(previous, or_)
+ if not self.removing_ambiguity:
+ try:
+ self.compute_solutions()
+ except Unsupported:
+ # some solutions have been lost, can't apply this rql expr
+ if previous is None:
+ self.current_statement().remove_node(new, undefine=True)
+ else:
+ grandpa.replace(or_, previous)
+ self._cleanup_inserted(new)
+ raise
+ else:
+ with tempattr(self, '_insert_scope', new):
+ self.insert_pending()
+ return new
+ self.insert_pending()
+
+ def insert_pending(self):
+ """pending_keys hold variable referenced by U has__permission X
+ relation.
+
+ Once the snippet introducing this has been inserted and solutions
+        recomputed, we have to insert snippets defined for each of the entity
+ types taken by X
+ """
+ stmt = self.current_statement()
+ while self.pending_keys:
+ key, action = self.pending_keys.pop()
+ try:
+ varname = self.rewritten[key]
+ except KeyError:
+ try:
+ varname = self.revvarmap[key[-1]][0]
+ except KeyError:
+ # variable isn't used anywhere else, we can't insert security
+ raise Unauthorized()
+ ptypes = stmt.defined_vars[varname].stinfo['possibletypes']
+ if len(ptypes) > 1:
+ # XXX dunno how to handle this
+ self.session.error(
+ 'cant check security of %s, ambigous type for %s in %s',
+ stmt, varname, key[0]) # key[0] == the rql expression
+ raise Unauthorized()
+ etype = next(iter(ptypes))
+ eschema = self.schema.eschema(etype)
+ if not eschema.has_perm(self.session, action):
+ rqlexprs = eschema.get_rqlexprs(action)
+ if not rqlexprs:
+ raise Unauthorized()
+ self.insert_snippets([({varname: 'X'}, rqlexprs)])
+
+ def snippet_subquery(self, varmap, transformedsnippet):
+ """introduce the given snippet in a subquery"""
+ subselect = stmts.Select()
+ snippetrqlst = n.Exists(transformedsnippet.copy(subselect))
+ get_rschema = self.schema.rschema
+ aliases = []
+ done = set()
+ for i, (selectvar, _) in enumerate(varmap):
+ need_null_test = False
+ subselectvar = subselect.get_variable(selectvar)
+ subselect.append_selected(n.VariableRef(subselectvar))
+ aliases.append(selectvar)
+ todo = [(selectvar, self.varinfos[i]['stinfo'])]
+ while todo:
+ varname, stinfo = todo.pop()
+ done.add(varname)
+ for rel in iter_relations(stinfo):
+ if rel in done:
+ continue
+ done.add(rel)
+ rschema = get_rschema(rel.r_type)
+ if rschema.final or rschema.inlined:
+ rel.children[0].name = varname # XXX explain why
+ subselect.add_restriction(rel.copy(subselect))
+ for vref in rel.children[1].iget_nodes(n.VariableRef):
+ if isinstance(vref.variable, n.ColumnAlias):
+ # XXX could probably be handled by generating the
+ # subquery into the detected subquery
+ raise BadSchemaDefinition(
+ "cant insert security because of usage two inlined "
+ "relations in this query. You should probably at "
+ "least uninline %s" % rel.r_type)
+ subselect.append_selected(vref.copy(subselect))
+ aliases.append(vref.name)
+ self.select.remove_node(rel)
+ # when some inlined relation has to be copied in the
+ # subquery and that relation is optional, we need to
+ # test that either value is NULL or that the snippet
+ # condition is satisfied
+ if varname == selectvar and rel.optional and rschema.inlined:
+ need_null_test = True
+ # also, if some attributes or inlined relation of the
+ # object variable are accessed, we need to get all those
+ # from the subquery as well
+ if vref.name not in done and rschema.inlined:
+                        # we can use vref here, defined in the above for loop
+ ostinfo = vref.variable.stinfo
+ for orel in iter_relations(ostinfo):
+ orschema = get_rschema(orel.r_type)
+ if orschema.final or orschema.inlined:
+ todo.append( (vref.name, ostinfo) )
+ break
+ if need_null_test:
+ snippetrqlst = n.Or(
+ n.make_relation(subselect.get_variable(selectvar), 'is',
+ (None, None), n.Constant,
+ operator='='),
+ snippetrqlst)
+ subselect.add_restriction(snippetrqlst)
+ if self.u_varname:
+ # generate an identifier for the substitution
+ argname = subselect.allocate_varname()
+ while argname in self.kwargs:
+ argname = subselect.allocate_varname()
+ subselect.add_constant_restriction(subselect.get_variable(self.u_varname),
+ 'eid', text_type(argname), 'Substitute')
+ self.kwargs[argname] = self.session.user.eid
+ add_types_restriction(self.schema, subselect, subselect,
+ solutions=self.solutions)
+ myunion = stmts.Union()
+ myunion.append(subselect)
+ aliases = [n.VariableRef(self.select.get_variable(name, i))
+ for i, name in enumerate(aliases)]
+ self.select.add_subquery(n.SubQuery(aliases, myunion), check=False)
+ self._cleanup_inserted(transformedsnippet)
+ try:
+ self.compute_solutions()
+ except Unsupported:
+ # some solutions have been lost, can't apply this rql expr
+ self.select.remove_subquery(self.select.with_[-1])
+ raise
+ return subselect, snippetrqlst
+
+ def remove_ambiguities(self, snippets, newsolutions):
+ # the snippet has introduced some ambiguities, we have to resolve them
+ # "manually"
+ variantes = self.build_variantes(newsolutions)
+ # insert "is" where necessary
+ varexistsmap = {}
+ self.removing_ambiguity = True
+ for (erqlexpr, varmap, oldvarname), etype in variantes[0].items():
+ varname = self.rewritten[(erqlexpr, varmap, oldvarname)]
+ var = self.select.defined_vars[varname]
+ exists = var.references()[0].scope
+ exists.add_constant_restriction(var, 'is', etype, 'etype')
+ varexistsmap[varmap] = exists
+ # insert ORED exists where necessary
+ for variante in variantes[1:]:
+ self.insert_snippets(snippets, varexistsmap)
+ for key, etype in variante.items():
+ varname = self.rewritten[key]
+ try:
+ var = self.select.defined_vars[varname]
+ except KeyError:
+ # not a newly inserted variable
+ continue
+ exists = var.references()[0].scope
+ exists.add_constant_restriction(var, 'is', etype, 'etype')
+ # recompute solutions
+ self.compute_solutions()
+ # clean solutions according to initial solutions
+ return remove_solutions(self.solutions, self.select.solutions,
+ self.select.defined_vars)
+
+ def build_variantes(self, newsolutions):
+ variantes = set()
+ for sol in newsolutions:
+ variante = []
+ for key, newvar in self.rewritten.items():
+ variante.append( (key, sol[newvar]) )
+ variantes.add(tuple(variante))
+ # rebuild variantes as dict
+ variantes = [dict(variante) for variante in variantes]
+ # remove variable which have always the same type
+ for key in self.rewritten:
+ it = iter(variantes)
+ etype = next(it)[key]
+ for variante in it:
+ if variante[key] != etype:
+ break
+ else:
+ for variante in variantes:
+ del variante[key]
+ return variantes
+
+ def _cleanup_inserted(self, node):
+ # cleanup inserted variable references
+ removed = set()
+ for vref in node.iget_nodes(n.VariableRef):
+ vref.unregister_reference()
+ if not vref.variable.stinfo['references']:
+ # no more references, undefine the variable
+ del self.select.defined_vars[vref.name]
+ removed.add(vref.name)
+ for key, newvar in list(self.rewritten.items()):
+ if newvar in removed:
+ del self.rewritten[key]
+
+
+ def _may_be_shared_with(self, sniprel, target):
+ """if the snippet relation can be skipped to use a relation from the
+ original query, return that relation node
+ """
+ if sniprel.neged(strict=True):
+ return None # no way
+ rschema = self.schema.rschema(sniprel.r_type)
+ stmt = self.current_statement()
+ for vi in self.varinfos:
+ try:
+ if target == 'object':
+ orels = vi['lhs_rels'][sniprel.r_type]
+ cardindex = 0
+ ttypes_func = rschema.objects
+ rdef = rschema.rdef
+ else: # target == 'subject':
+ orels = vi['rhs_rels'][sniprel.r_type]
+ cardindex = 1
+ ttypes_func = rschema.subjects
+ rdef = lambda x, y: rschema.rdef(y, x)
+ except KeyError:
+ # may be raised by vi['xhs_rels'][sniprel.r_type]
+ continue
+ # if cardinality isn't in '?1', we can't ignore the snippet relation
+ # and use variable from the original query
+ if _has_multiple_cardinality(vi['stinfo']['possibletypes'], rdef,
+ ttypes_func, cardindex):
+ continue
+ orel = _compatible_relation(orels, stmt, sniprel)
+ if orel is not None:
+ return orel
+ return None
+
+ def _use_orig_term(self, snippet_varname, term):
+ key = (self.current_expr, self.varmap, snippet_varname)
+ if key in self.rewritten:
+ stmt = self.current_statement()
+ insertedvar = stmt.defined_vars.pop(self.rewritten[key])
+ for inserted_vref in insertedvar.references():
+ inserted_vref.parent.replace(inserted_vref, term.copy(stmt))
+ self.rewritten[key] = term.name
+
+ def _get_varname_or_term(self, vname):
+ stmt = self.current_statement()
+ if vname == 'U':
+ stmt = self.select
+ if self.u_varname is None:
+ self.u_varname = stmt.allocate_varname()
+ # generate an identifier for the substitution
+ argname = stmt.allocate_varname()
+ while argname in self.kwargs:
+ argname = stmt.allocate_varname()
+ # insert "U eid %(u)s"
+ stmt.add_constant_restriction(
+ stmt.get_variable(self.u_varname),
+ 'eid', text_type(argname), 'Substitute')
+ self.kwargs[argname] = self.session.user.eid
+ return self.u_varname
+ key = (self.current_expr, self.varmap, vname)
+ try:
+ return self.rewritten[key]
+ except KeyError:
+ self.rewritten[key] = newvname = stmt.allocate_varname()
+ return newvname
+
+ # visitor methods ##########################################################
+
+ def _visit_binary(self, node, cls):
+ newnode = cls()
+ for c in node.children:
+ new = c.accept(self)
+ if new is None:
+ continue
+ newnode.append(new)
+ if len(newnode.children) == 0:
+ return None
+ if len(newnode.children) == 1:
+ return newnode.children[0]
+ return newnode
+
+ def _visit_unary(self, node, cls):
+ newc = node.children[0].accept(self)
+ if newc is None:
+ return None
+ newnode = cls()
+ newnode.append(newc)
+ return newnode
+
+ def visit_and(self, node):
+ return self._visit_binary(node, n.And)
+
+ def visit_or(self, node):
+ return self._visit_binary(node, n.Or)
+
+ def visit_not(self, node):
+ return self._visit_unary(node, n.Not)
+
+ def visit_exists(self, node):
+ return self._visit_unary(node, n.Exists)
+
+ def keep_var(self, varname):
+ if varname in 'SO':
+ return varname in self.existingvars
+ if varname == 'U':
+ return True
+ vargraph = self.current_expr.vargraph
+ for existingvar in self.existingvars:
+ #path = has_path(vargraph, varname, existingvar)
+ if not varname in vargraph or has_path(vargraph, varname, existingvar):
+ return True
+ # no path from this variable to an existing variable
+ return False
+
+ def visit_relation(self, node):
+ lhs, rhs = node.get_variable_parts()
+        # remove relations where a nonexistent variable and/or a variable
+        # linked to a nonexistent variable is used.
+ if self.existingvars:
+ if not self.keep_var(lhs.name):
+ return
+ if node.r_type in ('has_add_permission', 'has_update_permission',
+ 'has_delete_permission', 'has_read_permission'):
+ assert lhs.name == 'U'
+ action = node.r_type.split('_')[1]
+ key = (self.current_expr, self.varmap, rhs.name)
+ self.pending_keys.append( (key, action) )
+ return
+ if isinstance(rhs, n.VariableRef):
+ if self.existingvars and not self.keep_var(rhs.name):
+ return
+ if lhs.name in self.revvarmap and rhs.name != 'U':
+ orel = self._may_be_shared_with(node, 'object')
+ if orel is not None:
+ self._use_orig_term(rhs.name, orel.children[1].children[0])
+ return
+ elif rhs.name in self.revvarmap and lhs.name != 'U':
+ orel = self._may_be_shared_with(node, 'subject')
+ if orel is not None:
+ self._use_orig_term(lhs.name, orel.children[0])
+ return
+ rel = n.Relation(node.r_type, node.optional)
+ for c in node.children:
+ rel.append(c.accept(self))
+ return rel
+
+ def visit_comparison(self, node):
+ cmp_ = n.Comparison(node.operator)
+ for c in node.children:
+ cmp_.append(c.accept(self))
+ return cmp_
+
+ def visit_mathexpression(self, node):
+ cmp_ = n.MathExpression(node.operator)
+ for c in node.children:
+ cmp_.append(c.accept(self))
+ return cmp_
+
+ def visit_function(self, node):
+ """generate filter name for a function"""
+ function_ = n.Function(node.name)
+ for c in node.children:
+ function_.append(c.accept(self))
+ return function_
+
+ def visit_constant(self, node):
+ """generate filter name for a constant"""
+ return n.Constant(node.value, node.type)
+
+ def visit_variableref(self, node):
+ """get the sql name for a variable reference"""
+ stmt = self.current_statement()
+ if node.name in self.revvarmap:
+ selectvar, index = self.revvarmap[node.name]
+ vi = self.varinfos[index]
+ if vi.get('const') is not None:
+ return n.Constant(vi['const'], 'Int')
+ return n.VariableRef(stmt.get_variable(selectvar))
+ vname_or_term = self._get_varname_or_term(node.name)
+ if isinstance(vname_or_term, string_types):
+ return n.VariableRef(stmt.get_variable(vname_or_term))
+ # shared term
+ return vname_or_term.copy(stmt)
+
+ def current_statement(self):
+ if self._insert_scope is None:
+ return self.select
+ return self._insert_scope.stmt
+
+
+class RQLRelationRewriter(RQLRewriter):
+ """Insert some rql snippets into another rql syntax tree, replacing computed
+ relations by their associated rule.
+
+ This class *isn't thread safe*.
+ """
+ def __init__(self, session):
+ super(RQLRelationRewriter, self).__init__(session)
+ self.rules = {}
+ for rschema in self.schema.iter_computed_relations():
+ self.rules[rschema.type] = RRQLExpression(rschema.rule)
+
+ def rewrite(self, union, kwargs=None):
+ self.kwargs = kwargs
+ self.removing_ambiguity = False
+ self.existingvars = None
+ self.pending_keys = None
+ for relation in union.iget_nodes(n.Relation):
+ if relation.r_type in self.rules:
+ self.select = relation.stmt
+ self.solutions = solutions = self.select.solutions[:]
+ self.current_expr = self.rules[relation.r_type]
+ self._insert_scope = relation.scope
+ self.rewritten = {}
+ lhs, rhs = relation.get_variable_parts()
+ varmap = {lhs.name: 'S', rhs.name: 'O'}
+ self.init_from_varmap(tuple(sorted(varmap.items())))
+ self.insert_snippet(varmap, self.current_expr.snippet_rqlst)
+ self.select.remove_node(relation)
+
+ def _subquery_variable(self, selectvar):
+ return self.select.aliases[selectvar].stinfo
+
+ def _inserted_root(self, new):
+ return new
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/rset.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/rset.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,730 @@
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""The `ResultSet` class which is returned as result of an rql query"""
+__docformat__ = "restructuredtext en"
+
+from warnings import warn
+
+from six import PY3
+from six.moves import range
+
+from logilab.common import nullobject
+from logilab.common.decorators import cached, clear_cache, copy_cache
+from rql import nodes, stmts
+
+from cubicweb import NotAnEntity, NoResultError, MultipleResultsError
+
+
+_MARKER = nullobject()
+
+
+class ResultSet(object):
+ """A result set wraps a RQL query result. This object implements
+ partially the list protocol to allow direct use as a list of
+ result rows.
+
+ :type rowcount: int
+ :param rowcount: number of rows in the result
+
+ :type rows: list
+ :param rows: list of rows of result
+
+ :type description: list
+ :param description:
+ result's description, using the same structure as the result itself
+
+ :type rql: str or unicode
+ :param rql: the original RQL query string
+ """
+
+ def __init__(self, results, rql, args=None, description=None, rqlst=None):
+ if rqlst is not None:
+ warn('[3.20] rqlst parameter is deprecated',
+ DeprecationWarning, stacklevel=2)
+ self.rows = results
+ self.rowcount = results and len(results) or 0
+ # original query and arguments
+ self.rql = rql
+ self.args = args
+ # entity types for each cell (same shape as rows)
+ # maybe discarded if specified when the query has been executed
+ if description is None:
+ self.description = []
+ else:
+ self.description = description
+ # set to (limit, offset) when a result set is limited using the
+ # .limit method
+ self.limited = None
+ # set by the cursor which returned this resultset
+ self.req = None
+ # actions cache
+ self._rsetactions = None
+
+    def __str__(self):
+        if not self.rows:
+            return '<empty resultset %s>' % self.rql
+        return '<resultset %s (%s rows)>' % (self.rql, len(self.rows))
+
+    def __repr__(self):
+        if not self.rows:
+            return '<empty resultset for %r>' % self.rql
+        rows = self.rows
+        if len(rows) > 10:
+            rows = rows[:10] + ['...']
+        if len(rows) > 1:
+            # add a line break before first entity if more than one.
+            pattern = '<resultset %r (%s rows):\n%s>'
+        else:
+            pattern = '<resultset %r (%s rows): %s>'
+
+ if not self.description:
+ return pattern % (self.rql, len(self.rows),
+ '\n'.join(str(r) for r in rows))
+ return pattern % (self.rql, len(self.rows),
+ '\n'.join('%s (%s)' % (r, d)
+ for r, d in zip(rows, self.description)))
+
+ def possible_actions(self, **kwargs):
+ if self._rsetactions is None:
+ self._rsetactions = {}
+ if kwargs:
+ key = tuple(sorted(kwargs.items()))
+ else:
+ key = None
+ try:
+ return self._rsetactions[key]
+ except KeyError:
+ actions = self.req.vreg['actions'].poss_visible_objects(
+ self.req, rset=self, **kwargs)
+ self._rsetactions[key] = actions
+ return actions
+
+ def __len__(self):
+ """returns the result set's size"""
+ return self.rowcount
+
+ def __getitem__(self, i):
+ """returns the ith element of the result set"""
+ return self.rows[i] #ResultSetRow(self.rows[i])
+
+ def __iter__(self):
+ """Returns an iterator over rows"""
+ return iter(self.rows)
+
+ def __add__(self, rset):
+ # XXX buggy implementation (.rql and .args attributes at least much
+ # probably differ)
+ # at least rql could be fixed now that we have union and sub-queries
+ # but I tend to think that since we have that, we should not need this
+ # method anymore (syt)
+ rset = ResultSet(self.rows+rset.rows, self.rql, self.args,
+ self.description + rset.description)
+ rset.req = self.req
+ return rset
+
+ def copy(self, rows=None, descr=None):
+ if rows is None:
+ rows = self.rows[:]
+ descr = self.description[:]
+ rset = ResultSet(rows, self.rql, self.args, descr)
+ rset.req = self.req
+ return rset
+
+ def transformed_rset(self, transformcb):
+        """return a transformed copy of the result set according to the given callback
+
+        :type transformcb: callable(row, desc)
+ :param transformcb:
+ a callable which should take a row and its type description as
+ parameters, and return the transformed row and type description.
+
+
+ :type col: int
+ :param col: the column index
+
+ :rtype: `ResultSet`
+ """
+ rows, descr = [], []
+ rset = self.copy(rows, descr)
+ for row, desc in zip(self.rows, self.description):
+ nrow, ndesc = transformcb(row, desc)
+ if ndesc: # transformcb returns None for ndesc to skip that row
+ rows.append(nrow)
+ descr.append(ndesc)
+ rset.rowcount = len(rows)
+ return rset
+
+ def filtered_rset(self, filtercb, col=0):
+ """filter the result set according to a given filtercb
+
+ :type filtercb: callable(entity)
+ :param filtercb:
+ a callable which should take an entity as argument and return
+ False if it should be skipped, else True
+
+ :type col: int
+ :param col: the column index
+
+ :rtype: `ResultSet`
+ """
+ rows, descr = [], []
+ rset = self.copy(rows, descr)
+ for i in range(len(self)):
+ if not filtercb(self.get_entity(i, col)):
+ continue
+ rows.append(self.rows[i])
+ descr.append(self.description[i])
+ rset.rowcount = len(rows)
+ return rset
+
+
+ def sorted_rset(self, keyfunc, reverse=False, col=0):
+ """sorts the result set according to a given keyfunc
+
+ :type keyfunc: callable(entity)
+ :param keyfunc:
+ a callable which should take an entity as argument and return
+ the value used to compare and sort
+
+ :type reverse: bool
+ :param reverse: if the result should be reversed
+
+ :type col: int
+        :param col: the column index. if col = -1, the whole row is used
+
+ :rtype: `ResultSet`
+ """
+ rows, descr = [], []
+ rset = self.copy(rows, descr)
+ if col >= 0:
+ entities = sorted(enumerate(self.entities(col)),
+ key=lambda t: keyfunc(t[1]), reverse=reverse)
+ else:
+ entities = sorted(enumerate(self),
+ key=lambda t: keyfunc(t[1]), reverse=reverse)
+ for index, _ in entities:
+ rows.append(self.rows[index])
+ descr.append(self.description[index])
+ rset.rowcount = len(rows)
+ return rset
+
+ def split_rset(self, keyfunc=None, col=0, return_dict=False):
+ """splits the result set in multiple result sets according to
+ a given key
+
+ :type keyfunc: callable(entity or FinalType)
+ :param keyfunc:
+ a callable which should take a value of the rset in argument and
+          return the value used to group the value. If not defined, the raw value
+ of the specified columns is used.
+
+ :type col: int
+        :param col: the column index. if col = -1, the whole row is used
+
+ :type return_dict: Boolean
+        :param return_dict: If true, the function returns a mapping
+ (key -> rset) instead of a list of rset
+
+ :rtype: List of `ResultSet` or mapping of `ResultSet`
+
+ """
+ result = []
+ mapping = {}
+ for idx, line in enumerate(self):
+ if col >= 0:
+ try:
+ key = self.get_entity(idx, col)
+ except NotAnEntity:
+ key = line[col]
+ else:
+ key = line
+ if keyfunc is not None:
+ key = keyfunc(key)
+
+ if key not in mapping:
+ rows, descr = [], []
+ rset = self.copy(rows, descr)
+ mapping[key] = rset
+ result.append(rset)
+ else:
+ rset = mapping[key]
+ rset.rows.append(self.rows[idx])
+ rset.description.append(self.description[idx])
+ for rset in result:
+ rset.rowcount = len(rset.rows)
+ if return_dict:
+ return mapping
+ else:
+ return result
+
+ def limited_rql(self):
+ """returns a printable rql for the result set associated to the object,
+ with limit/offset correctly set according to maximum page size and
+ currently displayed page when necessary
+ """
+ # try to get page boundaries from the navigation component
+ # XXX we should probably not have a ref to this component here (eg in
+ # cubicweb)
+ nav = self.req.vreg['components'].select_or_none('navigation', self.req,
+ rset=self)
+ if nav:
+ start, stop = nav.page_boundaries()
+ rql = self._limit_offset_rql(stop - start, start)
+        # result set may have been limited manually in which case navigation won't
+ # apply
+ elif self.limited:
+ rql = self._limit_offset_rql(*self.limited)
+ # navigation component doesn't apply and rset has not been limited, no
+ # need to limit query
+ else:
+ rql = self.printable_rql()
+ return rql
+
+ def _limit_offset_rql(self, limit, offset):
+ rqlst = self.syntax_tree()
+ if len(rqlst.children) == 1:
+ select = rqlst.children[0]
+ olimit, ooffset = select.limit, select.offset
+ select.limit, select.offset = limit, offset
+ rql = rqlst.as_string(kwargs=self.args)
+ # restore original limit/offset
+ select.limit, select.offset = olimit, ooffset
+ else:
+ newselect = stmts.Select()
+ newselect.limit = limit
+ newselect.offset = offset
+ aliases = [nodes.VariableRef(newselect.get_variable(chr(65+i), i))
+ for i in range(len(rqlst.children[0].selection))]
+ for vref in aliases:
+ newselect.append_selected(nodes.VariableRef(vref.variable))
+ newselect.set_with([nodes.SubQuery(aliases, rqlst)], check=False)
+ newunion = stmts.Union()
+ newunion.append(newselect)
+ rql = newunion.as_string(kwargs=self.args)
+ rqlst.parent = None
+ return rql
+
+ def limit(self, limit, offset=0, inplace=False):
+ """limit the result set to the given number of rows optionally starting
+ from an index different than 0
+
+ :type limit: int
+ :param limit: the maximum number of results
+
+ :type offset: int
+ :param offset: the offset index
+
+ :type inplace: bool
+ :param inplace:
+ if true, the result set is modified in place, else a new result set
+ is returned and the original is left unmodified
+
+ :rtype: `ResultSet`
+ """
+ stop = limit+offset
+ rows = self.rows[offset:stop]
+ descr = self.description[offset:stop]
+ if inplace:
+ rset = self
+ rset.rows, rset.description = rows, descr
+ rset.rowcount = len(rows)
+ clear_cache(rset, 'description_struct')
+ if offset:
+ clear_cache(rset, 'get_entity')
+ # we also have to fix/remove from the request entity cache entities
+ # which get a wrong rset reference by this limit call
+ for entity in self.req.cached_entities():
+ if entity.cw_rset is self:
+ if offset <= entity.cw_row < stop:
+ entity.cw_row = entity.cw_row - offset
+ else:
+ entity.cw_rset = entity.as_rset()
+ entity.cw_row = entity.cw_col = 0
+ else:
+ rset = self.copy(rows, descr)
+ if not offset:
+ # can copy built entity caches
+ copy_cache(rset, 'get_entity', self)
+ rset.limited = (limit, offset)
+ return rset
+
+ def printable_rql(self, encoded=_MARKER):
+ """return the result set's origin rql as a string, with arguments
+        substituted
+ """
+ if encoded is not _MARKER:
+ warn('[3.21] the "encoded" argument is deprecated', DeprecationWarning)
+ encoding = self.req.encoding
+ rqlstr = self.syntax_tree().as_string(kwargs=self.args)
+ if PY3:
+ return rqlstr
+ # sounds like we get encoded or unicode string due to a bug in as_string
+ if not encoded:
+ if isinstance(rqlstr, unicode):
+ return rqlstr
+ return unicode(rqlstr, encoding)
+ else:
+ if isinstance(rqlstr, unicode):
+ return rqlstr.encode(encoding)
+ return rqlstr
+
+ # client helper methods ###################################################
+
+ def entities(self, col=0):
+ """iter on entities with eid in the `col` column of the result set"""
+ for i in range(len(self)):
+ # may have None values in case of outer join (or aggregat on eid
+ # hacks)
+ if self.rows[i][col] is not None:
+ yield self.get_entity(i, col)
+
+ def iter_rows_with_entities(self):
+ """ iterates over rows, and for each row
+ eids are converted to plain entities
+ """
+ for i, row in enumerate(self):
+ _row = []
+ for j, col in enumerate(row):
+ try:
+ _row.append(self.get_entity(i, j) if col is not None else col)
+ except NotAnEntity:
+ _row.append(col)
+ yield _row
+
+ def complete_entity(self, row, col=0, skip_bytes=True):
+        """short cut to get a completed entity instance for a particular
+ row (all instance's attributes have been fetched)
+ """
+ entity = self.get_entity(row, col)
+ entity.complete(skip_bytes=skip_bytes)
+ return entity
+
+ @cached
+ def get_entity(self, row, col):
+ """convenience method for query retrieving a single entity, returns a
+ partially initialized Entity instance.
+
+ .. warning::
+
+ Due to the cache wrapping this function, you should NEVER give row as
+ a named parameter (i.e. `rset.get_entity(0, 1)` is OK but
+ `rset.get_entity(row=0, col=1)` isn't)
+
+ :type row,col: int, int
+ :param row,col:
+ row and col numbers localizing the entity among the result's table
+
+ :return: the partially initialized `Entity` instance
+ """
+ etype = self.description[row][col]
+ try:
+ eschema = self.req.vreg.schema.eschema(etype)
+ if eschema.final:
+ raise NotAnEntity(etype)
+ except KeyError:
+ raise NotAnEntity(etype)
+ return self._build_entity(row, col)
+
+ def one(self, col=0):
+ """Retrieve exactly one entity from the query.
+
+ If the result set is empty, raises :exc:`NoResultError`.
+ If the result set has more than one row, raises
+ :exc:`MultipleResultsError`.
+
+ :type col: int
+ :param col: The column localising the entity in the unique row
+
+ :return: the partially initialized `Entity` instance
+ """
+ if len(self) == 1:
+ return self.get_entity(0, col)
+ elif len(self) == 0:
+ raise NoResultError("No row was found for one()")
+ else:
+ raise MultipleResultsError("Multiple rows were found for one()")
+
+ def _build_entity(self, row, col):
+ """internal method to get a single entity, returns a partially
+ initialized Entity instance.
+
+ partially means that only attributes selected in the RQL query will be
+ directly assigned to the entity.
+
+ :type row,col: int, int
+ :param row,col:
+ row and col numbers localizing the entity among the result's table
+
+ :return: the partially initialized `Entity` instance
+ """
+ req = self.req
+ if req is None:
+ raise AssertionError('dont call get_entity with no req on the result set')
+ rowvalues = self.rows[row]
+ eid = rowvalues[col]
+ assert eid is not None
+ # return cached entity if exists. This also avoids potential recursion
+ # XXX should we consider updating a cached entity with possible
+ # new attributes found in this resultset ?
+ try:
+ entity = req.entity_cache(eid)
+ except KeyError:
+ pass
+ else:
+ if entity.cw_rset is None:
+ # entity has no rset set, this means entity has been created by
+                # the querier (req is a repository session) and so has no rset
+ # info. Add it.
+ entity.cw_rset = self
+ entity.cw_row = row
+ entity.cw_col = col
+ return entity
+ # build entity instance
+ etype = self.description[row][col]
+ entity = self.req.vreg['etypes'].etype_class(etype)(req, rset=self,
+ row=row, col=col)
+ entity.eid = eid
+ # cache entity
+ req.set_entity_cache(entity)
+ # try to complete the entity if there are some additional columns
+ if len(rowvalues) > 1:
+ eschema = entity.e_schema
+ eid_col, attr_cols, rel_cols = self._rset_structure(eschema, col)
+ entity.eid = rowvalues[eid_col]
+ for attr, col_idx in attr_cols.items():
+ entity.cw_attr_cache[attr] = rowvalues[col_idx]
+ for (rtype, role), col_idx in rel_cols.items():
+ value = rowvalues[col_idx]
+ if value is None:
+ if role == 'subject':
+ rql = 'Any Y WHERE X %s Y, X eid %s'
+ else:
+ rql = 'Any Y WHERE Y %s X, X eid %s'
+ rrset = ResultSet([], rql % (rtype, entity.eid))
+ rrset.req = req
+ else:
+ rrset = self._build_entity(row, col_idx).as_rset()
+ entity.cw_set_relation_cache(rtype, role, rrset)
+ return entity
+
+ @cached
+ def _rset_structure(self, eschema, entity_col):
+ eid_col = col = entity_col
+ rqlst = self.syntax_tree()
+ get_rschema = eschema.schema.rschema
+ attr_cols = {}
+ rel_cols = {}
+ if rqlst.TYPE == 'select':
+ # UNION query, find the subquery from which this entity has been
+ # found
+ select, col = rqlst.locate_subquery(entity_col, eschema.type, self.args)
+ else:
+ select = rqlst
+ # take care, due to outer join support, we may find None
+ # values for non final relation
+ for i, attr, role in attr_desc_iterator(select, col, entity_col):
+ rschema = get_rschema(attr)
+ if rschema.final:
+ if attr == 'eid':
+ eid_col = i
+ else:
+ attr_cols[attr] = i
+ else:
+ # XXX takefirst=True to remove warning triggered by ambiguous relations
+ rdef = eschema.rdef(attr, role, takefirst=True)
+ # only keep value if it can't be multivalued
+ if rdef.role_cardinality(role) in '1?':
+ rel_cols[(attr, role)] = i
+ return eid_col, attr_cols, rel_cols
+
+ @cached
+ def syntax_tree(self):
+ """return the syntax tree (:class:`rql.stmts.Union`) for the
+ originating query. You can expect it to have solutions
+ computed and it will be properly annotated.
+ """
+ return self.req.vreg.parse(self.req, self.rql, self.args)
+
+ @cached
+ def column_types(self, col):
+ """return the list of different types in the column with the given col
+
+ :type col: int
+ :param col: the index of the desired column
+
+ :rtype: list
+ :return: the different entities type found in the column
+ """
+ return frozenset(struc[-1][col] for struc in self.description_struct())
+
+ @cached
+ def description_struct(self):
+ """return a list describing sequence of results with the same
+ description, e.g. :
+ [[0, 4, ('Bug',)]
+ [[0, 4, ('Bug',), [5, 8, ('Story',)]
+ [[0, 3, ('Project', 'Version',)]]
+ """
+ result = []
+ last = None
+ for i, row in enumerate(self.description):
+ if row != last:
+ if last is not None:
+ result[-1][1] = i - 1
+ result.append( [i, None, row] )
+ last = row
+ if last is not None:
+ result[-1][1] = i
+ return result
+
+ def _locate_query_params(self, rqlst, row, col):
+ locate_query_col = col
+ etype = self.description[row][col]
+ # final type, find a better one to locate the correct subquery
+ # (ambiguous if possible)
+ eschema = self.req.vreg.schema.eschema
+ if eschema(etype).final:
+ for select in rqlst.children:
+ try:
+ myvar = select.selection[col].variable
+ except AttributeError:
+ # not a variable
+ continue
+ for i in range(len(select.selection)):
+ if i == col:
+ continue
+ coletype = self.description[row][i]
+ # None description possible on column resulting from an
+ # outer join
+ if coletype is None or eschema(coletype).final:
+ continue
+ try:
+ ivar = select.selection[i].variable
+ except AttributeError:
+ # not a variable
+ continue
+ # check variables don't comes from a subquery or are both
+ # coming from the same subquery
+ if getattr(ivar, 'query', None) is getattr(myvar, 'query', None):
+ etype = coletype
+ locate_query_col = i
+ if len(self.column_types(i)) > 1:
+ return etype, locate_query_col
+ return etype, locate_query_col
+
+ @cached
+ def related_entity(self, row, col):
+ """given an cell of the result set, try to return a (entity, relation
+ name) tuple to which this cell is linked.
+
+ This is especially useful when the cell is an attribute of an entity,
+ to get the entity to which this attribute belongs to.
+ """
+ rqlst = self.syntax_tree()
+ # UNION query, we've first to find a 'pivot' column to use to get the
+ # actual query from which the row is coming
+ etype, locate_query_col = self._locate_query_params(rqlst, row, col)
+ # now find the query from which this entity has been found. Returned
+ # select node may be a subquery with different column indexes.
+ select = rqlst.locate_subquery(locate_query_col, etype, self.args)[0]
+ # then get the index of root query's col in the subquery
+ col = rqlst.subquery_selection_index(select, col)
+ if col is None:
+ # XXX unexpected, should fix subquery_selection_index ?
+ return None, None
+ try:
+ myvar = select.selection[col].variable
+ except AttributeError:
+ # not a variable
+ return None, None
+ rel = myvar.main_relation()
+ if rel is not None:
+ index = rel.children[0].root_selection_index()
+ if index is not None and self.rows[row][index]:
+ try:
+ entity = self.get_entity(row, index)
+ return entity, rel.r_type
+ except NotAnEntity as exc:
+ return None, None
+ return None, None
+
+ @cached
+ def searched_text(self):
+ """returns the searched text in case of full-text search
+
+ :return: searched text or `None` if the query is not
+ a full-text query
+ """
+ rqlst = self.syntax_tree()
+ for rel in rqlst.iget_nodes(nodes.Relation):
+ if rel.r_type == 'has_text':
+ __, rhs = rel.get_variable_parts()
+ return rhs.eval(self.args)
+ return None
+
+def _get_variable(term):
+ # XXX rewritten const
+ # use iget_nodes for (hack) case where we have things like MAX(V)
+ for vref in term.iget_nodes(nodes.VariableRef):
+ return vref.variable
+
+def attr_desc_iterator(select, selectidx, rootidx):
+ """return an iterator on a list of 2-uple (index, attr_relation)
+ localizing attribute relations of the main variable in a result's row
+
+ :type rqlst: rql.stmts.Select
+ :param rqlst: the RQL syntax tree to describe
+
+ :return:
+ a generator on (index, relation, target) describing column being
+ attribute of the main variable
+ """
+ rootselect = select
+ while rootselect.parent.parent is not None:
+ rootselect = rootselect.parent.parent.parent
+ rootmain = rootselect.selection[selectidx]
+ rootmainvar = _get_variable(rootmain)
+ assert rootmainvar
+ root = rootselect.parent
+ selectmain = select.selection[selectidx]
+ for i, term in enumerate(rootselect.selection):
+ try:
+ # don't use _get_variable here: if the term isn't a variable
+ # (function...), we don't want it to be used as an entity attribute
+ # or relation's value (XXX beside MAX/MIN trick?)
+ rootvar = term.variable
+ except AttributeError:
+ continue
+ if rootvar.name == rootmainvar.name:
+ continue
+ if select is not rootselect and isinstance(rootvar, nodes.ColumnAlias):
+ term = select.selection[root.subquery_selection_index(select, i)]
+ var = _get_variable(term)
+ if var is None:
+ continue
+ for ref in var.references():
+ rel = ref.relation()
+ if rel is None or rel.is_types_restriction():
+ continue
+ lhs, rhs = rel.get_variable_parts()
+ if selectmain.is_equivalent(lhs):
+ if rhs.is_equivalent(term):
+ yield (i, rel.r_type, 'subject')
+ elif selectmain.is_equivalent(rhs):
+ if lhs.is_equivalent(term):
+ yield (i, rel.r_type, 'object')
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/rtags.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/rtags.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,270 @@
+# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""
+A RelationTag object is an object which allows to link a configuration
+information to a relation definition. For instance, the standard
+primary view uses a RelationTag object (uicfg.primaryview_section) to
+get the section to display relations.
+
+.. sourcecode:: python
+
+ # display ``entry_of`` relations in the ``relations`` section in the ``BlogEntry`` primary view
+ uicfg.primaryview_section.tag_subject_of(('BlogEntry', 'entry_of', '*'),
+ 'relations')
+
+ # hide every relation ``entry_of`` in the ``Blog`` primary view
+ uicfg.primaryview_section.tag_object_of(('*', 'entry_of', 'Blog'), 'hidden')
+
+Three primitives are defined:
+ * ``tag_subject_of`` tag a relation in the subject's context
+ * ``tag_object_of`` tag a relation in the object's context
+ * ``tag_attribute`` shortcut for tag_subject_of
+"""
+__docformat__ = "restructuredtext en"
+
+import logging
+from warnings import warn
+
+from six import string_types
+
+from logilab.common.logging_ext import set_log_methods
+from logilab.common.registry import RegistrableInstance, yes
+
+def _ensure_str_key(key):
+ return tuple(str(k) for k in key)
+
+class RegistrableRtags(RegistrableInstance):
+ __registry__ = 'uicfg'
+ __select__ = yes()
+
+
+class RelationTags(RegistrableRtags):
+ """a tag store for full relation definitions :
+
+ (subject type, relation type, object type, tagged)
+
+ allowing to set tags using wildcard (eg '*') as subject type / object type
+
+ This class associates a single tag to each key.
+ """
+ _allowed_values = None
+ # _init expected to be a method (introduced in 3.17), while _initfunc a
+ # function given as __init__ argument and kept for bw compat
+ _init = _initfunc = None
+
+ def __init__(self):
+ self._tagdefs = {}
+
+ def __repr__(self):
+ # find a way to have more infos but keep it readable
+ # (in error messages in case of an ambiguity for instance)
+ return '%s (%s): %s' % (id(self), self.__regid__, self.__class__)
+
+ # dict compat
+ def __getitem__(self, key):
+ return self.get(*key)
+ __contains__ = __getitem__
+
+ def clear(self):
+ self._tagdefs.clear()
+
+ def _get_keys(self, stype, rtype, otype, tagged):
+ keys = []
+ if '*' not in (stype, otype):
+ keys.append(('*', rtype, '*', tagged))
+ if '*' != stype:
+ keys.append(('*', rtype, otype, tagged))
+ if '*' != otype:
+ keys.append((stype, rtype, '*', tagged))
+ keys.append((stype, rtype, otype, tagged))
+ return keys
+
+ def init(self, schema, check=True):
+ # XXX check existing keys against schema
+ if check:
+ for (stype, rtype, otype, tagged), value in list(self._tagdefs.items()):
+ for ertype in (stype, rtype, otype):
+ if ertype != '*' and not ertype in schema:
+ self.warning('removing rtag %s: %s, %s undefined in schema',
+ (stype, rtype, otype, tagged), value, ertype)
+ self.del_rtag(stype, rtype, otype, tagged)
+ break
+ if self._init is not None:
+ self.apply(schema, self._init)
+
+ def apply(self, schema, func):
+ for eschema in schema.entities():
+ if eschema.final:
+ continue
+ for rschema, tschemas, role in eschema.relation_definitions(True):
+ for tschema in tschemas:
+ if role == 'subject':
+ sschema, oschema = eschema, tschema
+ else:
+ sschema, oschema = tschema, eschema
+ func(sschema, rschema, oschema, role)
+
+ # rtag declaration api ####################################################
+
+ def tag_attribute(self, key, *args, **kwargs):
+ key = list(key)
+ key.append('*')
+ key.append('subject')
+ self.tag_relation(key, *args, **kwargs)
+
+ def tag_subject_of(self, key, *args, **kwargs):
+ key = list(key)
+ key.append('subject')
+ self.tag_relation(key, *args, **kwargs)
+
+ def tag_object_of(self, key, *args, **kwargs):
+ key = list(key)
+ key.append('object')
+ self.tag_relation(key, *args, **kwargs)
+
+ def tag_relation(self, key, tag):
+ assert len(key) == 4, 'bad key: %s' % list(key)
+ if self._allowed_values is not None:
+ assert tag in self._allowed_values, \
+ '%r is not an allowed tag (should be in %s)' % (
+ tag, self._allowed_values)
+ self._tagdefs[_ensure_str_key(key)] = tag
+ return tag
+
+ def _tag_etype_attr(self, etype, attr, desttype='*', *args, **kwargs):
+ if isinstance(attr, string_types):
+ attr, role = attr, 'subject'
+ else:
+ attr, role = attr
+ if role == 'subject':
+ self.tag_subject_of((etype, attr, desttype), *args, **kwargs)
+ else:
+ self.tag_object_of((desttype, attr, etype), *args, **kwargs)
+
+
+ # rtag runtime api ########################################################
+
+ def del_rtag(self, *key):
+ del self._tagdefs[key]
+
+ def get(self, *key):
+ for key in reversed(self._get_keys(*key)):
+ try:
+ return self._tagdefs[key]
+ except KeyError:
+ continue
+ return None
+
+ def etype_get(self, etype, rtype, role, ttype='*'):
+ if role == 'subject':
+ return self.get(etype, rtype, ttype, role)
+ return self.get(ttype, rtype, etype, role)
+
+ # these are overridden by set_log_methods below
+ # only defining here to prevent pylint from complaining
+ info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
+
+
+class RelationTagsSet(RelationTags):
+ """This class associates a set of tags to each key.
+ """
+ tag_container_cls = set
+
+ def tag_relation(self, key, tag):
+ rtags = self._tagdefs.setdefault(_ensure_str_key(key),
+ self.tag_container_cls())
+ rtags.add(tag)
+ return rtags
+
+ def get(self, stype, rtype, otype, tagged):
+ rtags = self.tag_container_cls()
+ for key in self._get_keys(stype, rtype, otype, tagged):
+ try:
+ rtags.update(self._tagdefs[key])
+ except KeyError:
+ continue
+ return rtags
+
+
+class RelationTagsDict(RelationTagsSet):
+ """This class associates a set of tags to each key."""
+ tag_container_cls = dict
+
+ def tag_relation(self, key, tag):
+ key = _ensure_str_key(key)
+ try:
+ rtags = self._tagdefs[key]
+ rtags.update(tag)
+ return rtags
+ except KeyError:
+ self._tagdefs[key] = tag
+ return tag
+
+ def setdefault(self, key, tagkey, tagvalue):
+ key = _ensure_str_key(key)
+ try:
+ rtags = self._tagdefs[key]
+ rtags.setdefault(tagkey, tagvalue)
+ return rtags
+ except KeyError:
+ self._tagdefs[key] = {tagkey: tagvalue}
+ return self._tagdefs[key]
+
+
+class RelationTagsBool(RelationTags):
+ _allowed_values = frozenset((True, False))
+
+
+class NoTargetRelationTagsDict(RelationTagsDict):
+
+ @property
+ def name(self):
+ return self.__class__.name
+
+ # tag_subject_of / tag_object_of issue warning if '*' is not given as target
+ # type, while tag_relation handle it silently since it may be used during
+ # initialization
+ def tag_subject_of(self, key, tag):
+ subj, rtype, obj = key
+ if obj != '*':
+ self.warning('using explict target type in %s.tag_subject_of() '
+ 'has no effect, use (%s, %s, "*") instead of (%s, %s, %s)',
+ self.name, subj, rtype, subj, rtype, obj)
+ super(NoTargetRelationTagsDict, self).tag_subject_of((subj, rtype, '*'), tag)
+
+ def tag_object_of(self, key, tag):
+ subj, rtype, obj = key
+ if subj != '*':
+ self.warning('using explict subject type in %s.tag_object_of() '
+ 'has no effect, use ("*", %s, %s) instead of (%s, %s, %s)',
+ self.name, rtype, obj, subj, rtype, obj)
+ super(NoTargetRelationTagsDict, self).tag_object_of(('*', rtype, obj), tag)
+
+ def tag_relation(self, key, tag):
+ if key[-1] == 'subject' and key[-2] != '*':
+ if isinstance(key, tuple):
+ key = list(key)
+ key[-2] = '*'
+ elif key[-1] == 'object' and key[0] != '*':
+ if isinstance(key, tuple):
+ key = list(key)
+ key[0] = '*'
+ super(NoTargetRelationTagsDict, self).tag_relation(key, tag)
+
+
+set_log_methods(RelationTags, logging.getLogger('cubicweb.rtags'))
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/schema.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/schema.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1458 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""classes to define schemas for CubicWeb"""
+from __future__ import print_function
+
+__docformat__ = "restructuredtext en"
+
+import re
+from os.path import join, basename
+from logging import getLogger
+from warnings import warn
+
+from six import PY2, text_type, string_types, add_metaclass
+from six.moves import range
+
+from logilab.common import tempattr
+from logilab.common.decorators import cached, clear_cache, monkeypatch, cachedproperty
+from logilab.common.logging_ext import set_log_methods
+from logilab.common.deprecation import deprecated, class_moved, moved
+from logilab.common.textutils import splitstrip
+from logilab.common.graph import get_cycles
+
+import yams
+from yams import BadSchemaDefinition, buildobjs as ybo
+from yams.schema import Schema, ERSchema, EntitySchema, RelationSchema, \
+ RelationDefinitionSchema, PermissionMixIn, role_name
+from yams.constraints import (BaseConstraint, FormatConstraint, BoundaryConstraint,
+ IntervalBoundConstraint, StaticVocabularyConstraint)
+from yams.reader import (CONSTRAINTS, PyFileReader, SchemaLoader,
+ cleanup_sys_modules, fill_schema_from_namespace)
+
+from rql import parse, nodes, RQLSyntaxError, TypeResolverException
+from rql.analyze import ETypeResolver
+
+import cubicweb
+from cubicweb import ETYPE_NAME_MAP, ValidationError, Unauthorized, _
+
+try:
+ from cubicweb import server
+except ImportError:
+ # We need to lookup DEBUG from there,
+ # however a pure dbapi client may not have it.
+ class server(object): pass
+ server.DEBUG = False
+
+
+PURE_VIRTUAL_RTYPES = set(('identity', 'has_text',))
+VIRTUAL_RTYPES = set(('eid', 'identity', 'has_text',))
+
+# set of meta-relations available for every entity types
+META_RTYPES = set((
+ 'owned_by', 'created_by', 'is', 'is_instance_of', 'identity',
+ 'eid', 'creation_date', 'cw_source', 'modification_date', 'has_text', 'cwuri',
+ ))
+WORKFLOW_RTYPES = set(('custom_workflow', 'in_state', 'wf_info_for'))
+WORKFLOW_DEF_RTYPES = set(('workflow_of', 'state_of', 'transition_of',
+ 'initial_state', 'default_workflow',
+ 'allowed_transition', 'destination_state',
+ 'from_state', 'to_state', 'condition',
+ 'subworkflow', 'subworkflow_state', 'subworkflow_exit',
+ 'by_transition',
+ ))
+SYSTEM_RTYPES = set(('in_group', 'require_group',
+ # cwproperty
+ 'for_user',
+ 'cw_schema', 'cw_import_of', 'cw_for_source',
+ 'cw_host_config_of',
+ )) | WORKFLOW_RTYPES
+NO_I18NCONTEXT = META_RTYPES | WORKFLOW_RTYPES
+
+SKIP_COMPOSITE_RELS = [('cw_source', 'subject')]
+
+# set of entity and relation types used to build the schema
+SCHEMA_TYPES = set((
+ 'CWEType', 'CWRType', 'CWComputedRType', 'CWAttribute', 'CWRelation',
+ 'CWConstraint', 'CWConstraintType', 'CWUniqueTogetherConstraint',
+ 'RQLExpression',
+ 'specializes',
+ 'relation_type', 'from_entity', 'to_entity',
+ 'constrained_by', 'cstrtype',
+ 'constraint_of', 'relations',
+ 'read_permission', 'add_permission',
+ 'delete_permission', 'update_permission',
+ ))
+
+WORKFLOW_TYPES = set(('Transition', 'State', 'TrInfo', 'Workflow',
+ 'WorkflowTransition', 'BaseTransition',
+ 'SubWorkflowExitPoint'))
+
+INTERNAL_TYPES = set(('CWProperty', 'CWCache', 'ExternalUri', 'CWDataImport',
+ 'CWSource', 'CWSourceHostConfig', 'CWSourceSchemaConfig'))
+
+UNIQUE_CONSTRAINTS = ('SizeConstraint', 'FormatConstraint',
+ 'StaticVocabularyConstraint',
+ 'RQLVocabularyConstraint')
+
+_LOGGER = getLogger('cubicweb.schemaloader')
+
+# entity and relation schema created from serialized schema have an eid
+ybo.ETYPE_PROPERTIES += ('eid',)
+ybo.RTYPE_PROPERTIES += ('eid',)
+
+def build_schema_from_namespace(items):
+ schema = CubicWebSchema('noname')
+ fill_schema_from_namespace(schema, items, register_base_types=False)
+ return schema
+
+# Bases for manipulating RQL in schema #########################################
+
+def guess_rrqlexpr_mainvars(expression):
+ defined = set(split_expression(expression))
+ mainvars = set()
+ if 'S' in defined:
+ mainvars.add('S')
+ if 'O' in defined:
+ mainvars.add('O')
+ if 'U' in defined:
+ mainvars.add('U')
+ if not mainvars:
+ raise BadSchemaDefinition('unable to guess selection variables in %r'
+ % expression)
+ return mainvars
+
+def split_expression(rqlstring):
+ for expr in rqlstring.split(','):
+ for noparen1 in expr.split('('):
+ for noparen2 in noparen1.split(')'):
+ for word in noparen2.split():
+ yield word
+
+def normalize_expression(rqlstring):
+ """normalize an rql expression to ease schema synchronization (avoid
+ suppressing and reinserting an expression if only a space has been
+ added/removed for instance)
+ """
+ union = parse(u'Any 1 WHERE %s' % rqlstring).as_string()
+ if PY2 and isinstance(union, str):
+ union = union.decode('utf-8')
+ return union.split(' WHERE ', 1)[1]
+
+
+def _check_valid_formula(rdef, formula_rqlst):
+ """Check the formula is a valid RQL query with some restriction (no union,
+ single selected node, etc.), raise BadSchemaDefinition if not
+ """
+ if len(formula_rqlst.children) != 1:
+ raise BadSchemaDefinition('computed attribute %(attr)s on %(etype)s: '
+ 'can not use UNION in formula %(form)r' %
+ {'attr' : rdef.rtype,
+ 'etype' : rdef.subject.type,
+ 'form' : rdef.formula})
+ select = formula_rqlst.children[0]
+ if len(select.selection) != 1:
+ raise BadSchemaDefinition('computed attribute %(attr)s on %(etype)s: '
+ 'can only select one term in formula %(form)r' %
+ {'attr' : rdef.rtype,
+ 'etype' : rdef.subject.type,
+ 'form' : rdef.formula})
+ term = select.selection[0]
+ types = set(term.get_type(sol) for sol in select.solutions)
+ if len(types) != 1:
+ raise BadSchemaDefinition('computed attribute %(attr)s on %(etype)s: '
+ 'multiple possible types (%(types)s) for formula %(form)r' %
+ {'attr' : rdef.rtype,
+ 'etype' : rdef.subject.type,
+ 'types' : list(types),
+ 'form' : rdef.formula})
+ computed_type = types.pop()
+ expected_type = rdef.object.type
+ if computed_type != expected_type:
+ raise BadSchemaDefinition('computed attribute %(attr)s on %(etype)s: '
+ 'computed attribute type (%(comp_type)s) mismatch with '
+ 'specified type (%(attr_type)s)' %
+ {'attr' : rdef.rtype,
+ 'etype' : rdef.subject.type,
+ 'comp_type' : computed_type,
+ 'attr_type' : expected_type})
+
+
+class RQLExpression(object):
+ """Base class for RQL expression used in schema (constraints and
+ permissions)
+ """
+ # these are overridden by set_log_methods below
+ # only defining here to prevent pylint from complaining
+ info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
+ # to be defined in concrete classes
+ rqlst = None
+ predefined_variables = None
+ full_rql = None
+
+ def __init__(self, expression, mainvars, eid):
+ """
+ :type mainvars: sequence of RQL variables' names. Can be provided as a
+ comma separated string.
+ :param mainvars: names of the variables being selected.
+
+ """
+ self.eid = eid # eid of the entity representing this rql expression
+ assert mainvars, 'bad mainvars %s' % mainvars
+ if isinstance(mainvars, string_types):
+ mainvars = set(splitstrip(mainvars))
+ elif not isinstance(mainvars, set):
+ mainvars = set(mainvars)
+ self.mainvars = mainvars
+ self.expression = normalize_expression(expression)
+ try:
+ self.full_rql = self.rqlst.as_string()
+ except RQLSyntaxError:
+ raise RQLSyntaxError(expression)
+ for mainvar in mainvars:
+ # if variable is predefined, an extra reference is inserted
+ # automatically (`VAR eid %(v)s`)
+ if mainvar in self.predefined_variables:
+ min_refs = 3
+ else:
+ min_refs = 2
+ if len(self.rqlst.defined_vars[mainvar].references()) < min_refs:
+ _LOGGER.warn('You did not use the %s variable in your RQL '
+ 'expression %s', mainvar, self)
+ # syntax tree used by read security (inserted in queries when necessary)
+ self.snippet_rqlst = parse(self.minimal_rql, print_errors=False).children[0]
+ # graph of links between variables, used by rql rewriter
+ self.vargraph = vargraph(self.rqlst)
+ # useful for some instrumentation, e.g. localperms permcheck command
+ self.package = ybo.PACKAGE
+
+ def __str__(self):
+ return self.full_rql
+ def __repr__(self):
+ return '%s(%s)' % (self.__class__.__name__, self.full_rql)
+
+ def __lt__(self, other):
+ if hasattr(other, 'expression'):
+ return self.expression < other.expression
+ return True
+
+ def __eq__(self, other):
+ if hasattr(other, 'expression'):
+ return self.expression == other.expression
+ return False
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __hash__(self):
+ return hash(self.expression)
+
+ def __deepcopy__(self, memo):
+ return self.__class__(self.expression, self.mainvars)
+ def __getstate__(self):
+ return (self.expression, self.mainvars)
+ def __setstate__(self, state):
+ self.__init__(*state)
+
+ @cachedproperty
+ def rqlst(self):
+ select = parse(self.minimal_rql, print_errors=False).children[0]
+ defined = set(split_expression(self.expression))
+ for varname in self.predefined_variables:
+ if varname in defined:
+ select.add_eid_restriction(select.get_variable(varname), varname.lower(), 'Substitute')
+ return select
+
+ # permission rql expression specific stuff #################################
+
+ @cached
+ def transform_has_permission(self):
+ found = None
+ rqlst = self.rqlst
+ for var in rqlst.defined_vars.values():
+ for varref in var.references():
+ rel = varref.relation()
+ if rel is None:
+ continue
+ try:
+ prefix, action, suffix = rel.r_type.split('_')
+ except ValueError:
+ continue
+ if prefix != 'has' or suffix != 'permission' or \
+ not action in ('add', 'delete', 'update', 'read'):
+ continue
+ if found is None:
+ found = []
+ rqlst.save_state()
+ assert rel.children[0].name == 'U'
+ objvar = rel.children[1].children[0].variable
+ rqlst.remove_node(rel)
+ selected = [v.name for v in rqlst.get_selected_variables()]
+ if objvar.name not in selected:
+ colindex = len(selected)
+ rqlst.add_selected(objvar)
+ else:
+ colindex = selected.index(objvar.name)
+ found.append((action, colindex))
+ # remove U eid %(u)s if U is not used in any other relation
+ uvrefs = rqlst.defined_vars['U'].references()
+ if len(uvrefs) == 1:
+ rqlst.remove_node(uvrefs[0].relation())
+ if found is not None:
+ rql = rqlst.as_string()
+ if len(rqlst.selection) == 1 and isinstance(rqlst.where, nodes.Relation):
+ # only "Any X WHERE X eid %(x)s" remaining, no need to execute the rql
+ keyarg = rqlst.selection[0].name.lower()
+ else:
+ keyarg = None
+ rqlst.recover()
+ return rql, found, keyarg
+ return rqlst.as_string(), None, None
+
+ def _check(self, _cw, **kwargs):
+ """return True if the rql expression is matching the given relation
+ between fromeid and toeid
+
+ _cw may be a request or a server side transaction
+ """
+ creating = kwargs.get('creating')
+ if not creating and self.eid is not None:
+ key = (self.eid, tuple(sorted(kwargs.items())))
+ try:
+ return _cw.local_perm_cache[key]
+ except KeyError:
+ pass
+ rql, has_perm_defs, keyarg = self.transform_has_permission()
+ # when creating an entity, expression related to X satisfied
+ if creating and 'X' in self.rqlst.defined_vars:
+ return True
+ if keyarg is None:
+ kwargs.setdefault('u', _cw.user.eid)
+ try:
+ rset = _cw.execute(rql, kwargs, build_descr=True)
+ except NotImplementedError:
+ self.critical('cant check rql expression, unsupported rql %s', rql)
+ if self.eid is not None:
+ _cw.local_perm_cache[key] = False
+ return False
+ except TypeResolverException as ex:
+ # some expression may not be resolvable with current kwargs
+ # (type conflict)
+ self.warning('%s: %s', rql, str(ex))
+ if self.eid is not None:
+ _cw.local_perm_cache[key] = False
+ return False
+ except Unauthorized as ex:
+ self.debug('unauthorized %s: %s', rql, str(ex))
+ if self.eid is not None:
+ _cw.local_perm_cache[key] = False
+ return False
+ else:
+ rset = _cw.eid_rset(kwargs[keyarg])
+ # if no special has_*_permission relation in the rql expression, just
+ # check the result set contains something
+ if has_perm_defs is None:
+ if rset:
+ if self.eid is not None:
+ _cw.local_perm_cache[key] = True
+ return True
+ elif rset:
+ # check every special has_*_permission relation is satisfied
+ get_eschema = _cw.vreg.schema.eschema
+ try:
+ for eaction, col in has_perm_defs:
+ for i in range(len(rset)):
+ eschema = get_eschema(rset.description[i][col])
+ eschema.check_perm(_cw, eaction, eid=rset[i][col])
+ if self.eid is not None:
+ _cw.local_perm_cache[key] = True
+ return True
+ except Unauthorized:
+ pass
+ if self.eid is not None:
+ _cw.local_perm_cache[key] = False
+ return False
+
+ @property
+ def minimal_rql(self):
+ return 'Any %s WHERE %s' % (','.join(sorted(self.mainvars)),
+ self.expression)
+
+
+
+# rql expressions for use in permission definition #############################
+
+class ERQLExpression(RQLExpression):
+ predefined_variables = 'XU'
+
+ def __init__(self, expression, mainvars=None, eid=None):
+ RQLExpression.__init__(self, expression, mainvars or 'X', eid)
+
+ def check(self, _cw, eid=None, creating=False, **kwargs):
+ if 'X' in self.rqlst.defined_vars:
+ if eid is None:
+ if creating:
+ return self._check(_cw, creating=True, **kwargs)
+ return False
+ assert creating == False
+ return self._check(_cw, x=eid, **kwargs)
+ return self._check(_cw, **kwargs)
+
+
+class CubicWebRelationDefinitionSchema(RelationDefinitionSchema):
+ def constraint_by_eid(self, eid):
+ for cstr in self.constraints:
+ if cstr.eid == eid:
+ return cstr
+ raise ValueError('No constraint with eid %d' % eid)
+
+ def rql_expression(self, expression, mainvars=None, eid=None):
+ """rql expression factory"""
+ if self.rtype.final:
+ return ERQLExpression(expression, mainvars, eid)
+ return RRQLExpression(expression, mainvars, eid)
+
+ def check_permission_definitions(self):
+ super(CubicWebRelationDefinitionSchema, self).check_permission_definitions()
+ schema = self.subject.schema
+ for action, groups in self.permissions.items():
+ for group_or_rqlexpr in groups:
+ if action == 'read' and \
+ isinstance(group_or_rqlexpr, RQLExpression):
+ msg = "can't use rql expression for read permission of %s"
+ raise BadSchemaDefinition(msg % self)
+ if self.final and isinstance(group_or_rqlexpr, RRQLExpression):
+ msg = "can't use RRQLExpression on %s, use an ERQLExpression"
+ raise BadSchemaDefinition(msg % self)
+ if not self.final and isinstance(group_or_rqlexpr, ERQLExpression):
+ msg = "can't use ERQLExpression on %s, use a RRQLExpression"
+ raise BadSchemaDefinition(msg % self)
+
+def vargraph(rqlst):
+ """ builds an adjacency graph of variables from the rql syntax tree, e.g:
+ Any O,S WHERE T subworkflow_exit S, T subworkflow WF, O state_of WF
+ => {'WF': ['O', 'T'], 'S': ['T'], 'T': ['WF', 'S'], 'O': ['WF']}
+ """
+ vargraph = {}
+ for relation in rqlst.get_nodes(nodes.Relation):
+ try:
+ rhsvarname = relation.children[1].children[0].variable.name
+ lhsvarname = relation.children[0].name
+ except AttributeError:
+ pass
+ else:
+ vargraph.setdefault(lhsvarname, []).append(rhsvarname)
+ vargraph.setdefault(rhsvarname, []).append(lhsvarname)
+ #vargraph[(lhsvarname, rhsvarname)] = relation.r_type
+ return vargraph
+
+
+class GeneratedConstraint(object):
+ def __init__(self, rqlst, mainvars):
+ self.snippet_rqlst = rqlst
+ self.mainvars = mainvars
+ self.vargraph = vargraph(rqlst)
+
+
+class RRQLExpression(RQLExpression):
+ predefined_variables = 'SOU'
+
+ def __init__(self, expression, mainvars=None, eid=None):
+ if mainvars is None:
+ mainvars = guess_rrqlexpr_mainvars(expression)
+ RQLExpression.__init__(self, expression, mainvars, eid)
+
+ def check(self, _cw, fromeid=None, toeid=None):
+ kwargs = {}
+ if 'S' in self.rqlst.defined_vars:
+ if fromeid is None:
+ return False
+ kwargs['s'] = fromeid
+ if 'O' in self.rqlst.defined_vars:
+ if toeid is None:
+ return False
+ kwargs['o'] = toeid
+ return self._check(_cw, **kwargs)
+
+
+# In yams, default 'update' perm for attributes granted to managers and owners.
+# Within cw, we want to default to users who may edit the entity holding the
+# attribute.
+# These default permissions won't be checked by the security hooks:
+# since they delegate checking to the entity, we can skip actual checks.
+ybo.DEFAULT_ATTRPERMS['update'] = ('managers', ERQLExpression('U has_update_permission X'))
+ybo.DEFAULT_ATTRPERMS['add'] = ('managers', ERQLExpression('U has_add_permission X'))
+
+# we don't want 'add' or 'delete' permissions on computed relation types
+# (they're hardcoded to '()' on computed relation definitions)
+if 'add' in yams.DEFAULT_COMPUTED_RELPERMS:
+ del yams.DEFAULT_COMPUTED_RELPERMS['add']
+if 'delete' in yams.DEFAULT_COMPUTED_RELPERMS:
+ del yams.DEFAULT_COMPUTED_RELPERMS['delete']
+
+
+PUB_SYSTEM_ENTITY_PERMS = {
+ 'read': ('managers', 'users', 'guests',),
+ 'add': ('managers',),
+ 'delete': ('managers',),
+ 'update': ('managers',),
+ }
+PUB_SYSTEM_REL_PERMS = {
+ 'read': ('managers', 'users', 'guests',),
+ 'add': ('managers',),
+ 'delete': ('managers',),
+ }
+PUB_SYSTEM_ATTR_PERMS = {
+ 'read': ('managers', 'users', 'guests',),
+ 'add': ('managers',),
+ 'update': ('managers',),
+ }
+RO_REL_PERMS = {
+ 'read': ('managers', 'users', 'guests',),
+ 'add': (),
+ 'delete': (),
+ }
+RO_ATTR_PERMS = {
+ 'read': ('managers', 'users', 'guests',),
+ 'add': ybo.DEFAULT_ATTRPERMS['add'],
+ 'update': (),
+ }
+
+# XXX same algorithm as in reorder_cubes and probably other place,
+# may probably extract a generic function
+def order_eschemas(eschemas):
+ """return entity schemas ordered such that entity types which specializes an
+ other one appears after that one
+ """
+ graph = {}
+ for eschema in eschemas:
+ if eschema.specializes():
+ graph[eschema] = set((eschema.specializes(),))
+ else:
+ graph[eschema] = set()
+ cycles = get_cycles(graph)
+ if cycles:
+ cycles = '\n'.join(' -> '.join(cycle) for cycle in cycles)
+ raise Exception('cycles in entity schema specialization: %s'
+ % cycles)
+ eschemas = []
+ while graph:
+ # sorted to get predictable results
+ for eschema, deps in sorted(graph.items()):
+ if not deps:
+ eschemas.append(eschema)
+ del graph[eschema]
+ for deps in graph.values():
+ try:
+ deps.remove(eschema)
+ except KeyError:
+ continue
+ return eschemas
+
+def bw_normalize_etype(etype):
+ if etype in ETYPE_NAME_MAP:
+ msg = '%s has been renamed to %s, please update your code' % (
+ etype, ETYPE_NAME_MAP[etype])
+ warn(msg, DeprecationWarning, stacklevel=4)
+ etype = ETYPE_NAME_MAP[etype]
+ return etype
+
+def display_name(req, key, form='', context=None):
+ """return a internationalized string for the key (schema entity or relation
+ name) in a given form
+ """
+ assert form in ('', 'plural', 'subject', 'object')
+ if form == 'subject':
+ form = ''
+ if form:
+ key = key + '_' + form
+ # ensure unicode
+ if context is not None:
+ return text_type(req.pgettext(context, key))
+ else:
+ return text_type(req._(key))
+
+
+# Schema objects definition ###################################################
+
+def ERSchema_display_name(self, req, form='', context=None):
+ """return a internationalized string for the entity/relation type name in
+ a given form
+ """
+ return display_name(req, self.type, form, context)
+ERSchema.display_name = ERSchema_display_name
+
+@cached
+def get_groups(self, action):
+ """return the groups authorized to perform on entities of
+ this type
+
+ :type action: str
+ :param action: the name of a permission
+
+ :rtype: tuple
+ :return: names of the groups with the given permission
+ """
+ assert action in self.ACTIONS, action
+ #assert action in self._groups, '%s %s' % (self, action)
+ try:
+ return frozenset(g for g in self.permissions[action] if isinstance(g, string_types))
+ except KeyError:
+ return ()
+PermissionMixIn.get_groups = get_groups
+
+@cached
+def get_rqlexprs(self, action):
+ """return the rql expressions representing queries to check the user is allowed
+ to perform on entities of this type
+
+ :type action: str
+ :param action: the name of a permission
+
+ :rtype: tuple
+ :return: the rql expressions with the given permission
+ """
+ assert action in self.ACTIONS, action
+ #assert action in self._rqlexprs, '%s %s' % (self, action)
+ try:
+ return tuple(g for g in self.permissions[action] if not isinstance(g, string_types))
+ except KeyError:
+ return ()
+PermissionMixIn.get_rqlexprs = get_rqlexprs
+
+orig_set_action_permissions = PermissionMixIn.set_action_permissions
+def set_action_permissions(self, action, permissions):
+ """set the groups and rql expressions allowing to perform on
+ entities of this type
+
+ :type action: str
+ :param action: the name of a permission
+
+ :type permissions: tuple
+ :param permissions: the groups and rql expressions allowing the given action
+ """
+ orig_set_action_permissions(self, action, tuple(permissions))
+ clear_cache(self, 'get_rqlexprs')
+ clear_cache(self, 'get_groups')
+PermissionMixIn.set_action_permissions = set_action_permissions
+
+def has_local_role(self, action):
+ """return true if the action *may* be granted locally (i.e. either rql
+ expressions or the owners group are used in security definition)
+
+ XXX this method is only there since we don't know well how to deal with
+ 'add' action checking. Also find a better name would be nice.
+ """
+ assert action in self.ACTIONS, action
+ if self.get_rqlexprs(action):
+ return True
+ if action in ('update', 'delete'):
+ return 'owners' in self.get_groups(action)
+ return False
+PermissionMixIn.has_local_role = has_local_role
+
+def may_have_permission(self, action, req):
+ if action != 'read' and not (self.has_local_role('read') or
+ self.has_perm(req, 'read')):
+ return False
+ return self.has_local_role(action) or self.has_perm(req, action)
+PermissionMixIn.may_have_permission = may_have_permission
+
+def has_perm(self, _cw, action, **kwargs):
+ """return true if the action is granted globally or locally"""
+ try:
+ self.check_perm(_cw, action, **kwargs)
+ return True
+ except Unauthorized:
+ return False
+PermissionMixIn.has_perm = has_perm
+
+
+def check_perm(self, _cw, action, **kwargs):
+ # NB: _cw may be a server transaction or a request object.
+ #
+ # check user is in an allowed group, if so that's enough internal
+ # transactions should always stop there
+ DBG = False
+ if server.DEBUG & server.DBG_SEC:
+ if action in server._SECURITY_CAPS:
+ _self_str = str(self)
+ if server._SECURITY_ITEMS:
+ if any(item in _self_str for item in server._SECURITY_ITEMS):
+ DBG = True
+ else:
+ DBG = True
+ groups = self.get_groups(action)
+ if _cw.user.matching_groups(groups):
+ if DBG:
+ print('check_perm: %r %r: user matches %s' % (action, _self_str, groups))
+ return
+ # if 'owners' in allowed groups, check if the user actually owns this
+ # object, if so that's enough
+ #
+ # NB: give _cw to user.owns since user may not be bound to a transaction on
+ # the repository side
+ if 'owners' in groups and (
+ kwargs.get('creating')
+ or ('eid' in kwargs and _cw.user.owns(kwargs['eid']))):
+ if DBG:
+ print('check_perm: %r %r: user is owner or creation time' %
+ (action, _self_str))
+ return
+ # else if there is some rql expressions, check them
+ if DBG:
+ print('check_perm: %r %r %s' %
+ (action, _self_str, [(rqlexpr, kwargs, rqlexpr.check(_cw, **kwargs))
+ for rqlexpr in self.get_rqlexprs(action)]))
+ if any(rqlexpr.check(_cw, **kwargs)
+ for rqlexpr in self.get_rqlexprs(action)):
+ return
+ raise Unauthorized(action, str(self))
+PermissionMixIn.check_perm = check_perm
+
+
+CubicWebRelationDefinitionSchema._RPROPERTIES['eid'] = None
+# remember rproperties defined at this point. Others will have to be serialized in
+# CWAttribute.extra_props
+KNOWN_RPROPERTIES = CubicWebRelationDefinitionSchema.ALL_PROPERTIES()
+
+
+class CubicWebEntitySchema(EntitySchema):
+ """an entity has a type, a set of subject and/or object relations;
+ the entity schema defines the possible relations for a given type and some
+ constraints on those relations
+ """
+ def __init__(self, schema=None, edef=None, eid=None, **kwargs):
+ super(CubicWebEntitySchema, self).__init__(schema, edef, **kwargs)
+ if eid is None and edef is not None:
+ eid = getattr(edef, 'eid', None)
+ self.eid = eid
+
+ def targets(self, role):
+ assert role in ('subject', 'object')
+ if role == 'subject':
+ return self.subjrels.values()
+ return self.objrels.values()
+
+ @cachedproperty
+ def composite_rdef_roles(self):
+ """Return all relation definitions that define the current entity
+ type as a composite.
+ """
+ rdef_roles = []
+ for role in ('subject', 'object'):
+ for rschema in self.targets(role):
+ if rschema.final:
+ continue
+ for rdef in rschema.rdefs.values():
+ if (role == 'subject' and rdef.subject == self) or \
+ (role == 'object' and rdef.object == self):
+ crole = rdef.composite
+ if crole == role:
+ rdef_roles.append((rdef, role))
+ return rdef_roles
+
+ @cachedproperty
+ def is_composite(self):
+ return bool(len(self.composite_rdef_roles))
+
+ def check_permission_definitions(self):
+ super(CubicWebEntitySchema, self).check_permission_definitions()
+ for groups in self.permissions.values():
+ for group_or_rqlexpr in groups:
+ if isinstance(group_or_rqlexpr, RRQLExpression):
+ msg = "can't use RRQLExpression on %s, use an ERQLExpression"
+ raise BadSchemaDefinition(msg % self.type)
+
+ def is_subobject(self, strict=False, skiprels=None):
+ if skiprels is None:
+ skiprels = SKIP_COMPOSITE_RELS
+ else:
+ skiprels += SKIP_COMPOSITE_RELS
+ return super(CubicWebEntitySchema, self).is_subobject(strict,
+ skiprels=skiprels)
+
+ def attribute_definitions(self):
+ """return an iterator on attribute definitions
+
+ attribute relations are a subset of subject relations where the
+ object's type is a final entity
+
+ an attribute definition is a 2-uple :
+ * name of the relation
+ * schema of the destination entity type
+ """
+ iter = super(CubicWebEntitySchema, self).attribute_definitions()
+ for rschema, attrschema in iter:
+ if rschema.type == 'has_text':
+ continue
+ yield rschema, attrschema
+
+ def main_attribute(self):
+ """convenience method that returns the *main* (i.e. the first non meta)
+ attribute defined in the entity schema
+ """
+ for rschema, _ in self.attribute_definitions():
+ if not (rschema in META_RTYPES
+ or self.is_metadata(rschema)):
+ return rschema
+
+ def add_subject_relation(self, rschema):
+ """register the relation schema as possible subject relation"""
+ super(CubicWebEntitySchema, self).add_subject_relation(rschema)
+ if rschema.final:
+ if self.rdef(rschema).get('fulltextindexed'):
+ self._update_has_text()
+ elif rschema.fulltext_container:
+ self._update_has_text()
+
+ def add_object_relation(self, rschema):
+ """register the relation schema as possible object relation"""
+ super(CubicWebEntitySchema, self).add_object_relation(rschema)
+ if rschema.fulltext_container:
+ self._update_has_text()
+
+ def del_subject_relation(self, rtype):
+ super(CubicWebEntitySchema, self).del_subject_relation(rtype)
+ if 'has_text' in self.subjrels:
+ self._update_has_text(deletion=True)
+
+ def del_object_relation(self, rtype):
+ super(CubicWebEntitySchema, self).del_object_relation(rtype)
+ if 'has_text' in self.subjrels:
+ self._update_has_text(deletion=True)
+
+ def _update_has_text(self, deletion=False):
+ may_need_has_text, has_has_text = False, False
+ need_has_text = None
+ for rschema in self.subject_relations():
+ if rschema.final:
+ if rschema == 'has_text':
+ has_has_text = True
+ elif self.rdef(rschema).get('fulltextindexed'):
+ may_need_has_text = True
+ elif rschema.fulltext_container:
+ if rschema.fulltext_container == 'subject':
+ may_need_has_text = True
+ else:
+ need_has_text = False
+ for rschema in self.object_relations():
+ if rschema.fulltext_container:
+ if rschema.fulltext_container == 'object':
+ may_need_has_text = True
+ else:
+ need_has_text = False
+ if need_has_text is None:
+ need_has_text = may_need_has_text
+ if need_has_text and not has_has_text and not deletion:
+ rdef = ybo.RelationDefinition(self.type, 'has_text', 'String',
+ __permissions__=RO_ATTR_PERMS)
+ self.schema.add_relation_def(rdef)
+ elif not need_has_text and has_has_text:
+ # use rschema.del_relation_def and not schema.del_relation_def to
+ # avoid deleting the relation type accidentally...
+ self.schema['has_text'].del_relation_def(self, self.schema['String'])
+
+ def schema_entity(self): # XXX @property for consistency with meta
+ """return True if this entity type is used to build the schema"""
+ return self.type in SCHEMA_TYPES
+
+ def rql_expression(self, expression, mainvars=None, eid=None):
+ """rql expression factory"""
+ return ERQLExpression(expression, mainvars, eid)
+
+
+class CubicWebRelationSchema(PermissionMixIn, RelationSchema):
+ permissions = {}
+ ACTIONS = ()
+ rdef_class = CubicWebRelationDefinitionSchema
+
+ def __init__(self, schema=None, rdef=None, eid=None, **kwargs):
+ if rdef is not None:
+ # if this relation is inlined
+ self.inlined = rdef.inlined
+ super(CubicWebRelationSchema, self).__init__(schema, rdef, **kwargs)
+ if eid is None and rdef is not None:
+ eid = getattr(rdef, 'eid', None)
+ self.eid = eid
+
+ def init_computed_relation(self, rdef):
+ self.ACTIONS = ('read',)
+ super(CubicWebRelationSchema, self).init_computed_relation(rdef)
+
+ def advertise_new_add_permission(self):
+ pass
+
+ def check_permission_definitions(self):
+ RelationSchema.check_permission_definitions(self)
+ PermissionMixIn.check_permission_definitions(self)
+
+ @property
+ def meta(self):
+ return self.type in META_RTYPES
+
+ def schema_relation(self): # XXX @property for consistency with meta
+ """return True if this relation type is used to build the schema"""
+ return self.type in SCHEMA_TYPES
+
+ def may_have_permission(self, action, req, eschema=None, role=None):
+ if eschema is not None:
+ for tschema in self.targets(eschema, role):
+ rdef = self.role_rdef(eschema, tschema, role)
+ if rdef.may_have_permission(action, req):
+ return True
+ else:
+ for rdef in self.rdefs.values():
+ if rdef.may_have_permission(action, req):
+ return True
+ return False
+
+ def has_perm(self, _cw, action, **kwargs):
+ """return true if the action is granted globally or locally"""
+ if self.final:
+ assert not ('fromeid' in kwargs or 'toeid' in kwargs), kwargs
+ assert action in ('read', 'update')
+ if 'eid' in kwargs:
+ subjtype = _cw.entity_metas(kwargs['eid'])['type']
+ else:
+ subjtype = objtype = None
+ else:
+ assert not 'eid' in kwargs, kwargs
+ assert action in ('read', 'add', 'delete')
+ if 'fromeid' in kwargs:
+ subjtype = _cw.entity_metas(kwargs['fromeid'])['type']
+ elif 'frometype' in kwargs:
+ subjtype = kwargs.pop('frometype')
+ else:
+ subjtype = None
+ if 'toeid' in kwargs:
+ objtype = _cw.entity_metas(kwargs['toeid'])['type']
+ elif 'toetype' in kwargs:
+ objtype = kwargs.pop('toetype')
+ else:
+ objtype = None
+ if objtype and subjtype:
+ return self.rdef(subjtype, objtype).has_perm(_cw, action, **kwargs)
+ elif subjtype:
+ for tschema in self.targets(subjtype, 'subject'):
+ rdef = self.rdef(subjtype, tschema)
+ if not rdef.has_perm(_cw, action, **kwargs):
+ return False
+ elif objtype:
+ for tschema in self.targets(objtype, 'object'):
+ rdef = self.rdef(tschema, objtype)
+ if not rdef.has_perm(_cw, action, **kwargs):
+ return False
+ else:
+ for rdef in self.rdefs.values():
+ if not rdef.has_perm(_cw, action, **kwargs):
+ return False
+ return True
+
+ @deprecated('use .rdef(subjtype, objtype).role_cardinality(role)')
+ def cardinality(self, subjtype, objtype, target):
+ return self.rdef(subjtype, objtype).role_cardinality(target)
+
+
+class CubicWebSchema(Schema):
+ """set of entities and relations schema defining the possible data sets
+ used in an application
+
+ :type name: str
+ :ivar name: name of the schema, usually the instance identifier
+
+ :type base: str
+ :ivar base: path of the directory where the schema is defined
+ """
+ reading_from_database = False
+ entity_class = CubicWebEntitySchema
+ relation_class = CubicWebRelationSchema
+ no_specialization_inference = ('identity',)
+
+ def __init__(self, *args, **kwargs):
+ self._eid_index = {}
+ super(CubicWebSchema, self).__init__(*args, **kwargs)
+ ybo.register_base_types(self)
+ rschema = self.add_relation_type(ybo.RelationType('eid'))
+ rschema.final = True
+ rschema = self.add_relation_type(ybo.RelationType('has_text'))
+ rschema.final = True
+ rschema = self.add_relation_type(ybo.RelationType('identity'))
+ rschema.final = False
+
+ etype_name_re = r'[A-Z][A-Za-z0-9]*[a-z]+[A-Za-z0-9]*$'
+ def add_entity_type(self, edef):
+ edef.name = str(edef.name)
+ edef.name = bw_normalize_etype(edef.name)
+ if not re.match(self.etype_name_re, edef.name):
+ raise BadSchemaDefinition(
+ '%r is not a valid name for an entity type. It should start '
+ 'with an upper cased letter and be followed by at least a '
+ 'lower cased letter' % edef.name)
+ eschema = super(CubicWebSchema, self).add_entity_type(edef)
+ if not eschema.final:
+ # automatically add the eid relation to non final entity types
+ rdef = ybo.RelationDefinition(eschema.type, 'eid', 'Int',
+ cardinality='11', uid=True,
+ __permissions__=RO_ATTR_PERMS)
+ self.add_relation_def(rdef)
+ rdef = ybo.RelationDefinition(eschema.type, 'identity', eschema.type,
+ __permissions__=RO_REL_PERMS)
+ self.add_relation_def(rdef)
+ self._eid_index[eschema.eid] = eschema
+ return eschema
+
+ def add_relation_type(self, rdef):
+ if not rdef.name.islower():
+ raise BadSchemaDefinition(
+ '%r is not a valid name for a relation type. It should be '
+ 'lower cased' % rdef.name)
+ rdef.name = str(rdef.name)
+ rschema = super(CubicWebSchema, self).add_relation_type(rdef)
+ self._eid_index[rschema.eid] = rschema
+ return rschema
+
+ def add_relation_def(self, rdef):
+ """build a part of a relation schema
+ (i.e. add a relation between two specific entity's types)
+
+ :type subject: str
+ :param subject: entity's type that is subject of the relation
+
+ :type rtype: str
+ :param rtype: the relation's type (i.e. the name of the relation)
+
+ :type obj: str
+ :param obj: entity's type that is object of the relation
+
+ :rtype: RelationSchema
+ :param: the newly created or just completed relation schema
+ """
+ rdef.name = rdef.name.lower()
+ rdef.subject = bw_normalize_etype(rdef.subject)
+ rdef.object = bw_normalize_etype(rdef.object)
+ rdefs = super(CubicWebSchema, self).add_relation_def(rdef)
+ if rdefs:
+ try:
+ self._eid_index[rdef.eid] = rdefs
+ except AttributeError:
+ pass # not a serialized schema
+ return rdefs
+
+ def del_relation_type(self, rtype):
+ rschema = self.rschema(rtype)
+ self._eid_index.pop(rschema.eid, None)
+ super(CubicWebSchema, self).del_relation_type(rtype)
+
+ def del_relation_def(self, subjtype, rtype, objtype):
+ for k, v in self._eid_index.items():
+ if not isinstance(v, RelationDefinitionSchema):
+ continue
+ if v.subject == subjtype and v.rtype == rtype and v.object == objtype:
+ del self._eid_index[k]
+ break
+ super(CubicWebSchema, self).del_relation_def(subjtype, rtype, objtype)
+
+ def del_entity_type(self, etype):
+ eschema = self.eschema(etype)
+ self._eid_index.pop(eschema.eid, None)
+ # deal with has_text first, else its automatic deletion (see above)
+ # may trigger an error in ancestor's del_entity_type method
+ if 'has_text' in eschema.subject_relations():
+ self.del_relation_def(etype, 'has_text', 'String')
+ super(CubicWebSchema, self).del_entity_type(etype)
+
+ def schema_by_eid(self, eid):
+ return self._eid_index[eid]
+
+ def iter_computed_attributes(self):
+ for relation in self.relations():
+ for rdef in relation.rdefs.values():
+ if rdef.final and rdef.formula is not None:
+ yield rdef
+
+ def iter_computed_relations(self):
+ for relation in self.relations():
+ if relation.rule:
+ yield relation
+
+ def finalize(self):
+ super(CubicWebSchema, self).finalize()
+ self.finalize_computed_attributes()
+ self.finalize_computed_relations()
+
+ def finalize_computed_attributes(self):
+ """Check computed attributes validity (if any), else raise
+ `BadSchemaDefinition`
+ """
+ analyzer = ETypeResolver(self)
+ for rdef in self.iter_computed_attributes():
+ rqlst = parse(rdef.formula)
+ select = rqlst.children[0]
+ select.add_type_restriction(select.defined_vars['X'], str(rdef.subject))
+ analyzer.visit(select)
+ _check_valid_formula(rdef, rqlst)
+ rdef.formula_select = select # avoid later recomputation
+
+
+ def finalize_computed_relations(self):
+ """Build relation definitions for computed relations
+
+ The subject and object types are inferred using the rql analyzer.
+ """
+ analyzer = ETypeResolver(self)
+ for rschema in self.iter_computed_relations():
+ # XXX rule is valid if both S and O are defined and not in an exists
+ rqlexpr = RRQLExpression(rschema.rule)
+ rqlst = rqlexpr.snippet_rqlst
+ analyzer.visit(rqlst)
+ couples = set((sol['S'], sol['O']) for sol in rqlst.solutions)
+ for subjtype, objtype in couples:
+ if self[objtype].final:
+ raise BadSchemaDefinition('computed relations cannot be final')
+ rdef = ybo.RelationDefinition(
+ subjtype, rschema.type, objtype,
+ __permissions__={'add': (),
+ 'delete': (),
+ 'read': rschema.permissions['read']})
+ rdef.infered = True
+ self.add_relation_def(rdef)
+
+ def rebuild_infered_relations(self):
+ super(CubicWebSchema, self).rebuild_infered_relations()
+ self.finalize_computed_attributes()
+ self.finalize_computed_relations()
+
+
+# additional cw specific constraints ###########################################
+
+# these are implemented as CHECK constraints in sql, don't do the work
+# twice
+StaticVocabularyConstraint.check = lambda *args: True
+IntervalBoundConstraint.check = lambda *args: True
+BoundaryConstraint.check = lambda *args: True
+
+class BaseRQLConstraint(RRQLExpression, BaseConstraint):
+ """base class for rql constraints"""
+ distinct_query = None
+
+ def serialize(self):
+ # start with a semicolon for bw compat, see below
+ return ';' + ','.join(sorted(self.mainvars)) + ';' + self.expression
+
+ @classmethod
+ def deserialize(cls, value):
+ _, mainvars, expression = value.split(';', 2)
+ return cls(expression, mainvars)
+
+ def check(self, entity, rtype, value):
+ """return true if the value satisfy the constraint, else false"""
+ # implemented as a hook in the repository
+ return 1
+
+ def __str__(self):
+ if self.distinct_query:
+ selop = 'Any'
+ else:
+ selop = 'DISTINCT Any'
+ return '%s(%s %s WHERE %s)' % (self.__class__.__name__, selop,
+ ','.join(sorted(self.mainvars)),
+ self.expression)
+
+ def __repr__(self):
+ return '<%s @%#x>' % (self.__str__(), id(self))
+
+
+class RQLVocabularyConstraint(BaseRQLConstraint):
+ """the rql vocabulary constraint:
+
+ limits the proposed values to a set of entities returned by an rql query,
+ but this is not enforced at the repository level
+
+ `expression` is an additional rql restriction that will be added to
+ a predefined query, where the S and O variables respectively represent
+ the subject and the object of the relation
+
+ `mainvars` is a set of variables that should be used as selection variables
+ (i.e. `'Any %s WHERE ...' % mainvars`). If not specified, an attempt will be
+ made to guess it based on the variables used in the expression.
+ """
+
+ def repo_check(self, session, eidfrom, rtype, eidto):
+ """raise ValidationError if the relation doesn't satisfy the constraint
+ """
+ pass # this is a vocabulary constraint, not enforced
+
+
+class RepoEnforcedRQLConstraintMixIn(object):
+
+ def __init__(self, expression, mainvars=None, msg=None):
+ super(RepoEnforcedRQLConstraintMixIn, self).__init__(expression, mainvars)
+ self.msg = msg
+
+ def serialize(self):
+ # start with a semicolon for bw compat, see below
+ return ';%s;%s\n%s' % (','.join(sorted(self.mainvars)), self.expression,
+ self.msg or '')
+
+ @classmethod
+ def deserialize(cls, value):
+ value, msg = value.split('\n', 1)
+ _, mainvars, expression = value.split(';', 2)
+ return cls(expression, mainvars, msg)
+
+ def repo_check(self, session, eidfrom, rtype, eidto=None):
+ """raise ValidationError if the relation doesn't satisfy the constraint
+ """
+ if not self.match_condition(session, eidfrom, eidto):
+ # XXX at this point if both or neither of S and O are in mainvar we
+ # dunno if the validation error `occurred` on eidfrom or eidto (from
+ # user interface point of view)
+ #
+ # possible enhancement: check entity being created, it's probably
+ # the main eid unless this is a composite relation
+ if eidto is None or 'S' in self.mainvars or not 'O' in self.mainvars:
+ maineid = eidfrom
+ qname = role_name(rtype, 'subject')
+ else:
+ maineid = eidto
+ qname = role_name(rtype, 'object')
+ if self.msg:
+ msg = session._(self.msg)
+ else:
+ msg = '%(constraint)s %(expression)s failed' % {
+ 'constraint': session._(self.type()),
+ 'expression': self.expression}
+ raise ValidationError(maineid, {qname: msg})
+
+ def exec_query(self, _cw, eidfrom, eidto):
+ if eidto is None:
+ # checking constraint for an attribute relation
+ expression = 'S eid %(s)s, ' + self.expression
+ args = {'s': eidfrom}
+ else:
+ expression = 'S eid %(s)s, O eid %(o)s, ' + self.expression
+ args = {'s': eidfrom, 'o': eidto}
+ if 'U' in self.rqlst.defined_vars:
+ expression = 'U eid %(u)s, ' + expression
+ args['u'] = _cw.user.eid
+ rql = 'Any %s WHERE %s' % (','.join(sorted(self.mainvars)), expression)
+ if self.distinct_query:
+ rql = 'DISTINCT ' + rql
+ return _cw.execute(rql, args, build_descr=False)
+
+
+class RQLConstraint(RepoEnforcedRQLConstraintMixIn, BaseRQLConstraint):
+ """the rql constraint is similar to the RQLVocabularyConstraint but
+ is also enforced at the repository level
+ """
+ distinct_query = False
+
+ def match_condition(self, session, eidfrom, eidto):
+ return self.exec_query(session, eidfrom, eidto)
+
+
+class RQLUniqueConstraint(RepoEnforcedRQLConstraintMixIn, BaseRQLConstraint):
+ """the unique rql constraint check that the result of the query isn't
+ greater than one.
+
+ You *must* specify `mainvars` when instantiating the constraint since there
+ is no way to guess it correctly (e.g. if using S,O or U the constraint will
+ always be satisfied because we have to use a DISTINCT query).
+ """
+ # XXX turns mainvars into a required argument in __init__
+ distinct_query = True
+
+ def match_condition(self, session, eidfrom, eidto):
+ return len(self.exec_query(session, eidfrom, eidto)) <= 1
+
+
+# workflow extensions #########################################################
+
+from yams.buildobjs import _add_relation as yams_add_relation
+
+class workflowable_definition(ybo.metadefinition):
+ """extends default EntityType's metaclass to add workflow relations
+ (i.e. in_state, wf_info_for and custom_workflow). This is the default
+ metaclass for WorkflowableEntityType.
+ """
+ def __new__(mcs, name, bases, classdict):
+ abstract = classdict.pop('__abstract__', False)
+ cls = super(workflowable_definition, mcs).__new__(mcs, name, bases,
+ classdict)
+ if not abstract:
+ make_workflowable(cls)
+ return cls
+
+
+@add_metaclass(workflowable_definition)
+class WorkflowableEntityType(ybo.EntityType):
+ """Use this base class instead of :class:`EntityType` to have workflow
+ relations (i.e. `in_state`, `wf_info_for` and `custom_workflow`) on your
+ entity type.
+ """
+ __abstract__ = True
+
+
+def make_workflowable(cls, in_state_descr=None):
+ """Adds workflow relations as :class:`WorkflowableEntityType`, but usable on
+ existing classes which are not using that base class.
+ """
+ existing_rels = set(rdef.name for rdef in cls.__relations__)
+ # let relation types defined in cw.schemas.workflow carry
+ # cardinality, constraints and other relation definition properties
+ etype = getattr(cls, 'name', cls.__name__)
+ if 'custom_workflow' not in existing_rels:
+ rdef = ybo.RelationDefinition(etype, 'custom_workflow', 'Workflow')
+ yams_add_relation(cls.__relations__, rdef)
+ if 'in_state' not in existing_rels:
+ rdef = ybo.RelationDefinition(etype, 'in_state', 'State',
+ description=in_state_descr)
+ yams_add_relation(cls.__relations__, rdef)
+ if 'wf_info_for' not in existing_rels:
+ rdef = ybo.RelationDefinition('TrInfo', 'wf_info_for', etype)
+ yams_add_relation(cls.__relations__, rdef)
+
+
+# schema loading ##############################################################
+
+CONSTRAINTS['RQLConstraint'] = RQLConstraint
+CONSTRAINTS['RQLUniqueConstraint'] = RQLUniqueConstraint
+CONSTRAINTS['RQLVocabularyConstraint'] = RQLVocabularyConstraint
+CONSTRAINTS.pop('MultipleStaticVocabularyConstraint', None) # don't want this in cw yams schema
+PyFileReader.context.update(CONSTRAINTS)
+
+
+class BootstrapSchemaLoader(SchemaLoader):
+ """cubicweb specific schema loader, loading only schema necessary to read
+ the persistent schema
+ """
+ schemacls = CubicWebSchema
+
+ def load(self, config, path=(), **kwargs):
+ """return a Schema instance from the schema definition read
+ from
+ """
+ return super(BootstrapSchemaLoader, self).load(
+ path, config.appid, register_base_types=False, **kwargs)
+
+ def _load_definition_files(self, cubes=None):
+ # bootstrapping, ignore cubes
+ filepath = join(cubicweb.CW_SOFTWARE_ROOT, 'schemas', 'bootstrap.py')
+ self.info('loading %s', filepath)
+ with tempattr(ybo, 'PACKAGE', 'cubicweb'): # though we don't care here
+ self.handle_file(filepath)
+
+ def unhandled_file(self, filepath):
+ """called when a file without handler associated has been found"""
+ self.warning('ignoring file %r', filepath)
+
+ # these are overridden by set_log_methods below
+ # only defining here to prevent pylint from complaining
+ info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
+
+class CubicWebSchemaLoader(BootstrapSchemaLoader):
+ """cubicweb specific schema loader, automatically adding metadata to the
+ instance's schema
+ """
+
+ def load(self, config, **kwargs):
+ """return a Schema instance from the schema definition read
+ from
+ """
+ self.info('loading %s schemas', ', '.join(config.cubes()))
+ self.extrapath = {}
+ for cubesdir in config.cubes_search_path():
+ if cubesdir != config.CUBES_DIR:
+ self.extrapath[cubesdir] = 'cubes'
+ if config.apphome:
+ path = tuple(reversed([config.apphome] + config.cubes_path()))
+ else:
+ path = tuple(reversed(config.cubes_path()))
+ try:
+ return super(CubicWebSchemaLoader, self).load(config, path=path, **kwargs)
+ finally:
+ # we've to cleanup modules imported from cubicweb.schemas as well
+ cleanup_sys_modules([join(cubicweb.CW_SOFTWARE_ROOT, 'schemas')])
+
+ def _load_definition_files(self, cubes):
+ for filepath in (join(cubicweb.CW_SOFTWARE_ROOT, 'schemas', 'bootstrap.py'),
+ join(cubicweb.CW_SOFTWARE_ROOT, 'schemas', 'base.py'),
+ join(cubicweb.CW_SOFTWARE_ROOT, 'schemas', 'workflow.py'),
+ join(cubicweb.CW_SOFTWARE_ROOT, 'schemas', 'Bookmark.py')):
+ self.info('loading %s', filepath)
+ with tempattr(ybo, 'PACKAGE', 'cubicweb'):
+ self.handle_file(filepath)
+ for cube in cubes:
+ for filepath in self.get_schema_files(cube):
+ with tempattr(ybo, 'PACKAGE', basename(cube)):
+ self.handle_file(filepath)
+
+ # these are overridden by set_log_methods below
+ # only defining here to prevent pylint from complaining
+ info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
+
+
+set_log_methods(CubicWebSchemaLoader, getLogger('cubicweb.schemaloader'))
+set_log_methods(BootstrapSchemaLoader, getLogger('cubicweb.bootstrapschemaloader'))
+set_log_methods(RQLExpression, getLogger('cubicweb.schema'))
+
+# _() is just there to add messages to the catalog, don't care about actual
+# translation
+MAY_USE_TEMPLATE_FORMAT = set(('managers',))
+NEED_PERM_FORMATS = [_('text/cubicweb-page-template')]
+
+@monkeypatch(FormatConstraint)
+def vocabulary(self, entity=None, form=None):
+ cw = None
+ if form is None and entity is not None:
+ cw = entity._cw
+ elif form is not None:
+ cw = form._cw
+ if cw is not None:
+ if hasattr(cw, 'write_security'): # test it's a session and not a request
+ # cw is a server session
+ hasperm = not cw.write_security or \
+ not cw.is_hook_category_activated('integrity') or \
+ cw.user.matching_groups(MAY_USE_TEMPLATE_FORMAT)
+ else:
+ hasperm = cw.user.matching_groups(MAY_USE_TEMPLATE_FORMAT)
+ if hasperm:
+ return self.regular_formats + tuple(NEED_PERM_FORMATS)
+ return self.regular_formats
+
+# XXX itou for some Statement methods
+from rql import stmts
+orig_get_etype = stmts.ScopeNode.get_etype
+def bw_get_etype(self, name):
+ return orig_get_etype(self, bw_normalize_etype(name))
+stmts.ScopeNode.get_etype = bw_get_etype
+
+orig_add_main_variable_delete = stmts.Delete.add_main_variable
+def bw_add_main_variable_delete(self, etype, vref):
+ return orig_add_main_variable_delete(self, bw_normalize_etype(etype), vref)
+stmts.Delete.add_main_variable = bw_add_main_variable_delete
+
+orig_add_main_variable_insert = stmts.Insert.add_main_variable
+def bw_add_main_variable_insert(self, etype, vref):
+ return orig_add_main_variable_insert(self, bw_normalize_etype(etype), vref)
+stmts.Insert.add_main_variable = bw_add_main_variable_insert
+
+orig_set_statement_type = stmts.Select.set_statement_type
+def bw_set_statement_type(self, etype):
+ return orig_set_statement_type(self, bw_normalize_etype(etype))
+stmts.Select.set_statement_type = bw_set_statement_type
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/schemas/Bookmark.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/schemas/Bookmark.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,49 @@
+# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""the Bookmark entity type for internal links
+
+"""
+__docformat__ = "restructuredtext en"
+from cubicweb import _
+
+from yams.buildobjs import EntityType, RelationType, SubjectRelation, String
+from cubicweb.schema import RRQLExpression
+
+class Bookmark(EntityType):
+ """bookmarks are used to have user's specific internal links"""
+ __permissions__ = {
+ 'read': ('managers', 'users', 'guests',),
+ 'add': ('managers', 'users',),
+ 'delete': ('managers', 'owners',),
+ 'update': ('managers', 'owners',),
+ }
+
+ title = String(required=True, maxsize=128, internationalizable=True)
+ path = String(maxsize=2048, required=True,
+ description=_("relative url of the bookmarked page"))
+
+ bookmarked_by = SubjectRelation('CWUser',
+ description=_("users using this bookmark"))
+
+
+class bookmarked_by(RelationType):
+ __permissions__ = {'read': ('managers', 'users', 'guests',),
+ # test user in users group to avoid granting permission to anonymous user
+ 'add': ('managers', RRQLExpression('O identity U, U in_group G, G name "users"')),
+ 'delete': ('managers', RRQLExpression('O identity U, U in_group G, G name "users"')),
+ }
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/schemas/__init__.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/schemas/__init__.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,51 @@
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""some constants and classes to define schema permissions"""
+
+__docformat__ = "restructuredtext en"
+
+from cubicweb.schema import RO_REL_PERMS, RO_ATTR_PERMS, \
+ PUB_SYSTEM_ENTITY_PERMS, PUB_SYSTEM_REL_PERMS, \
+ ERQLExpression, RRQLExpression
+
+# permissions for "meta" entity type (readable by anyone, can only be
+# added/deleted by managers)
+META_ETYPE_PERMS = PUB_SYSTEM_ENTITY_PERMS # XXX deprecates
+# permissions for "meta" relation type (readable by anyone, can only be
+# added/deleted by managers)
+META_RTYPE_PERMS = PUB_SYSTEM_REL_PERMS # XXX deprecates
+# permissions for relation type that should only be set by hooks using unsafe
+# execute, readable by anyone
+HOOKS_RTYPE_PERMS = RO_REL_PERMS # XXX deprecates
+
+
+from logilab.common.modutils import LazyObject
+from logilab.common.deprecation import deprecated
+class MyLazyObject(LazyObject):
+
+ def _getobj(self):
+ try:
+ return super(MyLazyObject, self)._getobj()
+ except ImportError:
+ raise ImportError('In cubicweb 3.14, function %s has been moved to '
+ 'cube localperms. Install it first.' % self.obj)
+
+for name in ('xperm', 'xexpr', 'xrexpr', 'xorexpr', 'sexpr', 'restricted_sexpr',
+ 'restricted_oexpr', 'oexpr', 'relxperm', 'relxexpr', '_perm'):
+ msg = '[3.14] import %s from cubes.localperms' % name
+ globals()[name] = deprecated(msg, name=name, doc='deprecated')(MyLazyObject('cubes.localperms', name))
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/schemas/_regproc.mysql.sql
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/schemas/_regproc.mysql.sql Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,22 @@
+/* -*- sql -*-
+
+ mysql specific registered procedures,
+
+*/
+
+/* XXX limit_size version dealing with format as postgres version does.
+ XXX mysql doesn't support overloading, each function should have a different name
+
+   NOTE: fulltext renamed since it causes a mysql name conflict
+ */
+
+CREATE FUNCTION text_limit_size(vfulltext TEXT, maxsize INT)
+RETURNS TEXT
+NO SQL
+BEGIN
+ IF LENGTH(vfulltext) < maxsize THEN
+ RETURN vfulltext;
+ ELSE
+ RETURN SUBSTRING(vfulltext from 1 for maxsize) || '...';
+ END IF;
+END ;;
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/schemas/_regproc.postgres.sql
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/schemas/_regproc.postgres.sql Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,55 @@
+/* -*- sql -*-
+
+ postgres specific registered procedures,
+ require the plpgsql language installed
+
+*/
+
+DROP FUNCTION IF EXISTS comma_join (anyarray) CASCADE;
+CREATE FUNCTION comma_join (anyarray) RETURNS text AS $$
+ SELECT array_to_string($1, ', ')
+$$ LANGUAGE SQL;;
+
+
+DROP FUNCTION IF EXISTS cw_array_append_unique (anyarray, anyelement) CASCADE;
+CREATE FUNCTION cw_array_append_unique (anyarray, anyelement) RETURNS anyarray AS $$
+ SELECT array_append($1, (SELECT $2 WHERE $2 <> ALL($1)))
+$$ LANGUAGE SQL;;
+
+DROP AGGREGATE IF EXISTS group_concat (anyelement) CASCADE;
+CREATE AGGREGATE group_concat (
+ basetype = anyelement,
+ sfunc = cw_array_append_unique,
+ stype = anyarray,
+ finalfunc = comma_join,
+ initcond = '{}'
+);;
+
+
+DROP FUNCTION IF EXISTS limit_size (fulltext text, format text, maxsize integer);
+CREATE FUNCTION limit_size (fulltext text, format text, maxsize integer) RETURNS text AS $$
+DECLARE
+ plaintext text;
+BEGIN
+ IF char_length(fulltext) < maxsize THEN
+ RETURN fulltext;
+ END IF;
+ IF format = 'text/html' OR format = 'text/xhtml' OR format = 'text/xml' THEN
+ plaintext := regexp_replace(fulltext, '<[a-zA-Z/][^>]*>', '', 'g');
+ ELSE
+ plaintext := fulltext;
+ END IF;
+ IF char_length(plaintext) < maxsize THEN
+ RETURN plaintext;
+ ELSE
+ RETURN substring(plaintext from 1 for maxsize) || '...';
+ END IF;
+END
+$$ LANGUAGE plpgsql;;
+
+DROP FUNCTION IF EXISTS text_limit_size (fulltext text, maxsize integer);
+CREATE FUNCTION text_limit_size (fulltext text, maxsize integer) RETURNS text AS $$
+BEGIN
+ RETURN limit_size(fulltext, 'text/plain', maxsize);
+END
+$$ LANGUAGE plpgsql;;
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/schemas/base.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/schemas/base.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,383 @@
+# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""core CubicWeb schema, but not necessary at bootstrap time"""
+
+__docformat__ = "restructuredtext en"
+from cubicweb import _
+
+from yams.buildobjs import (EntityType, RelationType, RelationDefinition,
+ SubjectRelation,
+ String, TZDatetime, Datetime, Password, Interval,
+ Boolean, UniqueConstraint)
+from cubicweb.schema import (
+ RQLConstraint, WorkflowableEntityType, ERQLExpression, RRQLExpression,
+ PUB_SYSTEM_ENTITY_PERMS, PUB_SYSTEM_REL_PERMS, PUB_SYSTEM_ATTR_PERMS,
+ RO_ATTR_PERMS)
+
+class CWUser(WorkflowableEntityType):
+ """define a CubicWeb user"""
+ __permissions__ = {
+ 'read': ('managers', 'users', ERQLExpression('X identity U')),
+ 'add': ('managers',),
+ 'delete': ('managers',),
+ 'update': ('managers', ERQLExpression('X identity U, NOT U in_group G, G name "guests"'),),
+ }
+
+ login = String(required=True, unique=True, maxsize=64,
+ description=_('unique identifier used to connect to the application'))
+ upassword = Password(required=True) # password is a reserved word for mysql
+ firstname = String(maxsize=64)
+ surname = String(maxsize=64)
+ last_login_time = TZDatetime(description=_('last connection date'))
+ in_group = SubjectRelation('CWGroup', cardinality='+*',
+ constraints=[RQLConstraint('NOT O name "owners"')],
+ description=_('groups grant permissions to the user'))
+
+
+class EmailAddress(EntityType):
+ """an electronic mail address associated to a short alias"""
+ __permissions__ = {
+ # application that wishes public email, or use it for something else
+ # than users (eg Company, Person), should explicitly change permissions
+ 'read': ('managers', ERQLExpression('U use_email X')),
+ 'add': ('managers', 'users',),
+ 'delete': ('managers', 'owners', ERQLExpression('P use_email X, U has_update_permission P')),
+ 'update': ('managers', 'owners', ERQLExpression('P use_email X, U has_update_permission P')),
+ }
+
+ alias = String(fulltextindexed=True, maxsize=56)
+ address = String(required=True, fulltextindexed=True,
+ indexed=True, unique=True, maxsize=128)
+ prefered_form = SubjectRelation('EmailAddress', cardinality='?*',
+ description=_('when multiple addresses are equivalent \
+(such as python-projects@logilab.org and python-projects@lists.logilab.org), set this \
+to indicate which is the preferred form.'))
+
+class use_email(RelationType):
+ fulltext_container = 'subject'
+
+
+class use_email_relation(RelationDefinition):
+ """user's email account"""
+ name = "use_email"
+ __permissions__ = {
+ 'read': ('managers', 'users', 'guests',),
+ 'add': ('managers', RRQLExpression('U has_update_permission S'),),
+ 'delete': ('managers', RRQLExpression('U has_update_permission S'),),
+ }
+ subject = "CWUser"
+ object = "EmailAddress"
+ cardinality = '*?'
+ composite = 'subject'
+
+
+class primary_email(RelationDefinition):
+ """the prefered email"""
+ __permissions__ = {
+ 'read': ('managers', 'users', 'guests',),
+ 'add': ('managers', RRQLExpression('U has_update_permission S'),),
+ 'delete': ('managers', RRQLExpression('U has_update_permission S'),),
+ }
+ subject = "CWUser"
+ object = "EmailAddress"
+ cardinality = '??'
+ constraints= [RQLConstraint('S use_email O')]
+
+
+class prefered_form(RelationType):
+ __permissions__ = {
+ 'read': ('managers', 'users', 'guests',),
+ # XXX should have update __permissions__ on both subject and object,
+ # though by doing this we will probably have no way to add
+        # this relation in the web ui. The easiest way to achieve this
+ # is probably to be able to have "U has_update_permission O" as
+ # RQLConstraint of the relation definition, though this is not yet
+ # possible
+ 'add': ('managers', RRQLExpression('U has_update_permission S'),),
+ 'delete': ('managers', RRQLExpression('U has_update_permission S'),),
+ }
+
+class in_group(RelationType):
+ """core relation indicating a user's groups"""
+ __permissions__ = PUB_SYSTEM_REL_PERMS
+
+class owned_by(RelationType):
+ """core relation indicating owners of an entity. This relation
+ implicitly put the owner into the owners group for the entity
+ """
+ __permissions__ = {
+ 'read': ('managers', 'users', 'guests'),
+ 'add': ('managers', RRQLExpression('S owned_by U'),),
+ 'delete': ('managers', RRQLExpression('S owned_by U'),),
+ }
+ # 0..n cardinality for entities created by internal session (no attached user)
+ # and to support later deletion of a user which has created some entities
+ cardinality = '**'
+ subject = '*'
+ object = 'CWUser'
+
+class created_by(RelationType):
+ """core relation indicating the original creator of an entity"""
+ __permissions__ = {
+ 'read': ('managers', 'users', 'guests'),
+ 'add': ('managers',),
+ 'delete': ('managers',),
+ }
+ # 0..1 cardinality for entities created by internal session (no attached user)
+ # and to support later deletion of a user which has created some entities
+ cardinality = '?*'
+ subject = '*'
+ object = 'CWUser'
+
+
+class creation_date(RelationType):
+ """creation time of an entity"""
+ __permissions__ = PUB_SYSTEM_ATTR_PERMS
+ cardinality = '11'
+ subject = '*'
+ object = 'TZDatetime'
+
+
+class modification_date(RelationType):
+ """latest modification time of an entity"""
+ __permissions__ = PUB_SYSTEM_ATTR_PERMS
+ cardinality = '11'
+ subject = '*'
+ object = 'TZDatetime'
+
+
+class cwuri(RelationType):
+ """internal entity uri"""
+ __permissions__ = RO_ATTR_PERMS
+ cardinality = '11'
+ subject = '*'
+ object = 'String'
+
+
+# XXX find a better relation name
+class for_user(RelationType):
+ """link a property to the user which want this property customization. Unless
+ you're a site manager, this relation will be handled automatically.
+ """
+ __permissions__ = {
+ 'read': ('managers', 'users', 'guests'),
+ 'add': ('managers',),
+ 'delete': ('managers',),
+ }
+ inlined = True
+ subject = 'CWProperty'
+ object = 'CWUser'
+ composite = 'object'
+ cardinality = '?*'
+
+
+class ExternalUri(EntityType):
+ """a URI representing an object in external data store"""
+ uri = String(required=True, unique=True, maxsize=256,
+ description=_('the URI of the object'))
+
+
+class same_as(RelationType):
+ """generic relation to specify that an external entity represent the same
+ object as a local one:
+ http://www.w3.org/TR/owl-ref/#sameAs-def
+ """
+ #NOTE: You'll have to explicitly declare which entity types can have a
+ #same_as relation
+ __permissions__ = {
+ 'read': ('managers', 'users', 'guests',),
+ 'add': ('managers', 'users'),
+ 'delete': ('managers', 'owners'),
+ }
+ cardinality = '**'
+ symmetric = True
+ # NOTE: the 'object = ExternalUri' declaration will still be mandatory
+ # in the cube's schema.
+ object = 'ExternalUri'
+
+
+class CWCache(EntityType):
+ """a simple cache entity characterized by a name and
+ a validity date.
+
+ The target application is responsible for updating timestamp
+ when necessary to invalidate the cache (typically in hooks).
+
+ Also, checkout the AppObject.get_cache() method.
+ """
+    # XXX only handled by hooks, shouldn't be readable/editable at all through
+ # the ui and so no permissions should be granted, no?
+ __permissions__ = {
+ 'read': ('managers', 'users', 'guests'),
+ 'add': ('managers',),
+ 'update': ('managers', 'users',), # XXX
+ 'delete': ('managers',),
+ }
+
+ name = String(required=True, unique=True, maxsize=128,
+ description=_('name of the cache'))
+ timestamp = TZDatetime(default='NOW')
+
+
+class CWSource(EntityType):
+ __permissions__ = {
+ 'read': ('managers', 'users', 'guests'),
+ 'add': ('managers',),
+ 'update': ('managers',),
+ 'delete': ('managers',),
+ }
+ name = String(required=True, unique=True, maxsize=128,
+ description=_('name of the source'))
+ type = String(required=True, maxsize=20, description=_('type of the source'))
+ config = String(description=_('source\'s configuration. One key=value per '
+ 'line, authorized keys depending on the '
+ 'source\'s type'),
+ __permissions__={
+ 'read': ('managers',),
+ 'add': ('managers',),
+ 'update': ('managers',),
+ })
+ # put this here and not in a subclass even if it's only for some sources
+ # since having subclasses on generic relation (cw_source) double the number
+ # of rdef in the schema, and make ms planning harder since queries solutions
+ # may changes when sources are specified
+ url = String(description=_('URLs from which content will be imported. You can put one url per line'))
+ parser = String(description=_('parser to use to extract entities from content retrieved at given URLs.'))
+ latest_retrieval = TZDatetime(description=_('latest synchronization time'))
+ in_synchronization = TZDatetime(description=_('start timestamp of the currently in synchronization, or NULL when no synchronization in progress.'))
+
+
+ENTITY_MANAGERS_PERMISSIONS = {
+ 'read': ('managers',),
+ 'add': ('managers',),
+ 'update': ('managers',),
+ 'delete': ('managers',),
+ }
+RELATION_MANAGERS_PERMISSIONS = {
+ 'read': ('managers',),
+ 'add': ('managers',),
+ 'delete': ('managers',),
+ }
+
+
+class CWSourceHostConfig(EntityType):
+ __permissions__ = ENTITY_MANAGERS_PERMISSIONS
+ __unique_together__ = [('match_host', 'cw_host_config_of')]
+ match_host = String(required=True, maxsize=128,
+ description=_('regexp matching host(s) to which this config applies'))
+ config = String(required=True,
+ description=_('Source\'s configuration for a particular host. '
+ 'One key=value per line, authorized keys '
+ 'depending on the source\'s type, overriding '
+ 'values defined on the source.'),
+ __permissions__={
+ 'read': ('managers',),
+ 'add': ('managers',),
+ 'update': ('managers',),
+ })
+
+
+class cw_host_config_of(RelationDefinition):
+ __permissions__ = RELATION_MANAGERS_PERMISSIONS
+ subject = 'CWSourceHostConfig'
+ object = 'CWSource'
+ cardinality = '1*'
+ composite = 'object'
+ inlined = True
+
+class cw_source(RelationDefinition):
+ __permissions__ = {
+ 'read': ('managers', 'users', 'guests'),
+ 'add': ('managers',),
+ 'delete': ('managers',),
+ }
+ subject = '*'
+ object = 'CWSource'
+ cardinality = '1*'
+ composite = 'object'
+
+
+class CWDataImport(EntityType):
+ __permissions__ = ENTITY_MANAGERS_PERMISSIONS
+ start_timestamp = TZDatetime()
+ end_timestamp = TZDatetime()
+ log = String()
+ status = String(required=True, internationalizable=True, indexed=True,
+ default='in progress',
+ vocabulary=[_('in progress'), _('success'), _('failed')])
+
+class cw_import_of(RelationDefinition):
+ __permissions__ = RELATION_MANAGERS_PERMISSIONS
+ subject = 'CWDataImport'
+ object = 'CWSource'
+ cardinality = '1*'
+ composite = 'object'
+
+
+class CWSourceSchemaConfig(EntityType):
+ __permissions__ = ENTITY_MANAGERS_PERMISSIONS
+ cw_for_source = SubjectRelation(
+ 'CWSource', inlined=True, cardinality='1*', composite='object',
+ __permissions__=RELATION_MANAGERS_PERMISSIONS)
+ options = String(description=_('allowed options depends on the source type'))
+
+
+class rtype_cw_schema(RelationDefinition):
+ __permissions__ = RELATION_MANAGERS_PERMISSIONS
+ name = 'cw_schema'
+ subject = 'CWSourceSchemaConfig'
+ object = ('CWEType', 'CWRType')
+ inlined = True
+ cardinality = '1*'
+ composite = 'object'
+ constraints = [RQLConstraint('NOT O final TRUE')]
+
+class rdef_cw_schema(RelationDefinition):
+ __permissions__ = RELATION_MANAGERS_PERMISSIONS
+ name = 'cw_schema'
+ subject = 'CWSourceSchemaConfig'
+ object = 'CWRelation'
+ inlined = True
+ cardinality = '1*'
+ composite = 'object'
+
+# "abstract" relation types, no definition in cubicweb itself ###################
+
+class identical_to(RelationType):
+ """identical to"""
+ symmetric = True
+ __permissions__ = {
+ 'read': ('managers', 'users', 'guests',),
+ # XXX should have update __permissions__ on both subject and object,
+ # though by doing this we will probably have no way to add
+        # this relation in the web ui. The easiest way to achieve this
+ # is probably to be able to have "U has_update_permission O" as
+ # RQLConstraint of the relation definition, though this is not yet
+ # possible
+ 'add': ('managers', RRQLExpression('U has_update_permission S'),),
+ 'delete': ('managers', RRQLExpression('U has_update_permission S'),),
+ }
+
+class see_also(RelationType):
+ """generic relation to link one entity to another"""
+ symmetric = True
+ __permissions__ = {
+ 'read': ('managers', 'users', 'guests',),
+ 'add': ('managers', RRQLExpression('U has_update_permission S'),),
+ 'delete': ('managers', RRQLExpression('U has_update_permission S'),),
+ }
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/schemas/bootstrap.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/schemas/bootstrap.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,357 @@
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""core CubicWeb schema necessary for bootstrapping the actual instance's schema
+"""
+
+__docformat__ = "restructuredtext en"
+from cubicweb import _
+
+from yams.buildobjs import (EntityType, RelationType, RelationDefinition, Bytes,
+ SubjectRelation, RichString, String, Boolean, Int)
+from cubicweb.schema import (
+ RQLConstraint,
+ PUB_SYSTEM_ENTITY_PERMS, PUB_SYSTEM_REL_PERMS, PUB_SYSTEM_ATTR_PERMS
+ )
+
+# not restricted since as "is" is handled as other relations, guests need
+# access to this
+class CWEType(EntityType):
+ """define an entity type, used to build the instance schema"""
+ __permissions__ = PUB_SYSTEM_ENTITY_PERMS
+ name = String(required=True, indexed=True, internationalizable=True,
+ unique=True, maxsize=64)
+ description = RichString(internationalizable=True,
+ description=_('semantic description of this entity type'))
+ # necessary to filter using RQL
+ final = Boolean(default=False, description=_('automatic'))
+
+
+class CWRType(EntityType):
+ """define a relation type, used to build the instance schema"""
+ __permissions__ = PUB_SYSTEM_ENTITY_PERMS
+ name = String(required=True, indexed=True, internationalizable=True,
+ unique=True, maxsize=64)
+ description = RichString(internationalizable=True,
+ description=_('semantic description of this relation type'))
+ symmetric = Boolean(description=_('is this relation equivalent in both direction ?'))
+ inlined = Boolean(description=_('is this relation physically inlined? you should know what you\'re doing if you are changing this!'))
+ fulltext_container = String(description=_('if full text content of subject/object entity '
+ 'should be added to other side entity (the container).'),
+ vocabulary=('', _('subject'), _('object')),
+ maxsize=8, default=None)
+ final = Boolean(description=_('automatic'))
+
+
+class CWComputedRType(EntityType):
+ """define a virtual relation type, used to build the instance schema"""
+ __permissions__ = PUB_SYSTEM_ENTITY_PERMS
+ name = String(required=True, indexed=True, internationalizable=True,
+ unique=True, maxsize=64)
+ description = RichString(internationalizable=True,
+ description=_('semantic description of this relation type'))
+ rule = String(required=True)
+
+
+class CWAttribute(EntityType):
+ """define a final relation: link a final relation type from a non final
+ entity to a final entity type.
+
+ used to build the instance schema
+ """
+ __permissions__ = PUB_SYSTEM_ENTITY_PERMS
+ relation_type = SubjectRelation('CWRType', cardinality='1*',
+ constraints=[RQLConstraint('O final TRUE')],
+ composite='object')
+ from_entity = SubjectRelation('CWEType', cardinality='1*',
+ constraints=[RQLConstraint('O final FALSE')],
+ composite='object')
+ to_entity = SubjectRelation('CWEType', cardinality='1*',
+ constraints=[RQLConstraint('O final TRUE')],
+ composite='object')
+ constrained_by = SubjectRelation('CWConstraint', cardinality='*1', composite='subject')
+
+ cardinality = String(maxsize=2, internationalizable=True,
+ vocabulary=[_('?1'), _('11')],
+ description=_('subject/object cardinality'))
+ ordernum = Int(description=('control subject entity\'s relations order'), default=0)
+
+ formula = String(maxsize=2048)
+ indexed = Boolean(description=_('create an index for quick search on this attribute'))
+ fulltextindexed = Boolean(description=_('index this attribute\'s value in the plain text index'))
+ internationalizable = Boolean(description=_('is this attribute\'s value translatable'))
+ defaultval = Bytes(description=_('default value as gziped pickled python object'))
+ extra_props = Bytes(description=_('additional type specific properties'))
+
+ description = RichString(internationalizable=True,
+ description=_('semantic description of this attribute'))
+
+
+CARDINALITY_VOCAB = [_('?*'), _('1*'), _('+*'), _('**'),
+ _('?+'), _('1+'), _('++'), _('*+'),
+ _('?1'), _('11'), _('+1'), _('*1'),
+ _('??'), _('1?'), _('+?'), _('*?')]
+
+class CWRelation(EntityType):
+ """define a non final relation: link a non final relation type from a non
+ final entity to a non final entity type.
+
+ used to build the instance schema
+ """
+ __permissions__ = PUB_SYSTEM_ENTITY_PERMS
+ relation_type = SubjectRelation('CWRType', cardinality='1*',
+ constraints=[RQLConstraint('O final FALSE')],
+ composite='object')
+ from_entity = SubjectRelation('CWEType', cardinality='1*',
+ constraints=[RQLConstraint('O final FALSE')],
+ composite='object')
+ to_entity = SubjectRelation('CWEType', cardinality='1*',
+ constraints=[RQLConstraint('O final FALSE')],
+ composite='object')
+ constrained_by = SubjectRelation('CWConstraint', cardinality='*1', composite='subject')
+
+ cardinality = String(maxsize=2, internationalizable=True,
+ vocabulary=CARDINALITY_VOCAB,
+ description=_('subject/object cardinality'))
+ ordernum = Int(description=_('control subject entity\'s relations order'),
+ default=0)
+ composite = String(description=_('is the subject/object entity of the relation '
+ 'composed of the other ? This implies that when '
+ 'the composite is deleted, composants are also '
+ 'deleted.'),
+ vocabulary=('', _('subject'), _('object')),
+ maxsize=8, default=None)
+
+ description = RichString(internationalizable=True,
+ description=_('semantic description of this relation'))
+
+
+# not restricted since it has to be read when checking allowed transitions
+class RQLExpression(EntityType):
+ """define a rql expression used to define permissions"""
+ __permissions__ = PUB_SYSTEM_ENTITY_PERMS
+ exprtype = String(required=True, vocabulary=['ERQLExpression', 'RRQLExpression'])
+ mainvars = String(maxsize=8,
+ description=_('name of the main variables which should be '
+ 'used in the selection if necessary (comma '
+ 'separated)'))
+ expression = String(required=True,
+ description=_('restriction part of a rql query. '
+ 'For entity rql expression, X and U are '
+ 'predefined respectivly to the current object and to '
+ 'the request user. For relation rql expression, '
+ 'S, O and U are predefined respectivly to the current '
+ 'relation\'subject, object and to '
+ 'the request user. '))
+
+
+class CWConstraint(EntityType):
+ """define a schema constraint"""
+ __permissions__ = PUB_SYSTEM_ENTITY_PERMS
+ cstrtype = SubjectRelation('CWConstraintType', cardinality='1*')
+ value = String(description=_('depends on the constraint type'))
+
+
+class CWUniqueTogetherConstraint(EntityType):
+ """defines a sql-level multicolumn unique index"""
+ __permissions__ = PUB_SYSTEM_ENTITY_PERMS
+ name = String(required=True, unique=True, maxsize=64)
+ constraint_of = SubjectRelation('CWEType', cardinality='1*', composite='object',
+ inlined=True)
+ relations = SubjectRelation('CWRType', cardinality='+*',
+ constraints=[RQLConstraint(
+ 'S constraint_of ET, RDEF relation_type O, RDEF from_entity ET, '
+ 'O final TRUE OR (O final FALSE AND O inlined TRUE)')])
+
+
+class CWConstraintType(EntityType):
+ """define a schema constraint type"""
+ __permissions__ = PUB_SYSTEM_ENTITY_PERMS
+ name = String(required=True, indexed=True, internationalizable=True,
+ unique=True, maxsize=64)
+
+
+# not restricted since it has to be read when checking allowed transitions
+class CWGroup(EntityType):
+ """define a CubicWeb users group"""
+ __permissions__ = PUB_SYSTEM_ENTITY_PERMS
+ name = String(required=True, indexed=True, internationalizable=True,
+ unique=True, maxsize=64)
+
+
+class CWProperty(EntityType):
+ """used for cubicweb configuration. Once a property has been created you
+ can't change the key.
+ """
+ __permissions__ = {
+ 'read': ('managers', 'users', 'guests'),
+ 'add': ('managers', 'users',),
+ 'update': ('managers', 'owners',),
+ 'delete': ('managers', 'owners',),
+ }
+ # key is a reserved word for mysql
+ pkey = String(required=True, internationalizable=True, maxsize=256,
+ description=_('defines what\'s the property is applied for. '
+ 'You must select this first to be able to set '
+ 'value'))
+ value = String(internationalizable=True, maxsize=256)
+
+class relation_type(RelationType):
+ """link a relation definition to its relation type"""
+ __permissions__ = PUB_SYSTEM_REL_PERMS
+ inlined = True
+
+class from_entity(RelationType):
+ """link a relation definition to its subject entity type"""
+ __permissions__ = PUB_SYSTEM_REL_PERMS
+ inlined = True
+
+class to_entity(RelationType):
+ """link a relation definition to its object entity type"""
+ __permissions__ = PUB_SYSTEM_REL_PERMS
+ inlined = True
+
+class constrained_by(RelationType):
+ """constraints applying on this relation"""
+ __permissions__ = PUB_SYSTEM_REL_PERMS
+
+class cstrtype(RelationType):
+ """constraint factory"""
+ __permissions__ = PUB_SYSTEM_REL_PERMS
+ inlined = True
+
+
+class read_permission_cwgroup(RelationDefinition):
+ """groups allowed to read entities/relations of this type"""
+ __permissions__ = PUB_SYSTEM_REL_PERMS
+ name = 'read_permission'
+ subject = ('CWEType', 'CWAttribute', 'CWRelation', 'CWComputedRType')
+ object = 'CWGroup'
+ cardinality = '**'
+
+class add_permission_cwgroup(RelationDefinition):
+ """groups allowed to add entities/relations of this type"""
+ __permissions__ = PUB_SYSTEM_REL_PERMS
+ name = 'add_permission'
+ subject = ('CWEType', 'CWRelation', 'CWAttribute')
+ object = 'CWGroup'
+ cardinality = '**'
+
+class delete_permission_cwgroup(RelationDefinition):
+ """groups allowed to delete entities/relations of this type"""
+ __permissions__ = PUB_SYSTEM_REL_PERMS
+ name = 'delete_permission'
+ subject = ('CWEType', 'CWRelation')
+ object = 'CWGroup'
+ cardinality = '**'
+
+class update_permission_cwgroup(RelationDefinition):
+ """groups allowed to update entities/relations of this type"""
+ __permissions__ = PUB_SYSTEM_REL_PERMS
+ name = 'update_permission'
+ subject = ('CWEType', 'CWAttribute')
+ object = 'CWGroup'
+ cardinality = '**'
+
+class read_permission_rqlexpr(RelationDefinition):
+ """rql expression allowing to read entities/relations of this type"""
+ __permissions__ = PUB_SYSTEM_REL_PERMS
+ name = 'read_permission'
+ subject = ('CWEType', 'CWAttribute', 'CWRelation', 'CWComputedRType')
+ object = 'RQLExpression'
+ cardinality = '*?'
+ composite = 'subject'
+
+class add_permission_rqlexpr(RelationDefinition):
+ """rql expression allowing to add entities/relations of this type"""
+ __permissions__ = PUB_SYSTEM_REL_PERMS
+ name = 'add_permission'
+ subject = ('CWEType', 'CWRelation', 'CWAttribute')
+ object = 'RQLExpression'
+ cardinality = '*?'
+ composite = 'subject'
+
+class delete_permission_rqlexpr(RelationDefinition):
+ """rql expression allowing to delete entities/relations of this type"""
+ __permissions__ = PUB_SYSTEM_REL_PERMS
+ name = 'delete_permission'
+ subject = ('CWEType', 'CWRelation')
+ object = 'RQLExpression'
+ cardinality = '*?'
+ composite = 'subject'
+
+class update_permission_rqlexpr(RelationDefinition):
+ """rql expression allowing to update entities/relations of this type"""
+ __permissions__ = PUB_SYSTEM_REL_PERMS
+ name = 'update_permission'
+ subject = ('CWEType', 'CWAttribute')
+ object = 'RQLExpression'
+ cardinality = '*?'
+ composite = 'subject'
+
+
+class is_(RelationType):
+ """core relation indicating the type of an entity
+ """
+ name = 'is'
+ # don't explicitly set composite here, this is handled anyway
+ #composite = 'object'
+ __permissions__ = {
+ 'read': ('managers', 'users', 'guests'),
+ 'add': (),
+ 'delete': (),
+ }
+ cardinality = '1*'
+ subject = '*'
+ object = 'CWEType'
+
+class is_instance_of(RelationType):
+ """core relation indicating the types (including specialized types)
+ of an entity
+ """
+ # don't explicitly set composite here, this is handled anyway
+ #composite = 'object'
+ __permissions__ = {
+ 'read': ('managers', 'users', 'guests'),
+ 'add': (),
+ 'delete': (),
+ }
+ cardinality = '+*'
+ subject = '*'
+ object = 'CWEType'
+
+class specializes(RelationType):
+ name = 'specializes'
+ __permissions__ = {
+ 'read': ('managers', 'users', 'guests'),
+ 'add': ('managers',),
+ 'delete': ('managers',),
+ }
+ cardinality = '?*'
+ subject = 'CWEType'
+ object = 'CWEType'
+
+def post_build_callback(schema):
+ """set attributes permissions for schema/workflow entities"""
+ from cubicweb.schema import SCHEMA_TYPES, WORKFLOW_TYPES, META_RTYPES
+ wftypes = WORKFLOW_TYPES - set(('TrInfo',))
+ for eschema in schema.entities():
+ if eschema in SCHEMA_TYPES or eschema in wftypes:
+ for rschema in eschema.subject_relations():
+ if rschema.final and not rschema in META_RTYPES:
+ rdef = eschema.rdef(rschema)
+ rdef.permissions = PUB_SYSTEM_ATTR_PERMS
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/schemas/workflow.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/schemas/workflow.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,283 @@
+# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""workflow related schemas
+
+"""
+__docformat__ = "restructuredtext en"
+from cubicweb import _
+
+from yams.buildobjs import (EntityType, RelationType, RelationDefinition,
+ SubjectRelation,
+ RichString, String, Int)
+from cubicweb.schema import RQLConstraint
+from cubicweb.schemas import (PUB_SYSTEM_ENTITY_PERMS, PUB_SYSTEM_REL_PERMS,
+ RO_REL_PERMS)
+
+class Workflow(EntityType):
+ __permissions__ = PUB_SYSTEM_ENTITY_PERMS
+
+ name = String(required=True, indexed=True, internationalizable=True,
+ maxsize=256)
+ description = RichString(default_format='text/rest',
+ description=_('semantic description of this workflow'))
+
+ workflow_of = SubjectRelation('CWEType', cardinality='+*',
+ description=_('entity types which may use this workflow'),
+ constraints=[RQLConstraint('O final FALSE')])
+
+ initial_state = SubjectRelation('State', cardinality='?*',
+ constraints=[RQLConstraint('O state_of S',
+ msg=_('state doesn\'t belong to this workflow'))],
+ description=_('initial state for this workflow'))
+
+
+class default_workflow(RelationType):
+ """default workflow for an entity type"""
+ __permissions__ = PUB_SYSTEM_REL_PERMS
+
+ subject = 'CWEType'
+ object = 'Workflow'
+ cardinality = '?*'
+ constraints = [RQLConstraint('S final FALSE, O workflow_of S',
+ msg=_('workflow isn\'t a workflow for this type'))]
+
+
+class State(EntityType):
+ """used to associate simple states to an entity type and/or to define
+ workflows
+ """
+ __permissions__ = PUB_SYSTEM_ENTITY_PERMS
+ __unique_together__ = [('name', 'state_of')]
+ name = String(required=True, indexed=True, internationalizable=True, maxsize=256)
+ description = RichString(default_format='text/rest',
+ description=_('semantic description of this state'))
+
+    # XXX should be on BaseTransition w/ AND/OR selectors when we
+    # implement #345274
+ allowed_transition = SubjectRelation('BaseTransition', cardinality='**',
+ constraints=[RQLConstraint('S state_of WF, O transition_of WF',
+ msg=_('state and transition don\'t belong the the same workflow'))],
+ description=_('allowed transitions from this state'))
+ state_of = SubjectRelation('Workflow', cardinality='1*', composite='object', inlined=True,
+ description=_('workflow to which this state belongs'))
+
+
+class BaseTransition(EntityType):
+ """abstract base class for transitions"""
+ __permissions__ = PUB_SYSTEM_ENTITY_PERMS
+ __unique_together__ = [('name', 'transition_of')]
+
+ name = String(required=True, indexed=True, internationalizable=True, maxsize=256)
+ type = String(vocabulary=(_('normal'), _('auto')), default='normal')
+ description = RichString(description=_('semantic description of this transition'))
+
+ transition_of = SubjectRelation('Workflow', cardinality='1*', composite='object', inlined=True,
+ description=_('workflow to which this transition belongs'))
+
+
+class require_group(RelationDefinition):
+ """group in which a user should be to be allowed to pass this transition"""
+ __permissions__ = PUB_SYSTEM_REL_PERMS
+ subject = 'BaseTransition'
+ object = 'CWGroup'
+
+
+class condition(RelationDefinition):
+ """a RQL expression which should return some results, else the transition
+ won't be available.
+
+    This query may use X and U variables that will respectively represent the
+ current entity and the current user.
+ """
+ __permissions__ = PUB_SYSTEM_REL_PERMS
+ subject = 'BaseTransition'
+ object = 'RQLExpression'
+ cardinality = '*?'
+ composite = 'subject'
+
+
+class Transition(BaseTransition):
+ """use to define a transition from one or multiple states to a destination
+ states in workflow's definitions. Transition without destination state will
+ go back to the state from which we arrived to the current state.
+ """
+ __specializes_schema__ = True
+
+ destination_state = SubjectRelation(
+ 'State', cardinality='?*',
+ constraints=[RQLConstraint('S transition_of WF, O state_of WF',
+ msg=_('state and transition don\'t belong the the same workflow'))],
+ description=_('destination state for this transition'))
+
+
+class WorkflowTransition(BaseTransition):
+ """special transition allowing to go through a sub-workflow"""
+ __specializes_schema__ = True
+
+ subworkflow = SubjectRelation('Workflow', cardinality='1*',
+ constraints=[RQLConstraint('S transition_of WF, WF workflow_of ET, O workflow_of ET',
+ msg=_('subworkflow isn\'t a workflow for the same types as the transition\'s workflow'))]
+ )
+ # XXX use exit_of and inline it
+ subworkflow_exit = SubjectRelation('SubWorkflowExitPoint', cardinality='*1',
+ composite='subject')
+
+
+class SubWorkflowExitPoint(EntityType):
+ """define how we get out from a sub-workflow"""
+ subworkflow_state = SubjectRelation(
+ 'State', cardinality='1*',
+ constraints=[RQLConstraint('T subworkflow_exit S, T subworkflow WF, O state_of WF',
+ msg=_('exit state must be a subworkflow state'))],
+ description=_('subworkflow state'))
+ destination_state = SubjectRelation(
+ 'State', cardinality='?*',
+ constraints=[RQLConstraint('T subworkflow_exit S, T transition_of WF, O state_of WF',
+ msg=_('destination state must be in the same workflow as our parent transition'))],
+ description=_('destination state. No destination state means that transition '
+ 'should go back to the state from which we\'ve entered the '
+ 'subworkflow.'))
+
+
+class TrInfo(EntityType):
+ """workflow history item"""
+ # 'add' security actually done by hooks
+ __permissions__ = {
+ 'read': ('managers', 'users', 'guests',), # XXX U has_read_permission O ?
+ 'add': ('managers', 'users', 'guests',),
+ 'delete': (), # XXX should we allow managers to delete TrInfo?
+ 'update': ('managers', 'owners',),
+ }
+ # The unique_together constraint ensures that 2 repositories
+ # sharing the db won't be able to fire a transition simultaneously
+ # on the same entity tr_count is filled in the FireTransitionHook
+ # to the number of TrInfo attached to the entity on which we
+ # attempt to fire a transition. In other word, it contains the
+ # rank of the TrInfo for that entity, and the constraint says we
+ # cannot have 2 TrInfo with the same rank.
+ __unique_together__ = [('tr_count', 'wf_info_for')]
+ from_state = SubjectRelation('State', cardinality='1*', inlined=True)
+ to_state = SubjectRelation('State', cardinality='1*', inlined=True)
+ # make by_transition optional because we want to allow managers to set
+ # entity into an arbitrary state without having to respect wf transition
+ by_transition = SubjectRelation('BaseTransition', cardinality='?*')
+ comment = RichString(fulltextindexed=True, default_format='text/plain')
+ tr_count = Int(description='autocomputed attribute used to ensure transition coherency')
+ # get actor and date time using owned_by and creation_date
+
+class from_state(RelationType):
+ __permissions__ = RO_REL_PERMS.copy()
+ inlined = True
+
+class to_state(RelationType):
+ __permissions__ = RO_REL_PERMS.copy()
+ inlined = True
+
+class by_transition(RelationType):
+ # 'add' security actually done by hooks
+ __permissions__ = {
+ 'read': ('managers', 'users', 'guests',),
+ 'add': ('managers', 'users', 'guests',),
+ 'delete': (),
+ }
+ inlined = True
+
+
+class workflow_of(RelationType):
+ """link a workflow to one or more entity type"""
+ __permissions__ = PUB_SYSTEM_REL_PERMS
+
+class state_of(RelationType):
+ """link a state to one or more workflow"""
+ __permissions__ = PUB_SYSTEM_REL_PERMS
+ inlined = True
+
+class transition_of(RelationType):
+ """link a transition to one or more workflow"""
+ __permissions__ = PUB_SYSTEM_REL_PERMS
+ inlined = True
+
+class destination_state(RelationType):
+ """destination state of a transition"""
+ __permissions__ = PUB_SYSTEM_REL_PERMS
+ inlined = True
+
+class allowed_transition(RelationType):
+ """allowed transitions from this state"""
+ __permissions__ = PUB_SYSTEM_REL_PERMS
+
+class initial_state(RelationType):
+ """indicate which state should be used by default when an entity using
+ states is created
+ """
+ __permissions__ = PUB_SYSTEM_REL_PERMS
+ inlined = True
+
+
+class subworkflow(RelationType):
+ __permissions__ = PUB_SYSTEM_REL_PERMS
+ inlined = True
+
+class exit_point(RelationType):
+ __permissions__ = PUB_SYSTEM_REL_PERMS
+
+class subworkflow_state(RelationType):
+ __permissions__ = PUB_SYSTEM_REL_PERMS
+ inlined = True
+
+
+# "abstract" relations, set by WorkflowableEntityType ##########################
+
+class custom_workflow(RelationType):
+ """allow to set a specific workflow for an entity"""
+ __permissions__ = PUB_SYSTEM_REL_PERMS
+
+ cardinality = '?*'
+ constraints = [RQLConstraint('S is ET, O workflow_of ET',
+ msg=_('workflow isn\'t a workflow for this type'))]
+ object = 'Workflow'
+
+
+class wf_info_for(RelationType):
+ """link a transition information to its object"""
+ # 'add' security actually done by hooks
+ __permissions__ = {
+ 'read': ('managers', 'users', 'guests',),
+ 'add': ('managers', 'users', 'guests',),
+ 'delete': (),
+ }
+ inlined = True
+
+ cardinality = '1*'
+ composite = 'object'
+ fulltext_container = composite
+ subject = 'TrInfo'
+
+
+class in_state(RelationType):
+ """indicate the current state of an entity"""
+ __permissions__ = RO_REL_PERMS
+
+    # not inlined intentionally since when using ldap sources, user's state
+ # has to be stored outside the CWUser table
+ inlined = False
+
+ cardinality = '1*'
+ constraints = [RQLConstraint('S is ET, O state_of WF, WF workflow_of ET',
+ msg=_('state doesn\'t apply to this entity\'s type'))]
+ object = 'State'
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/selectors.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/selectors.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,107 @@
+# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+
+from warnings import warn
+
+from six import string_types
+
+from logilab.common.deprecation import deprecated, class_renamed
+
+from cubicweb.predicates import *
+
+
+warn('[3.15] cubicweb.selectors renamed into cubicweb.predicates',
+ DeprecationWarning, stacklevel=2)
+
+# XXX pre 3.15 bw compat
+from cubicweb.appobject import (objectify_selector, traced_selection,
+ lltrace, yes)
+
+ExpectedValueSelector = class_renamed('ExpectedValueSelector',
+ ExpectedValuePredicate)
+EClassSelector = class_renamed('EClassSelector', EClassPredicate)
+EntitySelector = class_renamed('EntitySelector', EntityPredicate)
+
+
+class on_transition(is_in_state):
+ """Return 1 if entity is in one of the transitions given as argument list
+
+ Especially useful to match passed transition to enable notifications when
+ your workflow allows several transition to the same states.
+
+ Note that if workflow `change_state` adapter method is used, this predicate
+ will not be triggered.
+
+ You should use this instead of your own :class:`score_entity` predicate to
+ avoid some gotchas:
+
+ * possible views gives a fake entity with no state
+ * you must use the latest tr info thru the workflow adapter for repository
+ side checking of the current state
+
+ In debug mode, this predicate can raise:
+ :raises: :exc:`ValueError` for unknown transition names
+ (etype workflow only not checked in custom workflow)
+
+ :rtype: int
+ """
+ @deprecated('[3.12] on_transition is deprecated, you should rather use '
+ 'on_fire_transition(etype, trname)')
+ def __init__(self, *expected):
+ super(on_transition, self).__init__(*expected)
+
+ def _score(self, adapted):
+ trinfo = adapted.latest_trinfo()
+ if trinfo and trinfo.by_transition:
+ return trinfo.by_transition[0].name in self.expected
+
+ def _validate(self, adapted):
+ wf = adapted.current_workflow
+ valid = [n.name for n in wf.reverse_transition_of]
+ unknown = sorted(self.expected.difference(valid))
+ if unknown:
+ raise ValueError("%s: unknown transition(s): %s"
+ % (wf.name, ",".join(unknown)))
+
+
+entity_implements = class_renamed('entity_implements', is_instance)
+
+class _but_etype(EntityPredicate):
+ """accept if the given entity types are not found in the result set.
+
+ See `EntityPredicate` documentation for behaviour when row is not specified.
+
+ :param *etypes: entity types (`string_types`) which should be refused
+ """
+ def __init__(self, *etypes):
+ super(_but_etype, self).__init__()
+ self.but_etypes = etypes
+
+ def score(self, req, rset, row, col):
+ if rset.description[row][col] in self.but_etypes:
+ return 0
+ return 1
+
+but_etype = class_renamed('but_etype', _but_etype, 'use ~is_instance(*etypes) instead')
+
+# XXX deprecated the one_* variants of predicates below w/ multi_xxx(nb=1)?
+# take care at the implementation though (looking for the 'row' argument's
+# value)
+two_lines_rset = class_renamed('two_lines_rset', multi_lines_rset)
+two_cols_rset = class_renamed('two_cols_rset', multi_columns_rset)
+two_etypes_rset = class_renamed('two_etypes_rset', multi_etypes_rset)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/server/__init__.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/server/__init__.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,364 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""Server subcube of cubicweb : defines objects used only on the server
+(repository) side
+
+The server module contains functions to initialize a new repository.
+"""
+from __future__ import print_function
+
+__docformat__ = "restructuredtext en"
+
+import sys
+from os.path import join, exists
+from glob import glob
+from contextlib import contextmanager
+
+from six import text_type, string_types
+from six.moves import filter
+
+from logilab.common.modutils import LazyObject
+from logilab.common.textutils import splitstrip
+from logilab.common.registry import yes
+from logilab import database
+
+from yams import BASE_GROUPS
+
+from cubicweb import CW_SOFTWARE_ROOT
+from cubicweb.appobject import AppObject
+
+class ShuttingDown(BaseException):
+ """raised when trying to access some resources while the repository is
+ shutting down. Inherit from BaseException so that `except Exception` won't
+ catch it.
+ """
+
+# server-side services #########################################################
+
+class Service(AppObject):
+ """Base class for services.
+
+ A service is a selectable object that performs an action server-side.
+ Use :class:`cubicweb.dbapi.Connection.call_service` to call them from
+ the web-side.
+
+ When inheriting this class, do not forget to define at least the __regid__
+ attribute (and probably __select__ too).
+ """
+ __registry__ = 'services'
+ __select__ = yes()
+
+ def call(self, **kwargs):
+ raise NotImplementedError
+
+
+# server-side debugging ########################################################
+
+# server debugging flags. They may be combined using binary operators.
+
+#:no debug information
+DBG_NONE = 0 #: no debug information
+#: rql execution information
+DBG_RQL = 1
+#: executed sql
+DBG_SQL = 2
+#: repository events
+DBG_REPO = 4
+#: multi-sources
+DBG_MS = 8
+#: hooks
+DBG_HOOKS = 16
+#: operations
+DBG_OPS = 32
+#: security
+DBG_SEC = 64
+#: more verbosity
+DBG_MORE = 128
+#: all level enabled
+DBG_ALL = DBG_RQL + DBG_SQL + DBG_REPO + DBG_MS + DBG_HOOKS + DBG_OPS + DBG_SEC + DBG_MORE
+
+_SECURITY_ITEMS = []
+_SECURITY_CAPS = ['read', 'add', 'update', 'delete', 'transition']
+
+#: current debug mode
+DEBUG = 0
+
+@contextmanager
+def tunesecurity(items=(), capabilities=()):
+ """Context manager to use in conjunction with DBG_SEC.
+
+ This allows some tuning of:
+ * the monitored capabilities ('read', 'add', ....)
+ * the object being checked by the security checkers
+
+ When no item is given, all of them will be watched.
+ By default all capabilities are monitored, unless specified.
+
+ Example use::
+
+ from cubicweb.server import debugged, DBG_SEC, tunesecurity
+ with debugged(DBG_SEC):
+ with tunesecurity(items=('Elephant', 'trumps'),
+ capabilities=('update', 'delete')):
+ babar.cw_set(trumps=celeste)
+ flore.cw_delete()
+
+ ==>
+
+ check_perm: 'update' 'relation Elephant.trumps.Elephant'
+ [(ERQLExpression(Any X WHERE U has_update_permission X, X eid %(x)s, U eid %(u)s),
+ {'eid': 2167}, True)]
+ check_perm: 'delete' 'Elephant'
+ [(ERQLExpression(Any X WHERE U has_delete_permission X, X eid %(x)s, U eid %(u)s),
+ {'eid': 2168}, True)]
+
+ """
+ olditems = _SECURITY_ITEMS[:]
+ _SECURITY_ITEMS.extend(list(items))
+ oldactions = _SECURITY_CAPS[:]
+ _SECURITY_CAPS[:] = capabilities
+ yield
+ _SECURITY_ITEMS[:] = olditems
+ _SECURITY_CAPS[:] = oldactions
+
+def set_debug(debugmode):
+ """change the repository debugging mode"""
+ global DEBUG
+ if not debugmode:
+ DEBUG = 0
+ return
+ if isinstance(debugmode, string_types):
+ for mode in splitstrip(debugmode, sep='|'):
+ DEBUG |= globals()[mode]
+ else:
+ DEBUG |= debugmode
+
+class debugged(object):
+ """Context manager and decorator to help debug the repository.
+
+ It can be used either as a context manager:
+
+ >>> with debugged('DBG_RQL | DBG_REPO'):
+ ... # some code in which you want to debug repository activity,
+    ...     # seeing information about RQL being executed and repository events.
+
+ or as a function decorator:
+
+ >>> @debugged('DBG_RQL | DBG_REPO')
+ ... def some_function():
+ ... # some code in which you want to debug repository activity,
+    ...     # seeing information about RQL being executed and repository events
+
+ The debug mode will be reset to its original value when leaving the "with"
+ block or the decorated function.
+ """
+ def __init__(self, debugmode):
+ self.debugmode = debugmode
+ self._clevel = None
+
+ def __enter__(self):
+ """enter with block"""
+ self._clevel = DEBUG
+ set_debug(self.debugmode)
+
+ def __exit__(self, exctype, exc, traceback):
+ """leave with block"""
+ set_debug(self._clevel)
+ return traceback is None
+
+ def __call__(self, func):
+ """decorate function"""
+ def wrapped(*args, **kwargs):
+ _clevel = DEBUG
+ set_debug(self.debugmode)
+ try:
+ return func(*args, **kwargs)
+ finally:
+ set_debug(self._clevel)
+ return wrapped
+
+# database initialization ######################################################
+
+def create_user(session, login, pwd, *groups):
+ # monkey patch this method if you want to customize admin/anon creation
+ # (that maybe necessary if you change CWUser's schema)
+ user = session.create_entity('CWUser', login=login, upassword=pwd)
+ for group in groups:
+ session.execute('SET U in_group G WHERE U eid %(u)s, G name %(group)s',
+ {'u': user.eid, 'group': text_type(group)})
+ return user
+
+def init_repository(config, interactive=True, drop=False, vreg=None,
+ init_config=None):
+    """initialise a repository database by creating tables and filling them
+    with the minimal set of entities (ie at least the schema, base groups and
+    an initial user)
+ """
+ from cubicweb.repoapi import get_repository, connect
+ from cubicweb.server.repository import Repository
+ from cubicweb.server.utils import manager_userpasswd
+ from cubicweb.server.sqlutils import sqlexec, sqlschema, sql_drop_all_user_tables
+ from cubicweb.server.sqlutils import _SQL_DROP_ALL_USER_TABLES_FILTER_FUNCTION as drop_filter
+    # configuration to avoid db schema loading and user's state checking
+ # on connection
+ config.creating = True
+ config.consider_user_state = False
+ config.cubicweb_appobject_path = set(('hooks', 'entities'))
+ config.cube_appobject_path = set(('hooks', 'entities'))
+ # only enable the system source at initialization time
+ repo = Repository(config, vreg=vreg)
+ if init_config is not None:
+ # further config initialization once it has been bootstrapped
+ init_config(config)
+ schema = repo.schema
+ sourcescfg = config.read_sources_file()
+ source = sourcescfg['system']
+ driver = source['db-driver']
+ with repo.internal_cnx() as cnx:
+ sqlcnx = cnx.cnxset.cnx
+ sqlcursor = cnx.cnxset.cu
+ execute = sqlcursor.execute
+ if drop:
+ helper = database.get_db_helper(driver)
+ dropsql = sql_drop_all_user_tables(helper, sqlcursor)
+ # We may fail dropping some tables because of table dependencies, in a first pass.
+ # So, we try a second drop sequence to drop remaining tables if needed.
+ # Note that 2 passes is an arbitrary choice as it seems enough for our usecases
+ # (looping may induce infinite recursion when user have no rights for example).
+ # Here we try to keep code simple and backend independent. That's why we don't try to
+ # distinguish remaining tables (missing privileges, dependencies, ...).
+ failed = sqlexec(dropsql, execute, cnx=sqlcnx,
+ pbtitle='-> dropping tables (first pass)')
+ if failed:
+ failed = sqlexec(failed, execute, cnx=sqlcnx,
+ pbtitle='-> dropping tables (second pass)')
+ remainings = list(filter(drop_filter, helper.list_tables(sqlcursor)))
+ assert not remainings, 'Remaining tables: %s' % ', '.join(remainings)
+ handler = config.migration_handler(schema, interactive=False, repo=repo, cnx=cnx)
+ # install additional driver specific sql files
+ handler.cmd_install_custom_sql_scripts()
+ for cube in reversed(config.cubes()):
+ handler.cmd_install_custom_sql_scripts(cube)
+ _title = '-> creating tables '
+ print(_title, end=' ')
+ # schema entities and relations tables
+ # can't skip entities table even if system source doesn't support them,
+ # they are used sometimes by generated sql. Keeping them empty is much
+ # simpler than fixing this...
+ schemasql = sqlschema(schema, driver)
+ #skip_entities=[str(e) for e in schema.entities()
+ # if not repo.system_source.support_entity(str(e))])
+ failed = sqlexec(schemasql, execute, pbtitle=_title, delimiter=';;')
+ if failed:
+ print('The following SQL statements failed. You should check your schema.')
+ print(failed)
+ raise Exception('execution of the sql schema failed, you should check your schema')
+ sqlcursor.close()
+ sqlcnx.commit()
+ with repo.internal_cnx() as cnx:
+ # insert entity representing the system source
+ ssource = cnx.create_entity('CWSource', type=u'native', name=u'system')
+ repo.system_source.eid = ssource.eid
+ cnx.execute('SET X cw_source X WHERE X eid %(x)s', {'x': ssource.eid})
+ # insert base groups and default admin
+ print('-> inserting default user and default groups.')
+ try:
+ login = text_type(sourcescfg['admin']['login'])
+ pwd = sourcescfg['admin']['password']
+ except KeyError:
+ if interactive:
+ msg = 'enter login and password of the initial manager account'
+ login, pwd = manager_userpasswd(msg=msg, confirm=True)
+ else:
+ login, pwd = text_type(source['db-user']), source['db-password']
+    # sort for eid predictability as expected in some server tests
+ for group in sorted(BASE_GROUPS):
+ cnx.create_entity('CWGroup', name=text_type(group))
+ admin = create_user(cnx, login, pwd, u'managers')
+ cnx.execute('SET X owned_by U WHERE X is IN (CWGroup,CWSource), U eid %(u)s',
+ {'u': admin.eid})
+ cnx.commit()
+ repo.shutdown()
+ # re-login using the admin user
+ config._cubes = None # avoid assertion error
+ repo = get_repository(config=config)
+ with connect(repo, login, password=pwd) as cnx:
+ with cnx.security_enabled(False, False):
+ repo.system_source.eid = ssource.eid # redo this manually
+ handler = config.migration_handler(schema, interactive=False,
+ cnx=cnx, repo=repo)
+ # serialize the schema
+ initialize_schema(config, schema, handler)
+ # yoo !
+ cnx.commit()
+ repo.system_source.init_creating()
+ cnx.commit()
+ repo.shutdown()
+ # restore initial configuration
+ config.creating = False
+ config.consider_user_state = True
+ # (drop instance attribute to get back to class attribute)
+ del config.cubicweb_appobject_path
+ del config.cube_appobject_path
+ print('-> database for instance %s initialized.' % config.appid)
+
+
+def initialize_schema(config, schema, mhandler, event='create'):
+ from cubicweb.server.schemaserial import serialize_schema
+ cnx = mhandler.cnx
+ cubes = config.cubes()
+ # deactivate every hooks but those responsible to set metadata
+ # so, NO INTEGRITY CHECKS are done, to have quicker db creation.
+    # Active integrity is kept else we may have problems such as two default
+ # workflows for one entity type.
+ with cnx.deny_all_hooks_but('metadata', 'activeintegrity'):
+ # execute cubicweb's pre script
+ mhandler.cmd_exec_event_script('pre%s' % event)
+ # execute cubes pre script if any
+ for cube in reversed(cubes):
+ mhandler.cmd_exec_event_script('pre%s' % event, cube)
+ # execute instance's pre script (useful in tests)
+ mhandler.cmd_exec_event_script('pre%s' % event, apphome=True)
+ # enter instance'schema into the database
+ serialize_schema(cnx, schema)
+ cnx.commit()
+ # execute cubicweb's post script
+ mhandler.cmd_exec_event_script('post%s' % event)
+ # execute cubes'post script if any
+ for cube in reversed(cubes):
+ mhandler.cmd_exec_event_script('post%s' % event, cube)
+ # execute instance's post script (useful in tests)
+ mhandler.cmd_exec_event_script('post%s' % event, apphome=True)
+
+
+# sqlite'stored procedures have to be registered at connection opening time
+from logilab.database import SQL_CONNECT_HOOKS
+
+# add to this set relations which should have their add security checking done
+# *BEFORE* adding the actual relation (done after by default)
+BEFORE_ADD_RELATIONS = set(('owned_by',))
+
+# add to this set relations which should have their add security checking done
+# *at COMMIT TIME* (done after by default)
+ON_COMMIT_ADD_RELATIONS = set(())
+
+# available sources registry
+SOURCE_TYPES = {'native': LazyObject('cubicweb.server.sources.native', 'NativeSQLSource'),
+ 'datafeed': LazyObject('cubicweb.server.sources.datafeed', 'DataFeedSource'),
+ 'ldapfeed': LazyObject('cubicweb.server.sources.ldapfeed', 'LDAPFeedSource'),
+ }
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/server/checkintegrity.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/server/checkintegrity.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,410 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""Integrity checking tool for instances:
+
+* integrity of a CubicWeb repository. Hum actually only the system database is
+ checked.
+"""
+from __future__ import print_function
+
+__docformat__ = "restructuredtext en"
+
+import sys
+from datetime import datetime
+
+from logilab.common.shellutils import ProgressBar
+
+from cubicweb.schema import PURE_VIRTUAL_RTYPES, VIRTUAL_RTYPES, UNIQUE_CONSTRAINTS
+from cubicweb.server.sqlutils import SQL_PREFIX
+
+def notify_fixed(fix):
+ if fix:
+ sys.stderr.write(' [FIXED]')
+ sys.stderr.write('\n')
+
+def has_eid(cnx, sqlcursor, eid, eids):
+ """return true if the eid is a valid eid"""
+ if eid in eids:
+ return eids[eid]
+ sqlcursor.execute('SELECT type FROM entities WHERE eid=%s' % eid)
+ try:
+ etype = sqlcursor.fetchone()[0]
+ except Exception:
+ eids[eid] = False
+ return False
+ if etype not in cnx.vreg.schema:
+ eids[eid] = False
+ return False
+ sqlcursor.execute('SELECT * FROM %s%s WHERE %seid=%s' % (SQL_PREFIX, etype,
+ SQL_PREFIX, eid))
+ result = sqlcursor.fetchall()
+ if len(result) == 0:
+ eids[eid] = False
+ return False
+ elif len(result) > 1:
+ msg = (' More than one entity with eid %s exists in source!\n'
+ ' WARNING : Unable to fix this, do it yourself!\n')
+ sys.stderr.write(msg % eid)
+ eids[eid] = True
+ return True
+
+# XXX move to yams?
+def etype_fti_containers(eschema, _done=None):
+ if _done is None:
+ _done = set()
+ _done.add(eschema)
+ containers = tuple(eschema.fulltext_containers())
+ if containers:
+ for rschema, target in containers:
+ if target == 'object':
+ targets = rschema.objects(eschema)
+ else:
+ targets = rschema.subjects(eschema)
+ for targeteschema in targets:
+ if targeteschema in _done:
+ continue
+ _done.add(targeteschema)
+ for container in etype_fti_containers(targeteschema, _done):
+ yield container
+ else:
+ yield eschema
+
+def reindex_entities(schema, cnx, withpb=True, etypes=None):
+ """reindex all entities in the repository"""
+ # deactivate modification_date hook since we don't want them
+ # to be updated due to the reindexation
+ repo = cnx.repo
+ dbhelper = repo.system_source.dbhelper
+ cursor = cnx.cnxset.cu
+ if not dbhelper.has_fti_table(cursor):
+ print('no text index table')
+ dbhelper.init_fti(cursor)
+ repo.system_source.do_fti = True # ensure full-text indexation is activated
+ if etypes is None:
+ print('Reindexing entities')
+ etypes = set()
+ for eschema in schema.entities():
+ if eschema.final:
+ continue
+ indexable_attrs = tuple(eschema.indexable_attributes()) # generator
+ if not indexable_attrs:
+ continue
+ for container in etype_fti_containers(eschema):
+ etypes.add(container)
+ # clear fti table first
+ cnx.system_sql('DELETE FROM %s' % dbhelper.fti_table)
+ else:
+ print('Reindexing entities of type %s' % \
+ ', '.join(sorted(str(e) for e in etypes)))
+ # clear fti table first. Use subquery for sql compatibility
+ cnx.system_sql("DELETE FROM %s WHERE EXISTS(SELECT 1 FROM ENTITIES "
+ "WHERE eid=%s AND type IN (%s))" % (
+ dbhelper.fti_table, dbhelper.fti_uid_attr,
+ ','.join("'%s'" % etype for etype in etypes)))
+ if withpb:
+ pb = ProgressBar(len(etypes) + 1)
+ pb.update()
+ # reindex entities by generating rql queries which set all indexable
+ # attribute to their current value
+ source = repo.system_source
+ for eschema in etypes:
+ etype_class = cnx.vreg['etypes'].etype_class(str(eschema))
+ for rset in etype_class.cw_fti_index_rql_limit(cnx):
+ source.fti_index_entities(cnx, rset.entities())
+ # clear entity cache to avoid high memory consumption on big tables
+ cnx.drop_entity_cache()
+ if withpb:
+ pb.update()
+ if withpb:
+ pb.finish()
+
+
+def check_schema(schema, cnx, eids, fix=1):
+ """check serialized schema"""
+ print('Checking serialized schema')
+ rql = ('Any COUNT(X),RN,SN,ON,CTN GROUPBY RN,SN,ON,CTN ORDERBY 1 '
+ 'WHERE X is CWConstraint, R constrained_by X, '
+ 'R relation_type RT, RT name RN, R from_entity ST, ST name SN, '
+ 'R to_entity OT, OT name ON, X cstrtype CT, CT name CTN')
+ for count, rn, sn, on, cstrname in cnx.execute(rql):
+ if count == 1:
+ continue
+ if cstrname in UNIQUE_CONSTRAINTS:
+ print("ERROR: got %s %r constraints on relation %s.%s.%s" % (
+ count, cstrname, sn, rn, on))
+ if fix:
+ print('dunno how to fix, do it yourself')
+
+
+
+def check_text_index(schema, cnx, eids, fix=1):
+ """check all entities registered in the text index"""
+ print('Checking text index')
+ msg = ' Entity with eid %s exists in the text index but in no source (autofix will remove from text index)'
+ cursor = cnx.system_sql('SELECT uid FROM appears;')
+ for row in cursor.fetchall():
+ eid = row[0]
+ if not has_eid(cnx, cursor, eid, eids):
+ sys.stderr.write(msg % eid)
+ if fix:
+ cnx.system_sql('DELETE FROM appears WHERE uid=%s;' % eid)
+ notify_fixed(fix)
+
+
+def check_entities(schema, cnx, eids, fix=1):
+ """check all entities registered in the repo system table"""
+ print('Checking entities system table')
+ # system table but no source
+ msg = ' Entity %s with eid %s exists in the system table but in no source (autofix will delete the entity)'
+ cursor = cnx.system_sql('SELECT eid,type FROM entities;')
+ for row in cursor.fetchall():
+ eid, etype = row
+ if not has_eid(cnx, cursor, eid, eids):
+ sys.stderr.write(msg % (etype, eid))
+ if fix:
+ cnx.system_sql('DELETE FROM entities WHERE eid=%s;' % eid)
+ notify_fixed(fix)
+ # source in entities, but no relation cw_source
+ # XXX this (get_versions) requires a second connection to the db when we already have one open
+ applcwversion = cnx.repo.get_versions().get('cubicweb')
+ if applcwversion >= (3, 13, 1): # entities.asource appeared in 3.13.1
+ cursor = cnx.system_sql('SELECT e.eid FROM entities as e, cw_CWSource as s '
+ 'WHERE s.cw_name=e.asource AND '
+ 'NOT EXISTS(SELECT 1 FROM cw_source_relation as cs '
+ ' WHERE cs.eid_from=e.eid AND cs.eid_to=s.cw_eid) '
+ 'ORDER BY e.eid')
+ msg = (' Entity with eid %s refers to source in entities table, '
+ 'but is missing relation cw_source (autofix will create the relation)\n')
+ for row in cursor.fetchall():
+ sys.stderr.write(msg % row[0])
+ if fix:
+ cnx.system_sql('INSERT INTO cw_source_relation (eid_from, eid_to) '
+ 'SELECT e.eid, s.cw_eid FROM entities as e, cw_CWSource as s '
+ 'WHERE s.cw_name=e.asource AND NOT EXISTS(SELECT 1 FROM cw_source_relation as cs '
+ ' WHERE cs.eid_from=e.eid AND cs.eid_to=s.cw_eid)')
+ notify_fixed(True)
+ # inconsistencies for 'is'
+ msg = ' %s #%s is missing relation "is" (autofix will create the relation)\n'
+ cursor = cnx.system_sql('SELECT e.type, e.eid FROM entities as e, cw_CWEType as s '
+ 'WHERE s.cw_name=e.type AND NOT EXISTS(SELECT 1 FROM is_relation as cs '
+ ' WHERE cs.eid_from=e.eid AND cs.eid_to=s.cw_eid) '
+ 'ORDER BY e.eid')
+ for row in cursor.fetchall():
+ sys.stderr.write(msg % tuple(row))
+ if fix:
+ cnx.system_sql('INSERT INTO is_relation (eid_from, eid_to) '
+ 'SELECT e.eid, s.cw_eid FROM entities as e, cw_CWEType as s '
+ 'WHERE s.cw_name=e.type AND NOT EXISTS(SELECT 1 FROM is_relation as cs '
+ ' WHERE cs.eid_from=e.eid AND cs.eid_to=s.cw_eid)')
+ notify_fixed(True)
+ # inconsistencies for 'is_instance_of'
+ msg = ' %s #%s is missing relation "is_instance_of" (autofix will create the relation)\n'
+ cursor = cnx.system_sql('SELECT e.type, e.eid FROM entities as e, cw_CWEType as s '
+ 'WHERE s.cw_name=e.type AND NOT EXISTS(SELECT 1 FROM is_instance_of_relation as cs '
+ ' WHERE cs.eid_from=e.eid AND cs.eid_to=s.cw_eid) '
+ 'ORDER BY e.eid')
+ for row in cursor.fetchall():
+ sys.stderr.write(msg % tuple(row))
+ if fix:
+ cnx.system_sql('INSERT INTO is_instance_of_relation (eid_from, eid_to) '
+ 'SELECT e.eid, s.cw_eid FROM entities as e, cw_CWEType as s '
+ 'WHERE s.cw_name=e.type AND NOT EXISTS(SELECT 1 FROM is_instance_of_relation as cs '
+ ' WHERE cs.eid_from=e.eid AND cs.eid_to=s.cw_eid)')
+ notify_fixed(True)
+ print('Checking entities tables')
+ msg = ' Entity with eid %s exists in the %s table but not in the system table (autofix will delete the entity)'
+ for eschema in schema.entities():
+ if eschema.final:
+ continue
+ table = SQL_PREFIX + eschema.type
+ column = SQL_PREFIX + 'eid'
+ cursor = cnx.system_sql('SELECT %s FROM %s;' % (column, table))
+ for row in cursor.fetchall():
+ eid = row[0]
+ # eids is full since we have fetched everything from the entities table,
+ # no need to call has_eid
+ if not eid in eids or not eids[eid]:
+ sys.stderr.write(msg % (eid, eschema.type))
+ if fix:
+ cnx.system_sql('DELETE FROM %s WHERE %s=%s;' % (table, column, eid))
+ notify_fixed(fix)
+
+
+def bad_related_msg(rtype, target, eid, fix):
+ msg = ' A relation %s with %s eid %s exists but no such entity in sources'
+ sys.stderr.write(msg % (rtype, target, eid))
+ notify_fixed(fix)
+
+def bad_inlined_msg(rtype, parent_eid, eid, fix):
+ msg = (' An inlined relation %s from %s to %s exists but the latter '
+ 'entity does not exist')
+ sys.stderr.write(msg % (rtype, parent_eid, eid))
+ notify_fixed(fix)
+
+
+def check_relations(schema, cnx, eids, fix=1):
+ """check that eids referenced by relations are registered in the repo system
+ table
+ """
+ print('Checking relations')
+ for rschema in schema.relations():
+ if rschema.final or rschema.type in PURE_VIRTUAL_RTYPES:
+ continue
+ if rschema.inlined:
+ for subjtype in rschema.subjects():
+ table = SQL_PREFIX + str(subjtype)
+ column = SQL_PREFIX + str(rschema)
+ sql = 'SELECT cw_eid,%s FROM %s WHERE %s IS NOT NULL;' % (
+ column, table, column)
+ cursor = cnx.system_sql(sql)
+ for row in cursor.fetchall():
+ parent_eid, eid = row
+ if not has_eid(cnx, cursor, eid, eids):
+ bad_inlined_msg(rschema, parent_eid, eid, fix)
+ if fix:
+ sql = 'UPDATE %s SET %s=NULL WHERE %s=%s;' % (
+ table, column, column, eid)
+ cnx.system_sql(sql)
+ continue
+ try:
+ cursor = cnx.system_sql('SELECT eid_from FROM %s_relation;' % rschema)
+ except Exception as ex:
+ # usually because table doesn't exist
+ print('ERROR', ex)
+ continue
+ for row in cursor.fetchall():
+ eid = row[0]
+ if not has_eid(cnx, cursor, eid, eids):
+ bad_related_msg(rschema, 'subject', eid, fix)
+ if fix:
+ sql = 'DELETE FROM %s_relation WHERE eid_from=%s;' % (
+ rschema, eid)
+ cnx.system_sql(sql)
+ cursor = cnx.system_sql('SELECT eid_to FROM %s_relation;' % rschema)
+ for row in cursor.fetchall():
+ eid = row[0]
+ if not has_eid(cnx, cursor, eid, eids):
+ bad_related_msg(rschema, 'object', eid, fix)
+ if fix:
+ sql = 'DELETE FROM %s_relation WHERE eid_to=%s;' % (
+ rschema, eid)
+ cnx.system_sql(sql)
+
+
+def check_mandatory_relations(schema, cnx, eids, fix=1):
+ """check entities missing some mandatory relation"""
+ print('Checking mandatory relations')
+ msg = '%s #%s is missing mandatory %s relation %s (autofix will delete the entity)'
+ for rschema in schema.relations():
+ if rschema.final or rschema in PURE_VIRTUAL_RTYPES or rschema in ('is', 'is_instance_of'):
+ continue
+ smandatory = set()
+ omandatory = set()
+ for rdef in rschema.rdefs.values():
+ if rdef.cardinality[0] in '1+':
+ smandatory.add(rdef.subject)
+ if rdef.cardinality[1] in '1+':
+ omandatory.add(rdef.object)
+ for role, etypes in (('subject', smandatory), ('object', omandatory)):
+ for etype in etypes:
+ if role == 'subject':
+ rql = 'Any X WHERE NOT X %s Y, X is %s' % (rschema, etype)
+ else:
+ rql = 'Any X WHERE NOT Y %s X, X is %s' % (rschema, etype)
+ for entity in cnx.execute(rql).entities():
+ sys.stderr.write(msg % (entity.cw_etype, entity.eid, role, rschema))
+ if fix:
+ #if entity.cw_describe()['source']['uri'] == 'system': XXX
+ entity.cw_delete() # XXX this is BRUTAL!
+ notify_fixed(fix)
+
+
+def check_mandatory_attributes(schema, cnx, eids, fix=1):
+ """check for entities stored in the system source missing some mandatory
+ attribute
+ """
+ print('Checking mandatory attributes')
+ msg = '%s #%s is missing mandatory attribute %s (autofix will delete the entity)'
+ for rschema in schema.relations():
+ if not rschema.final or rschema in VIRTUAL_RTYPES:
+ continue
+ for rdef in rschema.rdefs.values():
+ if rdef.cardinality[0] in '1+':
+ rql = 'Any X WHERE X %s NULL, X is %s, X cw_source S, S name "system"' % (
+ rschema, rdef.subject)
+ for entity in cnx.execute(rql).entities():
+ sys.stderr.write(msg % (entity.cw_etype, entity.eid, rschema))
+ if fix:
+ entity.cw_delete()
+ notify_fixed(fix)
+
+
+def check_metadata(schema, cnx, eids, fix=1):
+ """check entities has required metadata
+
+ FIXME: rewrite using RQL queries ?
+ """
+ print('Checking metadata')
+ cursor = cnx.system_sql("SELECT DISTINCT type FROM entities;")
+ eidcolumn = SQL_PREFIX + 'eid'
+ msg = ' %s with eid %s has no %s (autofix will set it to now)'
+ for etype, in cursor.fetchall():
+ if etype not in cnx.vreg.schema:
+ sys.stderr.write('entities table references unknown type %s\n' %
+ etype)
+ if fix:
+ cnx.system_sql("DELETE FROM entities WHERE type = %(type)s",
+ {'type': etype})
+ continue
+ table = SQL_PREFIX + etype
+ for rel, default in ( ('creation_date', datetime.utcnow()),
+ ('modification_date', datetime.utcnow()), ):
+ column = SQL_PREFIX + rel
+ cursor = cnx.system_sql("SELECT %s FROM %s WHERE %s is NULL"
+ % (eidcolumn, table, column))
+ for eid, in cursor.fetchall():
+ sys.stderr.write(msg % (etype, eid, rel))
+ if fix:
+ cnx.system_sql("UPDATE %s SET %s=%%(v)s WHERE %s=%s ;"
+ % (table, column, eidcolumn, eid),
+ {'v': default})
+ notify_fixed(fix)
+
+
+def check(repo, cnx, checks, reindex, fix, withpb=True):
+ """check integrity of instance's repository,
+ using given user and password to locally connect to the repository
+ (no running cubicweb server needed)
+ """
+ # yo, launch checks
+ if checks:
+ eids_cache = {}
+ with cnx.security_enabled(read=False, write=False): # ensure no read security
+ for check in checks:
+ check_func = globals()['check_%s' % check]
+ check_func(repo.schema, cnx, eids_cache, fix=fix)
+ if fix:
+ cnx.commit()
+ else:
+ print()
+ if not fix:
+ print('WARNING: Diagnostic run, nothing has been corrected')
+ if reindex:
+ cnx.rollback()
+ reindex_entities(repo.schema, cnx, withpb=withpb)
+ cnx.commit()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/server/cwzmq.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/server/cwzmq.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,130 @@
+# -*- coding: utf-8 -*-
+# copyright 2012-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+
+from threading import Thread
+from logging import getLogger
+
+import zmq
+from zmq.eventloop import ioloop
+import zmq.eventloop.zmqstream
+
+from cubicweb import set_log_methods
+
+
+ctx = zmq.Context()
+
+
+class ZMQComm(object):
+ """
+ A simple ZMQ-based notification bus.
+
+ There should at most one instance of this class attached to a
+ Repository. A typical usage may be something like::
+
+ def callback(msg):
+ self.info('received message: %s', ' '.join(msg))
+ repo.app_instances_bus.subscribe('hello', callback)
+
+ to subsribe to the 'hello' kind of message. On the other side, to
+ emit a notification, call::
+
+ repo.app_instances_bus.publish(['hello', 'world'])
+
+ See http://docs.cubicweb.org for more details.
+ """
+ def __init__(self):
+ self.ioloop = ioloop.IOLoop()
+ self._topics = {}
+ self._subscribers = []
+ self.publisher = None
+
+ def add_publisher(self, address):
+ assert self.publisher is None, "more than one publisher is not supported"
+ self.publisher = Publisher(self.ioloop, address)
+
+ def add_subscription(self, topic, callback):
+ for subscriber in self._subscribers:
+ subscriber.subscribe(topic, callback)
+ self._topics[topic] = callback
+
+ def add_subscriber(self, address):
+ subscriber = Subscriber(self.ioloop, address)
+ for topic, callback in self._topics.items():
+ subscriber.subscribe(topic, callback)
+ self._subscribers.append(subscriber)
+
+ def publish(self, msg):
+ if self.publisher is None:
+ return
+ self.publisher.send(msg)
+
+ def start(self):
+ Thread(target=self.ioloop.start).start()
+
+ def stop(self):
+ self.ioloop.add_callback(self.ioloop.stop)
+
+ def __del__(self):
+ self.ioloop.close()
+
+
+class Publisher(object):
+ def __init__(self, ioloop, address):
+ self.address = address
+ self._topics = {}
+ self._subscribers = []
+ self.ioloop = ioloop
+ def callback():
+ s = ctx.socket(zmq.PUB)
+ self.stream = zmq.eventloop.zmqstream.ZMQStream(s, io_loop=ioloop)
+ self.stream.bind(self.address)
+ self.debug('start publisher on %s', self.address)
+ ioloop.add_callback(callback)
+
+ def send(self, msg):
+ self.ioloop.add_callback(lambda:self.stream.send_multipart(msg))
+
+
+class Subscriber(object):
+ def __init__(self, ioloop, address):
+ self.address = address
+ self.dispatch_table = {}
+ self.ioloop = ioloop
+ def callback():
+ s = ctx.socket(zmq.SUB)
+ self.stream = zmq.eventloop.zmqstream.ZMQStream(s, io_loop=ioloop)
+ self.stream.on_recv(self.dispatch)
+ self.stream.connect(self.address)
+ self.debug('start subscriber on %s', self.address)
+ ioloop.add_callback(callback)
+
+ def dispatch(self, msg):
+ try:
+ f = self.dispatch_table[msg[0]]
+ except KeyError:
+ return
+ f(msg)
+
+ def subscribe(self, topic, callback):
+ self.dispatch_table[topic] = callback
+ self.ioloop.add_callback(lambda: self.stream.setsockopt(zmq.SUBSCRIBE, topic))
+
+
+set_log_methods(Publisher, getLogger('cubicweb.zmq.pub'))
+set_log_methods(Subscriber, getLogger('cubicweb.zmq.sub'))
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/server/edition.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/server/edition.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,159 @@
+# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""helper classes to handle server-side edition of entities"""
+__docformat__ = "restructuredtext en"
+
+from copy import copy
+from yams import ValidationError
+
+
+_MARKER = object()
+
+class dict_protocol_catcher(object):
+ def __init__(self, entity):
+ self.__entity = entity
+ def __getitem__(self, attr):
+ return self.__entity.cw_edited[attr]
+ def __setitem__(self, attr, value):
+ self.__entity.cw_edited[attr] = value
+ def __getattr__(self, attr):
+ return getattr(self.__entity, attr)
+
+
+class EditedEntity(dict):
+ """encapsulate entities attributes being written by an RQL query"""
+ def __init__(self, entity, **kwargs):
+ super(EditedEntity, self).__init__(**kwargs)
+ self.entity = entity
+ self.skip_security = set()
+ self.querier_pending_relations = {}
+ self.saved = False
+
+ def __hash__(self):
+ # dict|set keyable
+ return hash(id(self))
+
+ def __lt__(self, other):
+ # we don't want comparison by value inherited from dict
+ raise NotImplementedError
+
+ def __eq__(self, other):
+ return self is other
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __setitem__(self, attr, value):
+ assert attr != 'eid'
+ # don't add attribute into skip_security if already in edited
+ # attributes, else we may accidentally skip a desired security check
+ if attr not in self:
+ self.skip_security.add(attr)
+ self.edited_attribute(attr, value)
+
+ def __delitem__(self, attr):
+ assert not self.saved, 'too late to modify edited attributes'
+ super(EditedEntity, self).__delitem__(attr)
+ self.entity.cw_attr_cache.pop(attr, None)
+
+ def __copy__(self):
+ # default copy protocol fails in EditedEntity.__setitem__ because
+ # copied entity has no skip_security attribute at this point
+ return EditedEntity(self.entity, **self)
+
+ def pop(self, attr, *args):
+ # don't update skip_security by design (think to storage api)
+ assert not self.saved, 'too late to modify edited attributes'
+ value = super(EditedEntity, self).pop(attr, *args)
+ self.entity.cw_attr_cache.pop(attr, *args)
+ return value
+
+ def setdefault(self, attr, default):
+ assert attr != 'eid'
+ # don't add attribute into skip_security if already in edited
+ # attributes, else we may accidentally skip a desired security check
+ if attr not in self:
+ self[attr] = default
+ return self[attr]
+
+ def update(self, values, skipsec=True):
+ if skipsec:
+ setitem = self.__setitem__
+ else:
+ setitem = self.edited_attribute
+ for attr, value in values.items():
+ setitem(attr, value)
+
+ def edited_attribute(self, attr, value):
+ """attribute being edited by a rql query: should'nt be added to
+ skip_security
+ """
+ assert not self.saved, 'too late to modify edited attributes'
+ super(EditedEntity, self).__setitem__(attr, value)
+ self.entity.cw_attr_cache[attr] = value
+ if self.entity._cw.vreg.schema.rschema(attr).final:
+ self.entity._cw_dont_cache_attribute(attr)
+
+ def oldnewvalue(self, attr):
+ """returns the couple (old attr value, new attr value)
+
+ NOTE: will only work in a before_update_entity hook
+ """
+ assert not self.saved, 'too late to get the old value'
+ # get new value and remove from local dict to force a db query to
+ # fetch old value
+ newvalue = self.entity.cw_attr_cache.pop(attr, _MARKER)
+ oldvalue = getattr(self.entity, attr)
+ if newvalue is not _MARKER:
+ self.entity.cw_attr_cache[attr] = newvalue
+ else:
+ newvalue = oldvalue
+ return oldvalue, newvalue
+
+ def set_defaults(self):
+ """set default values according to the schema"""
+ for attr, value in self.entity.e_schema.defaults():
+ if not attr in self:
+ self[str(attr)] = value
+
+ def check(self, creation=False):
+ """check the entity edition against its schema. Only final relation
+ are checked here, constraint on actual relations are checked in hooks
+ """
+ entity = self.entity
+ if creation:
+ # on creations, we want to check all relations, especially
+ # required attributes
+ relations = [rschema for rschema in entity.e_schema.subject_relations()
+ if rschema.final and rschema.type != 'eid']
+ else:
+ relations = [entity._cw.vreg.schema.rschema(rtype)
+ for rtype in self]
+ try:
+ entity.e_schema.check(dict_protocol_catcher(entity),
+ creation=creation, relations=relations)
+ except ValidationError as ex:
+ ex.entity = self.entity.eid
+ raise
+
+ def clone(self):
+ thecopy = EditedEntity(copy(self.entity))
+ thecopy.entity.cw_attr_cache = copy(self.entity.cw_attr_cache)
+ thecopy.entity._cw_related_cache = {}
+ thecopy.update(self, skipsec=False)
+ return thecopy
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/server/hook.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/server/hook.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1024 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""
+Generalities
+------------
+
+Paraphrasing the `emacs`_ documentation, let us say that hooks are an important
+mechanism for customizing an application. A hook is basically a list of
+functions to be called on some well-defined occasion (this is called `running
+the hook`).
+
+.. _`emacs`: http://www.gnu.org/software/emacs/manual/html_node/emacs/Hooks.html
+
+Hooks
+~~~~~
+
+In |cubicweb|, hooks are subclasses of the :class:`~cubicweb.server.hook.Hook`
+class. They are selected over a set of pre-defined `events` (and possibly more
+conditions, hooks being selectable appobjects like views and components). They
+should implement a :meth:`~cubicweb.server.hook.Hook.__call__` method that will
+be called when the hook is triggered.
+
+There are two families of events: data events (before / after any individual
+update of an entity / or a relation in the repository) and server events (such
+as server startup or shutdown). In a typical application, most of the hooks are
+defined over data events.
+
+Also, some :class:`~cubicweb.server.hook.Operation` may be registered by hooks,
which will be fired when the transaction is committed or rolled back.
+
The purpose of data event hooks is usually to complement the data model as
defined in the schema, which is static by nature and only provides a restricted
builtin set of dynamic constraints, with dynamic or value driven behaviours.
+For instance they can serve the following purposes:
+
+* enforcing constraints that the static schema cannot express (spanning several
+ entities/relations, exotic value ranges and cardinalities, etc.)
+
+* implement computed attributes
+
+It is functionally equivalent to a `database trigger`_, except that database
+triggers definition languages are not standardized, hence not portable (for
+instance, PL/SQL works with Oracle and PostgreSQL but not SqlServer nor Sqlite).
+
+.. _`database trigger`: http://en.wikipedia.org/wiki/Database_trigger
+
+
+.. hint::
+
+ It is a good practice to write unit tests for each hook. See an example in
+ :ref:`hook_test`
+
+Operations
+~~~~~~~~~~
+
+Operations are subclasses of the :class:`~cubicweb.server.hook.Operation` class
+that may be created by hooks and scheduled to happen on `precommit`,
`postcommit` or `rollback` event (i.e. respectively before/after a commit or
+before a rollback of a transaction).
+
Hooks are fired immediately on data operations, and it is sometimes
necessary to delay the actual work down to a time where we can expect all
information to be there, or when all other hooks have run (though take care
since operations may themselves trigger hooks). Also, while the order of
execution of hooks is data dependent (and thus hard to predict), it is possible
to force an order on operations.
+
+So, for such case where you may miss some information that may be set later in
+the transaction, you should instantiate an operation in the hook.
+
+Operations may be used to:
+
* implement a validation check which needs all relations to be already set on
  an entity
+
* process various side effects associated with a transaction such as filesystem
  updates, mail notifications, etc.
+
+
+Events
+------
+
+Hooks are mostly defined and used to handle `dataflow`_ operations. It
+means as data gets in (entities added, updated, relations set or
+unset), specific events are issued and the Hooks matching these events
+are called.
+
+You can get the event that triggered a hook by accessing its `event`
+attribute.
+
+.. _`dataflow`: http://en.wikipedia.org/wiki/Dataflow
+
+
+Entity modification related events
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When called for one of these events, hook will have an `entity` attribute
+containing the entity instance.
+
+- `before_add_entity`, `before_update_entity`:
+
+ On those events, you can access the modified attributes of the entity using
+ the `entity.cw_edited` dictionary. The values can be modified and the old
+ values can be retrieved.
+
+ If you modify the `entity.cw_edited` dictionary in the hook, that is before
+ the database operations take place, you will avoid the need to process a whole
+ new rql query and the underlying backend query (eg usually sql) will contain
+ the modified data. For example:
+
+ .. sourcecode:: python
+
+ self.entity.cw_edited['age'] = 42
+
+ will modify the age before it is written to the backend storage.
+
+ Similarly, removing an attribute from `cw_edited` will cancel its
+ modification:
+
+ .. sourcecode:: python
+
+ del self.entity.cw_edited['age']
+
+ On a `before_update_entity` event, you can access the old and new values:
+
+ .. sourcecode:: python
+
+ old, new = entity.cw_edited.oldnewvalue('age')
+
+- `after_add_entity`, `after_update_entity`
+
+ On those events, you can get the list of attributes that were modified using
+ the `entity.cw_edited` dictionary, but you can not modify it or get the old
+ value of an attribute.
+
+- `before_delete_entity`, `after_delete_entity`
+
+ On those events, the entity has no `cw_edited` dictionary.
+
+.. note:: `self.entity.cw_set(age=42)` will set the `age` attribute to
+ 42. But to do so, it will generate a rql query that will have to be processed,
+ hence may trigger some hooks, etc. This could lead to infinitely looping hooks.
+
+Relation modification related events
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When called for one of these events, hook will have `eidfrom`, `rtype`, `eidto`
+attributes containing respectively the eid of the subject entity, the relation
+type and the eid of the object entity.
+
+* `before_add_relation`, `before_delete_relation`
+
+ On those events, you can still get the original relation by issuing a rql query.
+
+* `after_add_relation`, `after_delete_relation`
+
+Specific selectors are shipped for these kinds of events, see in particular
+:class:`~cubicweb.server.hook.match_rtype`.
+
+Also note that relations can be added or deleted, but not updated.
+
+Non data events
+~~~~~~~~~~~~~~~
+
+Hooks called on server start/maintenance/stop event (e.g.
+`server_startup`, `server_maintenance`, `before_server_shutdown`,
+`server_shutdown`) have a `repo` attribute, but *their `_cw` attribute
+is None*. The `server_startup` is called on regular startup, while
+`server_maintenance` is called on cubicweb-ctl upgrade or shell
+commands. `server_shutdown` is called anyway but connections to the
+native source is impossible; `before_server_shutdown` handles that.
+
+Hooks called on backup/restore event (eg `server_backup`,
+`server_restore`) have a `repo` and a `timestamp` attributes, but
+*their `_cw` attribute is None*.
+
+Hooks called on session event (eg `session_open`, `session_close`) have no
+special attribute.
+
+
+API
+---
+
+Hooks control
+~~~~~~~~~~~~~
+
+It is sometimes convenient to explicitly enable or disable some hooks. For
+instance if you want to disable some integrity checking hook. This can be
+controlled more finely through the `category` class attribute, which is a string
+giving a category name. One can then uses the
+:meth:`~cubicweb.server.session.Connection.deny_all_hooks_but` and
+:meth:`~cubicweb.server.session.Connection.allow_all_hooks_but` context managers to
+explicitly enable or disable some categories.
+
+The existing categories are:
+
+* ``security``, security checking hooks
+
+* ``worfklow``, workflow handling hooks
+
+* ``metadata``, hooks setting meta-data on newly created entities
+
+* ``notification``, email notification hooks
+
+* ``integrity``, data integrity checking hooks
+
+* ``activeintegrity``, data integrity consistency hooks, that you should **never**
+ want to disable
+
+* ``syncsession``, hooks synchronizing existing sessions
+
+* ``syncschema``, hooks synchronizing instance schema (including the physical database)
+
+* ``email``, email address handling hooks
+
+* ``bookmark``, bookmark entities handling hooks
+
+
+Nothing precludes one to invent new categories and use existing mechanisms to
+filter them in or out.
+
+
+Hooks specific predicates
+~~~~~~~~~~~~~~~~~~~~~~~~~
+.. autoclass:: cubicweb.server.hook.match_rtype
+.. autoclass:: cubicweb.server.hook.match_rtype_sets
+
+
+Hooks and operations classes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. autoclass:: cubicweb.server.hook.Hook
+.. autoclass:: cubicweb.server.hook.Operation
+.. autoclass:: cubicweb.server.hook.LateOperation
+.. autoclass:: cubicweb.server.hook.DataOperationMixIn
+"""
+from __future__ import print_function
+
+__docformat__ = "restructuredtext en"
+
+from warnings import warn
+from logging import getLogger
+from itertools import chain
+
+from logilab.common.decorators import classproperty, cached
+from logilab.common.deprecation import deprecated, class_renamed
+from logilab.common.logging_ext import set_log_methods
+from logilab.common.registry import (NotPredicate, OrPredicate,
+ objectify_predicate)
+
+from cubicweb import RegistryNotFound, server
+from cubicweb.cwvreg import CWRegistry, CWRegistryStore
+from cubicweb.predicates import ExpectedValuePredicate, is_instance
+from cubicweb.appobject import AppObject
+
# Known hook events, grouped by family. A Hook subclass subscribes to one or
# more of these through its `events` tuple (validated by Hook.check_events()).
ENTITIES_HOOKS = set(('before_add_entity', 'after_add_entity',
                      'before_update_entity', 'after_update_entity',
                      'before_delete_entity', 'after_delete_entity'))
RELATIONS_HOOKS = set(('before_add_relation', 'after_add_relation' ,
                       'before_delete_relation','after_delete_relation'))
SYSTEM_HOOKS = set(('server_backup', 'server_restore',
                    'server_startup', 'server_maintenance',
                    'server_shutdown', 'before_server_shutdown',
                    'session_open', 'session_close'))
ALL_HOOKS = ENTITIES_HOOKS | RELATIONS_HOOKS | SYSTEM_HOOKS
+
+def _iter_kwargs(entities, eids_from_to, kwargs):
+ if not entities and not eids_from_to:
+ yield kwargs
+ elif entities:
+ for entity in entities:
+ kwargs['entity'] = entity
+ yield kwargs
+ else:
+ for subject, object in eids_from_to:
+ kwargs.update({'eidfrom': subject, 'eidto': object})
+ yield kwargs
+
+
class HooksRegistry(CWRegistry):
    """Registry holding the hooks subscribed to a single event.

    Besides standard registry duties, it knows how to call the hooks it
    contains, pruning upfront those which cannot be selected for the current
    event / connection (see `get_pruned_hooks`).
    """

    def register(self, obj, **kwargs):
        # validate the hook's `events` attribute before registering it
        obj.check_events()
        super(HooksRegistry, self).register(obj, **kwargs)

    def call_hooks(self, event, cnx=None, **kwargs):
        """call `event` hooks for an entity or a list of entities (passed
        respectively as the `entity` or ``entities`` keyword argument).
        """
        kwargs['event'] = event
        if cnx is None: # True for events such as server_start
            for hook in sorted(self.possible_objects(cnx, **kwargs),
                               key=lambda x: x.order):
                hook()
        else:
            # `entities` and `eids_from_to` are mutually exclusive bulk
            # arguments used by the repository to call hooks grouped by
            # etype / rtype
            if 'entities' in kwargs:
                assert 'entity' not in kwargs, \
                       'can\'t pass "entities" and "entity" arguments simultaneously'
                assert 'eids_from_to' not in kwargs, \
                       'can\'t pass "entities" and "eids_from_to" arguments simultaneously'
                entities = kwargs.pop('entities')
                eids_from_to = []
            elif 'eids_from_to' in kwargs:
                entities = []
                eids_from_to = kwargs.pop('eids_from_to')
            else:
                entities = []
                eids_from_to = []
            pruned = self.get_pruned_hooks(cnx, event,
                                           entities, eids_from_to, kwargs)

            # by default, hooks are executed with security turned off
            with cnx.security_enabled(read=False):
                for _kwargs in _iter_kwargs(entities, eids_from_to, kwargs):
                    hooks = sorted(self.filtered_possible_objects(pruned, cnx, **_kwargs),
                                   key=lambda x: x.order)
                    debug = server.DEBUG & server.DBG_HOOKS
                    with cnx.security_enabled(write=False):
                        with cnx.running_hooks_ops():
                            for hook in hooks:
                                if debug:
                                    print(event, _kwargs, hook)
                                hook()

    def get_pruned_hooks(self, cnx, event, entities, eids_from_to, kwargs):
        """return a set of hooks that should not be considered by
        `filtered_possible_objects`

        the idea is to make a first pass over all the hooks in the
        registry and to put some of them in a pruned list. The
        pruned hooks are the ones which:

        * are disabled at the connection level

        * have a selector containing a :class:`match_rtype` or an
          :class:`is_instance` predicate which does not match the rtype / etype
          of the relations / entities for which we are calling the hooks. This
          works because the repository calls the hooks grouped by rtype or by
          etype when using the entities or eids_to_from keyword arguments

        Only hooks with a simple predicate or an AndPredicate of simple
        predicates are considered for disabling.

        """
        if 'entity' in kwargs:
            entities = [kwargs['entity']]
        if len(entities):
            look_for_selector = is_instance
            etype = entities[0].__regid__
        elif 'rtype' in kwargs:
            look_for_selector = match_rtype
            etype = None
        else: # nothing to prune, how did we get there ???
            return set()
        # pruning results are cached on the connection, keyed by
        # (event, rtype, etype)
        cache_key = (event, kwargs.get('rtype'), etype)
        pruned = cnx.pruned_hooks_cache.get(cache_key)
        if pruned is not None:
            return pruned
        pruned = set()
        cnx.pruned_hooks_cache[cache_key] = pruned
        if look_for_selector is not None:
            for id, hooks in self.items():
                for hook in hooks:
                    enabled_cat, main_filter = hook.filterable_selectors()
                    if enabled_cat is not None:
                        if not enabled_cat(hook, cnx):
                            # hook category disabled on this connection
                            pruned.add(hook)
                            continue
                    if main_filter is not None:
                        # cannot prune on a match_rtype predicate which also
                        # restricts subject / object entity types: the check
                        # below would only consider the first target
                        if isinstance(main_filter, match_rtype) and \
                           (main_filter.frometypes is not None or \
                            main_filter.toetypes is not None):
                            continue
                        first_kwargs = next(_iter_kwargs(entities, eids_from_to, kwargs))
                        if not main_filter(hook, cnx, **first_kwargs):
                            pruned.add(hook)
        return pruned


    def filtered_possible_objects(self, pruned, *args, **kwargs):
        """like `possible_objects`, but skipping hooks in the `pruned` set"""
        for appobjects in self.values():
            if pruned:
                filtered_objects = [obj for obj in appobjects if obj not in pruned]
                if not filtered_objects:
                    continue
            else:
                filtered_objects = appobjects
            obj = self._select_best(filtered_objects,
                                    *args, **kwargs)
            if obj is None:
                continue
            yield obj
+
class HooksManager(object):
    """Dispatch hook calls to the per-event hooks registry of `vreg`."""

    def __init__(self, vreg):
        self.vreg = vreg

    def call_hooks(self, event, cnx=None, **kwargs):
        """Call `event` hooks, doing nothing when no hook is registered
        for that event.
        """
        registry_id = '%s_hooks' % event
        try:
            registry = self.vreg[registry_id]
        except RegistryNotFound:
            # no hooks for this event
            return
        registry.call_hooks(event, cnx, **kwargs)
+
+
# register a dedicated registry class for each known event, so that
# ``vreg['<event>_hooks']`` is a HooksRegistry instead of a default registry
for event in ALL_HOOKS:
    CWRegistryStore.REGISTRY_FACTORY['%s_hooks' % event] = HooksRegistry
+
+
+# some hook specific predicates #################################################
+
@objectify_predicate
def enabled_category(cls, req, **kwargs):
    """Predicate accepting a hook unless its category has been disabled on
    the connection.
    """
    if req is None:
        # server startup / shutdown events have no connection and are
        # always enabled
        # XXX how to deactivate server startup / shutdown event
        return True
    return req.is_hook_activated(cls)
+
@objectify_predicate
def issued_from_user_query(cls, req, **kwargs):
    """Predicate accepting a hook only when it is triggered by a user query,
    not fired while other hooks are already in progress.
    """
    if req.hooks_in_progress:
        return 0
    return 1
+
# backward compatibility alias, deprecated since 3.21
from_dbapi_query = class_renamed('from_dbapi_query',
                                 issued_from_user_query,
                                 message='[3.21] ')
+
+
class rechain(object):
    """Re-iterable chaining of several iterables.

    Unlike a plain `itertools.chain` object, an instance may be iterated
    several times: a fresh traversal of the underlying iterables is started
    on each `__iter__` call.
    """
    def __init__(self, *iterators):
        self.iterators = iterators

    def __iter__(self):
        for iterable in self.iterators:
            for item in iterable:
                yield item
+
+
class match_rtype(ExpectedValuePredicate):
    r"""accept if the relation type is found in expected ones. Optional
    named parameters `frometypes` and `toetypes` can be used to restrict
    target subject and/or object entity types of the relation.

    :param \*expected: possible relation types
    :param frometypes: candidate entity types as subject of relation
    :param toetypes: candidate entity types as object of relation
    """
    def __init__(self, *expected, **more):
        self.expected = expected
        self.frometypes = more.pop('frometypes', None)
        self.toetypes = more.pop('toetypes', None)
        assert not more, "unexpected kwargs in match_rtype: %s" % more

    def __call__(self, cls, req, *args, **kwargs):
        # the relation type must be one of the expected ones
        if kwargs.get('rtype') not in self.expected:
            return 0
        # optionally restrict on the subject entity type
        if self.frometypes is not None:
            subjtype = req.entity_metas(kwargs['eidfrom'])['type']
            if subjtype not in self.frometypes:
                return 0
        # optionally restrict on the object entity type
        if self.toetypes is not None:
            objtype = req.entity_metas(kwargs['eidto'])['type']
            if objtype not in self.toetypes:
                return 0
        return 1
+
+
class match_rtype_sets(ExpectedValuePredicate):
    """accept if the relation type is in one of the sets given as initializer
    argument. The goal of this predicate is that it keeps a reference to the
    original sets, so modifications to those sets are considered by the
    predicate. For instance

    .. sourcecode:: python

      MYSET = set()

      class Hook1(Hook):
          __regid__ = 'hook1'
          __select__ = Hook.__select__ & match_rtype_sets(MYSET)
          ...

      class Hook2(Hook):
          __regid__ = 'hook2'
          __select__ = Hook.__select__ & match_rtype_sets(MYSET)

    Client code can now change `MYSET`, and this will change the selection
    criteria of :class:`Hook1` and :class:`Hook2`.
    """

    def __init__(self, *expected):
        self.expected = expected

    def __call__(self, cls, req, *args, **kwargs):
        rtype = kwargs.get('rtype')
        return 1 if any(rtype in rel_set for rel_set in self.expected) else 0
+
+
+# base class for hook ##########################################################
+
class Hook(AppObject):
    """Base class for hook.

    Hooks being appobjects like views, they have a `__regid__` and a `__select__`
    class attribute. Like all appobjects, hooks have the `self._cw` attribute which
    represents the current connection. In entity hooks, a `self.entity` attribute is
    also present.

    The `events` tuple is used by the base class selector to dispatch the hook
    on the right events. It is possible to dispatch on multiple events at once
    if needed (though take care as hook attribute may vary as described above).

    .. Note::

      Do not forget to extend the base class selectors as in:

      .. sourcecode:: python

          class MyHook(Hook):
            __regid__ = 'whatever'
            __select__ = Hook.__select__ & is_instance('Person')

      else your hooks will be called madly, whatever the event.
    """
    __select__ = enabled_category()
    # set this in derived classes
    events = None
    category = None
    order = 0
    # stop pylint from complaining about missing attributes in Hooks classes
    eidfrom = eidto = entity = rtype = repo = None

    @classmethod
    @cached
    def filterable_selectors(cls):
        """Return the (enabled_category, main filter) predicates of this
        hook's selector when it is simple enough to support upfront pruning
        (see HooksRegistry.get_pruned_hooks), (None, None) otherwise.
        """
        search = cls.__select__.search_selector
        # a NOT / OR compound selector cannot be pruned on a single predicate
        if search((NotPredicate, OrPredicate)):
            return None, None
        enabled_cat = search(enabled_category)
        main_filter = search((is_instance, match_rtype))
        return enabled_cat, main_filter

    @classmethod
    def check_events(cls):
        """Raise if `cls.events` is not an iterable of known event names."""
        try:
            for event in cls.events:
                if event not in ALL_HOOKS:
                    raise Exception('bad event %s on %s.%s' % (
                        event, cls.__module__, cls.__name__))
        except AttributeError:
            # re-raised unchanged -- NOTE(review): unclear what raises
            # AttributeError here since `events` always exists on Hook;
            # TODO confirm this branch is reachable
            raise
        except TypeError:
            # e.g. events is None or another non-iterable
            raise Exception('bad .events attribute %s on %s.%s' % (
                cls.events, cls.__module__, cls.__name__))

    @classmethod
    def __registered__(cls, reg):
        # called by the registry machinery upon registration
        cls.check_events()

    @classproperty
    def __registries__(cls):
        # a hook is registered in one registry per subscribed event
        if cls.events is None:
            return []
        return ['%s_hooks' % ev for ev in cls.events]

    # keyword arguments turned into attributes on the hook instance instead of
    # being passed along to AppObject.__init__
    known_args = set(('entity', 'rtype', 'eidfrom', 'eidto', 'repo', 'timestamp'))
    def __init__(self, req, event, **kwargs):
        for arg in self.known_args:
            if arg in kwargs:
                setattr(self, arg, kwargs.pop(arg))
        super(Hook, self).__init__(req, **kwargs)
        self.event = event
+
+set_log_methods(Hook, getLogger('cubicweb.hook'))
+
+
# abstract hooks for relation propagation #######################################
+# See example usage in hooks of the nosylist cube
+
class PropagateRelationHook(Hook):
    """propagate some `main_rtype` relation on entities linked as object of
    `subject_relations` or as subject of `object_relations` (the watched
    relations).

    This hook ensure that when one of the watched relation is added, the
    `main_rtype` relation is added to the target entity of the relation.
    Notice there are no default behaviour defined when a watched relation is
    deleted, you'll have to handle this by yourself.

    You usually want to use the :class:`match_rtype_sets` predicate on concrete
    classes.
    """
    events = ('after_add_relation',)

    # to set in concrete class
    main_rtype = None
    subject_relations = None
    object_relations = None

    def __call__(self):
        assert self.main_rtype
        # nothing to do if one end of the new relation doesn't support
        # main_rtype as subject relation
        for eid in (self.eidfrom, self.eidto):
            etype = self._cw.entity_metas(eid)['type']
            if self.main_rtype not in self._cw.vreg.schema.eschema(etype).subjrels:
                return
        # meid: entity whose existing main_rtype relations are copied from;
        # seid: entity the relations are propagated to
        if self.rtype in self.subject_relations:
            meid, seid = self.eidfrom, self.eidto
        else:
            assert self.rtype in self.object_relations
            meid, seid = self.eidto, self.eidfrom
        # %% escapes the rql substitution markers from the %-formatting pass
        self._cw.execute(
            'SET E %s P WHERE X %s P, X eid %%(x)s, E eid %%(e)s, NOT E %s P'
            % (self.main_rtype, self.main_rtype, self.main_rtype),
            {'x': meid, 'e': seid})
+
+
class PropagateRelationAddHook(Hook):
    """Propagate to entities at the end of watched relations when a `main_rtype`
    relation is added.

    `subject_relations` and `object_relations` attributes should be specified on
    subclasses and are usually shared references with attributes of the same
    name on :class:`PropagateRelationHook`.

    Because of those shared references, you can use `skip_subject_relations` and
    `skip_object_relations` attributes when you don't want to propagate to
    entities linked through some particular relations.
    """
    events = ('after_add_relation',)

    # to set in concrete class (mandatory)
    subject_relations = None
    object_relations = None
    # to set in concrete class (optionally)
    skip_subject_relations = ()
    skip_object_relations = ()

    def __call__(self):
        # self.rtype is the main_rtype relation just added between
        # self.eidfrom (X below) and self.eidto (P below)
        eschema = self._cw.vreg.schema.eschema(self._cw.entity_metas(self.eidfrom)['type'])
        execute = self._cw.execute
        # propagate to entities linked to X through watched subject relations
        for rel in self.subject_relations:
            if rel in eschema.subjrels and not rel in self.skip_subject_relations:
                # %% escapes rql substitution markers from the %-formatting
                execute('SET R %s P WHERE X eid %%(x)s, P eid %%(p)s, '
                        'X %s R, NOT R %s P' % (self.rtype, rel, self.rtype),
                        {'x': self.eidfrom, 'p': self.eidto})
        # ... and through watched object relations
        for rel in self.object_relations:
            if rel in eschema.objrels and not rel in self.skip_object_relations:
                execute('SET R %s P WHERE X eid %%(x)s, P eid %%(p)s, '
                        'R %s X, NOT R %s P' % (self.rtype, rel, self.rtype),
                        {'x': self.eidfrom, 'p': self.eidto})
+
+
class PropagateRelationDelHook(PropagateRelationAddHook):
    """Propagate to entities at the end of watched relations when a `main_rtype`
    relation is deleted.

    This is the opposite of the :class:`PropagateRelationAddHook`, see its
    documentation for how to use this class.
    """
    events = ('after_delete_relation',)

    def __call__(self):
        # self.rtype is the main_rtype relation just deleted between
        # self.eidfrom (X below) and self.eidto (P below)
        eschema = self._cw.vreg.schema.eschema(self._cw.entity_metas(self.eidfrom)['type'])
        execute = self._cw.execute
        # remove the propagated relation through watched subject relations
        for rel in self.subject_relations:
            if rel in eschema.subjrels and not rel in self.skip_subject_relations:
                # %% escapes rql substitution markers from the %-formatting
                execute('DELETE R %s P WHERE X eid %%(x)s, P eid %%(p)s, '
                        'X %s R' % (self.rtype, rel),
                        {'x': self.eidfrom, 'p': self.eidto})
        # ... and through watched object relations
        for rel in self.object_relations:
            if rel in eschema.objrels and not rel in self.skip_object_relations:
                execute('DELETE R %s P WHERE X eid %%(x)s, P eid %%(p)s, '
                        'R %s X' % (self.rtype, rel),
                        {'x': self.eidfrom, 'p': self.eidto})
+
+
+
+# abstract classes for operation ###############################################
+
class Operation(object):
    """Base class for operations.

    Operation may be instantiated in the hooks' `__call__` method. It always
    takes a connection object as first argument (accessible as `.cnx` from the
    operation instance), and optionally all keyword arguments needed by the
    operation. These keyword arguments will be accessible as attributes from the
    operation instance.

    An operation is triggered on connections set events related to commit /
    rollback transactions. Possible events are:

    * `precommit`:

      the transaction is being prepared for commit. You can freely do any heavy
      computation, raise an exception if the commit can't go, or even add some
      new operations during this phase. If you do anything which has to be
      reverted if the commit fails afterwards (eg altering the file system for
      instance), you'll have to support the 'revertprecommit' event to revert
      things by yourself

    * `revertprecommit`:

      if an operation failed while being pre-committed, this event is triggered
      for all operations which had their 'precommit' event already fired to let
      them revert things (including the operation which made the commit fail)

    * `rollback`:

      the transaction has been rolled back, either:

      * intentionally
      * a 'precommit' event failed, in which case all operations are rolled back
        once 'revertprecommit' has been called

    * `postcommit`:

      the transaction is over. All the ORM entities accessed by the earlier
      transaction are invalid. If you need to work on the database, you need to
      start a new transaction, for instance using a new internal connection,
      which you will need to commit.

    For an operation to support an event, one has to implement the
    `<event name>_event` method with no arguments.

    The order of operations may be important, and is controlled according to
    the insert_index's method output (whose implementation vary according to the
    base hook class used).
    """

    def __init__(self, cnx, **kwargs):
        self.cnx = cnx
        # extra keyword arguments become attributes of the operation
        self.__dict__.update(kwargs)
        self.register(cnx)
        # execution information
        self.processed = None # 'precommit', 'commit'
        self.failed = False

    @property
    @deprecated('[3.19] Operation.session is deprecated, use Operation.cnx instead')
    def session(self):
        # backward compatibility accessor
        return self.cnx

    def register(self, cnx):
        cnx.add_operation(self, self.insert_index())

    def insert_index(self):
        """return the index of the latest instance which is not a
        LateOperation instance
        """
        # faster by inspecting operation in reverse order for heavy transactions
        i = None
        for i, op in enumerate(reversed(self.cnx.pending_operations)):
            if isinstance(op, (LateOperation, SingleLastOperation)):
                continue
            # found a regular operation; -0 being falsy, None is returned
            # when it is the very last one, meaning "append at the end"
            return -i or None
        if i is None:
            # no pending operation at all
            return None
        # only late / single-last operations pending: insert before them all
        return -(i + 1)

    def handle_event(self, event):
        """delegate event handling to the operation"""
        getattr(self, event)()

    def precommit_event(self):
        """the observed connections set is preparing a commit"""

    def revertprecommit_event(self):
        """an error occurred while pre-committing this operation or a later one

        should revert pre-commit's changes but take care, they may have not
        been all considered if it's this operation which failed
        """

    def rollback_event(self):
        """the observed connections set has been rolled back

        do nothing by default
        """

    def postcommit_event(self):
        """the observed connections set has committed"""

    # these are overridden by set_log_methods below
    # only defining here to prevent pylint from complaining
    info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
+
+set_log_methods(Operation, getLogger('cubicweb.session'))
+
+def _container_add(container, value):
+ {set: set.add, list: list.append}[container.__class__](container, value)
+
+
class DataOperationMixIn(object):
    """Mix-in class to ease applying a single operation on a set of data,
    avoiding to create as many operations as there are individual modifications.
    The body of the operation must then iterate over the values that have been
    stored in a single operation instance.

    You should try to use this instead of creating one operation for each
    `value`, since handling operations becomes costly on massive data import.

    Usage looks like:

    .. sourcecode:: python

        class MyEntityHook(Hook):
            __regid__ = 'my.entity.hook'
            __select__ = Hook.__select__ & is_instance('MyEntity')
            events = ('after_add_entity',)

            def __call__(self):
                MyOperation.get_instance(self._cw).add_data(self.entity)


        class MyOperation(DataOperationMixIn, Operation):
            def precommit_event(self):
                for bucket in self.get_data():
                    process(bucket)

    You can modify the `containercls` class attribute, which defines the
    container class that should be instantiated to hold payloads. An instance is
    created on instantiation, and then the :meth:`add_data` method will add the
    given data to the existing container. Default to a `set`. Give `list` if you
    want to keep arrival ordering. You can also use another kind of container
    by redefining :meth:`_build_container` and :meth:`add_data`

    More optional parameters can be given to the `get_instance` operation, that
    will be given to the operation constructor (for obvious reasons those
    parameters should not vary across different calls to this method for a
    given operation).

    .. Note::
        For sanity reason `get_data` will reset the operation, so that once
        the operation has started its treatment, if some hook want to push
        additional data to this same operation, a new instance will be created
        (else that data has a great chance to be never treated). This implies:

        * you should **always** call `get_data` when starting treatment

        * you should **never** call `get_data` for another reason.
    """
    # container class used to accumulate data; set by default, list to keep
    # arrival ordering
    containercls = set

    @classproperty
    def data_key(cls):
        # key under which the single instance is stored in
        # cnx.transaction_data (see get_instance)
        return ('cw.dataops', cls.__name__)

    @classmethod
    def get_instance(cls, cnx, **kwargs):
        """Return the single instance for `cnx`'s transaction, creating it
        (with `kwargs` given to the constructor) on first call.
        """
        # no need to lock: transaction_data already comes from thread's local storage
        try:
            return cnx.transaction_data[cls.data_key]
        except KeyError:
            op = cnx.transaction_data[cls.data_key] = cls(cnx, **kwargs)
            return op

    def __init__(self, *args, **kwargs):
        super(DataOperationMixIn, self).__init__(*args, **kwargs)
        self._container = self._build_container()
        # becomes True once get_data() has been called (operation closed)
        self._processed = False

    def __contains__(self, value):
        return value in self._container

    def _build_container(self):
        return self.containercls()

    def union(self, data):
        """only when container is a set"""
        assert not self._processed, """Trying to add data to a closed operation.
Iterating over operation data closed it and should be reserved to precommit /
postcommit method of the operation."""
        self._container |= data

    def add_data(self, data):
        """add a single value to the container"""
        assert not self._processed, """Trying to add data to a closed operation.
Iterating over operation data closed it and should be reserved to precommit /
postcommit method of the operation."""
        _container_add(self._container, data)

    def remove_data(self, data):
        """remove a single value from the container"""
        # message fixed: it previously read "Trying to add data" (copy-paste)
        assert not self._processed, """Trying to remove data from a closed operation.
Iterating over operation data closed it and should be reserved to precommit /
postcommit method of the operation."""
        self._container.remove(data)

    def get_data(self):
        """Close the operation and return its data container.

        Once called, the operation is unregistered from the transaction:
        data pushed later will go to a brand new instance.
        """
        assert not self._processed, """Trying to get data from a closed operation.
Iterating over operation data closed it and should be reserved to precommit /
postcommit method of the operation."""
        self._processed = True
        op = self.cnx.transaction_data.pop(self.data_key)
        assert op is self, "Bad handling of operation data, found %s instead of %s for key %s" % (
            op, self, self.data_key)
        return self._container
+
+
+
class LateOperation(Operation):
    """special operation which should be called after all possible (ie non late)
    operations
    """
    def insert_index(self):
        """return the index of the latest instance which is not a
        SingleLastOperation instance
        """
        # faster by inspecting operation in reverse order for heavy transactions
        i = None
        for i, op in enumerate(reversed(self.cnx.pending_operations)):
            if isinstance(op, SingleLastOperation):
                continue
            # found a non single-last operation; -0 being falsy, None is
            # returned when it is the very last one ("append at the end")
            return -i or None
        if i is None:
            # no pending operation at all
            return None
        # only SingleLastOperation instances pending: insert before them all
        return -(i + 1)
+
+
+
class SingleLastOperation(Operation):
    """special operation which should be called once and after all other
    operations
    """

    def register(self, cnx):
        """override register to handle cases where this operation has already
        been added

        Return the equivalent operation that was replaced, if any.
        """
        operations = cnx.pending_operations
        index = self.equivalent_index(operations)
        if index is not None:
            # an equivalent operation is pending: replace it by this one
            equivalent = operations.pop(index)
        else:
            equivalent = None
        cnx.add_operation(self, self.insert_index())
        return equivalent

    def equivalent_index(self, operations):
        """return the index of the equivalent operation if any"""
        # scan in reverse order: single-last operations live near the end
        for i, op in enumerate(reversed(operations)):
            if op.__class__ is self.__class__:
                return -(i+1)
        return None

    def insert_index(self):
        # None means "append at the very end of the pending operations"
        return None
+
+
class SendMailOp(SingleLastOperation):
    """Single operation accumulating (message, recipients) couples, sent in
    a repository thread once the transaction has been committed.
    """
    def __init__(self, cnx, msg=None, recipients=None, **kwargs):
        # may not specify msg yet, as
        # `cubicweb.sobjects.supervision.SupervisionMailOp`
        if msg is None:
            assert recipients is None
            self.to_send = []
        else:
            assert recipients
            self.to_send = [(msg, recipients)]
        super(SendMailOp, self).__init__(cnx, **kwargs)

    def register(self, cnx):
        """merge mails accumulated by a previously registered equivalent
        operation into this one
        """
        earlier = super(SendMailOp, self).register(cnx)
        if earlier:
            self.to_send = earlier.to_send + self.to_send

    def postcommit_event(self):
        # delegate the actual sending to a repository thread
        self.cnx.repo.threaded_task(self.sendmails)

    def sendmails(self):
        self.cnx.vreg.config.sendmails(self.to_send)
+
+
class RQLPrecommitOperation(Operation):
    """Operation executing a list of RQL queries when the transaction is
    being prepared for commit.
    """
    # list of (rql string, args) tuples; to be defined in concrete classes
    rqls = None

    def precommit_event(self):
        for rql in self.rqls:
            self.cnx.execute(*rql)
+
+
class CleanupNewEidsCacheOp(DataOperationMixIn, SingleLastOperation):
    """on rollback of a insert query we have to remove from repository's
    type/source cache eids of entities added in that transaction.

    NOTE: querier's rqlst/solutions cache may have been polluted too with
    queries such as Any X WHERE X eid 32 if 32 has been rolled back however
    generated queries are unpredictable and analysing all the cache probably
    too expensive. Notice that there is no pb when using args to specify eids
    instead of giving them into the rql string.
    """
    # short custom transaction_data key overriding the default
    # ('cw.dataops', <class name>) tuple
    data_key = 'neweids'

    def rollback_event(self):
        """the observed connections set has been rolled back,
        remove inserted eid from repository type/source cache
        """
        try:
            self.cnx.repo.clear_caches(self.get_data())
        except KeyError:
            # presumably no eid was recorded under data_key for this
            # transaction -- TODO confirm where the KeyError may come from
            pass
+
class CleanupDeletedEidsCacheOp(DataOperationMixIn, SingleLastOperation):
    """on commit of delete query, we have to remove from repository's
    type/source cache eids of entities deleted in that transaction.
    """
    # short custom transaction_data key overriding the default
    # ('cw.dataops', <class name>) tuple
    data_key = 'pendingeids'
    def postcommit_event(self):
        """the observed connections set has been committed,
        remove deleted eids from repository type/source cache
        """
        try:
            eids = self.get_data()
            self.cnx.repo.clear_caches(eids)
            # notify other repository instances of the deleted eids
            self.cnx.repo.app_instances_bus.publish(['delete'] + list(str(eid) for eid in eids))
        except KeyError:
            # presumably no eid was recorded under data_key for this
            # transaction -- TODO confirm where the KeyError may come from
            pass
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/server/migractions.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/server/migractions.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1603 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""a class implementing basic actions used in migration scripts.
+
+The following schema actions are supported for now:
+* add/drop/rename attribute
+* add/drop entity/relation type
+* rename entity type
+
+The following data actions are supported for now:
+* add an entity
+* execute raw RQL queries
+"""
+from __future__ import print_function
+
+__docformat__ = "restructuredtext en"
+
+import sys
+import os
+import tarfile
+import tempfile
+import shutil
+import os.path as osp
+from datetime import datetime
+from glob import glob
+from copy import copy
+from warnings import warn
+from contextlib import contextmanager
+
+from six import PY2, text_type
+
+from logilab.common.deprecation import deprecated
+from logilab.common.decorators import cached, clear_cache
+
+from yams.buildobjs import EntityType
+from yams.constraints import SizeConstraint
+from yams.schema import RelationDefinitionSchema
+
+from cubicweb import CW_SOFTWARE_ROOT, AuthenticationError, ExecutionError
+from cubicweb.predicates import is_instance
+from cubicweb.schema import (ETYPE_NAME_MAP, META_RTYPES, VIRTUAL_RTYPES,
+ PURE_VIRTUAL_RTYPES,
+ CubicWebRelationSchema, order_eschemas)
+from cubicweb.cwvreg import CW_EVENT_MANAGER
+from cubicweb import repoapi
+from cubicweb.migration import MigrationHelper, yes
+from cubicweb.server import hook, schemaserial as ss
+from cubicweb.server.schema2sql import eschema2sql, rschema2sql, unique_index_name, sql_type
+from cubicweb.server.utils import manager_userpasswd
+from cubicweb.server.sqlutils import sqlexec, SQL_PREFIX
+
+
+class ClearGroupMap(hook.Hook):
+ __regid__ = 'cw.migration.clear_group_mapping'
+ __select__ = hook.Hook.__select__ & is_instance('CWGroup')
+ events = ('after_add_entity', 'after_update_entity',)
+ def __call__(self):
+ clear_cache(self.mih, 'group_mapping')
+ self.mih._synchronized.clear()
+
+ @classmethod
+ def mih_register(cls, repo):
+ # may be already registered in tests (e.g. unittest_migractions at
+ # least)
+ if not cls.__regid__ in repo.vreg['after_add_entity_hooks']:
+ repo.vreg.register(ClearGroupMap)
+
+
+class ServerMigrationHelper(MigrationHelper):
+ """specific migration helper for server side migration scripts,
+ providing actions related to schema/data migration
+ """
+
+ def __init__(self, config, schema, interactive=True,
+ repo=None, cnx=None, verbosity=1, connect=True):
+ MigrationHelper.__init__(self, config, interactive, verbosity)
+ if not interactive:
+ assert cnx
+ assert repo
+ if cnx is not None:
+ assert repo
+ self.cnx = cnx
+ self.repo = repo
+ self.session = cnx.session
+ elif connect:
+ self.repo = config.repository()
+ self.set_cnx()
+ else:
+ self.session = None
+ # no config on shell to a remote instance
+ if config is not None and (cnx or connect):
+ repo = self.repo
+ # register a hook to clear our group_mapping cache and the
+ # self._synchronized set when some group is added or updated
+ ClearGroupMap.mih = self
+ ClearGroupMap.mih_register(repo)
+ CW_EVENT_MANAGER.bind('after-registry-reload',
+ ClearGroupMap.mih_register, repo)
+ # notify we're starting maintenance (called instead of server_start
+            # which is called on regular start)
+ repo.hm.call_hooks('server_maintenance', repo=repo)
+ if not schema and not config.quick_start:
+ insert_lperms = self.repo.get_versions()['cubicweb'] < (3, 14, 0) and 'localperms' in config.available_cubes()
+ if insert_lperms:
+ cubes = config._cubes
+ config._cubes += ('localperms',)
+ try:
+ schema = config.load_schema(expand_cubes=True)
+ finally:
+ if insert_lperms:
+ config._cubes = cubes
+ self.fs_schema = schema
+ self._synchronized = set()
+
+    # overridden from base MigrationHelper ######################################
+
+ def set_cnx(self):
+ try:
+ login = self.repo.config.default_admin_config['login']
+ pwd = self.repo.config.default_admin_config['password']
+ except KeyError:
+ login, pwd = manager_userpasswd()
+ while True:
+ try:
+ self.cnx = repoapi.connect(self.repo, login, password=pwd)
+ if not 'managers' in self.cnx.user.groups:
+ print('migration need an account in the managers group')
+ else:
+ break
+ except AuthenticationError:
+ print('wrong user/password')
+ except (KeyboardInterrupt, EOFError):
+ print('aborting...')
+ sys.exit(0)
+ try:
+ login, pwd = manager_userpasswd()
+ except (KeyboardInterrupt, EOFError):
+ print('aborting...')
+ sys.exit(0)
+ self.session = self.repo._get_session(self.cnx.sessionid)
+
+ def cube_upgraded(self, cube, version):
+ self.cmd_set_property('system.version.%s' % cube.lower(),
+ text_type(version))
+ self.commit()
+
+ def shutdown(self):
+ if self.repo is not None:
+ self.repo.shutdown()
+
+ def migrate(self, vcconf, toupgrade, options):
+ if not options.fs_only:
+ if options.backup_db is None:
+ self.backup_database()
+ elif options.backup_db:
+ self.backup_database(askconfirm=False)
+ # disable notification during migration
+ with self.cnx.allow_all_hooks_but('notification'):
+ super(ServerMigrationHelper, self).migrate(vcconf, toupgrade, options)
+
+ def cmd_process_script(self, migrscript, funcname=None, *args, **kwargs):
+ try:
+ return super(ServerMigrationHelper, self).cmd_process_script(
+ migrscript, funcname, *args, **kwargs)
+ except ExecutionError as err:
+ sys.stderr.write("-> %s\n" % err)
+ except BaseException:
+ self.rollback()
+ raise
+
+ # Adjust docstring
+ cmd_process_script.__doc__ = MigrationHelper.cmd_process_script.__doc__
+
+ # server specific migration methods ########################################
+
+ def backup_database(self, backupfile=None, askconfirm=True, format='native'):
+ config = self.config
+ repo = self.repo
+ # paths
+ timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
+ instbkdir = osp.join(config.appdatahome, 'backup')
+ if not osp.exists(instbkdir):
+ os.makedirs(instbkdir)
+ backupfile = backupfile or osp.join(instbkdir, '%s-%s.tar.gz'
+ % (config.appid, timestamp))
+ # check backup has to be done
+ if osp.exists(backupfile) and not \
+ self.confirm('Backup file %s exists, overwrite it?' % backupfile):
+ print('-> no backup done.')
+ return
+ elif askconfirm and not self.confirm('Backup %s database?' % config.appid):
+ print('-> no backup done.')
+ return
+ open(backupfile,'w').close() # kinda lock
+ os.chmod(backupfile, 0o600)
+ # backup
+ source = repo.system_source
+ tmpdir = tempfile.mkdtemp()
+ try:
+ failed = False
+ try:
+ source.backup(osp.join(tmpdir, source.uri), self.confirm, format=format)
+ except Exception as ex:
+ print('-> error trying to backup %s [%s]' % (source.uri, ex))
+ if not self.confirm('Continue anyway?', default='n'):
+ raise SystemExit(1)
+ else:
+ failed = True
+ with open(osp.join(tmpdir, 'format.txt'), 'w') as format_file:
+ format_file.write('%s\n' % format)
+ with open(osp.join(tmpdir, 'versions.txt'), 'w') as version_file:
+ versions = repo.get_versions()
+ for cube, version in versions.items():
+ version_file.write('%s %s\n' % (cube, version))
+ if not failed:
+ bkup = tarfile.open(backupfile, 'w|gz')
+ for filename in os.listdir(tmpdir):
+ bkup.add(osp.join(tmpdir, filename), filename)
+ bkup.close()
+ # call hooks
+ repo.hm.call_hooks('server_backup', repo=repo, timestamp=timestamp)
+ # done
+ print('-> backup file', backupfile)
+ finally:
+ shutil.rmtree(tmpdir)
+
+ def restore_database(self, backupfile, drop=True, askconfirm=True, format='native'):
+ # check
+ if not osp.exists(backupfile):
+ raise ExecutionError("Backup file %s doesn't exist" % backupfile)
+ if askconfirm and not self.confirm('Restore %s database from %s ?'
+ % (self.config.appid, backupfile)):
+ return
+ # unpack backup
+ tmpdir = tempfile.mkdtemp()
+ try:
+ bkup = tarfile.open(backupfile, 'r|gz')
+ except tarfile.ReadError:
+ # assume restoring old backup
+ shutil.copy(backupfile, osp.join(tmpdir, 'system'))
+ else:
+ for name in bkup.getnames():
+ if name[0] in '/.':
+ raise ExecutionError('Security check failed, path starts with "/" or "."')
+ bkup.close() # XXX seek error if not close+open !?!
+ bkup = tarfile.open(backupfile, 'r|gz')
+ bkup.extractall(path=tmpdir)
+ bkup.close()
+ if osp.isfile(osp.join(tmpdir, 'format.txt')):
+ with open(osp.join(tmpdir, 'format.txt')) as format_file:
+ written_format = format_file.readline().strip()
+ if written_format in ('portable', 'native'):
+ format = written_format
+ self.config.init_cnxset_pool = False
+ repo = self.repo = self.config.repository()
+ source = repo.system_source
+ try:
+ source.restore(osp.join(tmpdir, source.uri), self.confirm, drop, format)
+ except Exception as exc:
+ print('-> error trying to restore %s [%s]' % (source.uri, exc))
+ if not self.confirm('Continue anyway?', default='n'):
+ raise SystemExit(1)
+ shutil.rmtree(tmpdir)
+ # call hooks
+ repo.init_cnxset_pool()
+ repo.hm.call_hooks('server_restore', repo=repo, timestamp=backupfile)
+ print('-> database restored.')
+
+ def commit(self):
+ self.cnx.commit()
+
+ def rollback(self):
+ self.cnx.rollback()
+
+ def rqlexecall(self, rqliter, ask_confirm=False):
+ for rql, kwargs in rqliter:
+ self.rqlexec(rql, kwargs, ask_confirm=ask_confirm)
+
+ @cached
+ def _create_context(self):
+ """return a dictionary to use as migration script execution context"""
+ context = super(ServerMigrationHelper, self)._create_context()
+ context.update({'commit': self.checkpoint,
+ 'rollback': self.rollback,
+ 'sql': self.sqlexec,
+ 'rql': self.rqlexec,
+ 'rqliter': self.rqliter,
+ 'schema': self.repo.get_schema(),
+ 'cnx': self.cnx,
+ 'fsschema': self.fs_schema,
+ 'session' : self.cnx,
+ 'repo' : self.repo,
+ })
+ return context
+
+ @cached
+ def group_mapping(self):
+ """cached group mapping"""
+ return ss.group_mapping(self.cnx)
+
+ def cstrtype_mapping(self):
+ """cached constraint types mapping"""
+ return ss.cstrtype_mapping(self.cnx)
+
+ def cmd_exec_event_script(self, event, cube=None, funcname=None,
+ *args, **kwargs):
+ """execute a cube event scripts `migration/.py` where event
+ is one of 'precreate', 'postcreate', 'preremove' and 'postremove'.
+ """
+ assert event in ('precreate', 'postcreate', 'preremove', 'postremove')
+ if cube:
+ cubepath = self.config.cube_dir(cube)
+ apc = osp.join(cubepath, 'migration', '%s.py' % event)
+ elif kwargs.pop('apphome', False):
+ apc = osp.join(self.config.apphome, 'migration', '%s.py' % event)
+ else:
+ apc = osp.join(self.config.migration_scripts_dir(), '%s.py' % event)
+ if osp.exists(apc):
+ if self.config.free_wheel:
+ self.cmd_deactivate_verification_hooks()
+ self.info('executing %s', apc)
+ confirm = self.confirm
+ execscript_confirm = self.execscript_confirm
+ self.confirm = yes
+ self.execscript_confirm = yes
+ try:
+ if event == 'postcreate':
+ with self.cnx.allow_all_hooks_but():
+ return self.cmd_process_script(apc, funcname, *args, **kwargs)
+ return self.cmd_process_script(apc, funcname, *args, **kwargs)
+ finally:
+ self.confirm = confirm
+ self.execscript_confirm = execscript_confirm
+ if self.config.free_wheel:
+ self.cmd_reactivate_verification_hooks()
+
+ def cmd_install_custom_sql_scripts(self, cube=None):
+ """install a cube custom sql scripts `schema/*..sql` where
+ depends on the instance main database backend (eg 'postgres',
+ 'mysql'...)
+ """
+ driver = self.repo.system_source.dbdriver
+ if cube is None:
+ directory = osp.join(CW_SOFTWARE_ROOT, 'schemas')
+ else:
+ directory = osp.join(self.config.cube_dir(cube), 'schema')
+ sql_scripts = glob(osp.join(directory, '*.%s.sql' % driver))
+ for fpath in sql_scripts:
+ print('-> installing', fpath)
+ failed = sqlexec(open(fpath).read(), self.cnx.system_sql, False,
+ delimiter=';;')
+ if failed:
+ print('-> ERROR, skipping', fpath)
+
+ # schema synchronization internals ########################################
+
+ def _synchronize_permissions(self, erschema, teid):
+ """permission synchronization for an entity or relation type"""
+ assert teid, erschema
+ if 'update' in erschema.ACTIONS or erschema.final:
+ # entity type
+ exprtype = u'ERQLExpression'
+ else:
+ # relation type
+ exprtype = u'RRQLExpression'
+ gm = self.group_mapping()
+ confirm = self.verbosity >= 2
+ # * remove possibly deprecated permission (eg in the persistent schema
+ # but not in the new schema)
+ # * synchronize existing expressions
+ # * add new groups/expressions
+ for action in erschema.ACTIONS:
+ perm = '%s_permission' % action
+ # handle groups
+ newgroups = list(erschema.get_groups(action))
+ for geid, gname in self.rqlexec('Any G, GN WHERE T %s G, G name GN, '
+ 'T eid %%(x)s' % perm, {'x': teid},
+ ask_confirm=False):
+ if not gname in newgroups:
+ if not confirm or self.confirm('Remove %s permission of %s to %s?'
+ % (action, erschema, gname)):
+ self.rqlexec('DELETE T %s G WHERE G eid %%(x)s, T eid %s'
+ % (perm, teid),
+ {'x': geid}, ask_confirm=False)
+ else:
+ newgroups.remove(gname)
+ for gname in newgroups:
+ if not confirm or self.confirm('Grant %s permission of %s to %s?'
+ % (action, erschema, gname)):
+ try:
+ self.rqlexec('SET T %s G WHERE G eid %%(x)s, T eid %s'
+ % (perm, teid),
+ {'x': gm[gname]}, ask_confirm=False)
+ except KeyError:
+ self.error('can grant %s perm to unexistant group %s',
+ action, gname)
+ # handle rql expressions
+ newexprs = dict((expr.expression, expr) for expr in erschema.get_rqlexprs(action))
+ for expreid, expression in self.rqlexec('Any E, EX WHERE T %s E, E expression EX, '
+ 'T eid %s' % (perm, teid),
+ ask_confirm=False):
+ if not expression in newexprs:
+ if not confirm or self.confirm('Remove %s expression for %s permission of %s?'
+ % (expression, action, erschema)):
+ # deleting the relation will delete the expression entity
+ self.rqlexec('DELETE T %s E WHERE E eid %%(x)s, T eid %s'
+ % (perm, teid),
+ {'x': expreid}, ask_confirm=False)
+ else:
+ newexprs.pop(expression)
+ for expression in newexprs.values():
+ expr = expression.expression
+ if not confirm or self.confirm('Add %s expression for %s permission of %s?'
+ % (expr, action, erschema)):
+ self.rqlexec('INSERT RQLExpression X: X exprtype %%(exprtype)s, '
+ 'X expression %%(expr)s, X mainvars %%(vars)s, T %s X '
+ 'WHERE T eid %%(x)s' % perm,
+ {'expr': expr, 'exprtype': exprtype,
+ 'vars': u','.join(sorted(expression.mainvars)),
+ 'x': teid},
+ ask_confirm=False)
+
+ def _synchronize_rschema(self, rtype, syncrdefs=True,
+ syncperms=True, syncprops=True):
+ """synchronize properties of the persistent relation schema against its
+ current definition:
+
+ * description
+ * symmetric, meta
+ * inlined
+ * relation definitions if `syncrdefs`
+ * permissions if `syncperms`
+
+ physical schema changes should be handled by repository's schema hooks
+ """
+ rtype = str(rtype)
+ if rtype in self._synchronized:
+ return
+ if syncrdefs and syncperms and syncprops:
+ self._synchronized.add(rtype)
+ rschema = self.fs_schema.rschema(rtype)
+ reporschema = self.repo.schema.rschema(rtype)
+ if syncprops:
+ assert reporschema.eid, reporschema
+ self.rqlexecall(ss.updaterschema2rql(rschema, reporschema.eid),
+ ask_confirm=self.verbosity>=2)
+ if rschema.rule:
+ if syncperms:
+ self._synchronize_permissions(rschema, reporschema.eid)
+ elif syncrdefs:
+ for subj, obj in rschema.rdefs:
+ if (subj, obj) not in reporschema.rdefs:
+ continue
+ if rschema in VIRTUAL_RTYPES:
+ continue
+ self._synchronize_rdef_schema(subj, rschema, obj,
+ syncprops=syncprops,
+ syncperms=syncperms)
+
+ def _synchronize_eschema(self, etype, syncrdefs=True,
+ syncperms=True, syncprops=True):
+ """synchronize properties of the persistent entity schema against
+ its current definition:
+
+ * description
+ * internationalizable, fulltextindexed, indexed, meta
+ * relations from/to this entity
+ * __unique_together__
+ * permissions if `syncperms`
+ """
+ etype = str(etype)
+ if etype in self._synchronized:
+ return
+ if syncrdefs and syncperms and syncprops:
+ self._synchronized.add(etype)
+ repoeschema = self.repo.schema.eschema(etype)
+ try:
+ eschema = self.fs_schema.eschema(etype)
+ except KeyError:
+ return # XXX somewhat unexpected, no?...
+ if syncprops:
+ repospschema = repoeschema.specializes()
+ espschema = eschema.specializes()
+ if repospschema and not espschema:
+ self.rqlexec('DELETE X specializes Y WHERE X is CWEType, X name %(x)s',
+ {'x': str(repoeschema)}, ask_confirm=False)
+ elif not repospschema and espschema:
+ self.rqlexec('SET X specializes Y WHERE X is CWEType, X name %(x)s, '
+ 'Y is CWEType, Y name %(y)s',
+ {'x': str(repoeschema), 'y': str(espschema)},
+ ask_confirm=False)
+ self.rqlexecall(ss.updateeschema2rql(eschema, repoeschema.eid),
+ ask_confirm=self.verbosity >= 2)
+ if syncperms:
+ self._synchronize_permissions(eschema, repoeschema.eid)
+ if syncrdefs:
+ for rschema, targettypes, role in eschema.relation_definitions(True):
+ if rschema in VIRTUAL_RTYPES:
+ continue
+ if role == 'subject':
+ if not rschema in repoeschema.subject_relations():
+ continue
+ subjtypes, objtypes = [etype], targettypes
+ else: # role == 'object'
+ if not rschema in repoeschema.object_relations():
+ continue
+ subjtypes, objtypes = targettypes, [etype]
+ self._synchronize_rschema(rschema, syncrdefs=False,
+ syncprops=syncprops, syncperms=syncperms)
+ if rschema.rule: # rdef for computed rtype are infered hence should not be
+ # synchronized
+ continue
+ reporschema = self.repo.schema.rschema(rschema)
+ for subj in subjtypes:
+ for obj in objtypes:
+ if (subj, obj) not in reporschema.rdefs:
+ continue
+ self._synchronize_rdef_schema(subj, rschema, obj,
+ syncprops=syncprops, syncperms=syncperms)
+ if syncprops: # need to process __unique_together__ after rdefs were processed
+ # mappings from constraint name to columns
+ # filesystem (fs) and repository (repo) wise
+ fs = {}
+ repo = {}
+ for cols in eschema._unique_together or ():
+ fs[unique_index_name(repoeschema, cols)] = sorted(cols)
+ schemaentity = self.cnx.entity_from_eid(repoeschema.eid)
+ for entity in schemaentity.related('constraint_of', 'object',
+ targettypes=('CWUniqueTogetherConstraint',)).entities():
+ repo[entity.name] = sorted(rel.name for rel in entity.relations)
+ added = set(fs) - set(repo)
+ removed = set(repo) - set(fs)
+
+ for name in removed:
+ self.rqlexec('DELETE CWUniqueTogetherConstraint C WHERE C name %(name)s',
+ {'name': name})
+
+ def possible_unique_constraint(cols):
+ for name in cols:
+ rschema = repoeschema.subjrels.get(name)
+ if rschema is None:
+ print('dont add %s unique constraint on %s, missing %s' % (
+ ','.join(cols), eschema, name))
+ return False
+ if not (rschema.final or rschema.inlined):
+ print('dont add %s unique constraint on %s, %s is neither final nor inlined' % (
+ ','.join(cols), eschema, name))
+ return False
+ return True
+
+ for name in added:
+ if possible_unique_constraint(fs[name]):
+ rql, substs = ss._uniquetogether2rql(eschema, fs[name])
+ substs['x'] = repoeschema.eid
+ substs['name'] = name
+ self.rqlexec(rql, substs)
+
+ def _synchronize_rdef_schema(self, subjtype, rtype, objtype,
+ syncperms=True, syncprops=True):
+ """synchronize properties of the persistent relation definition schema
+ against its current definition:
+ * order and other properties
+ * constraints
+ * permissions
+ """
+ subjtype, objtype = str(subjtype), str(objtype)
+ rschema = self.fs_schema.rschema(rtype)
+ if rschema.rule:
+ raise ExecutionError('Cannot synchronize a relation definition for a '
+ 'computed relation (%s)' % rschema)
+ reporschema = self.repo.schema.rschema(rschema)
+ if (subjtype, rschema, objtype) in self._synchronized:
+ return
+ if syncperms and syncprops:
+ self._synchronized.add((subjtype, rschema, objtype))
+ if rschema.symmetric:
+ self._synchronized.add((objtype, rschema, subjtype))
+ rdef = rschema.rdef(subjtype, objtype)
+ if rdef.infered:
+ return # don't try to synchronize infered relation defs
+ repordef = reporschema.rdef(subjtype, objtype)
+ confirm = self.verbosity >= 2
+ if syncprops:
+ # properties
+ self.rqlexecall(ss.updaterdef2rql(rdef, repordef.eid),
+ ask_confirm=confirm)
+ # constraints
+ # 0. eliminate the set of unmodified constraints from the sets of
+ # old/new constraints
+ newconstraints = set(rdef.constraints)
+ oldconstraints = set(repordef.constraints)
+ unchanged_constraints = newconstraints & oldconstraints
+ newconstraints -= unchanged_constraints
+ oldconstraints -= unchanged_constraints
+ # 1. remove old constraints and update constraints of the same type
+ # NOTE: don't use rschema.constraint_by_type because it may be
+ # out of sync with newconstraints when multiple
+ # constraints of the same type are used
+ for cstr in oldconstraints:
+ self.rqlexec('DELETE CWConstraint C WHERE C eid %(x)s',
+ {'x': cstr.eid}, ask_confirm=confirm)
+ # 2. add new constraints
+ cstrtype_map = self.cstrtype_mapping()
+ self.rqlexecall(ss.constraints2rql(cstrtype_map, newconstraints,
+ repordef.eid),
+ ask_confirm=confirm)
+ if syncperms and not rschema in VIRTUAL_RTYPES:
+ self._synchronize_permissions(rdef, repordef.eid)
+
+ # base actions ############################################################
+
+ def checkpoint(self, ask_confirm=True):
+ """checkpoint action"""
+ if not ask_confirm or self.confirm('Commit now ?', shell=False):
+ self.commit()
+
+ def cmd_add_cube(self, cube, update_database=True):
+ self.cmd_add_cubes( (cube,), update_database)
+
+ def cmd_add_cubes(self, cubes, update_database=True):
+ """update_database is telling if the database schema should be updated
+ or if only the relevant eproperty should be inserted (for the case where
+ a cube has been extracted from an existing instance, so the
+ cube schema is already in there)
+ """
+ newcubes = super(ServerMigrationHelper, self).cmd_add_cubes(cubes)
+ if not newcubes:
+ return
+ for cube in newcubes:
+ self.cmd_set_property('system.version.'+cube,
+ self.config.cube_version(cube))
+ # ensure added cube is in config cubes
+ # XXX worth restoring on error?
+ if not cube in self.config._cubes:
+ self.config._cubes += (cube,)
+ if not update_database:
+ self.commit()
+ return
+ newcubes_schema = self.config.load_schema(construction_mode='non-strict')
+ # XXX we have to replace fs_schema, used in cmd_add_relation_type
+ # etc. and fsschema of migration script contexts
+ self.fs_schema = newcubes_schema
+ self.update_context('fsschema', self.fs_schema)
+ new = set()
+ # execute pre-create files
+ driver = self.repo.system_source.dbdriver
+ for cube in reversed(newcubes):
+ self.cmd_install_custom_sql_scripts(cube)
+ self.cmd_exec_event_script('precreate', cube)
+ # add new entity and relation types
+ for rschema in newcubes_schema.relations():
+ if not rschema in self.repo.schema:
+ self.cmd_add_relation_type(rschema.type)
+ new.add(rschema.type)
+ toadd = [eschema for eschema in newcubes_schema.entities()
+ if not eschema in self.repo.schema]
+ for eschema in order_eschemas(toadd):
+ self.cmd_add_entity_type(eschema.type)
+ new.add(eschema.type)
+ # check if attributes has been added to existing entities
+ for rschema in newcubes_schema.relations():
+ existingschema = self.repo.schema.rschema(rschema.type)
+ for (fromtype, totype) in rschema.rdefs:
+ # if rdef already exists or is infered from inheritance,
+ # don't add it
+ if (fromtype, totype) in existingschema.rdefs \
+ or rschema.rdefs[(fromtype, totype)].infered:
+ continue
+ # check we should actually add the relation definition
+ if not (fromtype in new or totype in new or rschema in new):
+ continue
+ self.cmd_add_relation_definition(str(fromtype), rschema.type,
+ str(totype))
+ # execute post-create files
+ for cube in reversed(newcubes):
+ with self.cnx.allow_all_hooks_but():
+ self.cmd_exec_event_script('postcreate', cube)
+ self.commit()
+
+ def cmd_drop_cube(self, cube, removedeps=False):
+ removedcubes = super(ServerMigrationHelper, self).cmd_drop_cube(
+ cube, removedeps)
+ if not removedcubes:
+ return
+ fsschema = self.fs_schema
+ removedcubes_schema = self.config.load_schema(construction_mode='non-strict')
+ reposchema = self.repo.schema
+ # execute pre-remove files
+ for cube in reversed(removedcubes):
+ self.cmd_exec_event_script('preremove', cube)
+ # remove cubes'entity and relation types
+ for rschema in fsschema.relations():
+ if not rschema in removedcubes_schema and rschema in reposchema:
+ self.cmd_drop_relation_type(rschema.type)
+ toremove = [eschema for eschema in fsschema.entities()
+ if not eschema in removedcubes_schema
+ and eschema in reposchema]
+ for eschema in reversed(order_eschemas(toremove)):
+ self.cmd_drop_entity_type(eschema.type)
+ for rschema in fsschema.relations():
+ if rschema in removedcubes_schema and rschema in reposchema:
+ # check if attributes/relations has been added to entities from
+ # other cubes
+ for fromtype, totype in rschema.rdefs:
+ if (fromtype, totype) not in removedcubes_schema[rschema.type].rdefs and \
+ (fromtype, totype) in reposchema[rschema.type].rdefs:
+ self.cmd_drop_relation_definition(
+ str(fromtype), rschema.type, str(totype))
+ # execute post-remove files
+ for cube in reversed(removedcubes):
+ self.cmd_exec_event_script('postremove', cube)
+ self.rqlexec('DELETE CWProperty X WHERE X pkey %(pk)s',
+ {'pk': u'system.version.'+cube}, ask_confirm=False)
+ self.commit()
+
+ # schema migration actions ################################################
+
+ def cmd_add_attribute(self, etype, attrname, attrtype=None, commit=True):
+ """add a new attribute on the given entity type"""
+ if attrtype is None:
+ rschema = self.fs_schema.rschema(attrname)
+ attrtype = rschema.objects(etype)[0]
+ self.cmd_add_relation_definition(etype, attrname, attrtype, commit=commit)
+
+ def cmd_drop_attribute(self, etype, attrname, commit=True):
+ """drop an existing attribute from the given entity type
+
+ `attrname` is a string giving the name of the attribute to drop
+ """
+ try:
+ rschema = self.repo.schema.rschema(attrname)
+ attrtype = rschema.objects(etype)[0]
+ except KeyError:
+ print('warning: attribute %s %s is not known, skip deletion' % (
+ etype, attrname))
+ else:
+ self.cmd_drop_relation_definition(etype, attrname, attrtype,
+ commit=commit)
+
+ def cmd_rename_attribute(self, etype, oldname, newname, commit=True):
+ """rename an existing attribute of the given entity type
+
+ `oldname` is a string giving the name of the existing attribute
+ `newname` is a string giving the name of the renamed attribute
+ """
+ eschema = self.fs_schema.eschema(etype)
+ attrtype = eschema.destination(newname)
+ # have to commit this first step anyway to get the definition
+ # actually in the schema
+ self.cmd_add_attribute(etype, newname, attrtype, commit=True)
+        # skip NULL values if the attribute is required
+ rql = 'SET X %s VAL WHERE X is %s, X %s VAL' % (newname, etype, oldname)
+ card = eschema.rdef(newname).cardinality[0]
+ if card == '1':
+ rql += ', NOT X %s NULL' % oldname
+ self.rqlexec(rql, ask_confirm=self.verbosity>=2)
+ # XXX if both attributes fulltext indexed, should skip fti rebuild
+ # XXX if old attribute was fti indexed but not the new one old value
+ # won't be removed from the index (this occurs on other kind of
+ # fulltextindexed change...)
+ self.cmd_drop_attribute(etype, oldname, commit=commit)
+
+ def cmd_add_entity_type(self, etype, auto=True, commit=True):
+ """register a new entity type
+
+ in auto mode, automatically register entity's relation where the
+ targeted type is known
+ """
+ instschema = self.repo.schema
+ eschema = self.fs_schema.eschema(etype)
+ if etype in instschema and not (eschema.final and eschema.eid is None):
+ print('warning: %s already known, skip addition' % etype)
+ return
+ confirm = self.verbosity >= 2
+ groupmap = self.group_mapping()
+ cstrtypemap = self.cstrtype_mapping()
+ # register the entity into CWEType
+ execute = self.cnx.execute
+ if eschema.final and eschema not in instschema:
+ # final types are expected to be in the living schema by default, but they are not if
+ # the type is defined in a cube that is being added
+ edef = EntityType(eschema.type, __permissions__=eschema.permissions)
+ instschema.add_entity_type(edef)
+ ss.execschemarql(execute, eschema, ss.eschema2rql(eschema, groupmap))
+ # add specializes relation if needed
+ specialized = eschema.specializes()
+ if specialized:
+ try:
+ specialized.eid = instschema[specialized].eid
+ except KeyError:
+ raise ExecutionError('trying to add entity type but parent type is '
+ 'not yet in the database schema')
+ self.rqlexecall(ss.eschemaspecialize2rql(eschema), ask_confirm=confirm)
+ # register entity's attributes
+ for rschema, attrschema in eschema.attribute_definitions():
+ # ignore those meta relations, they will be automatically added
+ if rschema.type in META_RTYPES:
+ continue
+ if not attrschema.type in instschema:
+ self.cmd_add_entity_type(attrschema.type, False, False)
+ if not rschema.type in instschema:
+ # need to add the relation type and to commit to get it
+ # actually in the schema
+ self.cmd_add_relation_type(rschema.type, False, commit=True)
+ # register relation definition
+ rdef = self._get_rdef(rschema, eschema, eschema.destination(rschema))
+ ss.execschemarql(execute, rdef, ss.rdef2rql(rdef, cstrtypemap, groupmap),)
+ # take care to newly introduced base class
+ # XXX some part of this should probably be under the "if auto" block
+ for spschema in eschema.specialized_by(recursive=False):
+ try:
+ instspschema = instschema[spschema]
+ except KeyError:
+ # specialized entity type not in schema, ignore
+ continue
+ if instspschema.specializes() != eschema:
+ self.rqlexec('SET D specializes P WHERE D eid %(d)s, P name %(pn)s',
+ {'d': instspschema.eid, 'pn': eschema.type},
+ ask_confirm=confirm)
+ for rschema, tschemas, role in spschema.relation_definitions(True):
+ for tschema in tschemas:
+ if not tschema in instschema:
+ continue
+ if role == 'subject':
+ subjschema = spschema
+ objschema = tschema
+ if rschema.final and rschema in instspschema.subjrels:
+ # attribute already set, has_rdef would check if
+ # it's of the same type, we don't want this so
+ # simply skip here
+ continue
+ elif role == 'object':
+ subjschema = tschema
+ objschema = spschema
+ if (rschema.rdef(subjschema, objschema).infered
+ or (instschema.has_relation(rschema) and
+ (subjschema, objschema) in instschema[rschema].rdefs)):
+ continue
+ self.cmd_add_relation_definition(
+ subjschema.type, rschema.type, objschema.type)
+ if auto:
+ # we have commit here to get relation types actually in the schema
+ self.commit()
+ added = []
+ for rschema in eschema.subject_relations():
+ # attribute relation have already been processed and
+ # 'owned_by'/'created_by' will be automatically added
+ if rschema.final or rschema.type in META_RTYPES:
+ continue
+ rtypeadded = rschema.type in instschema
+ for targetschema in rschema.objects(etype):
+ # ignore relations where the targeted type is not in the
+ # current instance schema
+ targettype = targetschema.type
+ if not targettype in instschema and targettype != etype:
+ continue
+ if not rtypeadded:
+ # need to add the relation type and to commit to get it
+ # actually in the schema
+ added.append(rschema.type)
+ self.cmd_add_relation_type(rschema.type, False, commit=True)
+ rtypeadded = True
+ # register relation definition
+                    # remember this to avoid adding twice a non-symmetric relation
+ # such as "Emailthread forked_from Emailthread"
+ added.append((etype, rschema.type, targettype))
+ rdef = self._get_rdef(rschema, eschema, targetschema)
+ ss.execschemarql(execute, rdef,
+ ss.rdef2rql(rdef, cstrtypemap, groupmap))
+ for rschema in eschema.object_relations():
+ if rschema.type in META_RTYPES:
+ continue
+ rtypeadded = rschema.type in instschema or rschema.type in added
+ for targetschema in rschema.subjects(etype):
+ # ignore relations where the targeted type is not in the
+ # current instance schema
+ targettype = targetschema.type
+ # don't check targettype != etype since in this case the
+ # relation has already been added as a subject relation
+ if not targettype in instschema:
+ continue
+ if not rtypeadded:
+ # need to add the relation type and to commit to get it
+ # actually in the schema
+ self.cmd_add_relation_type(rschema.type, False, commit=True)
+ rtypeadded = True
+ elif (targettype, rschema.type, etype) in added:
+ continue
+ # register relation definition
+ rdef = self._get_rdef(rschema, targetschema, eschema)
+ ss.execschemarql(execute, rdef,
+ ss.rdef2rql(rdef, cstrtypemap, groupmap))
+ if commit:
+ self.commit()
+
+ def cmd_drop_entity_type(self, etype, commit=True):
+ """Drop an existing entity type.
+
+ This will trigger deletion of necessary relation types and definitions.
+ Note that existing entities of the given type will be deleted without
+ any hooks called.
+
+ :param etype: name of the entity type to drop
+ :param commit: when True, commit the transaction before returning
+ """
+ # XXX what if we delete an entity type which is specialized by other types
+ # unregister the entity from CWEType
+ # only ask for confirmation in verbose mode (verbosity >= 2)
+ self.rqlexec('DELETE CWEType X WHERE X name %(etype)s', {'etype': etype},
+ ask_confirm=self.verbosity>=2)
+ if commit:
+ self.commit()
+
+ def cmd_rename_entity_type(self, oldname, newname, attrs=None, commit=True):
+ """rename an existing entity type in the persistent schema
+
+ `oldname` is a string giving the name of the existing entity type
+ `newname` is a string giving the name of the renamed entity type
+
+ `attrs`, when given, restricts the set of attribute columns copied when
+ merging `oldname` into an already existing `newname` type.
+ """
+ schema = self.repo.schema
+ if oldname not in schema:
+ print('warning: entity type %s is unknown, skip renaming' % oldname)
+ return
+ # if merging two existing entity types
+ if newname in schema:
+ assert oldname in ETYPE_NAME_MAP, \
+ '%s should be mapped to %s in ETYPE_NAME_MAP' % (oldname,
+ newname)
+ if attrs is None:
+ # default to every final or inlined attribute of the target type
+ attrs = ','.join(SQL_PREFIX + rschema.type
+ for rschema in schema[newname].subject_relations()
+ if (rschema.final or rschema.inlined)
+ and not rschema in PURE_VIRTUAL_RTYPES)
+ else:
+ attrs += ('eid', 'creation_date', 'modification_date', 'cwuri')
+ attrs = ','.join(SQL_PREFIX + attr for attr in attrs)
+ # copy rows from the old type's table into the new type's table
+ self.sqlexec('INSERT INTO %s%s(%s) SELECT %s FROM %s%s' % (
+ SQL_PREFIX, newname, attrs, attrs, SQL_PREFIX, oldname),
+ ask_confirm=False)
+ # old entity type has not been added to the schema, can't gather it
+ new = schema.eschema(newname)
+ oldeid = self.rqlexec('CWEType ET WHERE ET name %(on)s',
+ {'on': oldname}, ask_confirm=False)[0][0]
+ # backport old type relations to new type
+ # XXX workflows, other relations?
+ for r1, rr1 in [('from_entity', 'to_entity'),
+ ('to_entity', 'from_entity')]:
+ # rewire rdefs pointing at the old type, unless an equivalent rdef
+ # already targets the new type (the NOT EXISTS clause)
+ self.rqlexec('SET X %(r1)s NET WHERE X %(r1)s OET, '
+ 'NOT EXISTS(X2 %(r1)s NET, X relation_type XRT, '
+ 'X2 relation_type XRT, X %(rr1)s XTE, X2 %(rr1)s XTE), '
+ 'OET eid %%(o)s, NET eid %%(n)s' % locals(),
+ {'o': oldeid, 'n': new.eid}, ask_confirm=False)
+ # backport is / is_instance_of relation to new type
+ for rtype in ('is', 'is_instance_of'):
+ self.sqlexec('UPDATE %s_relation SET eid_to=%s WHERE eid_to=%s'
+ % (rtype, new.eid, oldeid), ask_confirm=False)
+ # delete relations using SQL to avoid relations content removal
+ # triggered by schema synchronization hooks.
+ for rdeftype in ('CWRelation', 'CWAttribute'):
+ thispending = set( (eid for eid, in self.sqlexec(
+ 'SELECT cw_eid FROM cw_%s WHERE cw_from_entity=%%(eid)s OR '
+ ' cw_to_entity=%%(eid)s' % rdeftype,
+ {'eid': oldeid}, ask_confirm=False)) )
+ # we should add deleted eids into pending eids else we may
+ # get some validation error on commit since integrity hooks
+ # may think some required relation is missing... This also ensures
+ # repository caches are properly cleaned up
+ hook.CleanupDeletedEidsCacheOp.get_instance(self.cnx).union(thispending)
+ # and don't forget to remove record from system tables
+ entities = [self.cnx.entity_from_eid(eid, rdeftype) for eid in thispending]
+ self.repo.system_source.delete_info_multi(self.cnx, entities)
+ self.sqlexec('DELETE FROM cw_%s WHERE cw_from_entity=%%(eid)s OR '
+ 'cw_to_entity=%%(eid)s' % rdeftype,
+ {'eid': oldeid}, ask_confirm=False)
+ # now we have to manually cleanup relations pointing to deleted
+ # entities
+ thiseids = ','.join(str(eid) for eid in thispending)
+ for rschema, ttypes, role in schema[rdeftype].relation_definitions():
+ if rschema.type in VIRTUAL_RTYPES:
+ continue
+ sqls = []
+ if role == 'object':
+ if rschema.inlined:
+ # inlined relation: stored as a column of the subject table
+ for eschema in ttypes:
+ sqls.append('DELETE FROM cw_%s WHERE cw_%s IN(%%s)'
+ % (eschema, rschema))
+ else:
+ sqls.append('DELETE FROM %s_relation WHERE eid_to IN(%%s)'
+ % rschema)
+ elif not rschema.inlined:
+ sqls.append('DELETE FROM %s_relation WHERE eid_from IN(%%s)'
+ % rschema)
+ for sql in sqls:
+ self.sqlexec(sql % thiseids, ask_confirm=False)
+ # remove the old type: use rql to propagate deletion
+ self.rqlexec('DELETE CWEType ET WHERE ET name %(on)s', {'on': oldname},
+ ask_confirm=False)
+ # elif simply renaming an entity type
+ else:
+ self.rqlexec('SET ET name %(newname)s WHERE ET is CWEType, ET name %(on)s',
+ {'newname' : text_type(newname), 'on' : oldname},
+ ask_confirm=False)
+ if commit:
+ self.commit()
+
+ def cmd_add_relation_type(self, rtype, addrdef=True, commit=True):
+ """register a new relation type named `rtype`, as described in the
+ schema description file.
+
+ `addrdef` is a boolean value; when True, it will also add all relations
+ of the type just added found in the schema definition file. Note that it
+ implies an intermediate "commit" which commits the relation type
+ creation (but not the relation definitions themselves, for which
+ committing depends on the `commit` argument value).
+
+ """
+ reposchema = self.repo.schema
+ rschema = self.fs_schema.rschema(rtype)
+ execute = self.cnx.execute
+ if rtype in reposchema:
+ print('warning: relation type %s is already known, skip addition' % (
+ rtype))
+ elif rschema.rule:
+ # computed relation: registered through its rule, no rdefs to add
+ gmap = self.group_mapping()
+ ss.execschemarql(execute, rschema, ss.crschema2rql(rschema, gmap))
+ else:
+ # register the relation into CWRType and insert necessary relation
+ # definitions
+ ss.execschemarql(execute, rschema, ss.rschema2rql(rschema, addrdef=False))
+ if not rschema.rule and addrdef:
+ # commit so the relation type exists before adding definitions
+ self.commit()
+ gmap = self.group_mapping()
+ cmap = self.cstrtype_mapping()
+ done = set()
+ for subj, obj in rschema.rdefs:
+ # skip rdefs whose subject/object type is not in the repo schema
+ if not (reposchema.has_entity(subj)
+ and reposchema.has_entity(obj)):
+ continue
+ # symmetric relations appears twice
+ if (subj, obj) in done:
+ continue
+ done.add( (subj, obj) )
+ self.cmd_add_relation_definition(subj, rtype, obj)
+ if rtype in META_RTYPES:
+ # if the relation is in META_RTYPES, ensure we're adding it for
+ # all entity types *in the persistent schema*, not only those in
+ # the fs schema
+ for etype in self.repo.schema.entities():
+ if not etype in self.fs_schema:
+ # get sample object type and rproperties
+ objtypes = rschema.objects()
+ assert len(objtypes) == 1, objtypes
+ objtype = objtypes[0]
+ # clone a sample rdef and retarget it at this etype
+ rdef = copy(rschema.rdef(rschema.subjects(objtype)[0], objtype))
+ rdef.subject = etype
+ rdef.rtype = self.repo.schema.rschema(rschema)
+ rdef.object = self.repo.schema.eschema(objtype)
+ ss.execschemarql(execute, rdef,
+ ss.rdef2rql(rdef, cmap, gmap))
+ if commit:
+ self.commit()
+
+ def cmd_drop_relation_type(self, rtype, commit=True):
+ """Drop an existing relation type.
+
+ Note that existing relations of the given type will be deleted without
+ any hooks called.
+ """
+ # the type may be either a regular (CWRType) or a computed
+ # (CWComputedRType) relation type; try deleting both
+ self.rqlexec('DELETE CWRType X WHERE X name %r' % rtype,
+ ask_confirm=self.verbosity>=2)
+ self.rqlexec('DELETE CWComputedRType X WHERE X name %r' % rtype,
+ ask_confirm=self.verbosity>=2)
+ if commit:
+ self.commit()
+
+ def cmd_rename_relation_type(self, oldname, newname, commit=True, force=False):
+ """rename an existing relation
+
+ `oldname` is a string giving the name of the existing relation
+ `newname` is a string giving the name of the renamed relation
+
+ If `force` is True, proceed even if `oldname` still appears in the fs schema
+ """
+ if oldname in self.fs_schema and not force:
+ if not self.confirm('Relation %s is still present in the filesystem schema,'
+ ' do you really want to drop it?' % oldname,
+ default='n'):
+ return
+ # add the new type first, then move existing relations over to it
+ self.cmd_add_relation_type(newname, commit=True)
+ if not self.repo.schema[oldname].rule:
+ # computed relations have no stored content to migrate
+ self.rqlexec('SET X %s Y WHERE X %s Y' % (newname, oldname),
+ ask_confirm=self.verbosity>=2)
+ self.cmd_drop_relation_type(oldname, commit=commit)
+
+ def cmd_add_relation_definition(self, subjtype, rtype, objtype, commit=True):
+ """register a new relation definition, from its definition found in the
+ schema definition file
+
+ :raises ExecutionError: if `rtype` is a computed relation, which cannot
+ have explicit relation definitions
+ """
+ rschema = self.fs_schema.rschema(rtype)
+ if rschema.rule:
+ raise ExecutionError('Cannot add a relation definition for a '
+ 'computed relation (%s)' % rschema)
+ # make sure the relation type itself exists first (without its rdefs)
+ if not rtype in self.repo.schema:
+ self.cmd_add_relation_type(rtype, addrdef=False, commit=True)
+ if (subjtype, objtype) in self.repo.schema.rschema(rtype).rdefs:
+ print('warning: relation %s %s %s is already known, skip addition' % (
+ subjtype, rtype, objtype))
+ return
+ rdef = self._get_rdef(rschema, subjtype, objtype)
+ ss.execschemarql(self.cnx.execute, rdef,
+ ss.rdef2rql(rdef, self.cstrtype_mapping(),
+ self.group_mapping()))
+ if commit:
+ self.commit()
+
+ def _get_rdef(self, rschema, subjtype, objtype):
+ # fetch the fs-schema rdef and make sure its schema objects carry eids
+ return self._set_rdef_eid(rschema.rdefs[(subjtype, objtype)])
+
+ def _set_rdef_eid(self, rdef):
+ """ensure the rtype/subject/object schema objects of `rdef` have their
+ eid set, filling them in from the repository schema when missing"""
+ for attr in ('rtype', 'subject', 'object'):
+ schemaobj = getattr(rdef, attr)
+ if getattr(schemaobj, 'eid', None) is None:
+ schemaobj.eid = self.repo.schema[schemaobj].eid
+ assert schemaobj.eid is not None, schemaobj
+ return rdef
+
+ def cmd_drop_relation_definition(self, subjtype, rtype, objtype, commit=True):
+ """Drop an existing relation definition.
+
+ Note that existing relations of the given definition will be deleted
+ without any hooks called.
+
+ :raises ExecutionError: if `rtype` is a computed relation
+ """
+ rschema = self.repo.schema.rschema(rtype)
+ if rschema.rule:
+ raise ExecutionError('Cannot drop a relation definition for a '
+ 'computed relation (%s)' % rschema)
+ # unregister the definition from CWAttribute or CWRelation
+ # final relations (attributes) live in CWAttribute, others in CWRelation
+ if rschema.final:
+ etype = 'CWAttribute'
+ else:
+ etype = 'CWRelation'
+ rql = ('DELETE %s X WHERE X from_entity FE, FE name "%s",'
+ 'X relation_type RT, RT name "%s", X to_entity TE, TE name "%s"')
+ self.rqlexec(rql % (etype, subjtype, rtype, objtype),
+ ask_confirm=self.verbosity>=2)
+ if commit:
+ self.commit()
+
+ def cmd_sync_schema_props_perms(self, ertype=None, syncperms=True,
+ syncprops=True, syncrdefs=True, commit=True):
+ """synchronize the persistent schema against the current definition
+ schema.
+
+ `ertype` can be :
+ - None, in that case everything will be synced ;
+ - a string, it should be an entity type or
+ a relation type. In that case, only the corresponding
+ entities / relations will be synced ;
+ - an rdef object to synchronize only this specific relation definition
+
+ It will synch common stuff between the definition schema and the
+ actual persistent schema, it won't add/remove any entity or relation.
+ """
+ assert syncperms or syncprops, 'nothing to do'
+ if ertype is not None:
+ # normalize an rdef object to a (subject, rtype, object) triple
+ if isinstance(ertype, RelationDefinitionSchema):
+ ertype = ertype.as_triple()
+ if isinstance(ertype, (tuple, list)):
+ assert len(ertype) == 3, 'not a relation definition'
+ self._synchronize_rdef_schema(ertype[0], ertype[1], ertype[2],
+ syncperms=syncperms,
+ syncprops=syncprops)
+ else:
+ # a string: dispatch on whether it names a relation or entity type
+ erschema = self.repo.schema[ertype]
+ if isinstance(erschema, CubicWebRelationSchema):
+ self._synchronize_rschema(erschema, syncrdefs=syncrdefs,
+ syncperms=syncperms,
+ syncprops=syncprops)
+ else:
+ self._synchronize_eschema(erschema, syncrdefs=syncrdefs,
+ syncperms=syncperms,
+ syncprops=syncprops)
+ else:
+ # no target given: sync every entity type (relations are reached
+ # through syncrdefs)
+ for etype in self.repo.schema.entities():
+ if etype.eid is None:
+ # not yet added final etype (thing to BigInt defined in
+ # yams though 3.13 migration not done yet)
+ continue
+ self._synchronize_eschema(etype, syncrdefs=syncrdefs,
+ syncprops=syncprops, syncperms=syncperms)
+ if commit:
+ self.commit()
+
+ def cmd_change_relation_props(self, subjtype, rtype, objtype,
+ commit=True, **kwargs):
+ """change some properties of a relation definition
+
+ Properties to change are given as keyword arguments; subject/object/
+ relation types equal to 'Any' act as wildcards.
+
+ you usually want to use sync_schema_props_perms instead.
+ """
+ assert kwargs
+ restriction = []
+ if subjtype and subjtype != 'Any':
+ restriction.append('X from_entity FE, FE name "%s"' % subjtype)
+ if objtype and objtype != 'Any':
+ restriction.append('X to_entity TE, TE name "%s"' % objtype)
+ if rtype and rtype != 'Any':
+ restriction.append('X relation_type RT, RT name "%s"' % rtype)
+ assert restriction
+ values = []
+ for k, v in kwargs.items():
+ values.append('X %s %%(%s)s' % (k, k))
+ # on python 2, coerce byte strings to unicode for RQL parameters
+ if PY2 and isinstance(v, str):
+ kwargs[k] = unicode(v)
+ rql = 'SET %s WHERE %s' % (','.join(values), ','.join(restriction))
+ self.rqlexec(rql, kwargs, ask_confirm=self.verbosity>=2)
+ if commit:
+ self.commit()
+
+ def cmd_set_size_constraint(self, etype, rtype, size, commit=True):
+ """set change size constraint of a string attribute
+
+ if size is None any size constraint will be removed.
+
+ you usually want to use sync_schema_props_perms instead.
+ """
+ oldvalue = None
+ # find the current SizeConstraint max, if any
+ for constr in self.repo.schema.eschema(etype).rdef(rtype).constraints:
+ if isinstance(constr, SizeConstraint):
+ oldvalue = constr.max
+ if oldvalue == size:
+ return
+ if oldvalue is None and not size is None:
+ # no existing constraint: create one and attach it to the rdef
+ ceid = self.rqlexec('INSERT CWConstraint C: C value %(v)s, C cstrtype CT '
+ 'WHERE CT name "SizeConstraint"',
+ {'v': SizeConstraint(size).serialize()},
+ ask_confirm=self.verbosity>=2)[0][0]
+ self.rqlexec('SET X constrained_by C WHERE X from_entity S, X relation_type R, '
+ 'S name "%s", R name "%s", C eid %s' % (etype, rtype, ceid),
+ ask_confirm=self.verbosity>=2)
+ elif not oldvalue is None:
+ if not size is None:
+ # existing constraint: update its serialized value in place
+ self.rqlexec('SET C value %%(v)s WHERE X from_entity S, X relation_type R,'
+ 'X constrained_by C, C cstrtype CT, CT name "SizeConstraint",'
+ 'S name "%s", R name "%s"' % (etype, rtype),
+ {'v': text_type(SizeConstraint(size).serialize())},
+ ask_confirm=self.verbosity>=2)
+ else:
+ # size is None: detach the constraint from the rdef
+ self.rqlexec('DELETE X constrained_by C WHERE X from_entity S, X relation_type R,'
+ 'X constrained_by C, C cstrtype CT, CT name "SizeConstraint",'
+ 'S name "%s", R name "%s"' % (etype, rtype),
+ ask_confirm=self.verbosity>=2)
+ # cleanup unused constraints
+ self.rqlexec('DELETE CWConstraint C WHERE NOT X constrained_by C')
+ if commit:
+ self.commit()
+
+ # Workflows handling ######################################################
+
+ def cmd_make_workflowable(self, etype):
+ """add workflow relations to an entity type to make it workflowable
+
+ Adds the three relation definitions the workflow machinery relies on:
+ in_state, custom_workflow and wf_info_for.
+ """
+ self.cmd_add_relation_definition(etype, 'in_state', 'State')
+ self.cmd_add_relation_definition(etype, 'custom_workflow', 'Workflow')
+ self.cmd_add_relation_definition('TrInfo', 'wf_info_for', etype)
+
+ def cmd_add_workflow(self, name, wfof, default=True, commit=False,
+ ensure_workflowable=True, **kwargs):
+ """
+ create a new workflow and links it to entity types
+ :type name: unicode
+ :param name: name of the workflow
+
+ :type wfof: string or list/tuple of strings
+ :param wfof: entity type(s) having this workflow
+
+ :type default: bool
+ :param default: tells whether this is the default workflow
+ for the specified entity type(s); set it to false in
+ the case of a subworkflow
+
+ :rtype: `Workflow`
+ """
+ wf = self.cmd_create_entity('Workflow', name=text_type(name),
+ **kwargs)
+ if not isinstance(wfof, (list, tuple)):
+ wfof = (wfof,)
+ def _missing_wf_rel(etype):
+ return 'missing workflow relations, see make_workflowable(%s)' % etype
+ for etype in wfof:
+ eschema = self.repo.schema[etype]
+ etype = text_type(etype)
+ if ensure_workflowable:
+ # verify the type went through make_workflowable first
+ assert 'in_state' in eschema.subjrels, _missing_wf_rel(etype)
+ assert 'custom_workflow' in eschema.subjrels, _missing_wf_rel(etype)
+ assert 'wf_info_for' in eschema.objrels, _missing_wf_rel(etype)
+ rset = self.rqlexec(
+ 'SET X workflow_of ET WHERE X eid %(x)s, ET name %(et)s',
+ {'x': wf.eid, 'et': text_type(etype)}, ask_confirm=False)
+ assert rset, 'unexistant entity type %s' % etype
+ if default:
+ self.rqlexec(
+ 'SET ET default_workflow X WHERE X eid %(x)s, ET name %(et)s',
+ {'x': wf.eid, 'et': text_type(etype)}, ask_confirm=False)
+ if commit:
+ self.commit()
+ return wf
+
+ def cmd_get_workflow_for(self, etype):
+ """return default workflow for the given entity type
+
+ :rtype: `Workflow` entity
+ """
+ rset = self.rqlexec('Workflow X WHERE ET default_workflow X, ET name %(et)s',
+ {'et': etype})
+ return rset.get_entity(0, 0)
+
+ # CWProperty handling ######################################################
+
+ def cmd_property_value(self, pkey):
+ """retrieve the site-wide persistent property value for the given key.
+
+ To get a user specific property value, use appropriate method on CWUser
+ instance.
+ """
+ # NOT X for_user U restricts the lookup to the site-wide property
+ rset = self.rqlexec(
+ 'Any V WHERE X is CWProperty, X pkey %(k)s, X value V, NOT X for_user U',
+ {'k': pkey}, ask_confirm=False)
+ return rset[0][0]
+
+ def cmd_set_property(self, pkey, value):
+ """set the site-wide persistent property value for the given key to the
+ given value.
+
+ To set a user specific property value, use appropriate method on CWUser
+ instance.
+ """
+ value = text_type(value)
+ try:
+ prop = self.rqlexec(
+ 'CWProperty X WHERE X pkey %(k)s, NOT X for_user U',
+ {'k': text_type(pkey)}, ask_confirm=False).get_entity(0, 0)
+ except Exception:
+ # no existing site-wide property for this key: create one
+ self.cmd_create_entity('CWProperty', pkey=text_type(pkey), value=value)
+ else:
+ prop.cw_set(value=value)
+
+ # other data migration commands ###########################################
+
+ def cmd_storage_changed(self, etype, attribute):
+ """migrate entities to a custom storage. The new storage is expected to
+ be set, it will be temporarily removed for the migration.
+ """
+ from logilab.common.shellutils import ProgressBar
+ source = self.repo.system_source
+ storage = source.storage(etype, attribute)
+ # temporarily unset storage so attribute reads go through the source
+ source.unset_storage(etype, attribute)
+ rset = self.rqlexec('Any X WHERE X is %s' % etype, ask_confirm=False)
+ pb = ProgressBar(len(rset))
+ for entity in rset.entities():
+ # fill cache. Do not fetch that attribute using the global rql query
+ # since we may exhaust memory doing that....
+ getattr(entity, attribute)
+ storage.migrate_entity(entity, attribute)
+ # remove from entity cache to avoid memory exhaustion
+ del entity.cw_attr_cache[attribute]
+ pb.update()
+ print()
+ source.set_storage(etype, attribute, storage)
+
+ def cmd_create_entity(self, etype, commit=False, **kwargs):
+ """add a new entity of the given type
+
+ Extra keyword arguments are passed as attribute/relation values.
+ Returns the created entity.
+ """
+ entity = self.cnx.create_entity(etype, **kwargs)
+ if commit:
+ self.commit()
+ return entity
+
+ def cmd_find(self, etype, **kwargs):
+ """find entities of the given type and attribute values
+
+ Thin wrapper around the connection's ``find`` method; returns a result
+ set.
+ """
+ return self.cnx.find(etype, **kwargs)
+
+ @deprecated("[3.19] use find(*args, **kwargs).entities() instead")
+ def cmd_find_entities(self, etype, **kwargs):
+ """find entities of the given type and attribute values
+
+ Deprecated: use ``find(...).entities()`` instead.
+ """
+ return self.cnx.find(etype, **kwargs).entities()
+
+ @deprecated("[3.19] use find(*args, **kwargs).one() instead")
+ def cmd_find_one_entity(self, etype, **kwargs):
+ """find one entity of the given type and attribute values.
+
+ raise :exc:`cubicweb.req.FindEntityError` if can not return one and only
+ one entity.
+
+ Deprecated: use ``find(...).one()`` instead.
+ """
+ return self.cnx.find(etype, **kwargs).one()
+
+ def cmd_update_etype_fti_weight(self, etype, weight):
+ """update the full-text-index weight for entities of the given type
+
+ Only effective on postgres (the `appears` table is specific to its
+ full text indexer); a no-op on other database backends.
+ """
+ if self.repo.system_source.dbdriver == 'postgres':
+ self.sqlexec('UPDATE appears SET weight=%(weight)s '
+ 'FROM entities as X '
+ 'WHERE X.eid=appears.uid AND X.type=%(type)s',
+ {'type': etype, 'weight': weight}, ask_confirm=False)
+
+ def cmd_reindex_entities(self, etypes=None):
+ """force reindexation of entities of the given types or of all
+ indexable entity types
+ """
+ # imported locally to avoid a costly import at module load time
+ from cubicweb.server.checkintegrity import reindex_entities
+ reindex_entities(self.repo.schema, self.cnx, etypes=etypes)
+
+ @contextmanager
+ def cmd_dropped_constraints(self, etype, attrname, cstrtype=None,
+ droprequired=False):
+ """context manager to drop constraints temporarily on fs_schema
+
+ `cstrtype` should be a constraint class (or a tuple of classes)
+ and will be passed to isinstance directly
+
+ For instance::
+
+ >>> with dropped_constraints('MyType', 'myattr',
+ ... UniqueConstraint, droprequired=True):
+ ... add_attribute('MyType', 'myattr')
+ ... # + instructions to fill MyType.myattr column
+ ...
+ >>>
+
+ """
+ rdef = self.fs_schema.eschema(etype).rdef(attrname)
+ original_constraints = rdef.constraints
+ # remove constraints
+ if cstrtype:
+ rdef.constraints = [cstr for cstr in original_constraints
+ if not (cstrtype and isinstance(cstr, cstrtype))]
+ if droprequired:
+ original_cardinality = rdef.cardinality
+ # '?' makes the attribute optional while the block runs
+ rdef.cardinality = '?' + rdef.cardinality[1]
+ yield
+ # restore original constraints
+ rdef.constraints = original_constraints
+ if droprequired:
+ rdef.cardinality = original_cardinality
+ # update repository schema
+ self.cmd_sync_schema_props_perms(rdef, syncperms=False)
+
+ def sqlexec(self, sql, args=None, ask_confirm=True):
+ """execute the given sql if confirmed
+
+ should only be used for low level stuff undoable with existing higher
+ level actions
+
+ Returns the fetched rows when the statement produces a result,
+ None otherwise (or when the user aborts).
+ """
+ if not ask_confirm or self.confirm('Execute sql: %s ?' % sql):
+ try:
+ cu = self.cnx.system_sql(sql, args)
+ except Exception:
+ ex = sys.exc_info()[1]
+ # offer to drop into the debugger before (possibly) re-raising
+ if self.confirm('Error: %s\nabort?' % ex, pdb=True):
+ raise
+ return
+ try:
+ return cu.fetchall()
+ except Exception:
+ # no result to fetch
+ return
+
+ def rqlexec(self, rql, kwargs=None, build_descr=True,
+ ask_confirm=False):
+ """rql action
+
+ `rql` may be a single query string or a sequence of (rql, kwargs)
+ pairs; the result set of the last executed query is returned.
+ """
+ if not isinstance(rql, (tuple, list)):
+ # normalize the single-query form to a one-element sequence
+ rql = ( (rql, kwargs), )
+ res = None
+ execute = self.cnx.execute
+ for rql, kwargs in rql:
+ if kwargs:
+ msg = '%s (%s)' % (rql, kwargs)
+ else:
+ msg = rql
+ if not ask_confirm or self.confirm('Execute rql: %s ?' % msg):
+ try:
+ res = execute(rql, kwargs, build_descr=build_descr)
+ except Exception as ex:
+ if self.confirm('Error: %s\nabort?' % ex, pdb=True):
+ raise
+ return res
+
+ def rqliter(self, rql, kwargs=None, ask_confirm=True):
+ # lazy variant of rqlexec: the query only runs when iteration starts
+ return ForRqlIterator(self, rql, kwargs, ask_confirm)
+
+ # low-level commands to repair broken system database ######################
+
+ def cmd_change_attribute_type(self, etype, attr, newtype, commit=True):
+ """low level method to change the type of an entity attribute. This is
+ a quick hack which has some drawback:
+ * only works when the old type can be changed to the new type by the
+ underlying rdbms (eg using ALTER TABLE)
+ * the actual schema won't be updated until next startup
+ """
+ rschema = self.repo.schema.rschema(attr)
+ oldschema = rschema.objects(etype)[0]
+ rdef = rschema.rdef(etype, oldschema)
+ # repoint the persistent rdef at the new target type
+ sql = ("UPDATE cw_CWAttribute "
+ "SET cw_to_entity=(SELECT cw_eid FROM cw_CWEType WHERE cw_name='%s')"
+ "WHERE cw_eid=%s") % (newtype, rdef.eid)
+ self.sqlexec(sql, ask_confirm=False)
+ dbhelper = self.repo.system_source.dbhelper
+ newrdef = self.fs_schema.rschema(attr).rdef(etype, newtype)
+ sqltype = sql_type(dbhelper, newrdef)
+ cursor = self.cnx.cnxset.cu
+ # consider former cardinality by design, since cardinality change is not handled here
+ allownull = rdef.cardinality[0] != '1'
+ dbhelper.change_col_type(cursor, 'cw_%s' % etype, 'cw_%s' % attr, sqltype, allownull)
+ if commit:
+ self.commit()
+ # manually update live schema
+ eschema = self.repo.schema[etype]
+ rschema._subj_schemas[eschema].remove(oldschema)
+ rschema._obj_schemas[oldschema].remove(eschema)
+ newschema = self.repo.schema[newtype]
+ rschema._update(eschema, newschema)
+ rdef.object = newschema
+ # rekey the rdef under its new (subject, object) pair
+ del rschema.rdefs[(eschema, oldschema)]
+ rschema.rdefs[(eschema, newschema)] = rdef
+
+ def cmd_add_entity_type_table(self, etype, commit=True):
+ """low level method to create the sql table for an existing entity.
+ This may be useful on accidental desync between the repository schema
+ and a sql database
+ """
+ dbhelper = self.repo.system_source.dbhelper
+ tablesql = eschema2sql(dbhelper, self.repo.schema.eschema(etype),
+ prefix=SQL_PREFIX)
+ # generated sql may contain several statements; run them one by one
+ for sql in tablesql.split(';'):
+ if sql.strip():
+ self.sqlexec(sql)
+ if commit:
+ self.commit()
+
+ def cmd_add_relation_type_table(self, rtype, commit=True):
+ """low level method to create the sql table for an existing relation.
+ This may be useful on accidental desync between the repository schema
+ and a sql database
+ """
+ tablesql = rschema2sql(self.repo.schema.rschema(rtype))
+ # generated sql may contain several statements; run them one by one
+ for sql in tablesql.split(';'):
+ if sql.strip():
+ self.sqlexec(sql)
+ if commit:
+ self.commit()
+
+ @deprecated("[3.15] use rename_relation_type(oldname, newname)")
+ def cmd_rename_relation(self, oldname, newname, commit=True):
+ # deprecated alias kept for backward compatibility
+ self.cmd_rename_relation_type(oldname, newname, commit)
+
+
+class ForRqlIterator:
+ """specific rql iterator to make the loop skipable
+
+ The query is executed lazily, on the first call to ``next`` /
+ ``entities``, so the user may be asked for confirmation (and may skip
+ the whole loop) before anything runs.
+ """
+ def __init__(self, helper, rql, kwargs, ask_confirm):
+ self._h = helper
+ self.rql = rql
+ self.kwargs = kwargs
+ self.ask_confirm = ask_confirm
+ # result-set iterator, created lazily by __next__
+ self._rsetit = None
+
+ def __iter__(self):
+ return self
+
+ def _get_rset(self):
+ """execute the query (after optional confirmation) and return its
+ result set; raise StopIteration when the user skips or aborts"""
+ rql, kwargs = self.rql, self.kwargs
+ if kwargs:
+ msg = '%s (%s)' % (rql, kwargs)
+ else:
+ msg = rql
+ if self.ask_confirm:
+ if not self._h.confirm('Execute rql: %s ?' % msg):
+ raise StopIteration
+ try:
+ return self._h._cw.execute(rql, kwargs)
+ except Exception as ex:
+ if self._h.confirm('Error: %s\nabort?' % ex):
+ raise
+ else:
+ raise StopIteration
+
+ def __next__(self):
+ if self._rsetit is not None:
+ return next(self._rsetit)
+ # first call: run the query now and start iterating its rows
+ rset = self._get_rset()
+ self._rsetit = iter(rset)
+ return next(self._rsetit)
+
+ # python 2 iterator protocol compatibility
+ next = __next__
+
+ def entities(self):
+ """iterate over entities instead of raw rows; returns an empty list
+ when the user skips the query"""
+ try:
+ rset = self._get_rset()
+ except StopIteration:
+ return []
+ return rset.entities()
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/server/querier.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/server/querier.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,737 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""Helper classes to execute RQL queries on a set of sources, performing
+security checking and data aggregation.
+"""
+from __future__ import print_function
+
+__docformat__ = "restructuredtext en"
+
+from itertools import repeat
+
+from six import text_type, string_types, integer_types
+from six.moves import range
+
+from rql import RQLSyntaxError, CoercionError
+from rql.stmts import Union
+from rql.nodes import ETYPE_PYOBJ_MAP, etype_from_pyobj, Relation, Exists, Not
+from yams import BASE_TYPES
+
+from cubicweb import ValidationError, Unauthorized, UnknownEid
+from cubicweb.rqlrewrite import RQLRelationRewriter
+from cubicweb import Binary, server
+from cubicweb.rset import ResultSet
+
+from cubicweb.utils import QueryCache, RepeatList
+from cubicweb.server.rqlannotation import SQLGenAnnotator, set_qdata
+from cubicweb.server.ssplanner import READ_ONLY_RTYPES, add_types_restriction
+from cubicweb.server.edition import EditedEntity
+from cubicweb.server.ssplanner import SSPlanner
+from cubicweb.statsd_logger import statsd_timeit, statsd_c
+
+ETYPE_PYOBJ_MAP[Binary] = 'Bytes'
+
+
def empty_rset(rql, args, rqlst=None):
    """Return a `ResultSet` holding no row for the given query."""
    no_rows = []
    return ResultSet(no_rows, rql, args, rqlst=rqlst)
+
def update_varmap(varmap, selected, table):
    """Map each selected term onto its column in `table` (as '<table>.C<idx>'),
    updating `varmap` in place and raising on conflicting assignments.
    """
    for colidx, term in enumerate(selected):
        name = term.as_string()
        column = '%s.C%s' % (table, colidx)
        # a term already mapped to a different column is a conflict
        if varmap.get(name, column) != column:
            raise Exception('variable name conflict on %s: got %s / %s'
                            % (name, column, varmap))
        varmap[name] = column
+
+# permission utilities ########################################################
+
def check_no_password_selected(rqlst):
    """Raise :exc:`Unauthorized` when any solution of `rqlst` selects a
    Password entity (password values must never leak through RQL results).
    """
    for solution in rqlst.solutions:
        for varname, etype in solution.items():
            if etype != 'Password':
                continue
            raise Unauthorized('Password selection is not allowed (%s)' % varname)
+
def term_etype(cnx, term, solution, args):
    """Return the entity type for `term` (a VariableRef or a Constant node).

    Variable references (which have a `name`) are resolved through the given
    `solution`; constants are evaluated against `args`, then their type is
    fetched from the repository entity metadata.
    """
    if hasattr(term, 'name'):
        return solution[term.name]
    return cnx.entity_metas(term.eval(args))['type']
+
def check_relations_read_access(cnx, select, args):
    """Raise :exc:`Unauthorized` if the given user doesn't have credentials to
    read relations used in the given syntax tree.

    Only group-based "read" permissions are checked here (rql expressions are
    handled later through local checks); the check is done for every relation
    of the WHERE clause, under every solution of `select`.
    """
    # use `term_etype` since we've to deal with rewritten constants here,
    # when used as an external source by another repository.
    # XXX what about local read security w/ those rewritten constants...
    # XXX constants can also happen in some queries generated by req.find()
    DBG = (server.DEBUG & server.DBG_SEC) and 'read' in server._SECURITY_CAPS
    schema = cnx.repo.schema
    user = cnx.user
    if select.where is not None:
        for rel in select.where.iget_nodes(Relation):
            for solution in select.solutions:
                # XXX has_text may have specific perm ?
                if rel.r_type in READ_ONLY_RTYPES:
                    # meta relations such as eid / is are always readable
                    continue
                rschema = schema.rschema(rel.r_type)
                if rschema.final:
                    # attribute relation: permission is defined on the
                    # (subject entity type, attribute) definition
                    eschema = schema.eschema(term_etype(cnx, rel.children[0],
                                                        solution, args))
                    rdef = eschema.rdef(rschema)
                else:
                    # regular relation: permission depends on both end types
                    rdef = rschema.rdef(term_etype(cnx, rel.children[0],
                                                   solution, args),
                                        term_etype(cnx, rel.children[1].children[0],
                                                   solution, args))
                if not user.matching_groups(rdef.get_groups('read')):
                    if DBG:
                        print('check_read_access: %s %s does not match %s' %
                              (rdef, user.groups, rdef.get_groups('read')))
                    # XXX rqlexpr not allowed
                    raise Unauthorized('read', rel.r_type)
                if DBG:
                    print('check_read_access: %s %s matches %s' %
                          (rdef, user.groups, rdef.get_groups('read')))
+
def get_local_checks(cnx, rqlst, solution):
    """Check that the given user has credentials to access data read by the
    query and return a dict defining necessary "local checks" (i.e. rql
    expression in read permission defined in the schema) where no group grants
    him the permission.

    Returned dictionary's keys are variable names and values the rql expressions
    for this variable (with the given solution).

    Raise :exc:`Unauthorized` if access is known to be defined, i.e. if there is
    no matching group and no local permissions.
    """
    DBG = (server.DEBUG & server.DBG_SEC) and 'read' in server._SECURITY_CAPS
    schema = cnx.repo.schema
    user = cnx.user
    localchecks = {}
    # iterate on defined_vars and not on solutions to ignore column aliases
    for varname in rqlst.defined_vars:
        eschema = schema.eschema(solution[varname])
        if eschema.final:
            # attribute types carry no read permission of their own
            continue
        if not user.matching_groups(eschema.get_groups('read')):
            erqlexprs = eschema.get_rqlexprs('read')
            if not erqlexprs:
                # no matching group and no rql expression that may grant
                # access: definite denial
                ex = Unauthorized('read', solution[varname])
                ex.var = varname
                if DBG:
                    print('check_read_access: %s %s %s %s' %
                          (varname, eschema, user.groups, eschema.get_groups('read')))
                raise ex
            # don't insert security on variable only referenced by 'NOT X relation Y' or
            # 'NOT EXISTS(X relation Y)'
            varinfo = rqlst.defined_vars[varname].stinfo
            if varinfo['selected'] or (
                len([r for r in varinfo['relations']
                     if (not schema.rschema(r.r_type).final
                         and ((isinstance(r.parent, Exists) and r.parent.neged(strict=True))
                              or isinstance(r.parent, Not)))])
                !=
                len(varinfo['relations'])):
                localchecks[varname] = erqlexprs
    return localchecks
+
+
+# Plans #######################################################################
+
class ExecutionPlan(object):
    """the execution model of a rql query, composed of querier steps

    The plan is built by the planner (`SSPlanner`), then executed step by
    step. Read security (group checks and insertion of rql expressions as
    "local checks") is handled in :meth:`preprocess`.
    """

    def __init__(self, querier, rqlst, args, cnx):
        # original rql syntax tree
        self.rqlst = rqlst
        self.args = args or {}
        # cnx executing the query
        self.cnx = cnx
        # quick reference to the system source
        self.syssource = cnx.repo.system_source
        # execution steps
        self.steps = []
        # various resource accessors
        self.querier = querier
        self.schema = querier.schema
        self.sqlannotate = querier.sqlgen_annotate
        self.rqlhelper = cnx.vreg.rqlhelper
        # NOTE: `self.cache_key` is set externally by the querier once the
        # plan is built (see QuerierHelper.execute); preprocess() reads it and
        # may reset it to None for full-text queries.

    def annotate_rqlst(self):
        # annotate the syntax tree only once
        if not self.rqlst.annotated:
            self.rqlhelper.annotate(self.rqlst)

    def add_step(self, step):
        """add a step to the plan"""
        self.steps.append(step)

    def sqlexec(self, sql, args=None):
        # execute raw sql against the system source
        return self.syssource.sqlexec(self.cnx, sql, args)

    def execute(self):
        """execute a plan and return resulting rows"""
        for step in self.steps:
            result = step.execute()
        # the latest executed step contains the full query result
        return result

    def preprocess(self, union, security=True):
        """insert security when necessary then annotate rql st for sql generation

        return rqlst to actually execute
        """
        cached = None
        if security and self.cnx.read_security:
            # ensure security is turned off when security is inserted,
            # else we may loop for ever...
            if self.cnx.transaction_data.get('security-rqlst-cache'):
                key = self.cache_key
            else:
                key = None
            if key is not None and key in self.cnx.transaction_data:
                # reuse the security-rewritten tree cached for this key
                cachedunion, args = self.cnx.transaction_data[key]
                union.children[:] = []
                for select in cachedunion.children:
                    union.append(select)
                union.has_text_query = cachedunion.has_text_query
                args.update(self.args)
                self.args = args
                cached = True
            else:
                with self.cnx.security_enabled(read=False):
                    noinvariant = self._insert_security(union)
                if key is not None:
                    self.cnx.transaction_data[key] = (union, self.args)
        else:
            noinvariant = ()
        if cached is None:
            self.rqlhelper.simplify(union)
            self.sqlannotate(union)
            set_qdata(self.schema.rschema, union, noinvariant)
        if union.has_text_query:
            self.cache_key = None

    def _insert_security(self, union):
        """recursively insert local security checks in every select of `union`,
        returning the set of variables which may not keep the invariant status
        """
        noinvariant = set()
        for select in union.children[:]:
            for subquery in select.with_:
                self._insert_security(subquery.query)
            localchecks, restricted = self._check_permissions(select)
            if any(localchecks):
                self.cnx.rql_rewriter.insert_local_checks(
                    select, self.args, localchecks, restricted, noinvariant)
        return noinvariant

    def _check_permissions(self, rqlst):
        """Return a dict defining "local checks", i.e. RQLExpression defined in
        the schema that should be inserted in the original query, together with
        a set of variable names which requires some security to be inserted.

        Solutions where a variable has a type which the user can't definitely
        read are removed, else if the user *may* read it (i.e. if an rql
        expression is defined for the "read" permission of the related type),
        the local checks dict is updated.

        The local checks dict has entries for each different local check
        necessary, with associated solutions as value, a local check being
        defined by a list of 2-uple (variable name, rql expressions) for each
        variable which has to be checked. Solutions which don't require local
        checks will be associated to the empty tuple key.

        Note rqlst should not have been simplified at this point.
        """
        cnx = self.cnx
        msgs = []
        # dict(varname: eid), allowing to check rql expression for variables
        # which have a known eid
        varkwargs = {}
        if not cnx.transaction_data.get('security-rqlst-cache'):
            for var in rqlst.defined_vars.values():
                if var.stinfo['constnode'] is not None:
                    eid = var.stinfo['constnode'].eval(self.args)
                    varkwargs[var.name] = int(eid)
        # dictionary of variables restricted for security reason
        localchecks = {}
        restricted_vars = set()
        newsolutions = []
        for solution in rqlst.solutions:
            try:
                localcheck = get_local_checks(cnx, rqlst, solution)
            except Unauthorized as ex:
                msg = 'remove %s from solutions since %s has no %s access to %s'
                msg %= (solution, cnx.user.login, ex.args[0], ex.args[1])
                msgs.append(msg)
                LOGGER.info(msg)
            else:
                newsolutions.append(solution)
                # try to benefit of rqlexpr.check cache for entities which
                # are specified by eid in query'args
                for varname, eid in varkwargs.items():
                    try:
                        rqlexprs = localcheck.pop(varname)
                    except KeyError:
                        continue
                    # if entity has been added in the current transaction, the
                    # user can read it whatever rql expressions are associated
                    # to its type
                    if cnx.added_in_transaction(eid):
                        continue
                    for rqlexpr in rqlexprs:
                        if rqlexpr.check(cnx, eid):
                            break
                    else:
                        # BUG FIX: this previously referenced `var`, a stale
                        # loop variable from the varkwargs-building loop above
                        # (a NameError when that loop didn't run); report the
                        # variable actually being checked. Also fix the
                        # "acces" typo in the message.
                        raise Unauthorized('No read access on %r with eid %i.'
                                           % (varname, eid))
                # mark variables protected by an rql expression
                restricted_vars.update(localcheck)
                # turn local check into a dict key
                localcheck = tuple(sorted(localcheck.items()))
                localchecks.setdefault(localcheck, []).append(solution)
        # raise Unauthorized exception if the user can't access to any solution
        if not newsolutions:
            raise Unauthorized('\n'.join(msgs))
        # if there is some message, solutions have been modified and must be
        # reconsidered by the syntax tree
        if msgs:
            rqlst.set_possible_types(newsolutions)
        return localchecks, restricted_vars

    def finalize(self, select, solutions, insertedvars):
        """wrap `select` into a Union, fix solutions for inserted variables and
        preprocess it (without security) for execution
        """
        rqlst = Union()
        rqlst.append(select)
        for mainvarname, rschema, newvarname in insertedvars:
            nvartype = str(rschema.objects(solutions[0][mainvarname])[0])
            for sol in solutions:
                sol[newvarname] = nvartype
        select.clean_solutions(solutions)
        add_types_restriction(self.schema, select)
        self.rqlhelper.annotate(rqlst)
        self.preprocess(rqlst, security=False)
        return rqlst
+
+
class InsertPlan(ExecutionPlan):
    """an execution model specific to the INSERT rql query

    On top of the generic plan, it collects entity and relation definitions to
    create, and knows how to expand an incomplete entity definition into
    several complete ones (one per row matched by the WHERE clause).
    """

    def __init__(self, querier, rqlst, args, cnx):
        ExecutionPlan.__init__(self, querier, rqlst, args, cnx)
        # save originally selected variable, we may modify this
        # dictionary for substitution (query parameters)
        self.selected = rqlst.selection
        # list of rows of entities definition (ssplanner.EditedEntity)
        self.e_defs = [[]]
        # list of new relation definition (3-uple (from_eid, r_type, to_eid)
        self.r_defs = set()
        # indexes to track entity definitions bound to relation definitions
        self._r_subj_index = {}
        self._r_obj_index = {}
        # rdef -> list of expanded (subj, rtype, obj), filled as entity
        # definitions get substituted
        self._expanded_r_defs = {}

    def add_entity_def(self, edef):
        """add an entity definition to build"""
        self.e_defs[-1].append(edef)

    def add_relation_def(self, rdef):
        """add an relation definition to build"""
        self.r_defs.add(rdef)
        # index endpoints which are still entity definitions (not yet eids) so
        # they can be expanded later by substitute_entity_def()
        if not isinstance(rdef[0], int):
            self._r_subj_index.setdefault(rdef[0], []).append(rdef)
        if not isinstance(rdef[2], int):
            self._r_obj_index.setdefault(rdef[2], []).append(rdef)

    def substitute_entity_def(self, edef, edefs):
        """substitute an incomplete entity definition by a list of complete
        equivalents

        e.g. on queries such as ::
          INSERT Personne X, Societe Y: X nom N, Y nom 'toto', X travaille Y
          WHERE U login 'admin', U login N

        X will be inserted as many times as U exists, and so the X travaille Y
        relations as to be added as many time as X is inserted
        """
        if not edefs or not self.e_defs:
            # no result, no entity will be created
            self.e_defs = ()
            return
        # first remove the incomplete entity definition
        colidx = self.e_defs[0].index(edef)
        for i, row in enumerate(self.e_defs[:]):
            # replace in place by the first complete definition, then append a
            # cloned row for each additional one
            self.e_defs[i][colidx] = edefs[0]
            samplerow = self.e_defs[i]
            for edef_ in edefs[1:]:
                row = [ed.clone() for i, ed in enumerate(samplerow)
                       if i != colidx]
                row.insert(colidx, edef_)
                self.e_defs.append(row)
        # now, see if this entity def is referenced as subject in some relation
        # definition
        if edef in self._r_subj_index:
            for rdef in self._r_subj_index[edef]:
                expanded = self._expanded(rdef)
                result = []
                for exp_rdef in expanded:
                    for edef_ in edefs:
                        result.append( (edef_, exp_rdef[1], exp_rdef[2]) )
                self._expanded_r_defs[rdef] = result
        # and finally, see if this entity def is referenced as object in some
        # relation definition
        if edef in self._r_obj_index:
            for rdef in self._r_obj_index[edef]:
                expanded = self._expanded(rdef)
                result = []
                for exp_rdef in expanded:
                    for edef_ in edefs:
                        result.append( (exp_rdef[0], exp_rdef[1], edef_) )
                self._expanded_r_defs[rdef] = result

    def _expanded(self, rdef):
        """return expanded value for the given relation definition"""
        try:
            return self._expanded_r_defs[rdef]
        except KeyError:
            # not expanded yet: move it from the raw set, it stands for itself
            self.r_defs.remove(rdef)
            return [rdef]

    def relation_defs(self):
        """return the list for relation definitions to insert"""
        for rdefs in self._expanded_r_defs.values():
            for rdef in rdefs:
                yield rdef
        for rdef in self.r_defs:
            yield rdef

    def insert_entity_defs(self):
        """return eids of inserted entities in a suitable form for the resulting
        result set, e.g.:

        e.g. on queries such as ::
          INSERT Personne X, Societe Y: X nom N, Y nom 'toto', X travaille Y
          WHERE U login 'admin', U login N

        if there is two entities matching U, the result set will look like
        [(eidX1, eidY1), (eidX2, eidY2)]
        """
        cnx = self.cnx
        repo = cnx.repo
        results = []
        for row in self.e_defs:
            results.append([repo.glob_add_entity(cnx, edef)
                            for edef in row])
        return results

    def insert_relation_defs(self):
        """insert the collected relation definitions: inlined relations are
        folded into entity updates, the others are added through
        glob_add_relations
        """
        cnx = self.cnx
        repo = cnx.repo
        edited_entities = {}
        relations = {}
        for subj, rtype, obj in self.relation_defs():
            # if a string is given into args instead of an int, we get it here
            if isinstance(subj, string_types):
                subj = int(subj)
            elif not isinstance(subj, integer_types):
                subj = subj.entity.eid
            if isinstance(obj, string_types):
                obj = int(obj)
            elif not isinstance(obj, integer_types):
                obj = obj.entity.eid
            if repo.schema.rschema(rtype).inlined:
                # inlined relation: record it as an attribute edition on the
                # subject entity, reusing a previous edition if any
                if subj not in edited_entities:
                    entity = cnx.entity_from_eid(subj)
                    edited = EditedEntity(entity)
                    edited_entities[subj] = edited
                else:
                    edited = edited_entities[subj]
                edited.edited_attribute(rtype, obj)
            else:
                # regular relation: group (subj, obj) pairs per relation type
                if rtype in relations:
                    relations[rtype].append((subj, obj))
                else:
                    relations[rtype] = [(subj, obj)]
        repo.glob_add_relations(cnx, relations)
        for edited in edited_entities.values():
            repo.glob_update_entity(cnx, edited)
+
+
class QuerierHelper(object):
    """helper class to execute rql queries, putting all things together"""

    def __init__(self, repo, schema):
        # system info helper
        self._repo = repo
        # instance schema
        self.set_schema(schema)

    def set_schema(self, schema):
        """(re)set the instance schema, resetting schema-dependent caches and
        helpers"""
        self.schema = schema
        repo = self._repo
        # rql st and solution cache.
        self._rql_cache = QueryCache(repo.config['rql-cache-size'])
        # rql cache key cache. Don't bother using a Cache instance: we should
        # have a limited number of queries in there, since there are no entries
        # in this cache for user queries (which have no args)
        self._rql_ck_cache = {}
        # some cache usage stats
        self.cache_hit, self.cache_miss = 0, 0
        # rql parsing / analysing helper
        self.solutions = repo.vreg.solutions
        rqlhelper = repo.vreg.rqlhelper
        # set backend on the rql helper, will be used for function checking
        rqlhelper.backend = repo.config.system_source_config['db-driver']
        self._parse = rqlhelper.parse
        self._annotate = rqlhelper.annotate
        # rql planner
        self._planner = SSPlanner(schema, rqlhelper)
        # sql generation annotator
        self.sqlgen_annotate = SQLGenAnnotator(schema).annotate

    def parse(self, rql, annotate=False):
        """return a rql syntax tree for the given rql"""
        try:
            return self._parse(text_type(rql), annotate=annotate)
        except UnicodeError:
            raise RQLSyntaxError(rql)

    def plan_factory(self, rqlst, args, cnx):
        """create an execution plan for the given syntax tree: an `InsertPlan`
        for INSERT queries, a generic `ExecutionPlan` otherwise"""
        if rqlst.TYPE == 'insert':
            return InsertPlan(self, rqlst, args, cnx)
        return ExecutionPlan(self, rqlst, args, cnx)

    @statsd_timeit
    def execute(self, cnx, rql, args=None, build_descr=True):
        """execute a rql query, return resulting rows and their description in
        a `ResultSet` object

        * `rql` should be a Unicode string or a plain ASCII string
        * `args` the optional parameters dictionary associated to the query
        * `build_descr` is a boolean flag indicating if the description should
          be built on select queries (if false, the description will be an
          empty list)

        on INSERT queries, there will be one row with the eid of each inserted
        entity

        result for DELETE and SET queries is undefined yet

        to maximize the rql parsing/analyzing cache performance, you should
        always use substitute arguments in queries (i.e. avoid query such as
        'Any X WHERE X eid 123'!)
        """
        if server.DEBUG & (server.DBG_RQL | server.DBG_SQL):
            if server.DEBUG & (server.DBG_MORE | server.DBG_SQL):
                print('*'*80)
            print('querier input', repr(rql), repr(args))
        # parse the query and binds variables
        cachekey = (rql,)
        try:
            if args:
                # search for named args in query which are eids (hence
                # influencing query's solutions)
                eidkeys = self._rql_ck_cache[rql]
                if eidkeys:
                    # if there are some, we need a better cache key, eg (rql +
                    # entity type of each eid)
                    try:
                        cachekey = self._repo.querier_cache_key(cnx, rql,
                                                                args, eidkeys)
                    except UnknownEid:
                        # we want queries such as "Any X WHERE X eid 9999"
                        # return an empty result instead of raising UnknownEid
                        return empty_rset(rql, args)
            rqlst = self._rql_cache[cachekey]
            self.cache_hit += 1
            statsd_c('cache_hit')
        except KeyError:
            # cache miss: parse and analyze the query, then cache the tree
            self.cache_miss += 1
            statsd_c('cache_miss')
            rqlst = self.parse(rql)
            try:
                # compute solutions for rqlst and return named args in query
                # which are eids. Notice that if you may not need `eidkeys`, we
                # have to compute solutions anyway (kept as annotation on the
                # tree)
                eidkeys = self.solutions(cnx, rqlst, args)
            except UnknownEid:
                # we want queries such as "Any X WHERE X eid 9999" return an
                # empty result instead of raising UnknownEid
                return empty_rset(rql, args)
            if args and rql not in self._rql_ck_cache:
                self._rql_ck_cache[rql] = eidkeys
                if eidkeys:
                    cachekey = self._repo.querier_cache_key(cnx, rql, args,
                                                            eidkeys)
            self._rql_cache[cachekey] = rqlst
        if rqlst.TYPE != 'select':
            if cnx.read_security:
                check_no_password_selected(rqlst)
            cachekey = None
        else:
            if cnx.read_security:
                for select in rqlst.children:
                    check_no_password_selected(select)
                    check_relations_read_access(cnx, select, args)
            # on select query, always copy the cached rqlst so we don't have to
            # bother modifying it. This is not necessary on write queries since
            # a new syntax tree is built from them.
            rqlst = rqlst.copy()
            # Rewrite computed relations
            rewriter = RQLRelationRewriter(cnx)
            rewriter.rewrite(rqlst, args)
            self._annotate(rqlst)
            if args:
                # different SQL generated when some argument is None or not (IS
                # NULL). This should be considered when computing sql cache key
                cachekey += tuple(sorted([k for k, v in args.items()
                                          if v is None]))
        # make an execution plan
        plan = self.plan_factory(rqlst, args, cnx)
        plan.cache_key = cachekey
        self._planner.build_plan(plan)
        # execute the plan
        try:
            results = plan.execute()
        except (Unauthorized, ValidationError):
            # getting an Unauthorized/ValidationError exception means the
            # transaction must be rolled back
            #
            # notes:
            # * we should not reset the connections set here, since we don't want the
            #   connection to loose it during processing
            # * don't rollback if we're in the commit process, will be handled
            #   by the connection
            if cnx.commit_state is None:
                cnx.commit_state = 'uncommitable'
            raise
        # build a description for the results if necessary
        descr = ()
        if build_descr:
            if rqlst.TYPE == 'select':
                # sample selection
                if len(rqlst.children) == 1 and len(rqlst.children[0].solutions) == 1:
                    # easy, all lines are identical
                    selected = rqlst.children[0].selection
                    solution = rqlst.children[0].solutions[0]
                    description = _make_description(selected, args, solution)
                    descr = RepeatList(len(results), tuple(description))
                else:
                    # hard, delegate the work :o)
                    descr = manual_build_descr(cnx, rqlst, args, results)
            elif rqlst.TYPE == 'insert':
                # on insert plan, some entities may have been auto-casted,
                # so compute description manually even if there is only
                # one solution
                basedescr = [None] * len(plan.selected)
                todetermine = list(zip(range(len(plan.selected)), repeat(False)))
                descr = _build_descr(cnx, results, basedescr, todetermine)
            # FIXME: get number of affected entities / relations on non
            # selection queries ?
        # return a result set object
        return ResultSet(results, rql, args, descr)

    # these are overridden by set_log_methods below
    # only defining here to prevent pylint from complaining
    info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
+
# wire logging methods (info/warning/...) onto QuerierHelper; done after the
# class definition, with late imports to avoid circular import issues
from logging import getLogger
from cubicweb import set_log_methods
LOGGER = getLogger('cubicweb.querier')
set_log_methods(QuerierHelper, LOGGER)
+
+
def manual_build_descr(cnx, rqlst, args, result):
    """build a description for a given result by analysing each row

    XXX could probably be done more efficiently during execution of query
    """
    # indices of selected terms whose variable may change type from one
    # solution to another
    unstable = rqlst.get_variable_indices()
    basedescr = []
    todetermine = []
    for idx in range(len(rqlst.children[0].selection)):
        etype = _selection_idx_type(idx, rqlst, args)
        if etype is None or etype == 'Any':
            etype = None
            isfinal = True
        else:
            isfinal = etype in BASE_TYPES
        if etype is not None and idx not in unstable:
            # stable column: type known once for all rows
            basedescr.append(etype)
        else:
            # unknown or unstable: resolve per row below
            basedescr.append(None)
            todetermine.append((idx, isfinal))
    if todetermine:
        return _build_descr(cnx, result, basedescr, todetermine)
    return RepeatList(len(result), tuple(basedescr))
+
+def _build_descr(cnx, result, basedescription, todetermine):
+ description = []
+ entity_metas = cnx.entity_metas
+ todel = []
+ for i, row in enumerate(result):
+ row_descr = basedescription[:]
+ for index, isfinal in todetermine:
+ value = row[index]
+ if value is None:
+ # None value inserted by an outer join, no type
+ row_descr[index] = None
+ continue
+ if isfinal:
+ row_descr[index] = etype_from_pyobj(value)
+ else:
+ try:
+ row_descr[index] = entity_metas(value)['type']
+ except UnknownEid:
+ cnx.error('wrong eid %s in repository, you should '
+ 'db-check the database' % value)
+ todel.append(i)
+ break
+ else:
+ description.append(tuple(row_descr))
+ for i in reversed(todel):
+ del result[i]
+ return description
+
+def _make_description(selected, args, solution):
+ """return a description for a result set"""
+ description = []
+ for term in selected:
+ description.append(term.get_type(solution, args))
+ return description
+
+def _selection_idx_type(i, rqlst, args):
+ """try to return type of term at index `i` of the rqlst's selection"""
+ for select in rqlst.children:
+ term = select.selection[i]
+ for solution in select.solutions:
+ try:
+ ttype = term.get_type(solution, args)
+ if ttype is not None:
+ return ttype
+ except CoercionError:
+ return None
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/server/repository.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/server/repository.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1133 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""Defines the central class for the CubicWeb RQL server: the repository.
+
+The repository is an abstraction allowing execution of rql queries against
+data sources. Most of the work is actually done in helper classes. The
+repository mainly:
+
+* brings these classes all together to provide a single access
+ point to a cubicweb instance.
+* handles session management
+"""
+from __future__ import print_function
+
+__docformat__ = "restructuredtext en"
+
+import threading
+from warnings import warn
+from itertools import chain
+from time import time, localtime, strftime
+from contextlib import contextmanager
+
+from six.moves import range, queue
+
+from logilab.common.decorators import cached, clear_cache
+from logilab.common.deprecation import deprecated
+
+from yams import BadSchemaDefinition
+from rql.utils import rqlvar_maker
+
+from cubicweb import (CW_MIGRATION_MAP, QueryError,
+ UnknownEid, AuthenticationError, ExecutionError,
+ BadConnectionId, ValidationError, Unauthorized,
+ UniqueTogetherError, onevent, ViolatedConstraint)
+from cubicweb import cwvreg, schema, server
+from cubicweb.server import ShuttingDown, utils, hook, querier, sources
+from cubicweb.server.session import Session, InternalManager
+
+NO_CACHE_RELATIONS = set( [('owned_by', 'object'),
+ ('created_by', 'object'),
+ ('cw_source', 'object'),
+ ])
+
+def prefill_entity_caches(entity):
+ cnx = entity._cw
+ # prefill entity relation caches
+ for rschema in entity.e_schema.subject_relations():
+ rtype = str(rschema)
+ if rtype in schema.VIRTUAL_RTYPES or (rtype, 'subject') in NO_CACHE_RELATIONS:
+ continue
+ if rschema.final:
+ entity.cw_attr_cache.setdefault(rtype, None)
+ else:
+ entity.cw_set_relation_cache(rtype, 'subject',
+ cnx.empty_rset())
+ for rschema in entity.e_schema.object_relations():
+ rtype = str(rschema)
+ if rtype in schema.VIRTUAL_RTYPES or (rtype, 'object') in NO_CACHE_RELATIONS:
+ continue
+ entity.cw_set_relation_cache(rtype, 'object', cnx.empty_rset())
+
+def del_existing_rel_if_needed(cnx, eidfrom, rtype, eidto):
+ """delete existing relation when adding a new one if card is 1 or ?
+
+ have to be done once the new relation has been inserted to avoid having
+ an entity without a relation for some time
+
+ this kind of behaviour has to be done in the repository so we don't have
+ hook ordering hazards
+ """
+ # skip that if integrity explicitly disabled
+ if not cnx.is_hook_category_activated('activeintegrity'):
+ return
+ rdef = cnx.rtype_eids_rdef(rtype, eidfrom, eidto)
+ card = rdef.cardinality
+ # one may be tempted to check for neweids but this may cause more than one
+ # relation even with '1?' cardinality if those relations are added in the
+ # same transaction where the entity is being created. This never occurs from
+ # the web interface but may occur during test or dbapi connection (though
+ # not expected for this). So: don't do it, we pretend to ensure repository
+ # consistency.
+ #
+ # notes:
+ # * inlined relations will be implicitly deleted for the subject entity
+ # * we don't want read permissions to be applied but we want delete
+ # permission to be checked
+ if card[0] in '1?':
+ with cnx.security_enabled(read=False):
+ cnx.execute('DELETE X %s Y WHERE X eid %%(x)s, '
+ 'NOT Y eid %%(y)s' % rtype,
+ {'x': eidfrom, 'y': eidto})
+ if card[1] in '1?':
+ with cnx.security_enabled(read=False):
+ cnx.execute('DELETE X %s Y WHERE Y eid %%(y)s, '
+ 'NOT X eid %%(x)s' % rtype,
+ {'x': eidfrom, 'y': eidto})
+
+
+def preprocess_inlined_relations(cnx, entity):
+ """when an entity is added, check if it has some inlined relation which
+ requires to be extracted for proper hook calls
+ """
+ relations = []
+ activeintegrity = cnx.is_hook_category_activated('activeintegrity')
+ eschema = entity.e_schema
+ for attr in entity.cw_edited:
+ rschema = eschema.subjrels[attr]
+ if not rschema.final: # inlined relation
+ value = entity.cw_edited[attr]
+ relations.append((attr, value))
+ cnx.update_rel_cache_add(entity.eid, attr, value)
+ rdef = cnx.rtype_eids_rdef(attr, entity.eid, value)
+ if rdef.cardinality[1] in '1?' and activeintegrity:
+ with cnx.security_enabled(read=False):
+ cnx.execute('DELETE X %s Y WHERE Y eid %%(y)s' % attr,
+ {'x': entity.eid, 'y': value})
+ return relations
+
+
+class NullEventBus(object):
+ def publish(self, msg):
+ pass
+
+ def add_subscription(self, topic, callback):
+ pass
+
+ def start(self):
+ pass
+
+ def stop(self):
+ pass
+
+
+class Repository(object):
+ """a repository provides access to a set of persistent storages for
+ entities and relations
+ """
+
+ def __init__(self, config, tasks_manager=None, vreg=None):
+ self.config = config
+ if vreg is None:
+ vreg = cwvreg.CWRegistryStore(config)
+ self.vreg = vreg
+ self._tasks_manager = tasks_manager
+
+ self.app_instances_bus = NullEventBus()
+ self.info('starting repository from %s', self.config.apphome)
+ # dictionary of opened sessions
+ self._sessions = {}
+
+ # list of functions to be called at regular interval
+ # list of running threads
+ self._running_threads = []
+ # initial schema, should be built or replaced later
+ self.schema = schema.CubicWebSchema(config.appid)
+ self.vreg.schema = self.schema # until actual schema is loaded...
+ # shutdown flag
+ self.shutting_down = False
+ # sources (additional sources info in the system database)
+ self.system_source = self.get_source('native', 'system',
+ config.system_source_config.copy())
+ self.sources_by_uri = {'system': self.system_source}
+ # querier helper, need to be created after sources initialization
+ self.querier = querier.QuerierHelper(self, self.schema)
+ # cache eid -> (type, extid, actual source)
+ self._type_source_cache = {}
+ # cache extid -> eid
+ self._extid_cache = {}
+ # open some connection sets
+ if config.init_cnxset_pool:
+ self.init_cnxset_pool()
+ # the hooks manager
+ self.hm = hook.HooksManager(self.vreg)
+ # registry hook to fix user class on registry reload
+ @onevent('after-registry-reload', self)
+ def fix_user_classes(self):
+ # After registry reload the 'CWUser' class used for CWEtype
+ # changed. So any existing user object have a different class than
+ # the new loaded one. We are hot fixing this.
+ usercls = self.vreg['etypes'].etype_class('CWUser')
+ for session in self._sessions.values():
+ if not isinstance(session.user, InternalManager):
+ session.user.__class__ = usercls
+
+ def init_cnxset_pool(self):
+ """should be called bootstrap_repository, as this is what it does"""
+ config = self.config
+ self._cnxsets_pool = queue.Queue()
+ # 0. init a cnxset that will be used to fetch bootstrap information from
+ # the database
+ self._cnxsets_pool.put_nowait(self.system_source.wrapped_connection())
+ # 1. set used cubes
+ if config.creating or not config.read_instance_schema:
+ config.bootstrap_cubes()
+ else:
+ self.set_schema(self.config.load_bootstrap_schema(), resetvreg=False)
+ config.init_cubes(self.get_cubes())
+ # 2. load schema
+ if config.quick_start:
+ # quick start: only to get a minimal repository to get cubes
+ # information (eg dump/restore/...)
+ #
+ # restrict appobject_path to only load hooks and entity classes in
+ # the registry
+ config.cube_appobject_path = set(('hooks', 'entities'))
+ config.cubicweb_appobject_path = set(('hooks', 'entities'))
+ # limit connections pool to 1
+ config['connections-pool-size'] = 1
+ if config.quick_start or config.creating or not config.read_instance_schema:
+ # load schema from the file system
+ if not config.creating:
+ self.info("set fs instance'schema")
+ self.set_schema(config.load_schema(expand_cubes=True))
+ else:
+ # normal start: load the instance schema from the database
+ self.info('loading schema from the repository')
+ self.set_schema(self.deserialize_schema())
+ # 3. initialize data sources
+ if config.creating:
+ # call init_creating so that for instance native source can
+ # configure tsearch according to postgres version
+ self.system_source.init_creating()
+ else:
+ self.init_sources_from_database()
+ if 'CWProperty' in self.schema:
+ self.vreg.init_properties(self.properties())
+ # 4. close initialization connection set and reopen fresh ones for
+ # proper initialization
+ self._get_cnxset().close(True)
+ self.cnxsets = [] # list of available cnxsets (can't iterate on a Queue)
+ for i in range(config['connections-pool-size']):
+ self.cnxsets.append(self.system_source.wrapped_connection())
+ self._cnxsets_pool.put_nowait(self.cnxsets[-1])
+
+ # internals ###############################################################
+
+ def init_sources_from_database(self):
+ self.sources_by_eid = {}
+ if self.config.quick_start \
+ or not 'CWSource' in self.schema: # # 3.10 migration
+ self.system_source.init_creating()
+ return
+ with self.internal_cnx() as cnx:
+ # FIXME: sources should be ordered (add_entity priority)
+ for sourceent in cnx.execute(
+ 'Any S, SN, SA, SC WHERE S is_instance_of CWSource, '
+ 'S name SN, S type SA, S config SC').entities():
+ if sourceent.name == 'system':
+ self.system_source.eid = sourceent.eid
+ self.sources_by_eid[sourceent.eid] = self.system_source
+ self.system_source.init(True, sourceent)
+ continue
+ self.add_source(sourceent)
+
+ def _clear_planning_caches(self):
+ clear_cache(self, 'source_defs')
+
+ def add_source(self, sourceent):
+ try:
+ source = self.get_source(sourceent.type, sourceent.name,
+ sourceent.host_config, sourceent.eid)
+ except RuntimeError:
+ if self.config.repairing:
+ self.exception('cant setup source %s, skipped', sourceent.name)
+ return
+ raise
+ self.sources_by_eid[sourceent.eid] = source
+ self.sources_by_uri[sourceent.name] = source
+ if self.config.source_enabled(source):
+ # call source's init method to complete their initialisation if
+ # needed (for instance looking for persistent configuration using an
+ # internal session, which is not possible until connections sets have been
+ # initialized)
+ source.init(True, sourceent)
+ else:
+ source.init(False, sourceent)
+ self._clear_planning_caches()
+
+ def remove_source(self, uri):
+ source = self.sources_by_uri.pop(uri)
+ del self.sources_by_eid[source.eid]
+ self._clear_planning_caches()
+
+ def get_source(self, type, uri, source_config, eid=None):
+ # set uri and type in source config so it's available through
+ # source_defs()
+ source_config['uri'] = uri
+ source_config['type'] = type
+ return sources.get_source(type, source_config, self, eid)
+
+ def set_schema(self, schema, resetvreg=True):
+ self.info('set schema %s %#x', schema.name, id(schema))
+ if resetvreg:
+ # trigger full reload of all appobjects
+ self.vreg.set_schema(schema)
+ else:
+ self.vreg._set_schema(schema)
+ self.querier.set_schema(schema)
+ for source in self.sources_by_uri.values():
+ source.set_schema(schema)
+ self.schema = schema
+
+ def deserialize_schema(self):
+ """load schema from the database"""
+ from cubicweb.server.schemaserial import deserialize_schema
+ appschema = schema.CubicWebSchema(self.config.appid)
+ self.debug('deserializing db schema into %s %#x', appschema.name, id(appschema))
+ with self.internal_cnx() as cnx:
+ try:
+ deserialize_schema(appschema, cnx)
+ except BadSchemaDefinition:
+ raise
+ except Exception as ex:
+ import traceback
+ traceback.print_exc()
+ raise Exception('Is the database initialised ? (cause: %s)' % ex)
+ return appschema
+
+ def _prepare_startup(self):
+ """Prepare "Repository as a server" for startup.
+
+ * trigger server startup hook,
+ * register session clean up task.
+ """
+ if not (self.config.creating or self.config.repairing
+ or self.config.quick_start):
+ # call instance level initialisation hooks
+ self.hm.call_hooks('server_startup', repo=self)
+ # register a task to cleanup expired session
+ self.cleanup_session_time = self.config['cleanup-session-time'] or 60 * 60 * 24
+ assert self.cleanup_session_time > 0
+ cleanup_session_interval = min(60*60, self.cleanup_session_time / 3)
+ assert self._tasks_manager is not None, "This Repository is not intended to be used as a server"
+ self._tasks_manager.add_looping_task(cleanup_session_interval,
+ self.clean_sessions)
+
+ def start_looping_tasks(self):
+ """Actual "Repository as a server" startup.
+
+ * trigger server startup hook,
+ * register session clean up task,
+ * start all tasks.
+
+ XXX Other startup related stuffs are done elsewhere. In Repository
+ XXX __init__ or in external codes (various server managers).
+ """
+ self._prepare_startup()
+ assert self._tasks_manager is not None, "This Repository is not intended to be used as a server"
+ self._tasks_manager.start()
+
+ def looping_task(self, interval, func, *args):
+ """register a function to be called every `interval` seconds.
+
+ looping tasks can only be registered during repository initialization,
+ once done this method will fail.
+ """
+ assert self._tasks_manager is not None, "This Repository is not intended to be used as a server"
+ self._tasks_manager.add_looping_task(interval, func, *args)
+
+ def threaded_task(self, func):
+ """start function in a separated thread"""
+ utils.RepoThread(func, self._running_threads).start()
+
+ #@locked
+ def _get_cnxset(self):
+ try:
+ return self._cnxsets_pool.get(True, timeout=5)
+ except queue.Empty:
+ raise Exception('no connections set available after 5 secs, probably either a '
+ 'bug in code (too many uncommited/rolled back '
+ 'connections) or too much load on the server (in '
+ 'which case you can try to set a bigger '
+ 'connections pool size)')
+
+ def _free_cnxset(self, cnxset):
+ self._cnxsets_pool.put_nowait(cnxset)
+
+ def shutdown(self):
+ """called on server stop event to properly close opened sessions and
+ connections
+ """
+ assert not self.shutting_down, 'already shutting down'
+ if not (self.config.creating or self.config.repairing
+ or self.config.quick_start):
+ # then, the system source is still available
+ self.hm.call_hooks('before_server_shutdown', repo=self)
+ self.shutting_down = True
+ self.system_source.shutdown()
+ if self._tasks_manager is not None:
+ self._tasks_manager.stop()
+ if not (self.config.creating or self.config.repairing
+ or self.config.quick_start):
+ self.hm.call_hooks('server_shutdown', repo=self)
+ for thread in self._running_threads:
+ self.info('waiting thread %s...', thread.getName())
+ thread.join()
+ self.info('thread %s finished', thread.getName())
+ self.close_sessions()
+ while not self._cnxsets_pool.empty():
+ cnxset = self._cnxsets_pool.get_nowait()
+ try:
+ cnxset.close(True)
+ except Exception:
+ self.exception('error while closing %s' % cnxset)
+ continue
+ hits, misses = self.querier.cache_hit, self.querier.cache_miss
+ try:
+ self.info('rql st cache hit/miss: %s/%s (%s%% hits)', hits, misses,
+ (hits * 100) / (hits + misses))
+ hits, misses = self.system_source.cache_hit, self.system_source.cache_miss
+ self.info('sql cache hit/miss: %s/%s (%s%% hits)', hits, misses,
+ (hits * 100) / (hits + misses))
+ nocache = self.system_source.no_cache
+ self.info('sql cache usage: %s/%s (%s%%)', hits+ misses, nocache,
+ ((hits + misses) * 100) / (hits + misses + nocache))
+ except ZeroDivisionError:
+ pass
+
+ def check_auth_info(self, cnx, login, authinfo):
+ """validate authentication, raise AuthenticationError on failure, return
+ associated CWUser's eid on success.
+ """
+ # iter on sources_by_uri then check enabled source since sources doesn't
+ # contain copy based sources
+ for source in self.sources_by_uri.values():
+ if self.config.source_enabled(source) and source.support_entity('CWUser'):
+ try:
+ return source.authenticate(cnx, login, **authinfo)
+ except AuthenticationError:
+ continue
+ else:
+ raise AuthenticationError('authentication failed with all sources')
+
+ def authenticate_user(self, cnx, login, **authinfo):
+ """validate login / password, raise AuthenticationError on failure
+ return associated CWUser instance on success
+ """
+ eid = self.check_auth_info(cnx, login, authinfo)
+ cwuser = self._build_user(cnx, eid)
+ if self.config.consider_user_state and \
+ not cwuser.cw_adapt_to('IWorkflowable').state in cwuser.AUTHENTICABLE_STATES:
+ raise AuthenticationError('user is not in authenticable state')
+ return cwuser
+
+ def _build_user(self, cnx, eid):
+ """return a CWUser entity for user with the given eid"""
+ cls = self.vreg['etypes'].etype_class('CWUser')
+ st = cls.fetch_rqlst(cnx.user, ordermethod=None)
+ st.add_eid_restriction(st.get_variable('X'), 'x', 'Substitute')
+ rset = cnx.execute(st.as_string(), {'x': eid})
+ assert len(rset) == 1, rset
+ cwuser = rset.get_entity(0, 0)
+ # pylint: disable=W0104
+ # prefetch / cache cwuser's groups and properties. This is especially
+ # useful for internal sessions to avoid security insertions
+ cwuser.groups
+ cwuser.properties
+ return cwuser
+
+ # public (dbapi) interface ################################################
+
+ @deprecated("[3.19] use _cw.call_service('repo_stats')")
+ def stats(self): # XXX restrict to managers session?
+ """Return a dictionary containing some statistics about the repository
+ resources usage.
+
+ This is a public method, not requiring a session id.
+
+ This method is deprecated in favor of using _cw.call_service('repo_stats')
+ """
+ with self.internal_cnx() as cnx:
+ return cnx.call_service('repo_stats')
+
+ @deprecated("[3.19] use _cw.call_service('repo_gc_stats')")
+ def gc_stats(self, nmax=20):
+ """Return a dictionary containing some statistics about the repository
+ memory usage.
+
+ This is a public method, not requiring a session id.
+
+ nmax is the max number of (most) referenced object returned as
+ the 'referenced' result
+ """
+ with self.internal_cnx() as cnx:
+ return cnx.call_service('repo_gc_stats', nmax=nmax)
+
+ def get_schema(self):
+ """Return the instance schema.
+
+ This is a public method, not requiring a session id.
+ """
+ return self.schema
+
+ def get_cubes(self):
+ """Return the list of cubes used by this instance.
+
+ This is a public method, not requiring a session id.
+ """
+ versions = self.get_versions(not (self.config.creating
+ or self.config.repairing
+ or self.config.quick_start
+ or self.config.mode == 'test'))
+ cubes = list(versions)
+ cubes.remove('cubicweb')
+ return cubes
+
+ def get_option_value(self, option, foreid=None):
+ """Return the value for `option` in the configuration.
+
+ This is a public method, not requiring a session id.
+
+ `foreid` argument is deprecated and now useless (as of 3.19).
+ """
+ if foreid is not None:
+ warn('[3.19] foreid argument is deprecated', DeprecationWarning,
+ stacklevel=2)
+ # XXX we may want to check we don't give sensible information
+ return self.config[option]
+
+ @cached
+ def get_versions(self, checkversions=False):
+ """Return a dictionary containing cubes used by this instance
+ as key with their version as value, including cubicweb version.
+
+ This is a public method, not requiring a session id.
+ """
+ from logilab.common.changelog import Version
+ vcconf = {}
+ with self.internal_cnx() as cnx:
+ for pk, version in cnx.execute(
+ 'Any K,V WHERE P is CWProperty, P value V, P pkey K, '
+ 'P pkey ~="system.version.%"', build_descr=False):
+ cube = pk.split('.')[-1]
+ # XXX cubicweb migration
+ if cube in CW_MIGRATION_MAP:
+ cube = CW_MIGRATION_MAP[cube]
+ version = Version(version)
+ vcconf[cube] = version
+ if checkversions:
+ if cube != 'cubicweb':
+ fsversion = self.config.cube_version(cube)
+ else:
+ fsversion = self.config.cubicweb_version()
+ if version < fsversion:
+ msg = ('instance has %s version %s but %s '
+ 'is installed. Run "cubicweb-ctl upgrade".')
+ raise ExecutionError(msg % (cube, version, fsversion))
+ return vcconf
+
+ @cached
+ def source_defs(self):
+ """Return a dictionary containing source uris as keys and a
+ dictionary describing each source as value.
+
+ This is a public method, not requiring a session id.
+ """
+ sources = {}
+ # remove sensitive information
+ for uri, source in self.sources_by_uri.items():
+ sources[uri] = source.public_config
+ return sources
+
+ def properties(self):
+ """Return a result set containing system wide properties.
+
+ This is a public method, not requiring a session id.
+ """
+ with self.internal_cnx() as cnx:
+ # don't use cnx.execute, we don't want rset.req set
+ return self.querier.execute(cnx, 'Any K,V WHERE P is CWProperty,'
+ 'P pkey K, P value V, NOT P for_user U',
+ build_descr=False)
+
+ @deprecated("[3.19] Use session.call_service('register_user') instead'")
+ def register_user(self, login, password, email=None, **kwargs):
+ """check a user with the given login exists, if not create it with the
+ given password. This method is designed to be used for anonymous
+ registration on public web site.
+ """
+ with self.internal_cnx() as cnx:
+ cnx.call_service('register_user', login=login, password=password,
+ email=email, **kwargs)
+ cnx.commit()
+
+ def find_users(self, fetch_attrs, **query_attrs):
+ """yield user attributes for cwusers matching the given query_attrs
+ (the result set cannot survive this method call)
+
+ This can be used by low-privileges account (anonymous comes to
+ mind).
+
+ `fetch_attrs`: tuple of attributes to be fetched
+ `query_attrs`: dict of attr/values to restrict the query
+ """
+ assert query_attrs
+ if not hasattr(self, '_cwuser_attrs'):
+ cwuser = self.schema['CWUser']
+ self._cwuser_attrs = set(str(rschema)
+ for rschema, _eschema in cwuser.attribute_definitions()
+ if not rschema.meta)
+ cwuserattrs = self._cwuser_attrs
+ for k in chain(fetch_attrs, query_attrs):
+ if k not in cwuserattrs:
+ raise Exception('bad input for find_user')
+ with self.internal_cnx() as cnx:
+ varmaker = rqlvar_maker()
+ vars = [(attr, next(varmaker)) for attr in fetch_attrs]
+ rql = 'Any %s WHERE X is CWUser, ' % ','.join(var[1] for var in vars)
+ rql += ','.join('X %s %s' % (var[0], var[1]) for var in vars) + ','
+ rset = cnx.execute(rql + ','.join('X %s %%(%s)s' % (attr, attr)
+ for attr in query_attrs),
+ query_attrs)
+ return rset.rows
+
+ def new_session(self, login, **kwargs):
+ """open a new session for a given user
+
+ raise `AuthenticationError` if the authentication failed
+ raise `ConnectionError` if we can't open a connection
+ """
+ cnxprops = kwargs.pop('cnxprops', None)
+ # use an internal connection
+ with self.internal_cnx() as cnx:
+ # try to get a user object
+ user = self.authenticate_user(cnx, login, **kwargs)
+ session = Session(user, self, cnxprops)
+ user._cw = user.cw_rset.req = session
+ user.cw_clear_relation_cache()
+ self._sessions[session.sessionid] = session
+ self.info('opened session %s for user %s', session.sessionid, login)
+ with session.new_cnx() as cnx:
+ self.hm.call_hooks('session_open', cnx)
+ # commit connection at this point in case write operation has been
+ # done during `session_open` hooks
+ cnx.commit()
+ return session
+
+ def connect(self, login, **kwargs):
+ """open a new session for a given user and return its sessionid """
+ return self.new_session(login, **kwargs).sessionid
+
+ def close(self, sessionid, txid=None, checkshuttingdown=True):
+ """close the session with the given id"""
+ session = self._get_session(sessionid, txid=txid,
+ checkshuttingdown=checkshuttingdown)
+ # operations uncommitted before close are rolled back before the hook is called
+ with session.new_cnx() as cnx:
+ self.hm.call_hooks('session_close', cnx)
+ # commit connection at this point in case write operation has been
+ # done during `session_close` hooks
+ cnx.commit()
+ session.close()
+ del self._sessions[sessionid]
+ self.info('closed session %s for user %s', sessionid, session.user.login)
+
+ # session handling ########################################################
+
+ def close_sessions(self):
+ """close every opened sessions"""
+ for sessionid in list(self._sessions):
+ try:
+ self.close(sessionid, checkshuttingdown=False)
+ except Exception: # XXX BaseException?
+ self.exception('error while closing session %s' % sessionid)
+
+ def clean_sessions(self):
+ """close sessions not used since an amount of time specified in the
+ configuration
+ """
+ mintime = time() - self.cleanup_session_time
+ self.debug('cleaning session unused since %s',
+ strftime('%H:%M:%S', localtime(mintime)))
+ nbclosed = 0
+ for session in self._sessions.values():
+ if session.timestamp < mintime:
+ self.close(session.sessionid)
+ nbclosed += 1
+ return nbclosed
+
+ @contextmanager
+ def internal_cnx(self):
+ """Context manager returning a Connection using internal user which have
+ every access rights on the repository.
+
+ Beware that unlike the older :meth:`internal_session`, internal
+ connections have all hooks beside security enabled.
+ """
+ with Session(InternalManager(), self) as session:
+ with session.new_cnx() as cnx:
+ cnx.user._cw = cnx # XXX remove when "vreg = user._cw.vreg"
+ # hack in entity.py is gone
+ with cnx.security_enabled(read=False, write=False):
+ yield cnx
+
+ def _get_session(self, sessionid, txid=None, checkshuttingdown=True):
+ """return the session associated with the given session identifier"""
+ if checkshuttingdown and self.shutting_down:
+ raise ShuttingDown('Repository is shutting down')
+ try:
+ session = self._sessions[sessionid]
+ except KeyError:
+ raise BadConnectionId('No such session %s' % sessionid)
+ return session
+
+ # data sources handling ###################################################
+ # * correspondence between eid and (type, source)
+ # * correspondence between eid and local id (i.e. specific to a given source)
+
+ def type_and_source_from_eid(self, eid, cnx):
+ """return a tuple `(type, extid, actual source uri)` for the entity of
+ the given `eid`
+ """
+ try:
+ eid = int(eid)
+ except ValueError:
+ raise UnknownEid(eid)
+ try:
+ return self._type_source_cache[eid]
+ except KeyError:
+ etype, extid, auri = self.system_source.eid_type_source(cnx, eid)
+ self._type_source_cache[eid] = (etype, extid, auri)
+ return etype, extid, auri
+
+ def clear_caches(self, eids):
+ etcache = self._type_source_cache
+ extidcache = self._extid_cache
+ rqlcache = self.querier._rql_cache
+ for eid in eids:
+ try:
+ etype, extid, auri = etcache.pop(int(eid)) # may be a string in some cases
+ rqlcache.pop( ('%s X WHERE X eid %s' % (etype, eid),), None)
+ extidcache.pop(extid, None)
+ except KeyError:
+ etype = None
+ rqlcache.pop( ('Any X WHERE X eid %s' % eid,), None)
+ self.system_source.clear_eid_cache(eid, etype)
+
+ def type_from_eid(self, eid, cnx):
+ """return the type of the entity with id """
+ return self.type_and_source_from_eid(eid, cnx)[0]
+
+ def querier_cache_key(self, cnx, rql, args, eidkeys):
+ cachekey = [rql]
+ for key in sorted(eidkeys):
+ try:
+ etype = self.type_from_eid(args[key], cnx)
+ except KeyError:
+ raise QueryError('bad cache key %s (no value)' % key)
+ except TypeError:
+ raise QueryError('bad cache key %s (value: %r)' % (
+ key, args[key]))
+ cachekey.append(etype)
+ # ensure eid is correctly typed in args
+ args[key] = int(args[key])
+ return tuple(cachekey)
+
+ @deprecated('[3.22] use the new store API')
+ def extid2eid(self, source, extid, etype, cnx, insert=True,
+ sourceparams=None):
+ """Return eid from a local id. If the eid is a negative integer, that
+ means the entity is known but has been copied back to the system source
+ hence should be ignored.
+
+ If no record is found, ie the entity is not known yet:
+
+ 1. an eid is attributed
+
+ 2. the source's :meth:`before_entity_insertion` method is called to
+ build the entity instance
+
+ 3. unless source's :attr:`should_call_hooks` tell otherwise,
+ 'before_add_entity' hooks are called
+
+ 4. record is added into the system source
+
+ 5. the source's :meth:`after_entity_insertion` method is called to
+ complete building of the entity instance
+
+ 6. unless source's :attr:`should_call_hooks` tell otherwise,
+ 'before_add_entity' hooks are called
+ """
+ try:
+ return self._extid_cache[extid]
+ except KeyError:
+ pass
+ eid = self.system_source.extid2eid(cnx, extid)
+ if eid is not None:
+ self._extid_cache[extid] = eid
+ self._type_source_cache[eid] = (etype, extid, source.uri)
+ return eid
+ if not insert:
+ return
+ # no link between extid and eid, create one
+ # write query, ensure connection's mode is 'write' so connections
+ # won't be released until commit/rollback
+ try:
+ eid = self.system_source.create_eid(cnx)
+ self._extid_cache[extid] = eid
+ self._type_source_cache[eid] = (etype, extid, source.uri)
+ entity = source.before_entity_insertion(
+ cnx, extid, etype, eid, sourceparams)
+ if source.should_call_hooks:
+ # get back a copy of operation for later restore if
+ # necessary, see below
+ pending_operations = cnx.pending_operations[:]
+ self.hm.call_hooks('before_add_entity', cnx, entity=entity)
+ self.add_info(cnx, entity, source, extid)
+ source.after_entity_insertion(cnx, extid, entity, sourceparams)
+ if source.should_call_hooks:
+ self.hm.call_hooks('after_add_entity', cnx, entity=entity)
+ return eid
+ except Exception:
+ # XXX do some cleanup manually so that the transaction has a
+ # chance to be committed, with simply this entity discarded
+ self._extid_cache.pop(extid, None)
+ self._type_source_cache.pop(eid, None)
+ if 'entity' in locals():
+ hook.CleanupDeletedEidsCacheOp.get_instance(cnx).add_data(entity.eid)
+ self.system_source.delete_info_multi(cnx, [entity])
+ if source.should_call_hooks:
+ cnx.pending_operations = pending_operations
+ raise
+
+ def add_info(self, cnx, entity, source, extid=None):
+ """add type and source info for an eid into the system table,
+ and index the entity with the full text index
+ """
+ # begin by inserting eid/type/source/extid into the entities table
+ hook.CleanupNewEidsCacheOp.get_instance(cnx).add_data(entity.eid)
+ self.system_source.add_info(cnx, entity, source, extid)
+
+ def _delete_cascade_multi(self, cnx, entities):
+ """same as _delete_cascade but accepts a list of entities with
+ the same etype and belonging to the same source.
+ """
+ pendingrtypes = cnx.transaction_data.get('pendingrtypes', ())
+ # delete remaining relations: if user can delete the entity, he can
+ # delete all its relations without security checking
+ with cnx.security_enabled(read=False, write=False):
+ in_eids = ','.join([str(_e.eid) for _e in entities])
+ with cnx.running_hooks_ops():
+ for rschema, _, role in entities[0].e_schema.relation_definitions():
+ if rschema.rule:
+ continue # computed relation
+ rtype = rschema.type
+ if rtype in schema.VIRTUAL_RTYPES or rtype in pendingrtypes:
+ continue
+ if role == 'subject':
+ # don't skip inlined relation so they are regularly
+ # deleted and so hooks are correctly called
+ rql = 'DELETE X %s Y WHERE X eid IN (%s)' % (rtype, in_eids)
+ else:
+ rql = 'DELETE Y %s X WHERE X eid IN (%s)' % (rtype, in_eids)
+ try:
+ cnx.execute(rql, build_descr=False)
+ except ValidationError:
+ raise
+ except Unauthorized:
+ self.exception('Unauthorized exception while cascading delete for entity %s. '
+ 'RQL: %s.\nThis should not happen since security is disabled here.',
+ entities, rql)
+ raise
+ except Exception:
+ if self.config.mode == 'test':
+ raise
+ self.exception('error while cascading delete for entity %s. RQL: %s',
+ entities, rql)
+
+    def init_entity_caches(self, cnx, entity, source):
+        """add entity to connection entities cache and repo's extid cache.
+        Return entity's ext id if the source isn't the system source.
+        """
+        cnx.set_entity_cache(entity)
+        if source.uri == 'system':
+            # entities from the system source have no external id
+            extid = None
+        else:
+            extid = source.get_extid(entity)
+            # the extid cache is keyed on the string form of the external id
+            self._extid_cache[str(extid)] = entity.eid
+        self._type_source_cache[entity.eid] = (entity.cw_etype, extid, source.uri)
+        return extid
+
+    def glob_add_entity(self, cnx, edited):
+        """add an entity to the repository
+
+        the entity eid should originally be None and a unique eid is assigned to
+        the entity instance
+
+        Return the newly allocated eid.
+        """
+        entity = edited.entity
+        entity._cw_is_saved = False # entity has an eid but is not yet saved
+        # init edited_attributes before calling before_add_entity hooks
+        entity.cw_edited = edited
+        source = self.system_source
+        # allocate an eid to the entity before calling hooks
+        entity.eid = self.system_source.create_eid(cnx)
+        # set caches asap
+        extid = self.init_entity_caches(cnx, entity, source)
+        if server.DEBUG & server.DBG_REPO:
+            print('ADD entity', self, entity.cw_etype, entity.eid, edited)
+        prefill_entity_caches(entity)
+        self.hm.call_hooks('before_add_entity', cnx, entity=entity)
+        relations = preprocess_inlined_relations(cnx, entity)
+        edited.set_defaults()
+        if cnx.is_hook_category_activated('integrity'):
+            edited.check(creation=True)
+        self.add_info(cnx, entity, source, extid)
+        try:
+            source.add_entity(cnx, entity)
+        except (UniqueTogetherError, ViolatedConstraint) as exc:
+            # translate the low-level database error into a user-friendly
+            # validation error through the IUserFriendlyError adapter
+            userhdlr = cnx.vreg['adapters'].select(
+                'IUserFriendlyError', cnx, entity=entity, exc=exc)
+            userhdlr.raise_user_exception()
+        edited.saved = entity._cw_is_saved = True
+        # trigger after_add_entity after after_add_relation
+        self.hm.call_hooks('after_add_entity', cnx, entity=entity)
+        # call hooks for inlined relations
+        for attr, value in relations:
+            self.hm.call_hooks('before_add_relation', cnx,
+                               eidfrom=entity.eid, rtype=attr, eidto=value)
+            self.hm.call_hooks('after_add_relation', cnx,
+                               eidfrom=entity.eid, rtype=attr, eidto=value)
+        return entity.eid
+
+    def glob_update_entity(self, cnx, edited):
+        """replace an entity in the repository
+        the type and the eid of an entity must not be changed
+        """
+        entity = edited.entity
+        if server.DEBUG & server.DBG_REPO:
+            print('UPDATE entity', entity.cw_etype, entity.eid,
+                  entity.cw_attr_cache, edited)
+        hm = self.hm
+        eschema = entity.e_schema
+        cnx.set_entity_cache(entity)
+        orig_edited = getattr(entity, 'cw_edited', None)
+        entity.cw_edited = edited
+        source = self.system_source
+        try:
+            only_inline_rels, need_fti_update = True, False
+            relations = []
+            # split edited attributes between final attributes (plain columns)
+            # and inlined relations, which need relation hooks
+            for attr in list(edited):
+                if attr == 'eid':
+                    continue
+                rschema = eschema.subjrels[attr]
+                if rschema.final:
+                    if getattr(eschema.rdef(attr), 'fulltextindexed', False):
+                        need_fti_update = True
+                    only_inline_rels = False
+                else:
+                    # inlined relation
+                    previous_value = entity.related(attr) or None
+                    if previous_value is not None:
+                        previous_value = previous_value[0][0] # got a result set
+                        if previous_value == entity.cw_attr_cache[attr]:
+                            # unchanged: no delete hooks to trigger
+                            previous_value = None
+                        else:
+                            hm.call_hooks('before_delete_relation', cnx,
+                                          eidfrom=entity.eid, rtype=attr,
+                                          eidto=previous_value)
+                    relations.append((attr, edited[attr], previous_value))
+            # call hooks for inlined relations
+            for attr, value, _t in relations:
+                hm.call_hooks('before_add_relation', cnx,
+                              eidfrom=entity.eid, rtype=attr, eidto=value)
+            if not only_inline_rels:
+                hm.call_hooks('before_update_entity', cnx, entity=entity)
+            if cnx.is_hook_category_activated('integrity'):
+                edited.check()
+            try:
+                source.update_entity(cnx, entity)
+                edited.saved = True
+            except (UniqueTogetherError, ViolatedConstraint) as exc:
+                # translate the database error into a user-friendly one
+                userhdlr = cnx.vreg['adapters'].select(
+                    'IUserFriendlyError', cnx, entity=entity, exc=exc)
+                userhdlr.raise_user_exception()
+            self.system_source.update_info(cnx, entity, need_fti_update)
+            if not only_inline_rels:
+                hm.call_hooks('after_update_entity', cnx, entity=entity)
+            for attr, value, prevvalue in relations:
+                # if the relation is already cached, update existant cache
+                relcache = entity.cw_relation_cached(attr, 'subject')
+                if prevvalue is not None:
+                    hm.call_hooks('after_delete_relation', cnx,
+                                  eidfrom=entity.eid, rtype=attr, eidto=prevvalue)
+                    if relcache is not None:
+                        cnx.update_rel_cache_del(entity.eid, attr, prevvalue)
+                del_existing_rel_if_needed(cnx, entity.eid, attr, value)
+                cnx.update_rel_cache_add(entity.eid, attr, value)
+                hm.call_hooks('after_add_relation', cnx,
+                              eidfrom=entity.eid, rtype=attr, eidto=value)
+        finally:
+            # always restore the previously edited attributes, even on error
+            if orig_edited is not None:
+                entity.cw_edited = orig_edited
+
+
+    def glob_delete_entities(self, cnx, eids):
+        """delete a list of entities and all related entities from the repository
+
+        :param eids: a set (or frozenset) of eids to delete; passing another
+          iterable is deprecated since 3.13
+        """
+        # mark eids as being deleted in cnx info and setup cache update
+        # operation (register pending eids before actual deletion to avoid
+        # multiple call to glob_delete_entities)
+        op = hook.CleanupDeletedEidsCacheOp.get_instance(cnx)
+        if not isinstance(eids, (set, frozenset)):
+            warn('[3.13] eids should be given as a set', DeprecationWarning,
+                 stacklevel=2)
+            eids = frozenset(eids)
+        # only handle eids not already pending deletion in this transaction
+        eids = eids - op._container
+        op._container |= eids
+        data_by_etype = {} # values are [list of entities]
+        #
+        # WARNING: the way this dictionary is populated is heavily optimized
+        #          and does not use setdefault on purpose. Unless a new release
+        #          of the Python interpreter advertises large perf improvements
+        #          in setdefault, this should not be changed without profiling.
+        for eid in eids:
+            etype = self.type_from_eid(eid, cnx)
+            # XXX should cache entity's cw_metainformation
+            entity = cnx.entity_from_eid(eid, etype)
+            try:
+                data_by_etype[etype].append(entity)
+            except KeyError:
+                data_by_etype[etype] = [entity]
+        source = self.system_source
+        for etype, entities in data_by_etype.items():
+            if server.DEBUG & server.DBG_REPO:
+                print('DELETE entities', etype, [entity.eid for entity in entities])
+            self.hm.call_hooks('before_delete_entity', cnx, entities=entities)
+            self._delete_cascade_multi(cnx, entities)
+            source.delete_entities(cnx, entities)
+            source.delete_info_multi(cnx, entities)
+            self.hm.call_hooks('after_delete_entity', cnx, entities=entities)
+        # don't clear cache here, it is done in a hook on commit
+
+    def glob_add_relation(self, cnx, subject, rtype, object):
+        """add a relation to the repository"""
+        # delegate to the multi-relation implementation with a single pair
+        self.glob_add_relations(cnx, {rtype: [(subject, object)]})
+
+ def glob_add_relations(self, cnx, relations):
+ """add several relations to the repository
+
+ relations is a dictionary rtype: [(subj_eid, obj_eid), ...]
+ """
+ source = self.system_source
+ relations_by_rtype = {}
+ subjects_by_types = {}
+ objects_by_types = {}
+ activintegrity = cnx.is_hook_category_activated('activeintegrity')
+ for rtype, eids_subj_obj in relations.items():
+ if server.DEBUG & server.DBG_REPO:
+ for subjeid, objeid in eids_subj_obj:
+ print('ADD relation', subjeid, rtype, objeid)
+ for subjeid, objeid in eids_subj_obj:
+ if rtype in relations_by_rtype:
+ relations_by_rtype[rtype].append((subjeid, objeid))
+ else:
+ relations_by_rtype[rtype] = [(subjeid, objeid)]
+ if not activintegrity:
+ continue
+ # take care to relation of cardinality '?1', as all eids will
+ # be inserted later, we've remove duplicated eids since they
+ # won't be caught by `del_existing_rel_if_needed`
+ rdef = cnx.rtype_eids_rdef(rtype, subjeid, objeid)
+ card = rdef.cardinality
+ if card[0] in '?1':
+ with cnx.security_enabled(read=False):
+ cnx.execute('DELETE X %s Y WHERE X eid %%(x)s, '
+ 'NOT Y eid %%(y)s' % rtype,
+ {'x': subjeid, 'y': objeid})
+ subjects = subjects_by_types.setdefault(rdef, {})
+ if subjeid in subjects:
+ del relations_by_rtype[rtype][subjects[subjeid]]
+ subjects[subjeid] = len(relations_by_rtype[rtype]) - 1
+ continue
+ subjects[subjeid] = len(relations_by_rtype[rtype]) - 1
+ if card[1] in '?1':
+ with cnx.security_enabled(read=False):
+ cnx.execute('DELETE X %s Y WHERE Y eid %%(y)s, '
+ 'NOT X eid %%(x)s' % rtype,
+ {'x': subjeid, 'y': objeid})
+ objects = objects_by_types.setdefault(rdef, {})
+ if objeid in objects:
+ del relations_by_rtype[rtype][objects[objeid]]
+ objects[objeid] = len(relations_by_rtype[rtype])
+ continue
+ objects[objeid] = len(relations_by_rtype[rtype])
+ for rtype, source_relations in relations_by_rtype.items():
+ self.hm.call_hooks('before_add_relation', cnx,
+ rtype=rtype, eids_from_to=source_relations)
+ for rtype, source_relations in relations_by_rtype.items():
+ source.add_relations(cnx, rtype, source_relations)
+ rschema = self.schema.rschema(rtype)
+ for subjeid, objeid in source_relations:
+ cnx.update_rel_cache_add(subjeid, rtype, objeid, rschema.symmetric)
+ for rtype, source_relations in relations_by_rtype.items():
+ self.hm.call_hooks('after_add_relation', cnx,
+ rtype=rtype, eids_from_to=source_relations)
+
+    def glob_delete_relation(self, cnx, subject, rtype, object):
+        """delete a relation from the repository
+
+        calls 'before_delete_relation' hooks, removes the relation from the
+        system source and the connection's relation cache, then calls
+        'after_delete_relation' hooks
+        """
+        if server.DEBUG & server.DBG_REPO:
+            print('DELETE relation', subject, rtype, object)
+        source = self.system_source
+        self.hm.call_hooks('before_delete_relation', cnx,
+                           eidfrom=subject, rtype=rtype, eidto=object)
+        source.delete_relation(cnx, subject, rtype, object)
+        rschema = self.schema.rschema(rtype)
+        cnx.update_rel_cache_del(subject, rtype, object, rschema.symmetric)
+        self.hm.call_hooks('after_delete_relation', cnx,
+                           eidfrom=subject, rtype=rtype, eidto=object)
+
+
+
+
+ # these are overridden by set_log_methods below
+ # only defining here to prevent pylint from complaining
+ info = warning = error = critical = exception = debug = lambda msg, *a, **kw: None
+
+from logging import getLogger
+from cubicweb import set_log_methods
+set_log_methods(Repository, getLogger('cubicweb.repository'))
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/server/rqlannotation.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/server/rqlannotation.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,413 @@
+# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""Functions to add additional annotations on a rql syntax tree to ease later
+code generation.
+"""
+from __future__ import print_function
+
+__docformat__ = "restructuredtext en"
+
+from rql import BadRQLQuery
+from rql.nodes import Relation, VariableRef, Constant, Variable, Or, Exists
+from rql.utils import common_parent
+
+def _annotate_select(annotator, rqlst):
+    """annotate a Select node of the syntax tree: set 'invariant' and
+    'principal' entries in each defined variable's stinfo, and return True
+    if the query (or one of its subqueries) involves a full-text search
+    """
+    has_text_query = False
+    for subquery in rqlst.with_:
+        if annotator._annotate_union(subquery.query):
+            has_text_query = True
+    #if server.DEBUG:
+    #    print '-------- sql annotate', repr(rqlst)
+    getrschema = annotator.schema.rschema
+    for var in rqlst.defined_vars.values():
+        stinfo = var.stinfo
+        if stinfo.get('ftirels'):
+            has_text_query = True
+        if stinfo['attrvar']:
+            stinfo['invariant'] = False
+            stinfo['principal'] = _select_main_var(stinfo['rhsrelations'])
+            continue
+        if not stinfo['relations'] and stinfo['typerel'] is None:
+            # Any X, Any MAX(X)...
+            # those particular queries should be executed using the system
+            # entities table unless there is some type restriction
+            stinfo['invariant'] = True
+            stinfo['principal'] = None
+            continue
+        if any(rel for rel in stinfo['relations'] if rel.r_type == 'eid' and rel.operator() != '=') and \
+               not any(r for r in var.stinfo['relations'] - var.stinfo['rhsrelations']
+                       if r.r_type != 'eid' and (getrschema(r.r_type).inlined or getrschema(r.r_type).final)):
+            # Any X WHERE X eid > 2
+            # those particular queries should be executed using the system entities table
+            stinfo['invariant'] = True
+            stinfo['principal'] = None
+            continue
+        if stinfo['selected'] and var.valuable_references() == 1+bool(stinfo['constnode']):
+            # "Any X", "Any X, Y WHERE X attr Y"
+            stinfo['invariant'] = False
+            continue
+        joins = set()
+        invariant = False
+        # walk every reference of the variable; `break` means "cannot be
+        # invariant", falling through to the for-else means it may be
+        for ref in var.references():
+            rel = ref.relation()
+            if rel is None or rel.is_types_restriction():
+                continue
+            lhs, rhs = rel.get_parts()
+            onlhs = ref is lhs
+            role = 'subject' if onlhs else 'object'
+            if rel.r_type == 'eid':
+                if not (onlhs and len(stinfo['relations']) > 1):
+                    break
+                if not stinfo['constnode']:
+                    joins.add( (rel, role) )
+                continue
+            elif rel.r_type == 'identity':
+                # identity can't be used as principal, so check other relation are used
+                # XXX explain rhs.operator == '='
+                if rhs.operator != '=' or len(stinfo['relations']) <= 1: #(stinfo['constnode'] and rhs.operator == '='):
+                    break
+                joins.add( (rel, role) )
+                continue
+            rschema = getrschema(rel.r_type)
+            if rel.optional:
+                if rel in stinfo.get('optrelations', ()):
+                    # optional variable can't be invariant if this is the lhs
+                    # variable of an inlined relation
+                    if not rel in stinfo['rhsrelations'] and rschema.inlined:
+                        break
+                # variable used as main variable of an optional relation can't
+                # be invariant, unless we can use some other relation as
+                # reference for the outer join
+                elif not stinfo['constnode']:
+                    break
+                elif len(stinfo['relations']) == 2:
+                    if onlhs:
+                        ostinfo = rhs.children[0].variable.stinfo
+                    else:
+                        ostinfo = lhs.variable.stinfo
+                    if not (ostinfo.get('optcomparisons') or
+                            any(orel for orel in ostinfo['relations']
+                                if orel.optional and orel is not rel)):
+                        break
+            if rschema.final or (onlhs and rschema.inlined):
+                if rschema.type != 'has_text':
+                    # need join anyway if the variable appears in a final or
+                    # inlined relation
+                    break
+                joins.add( (rel, role) )
+                continue
+            if not stinfo['constnode']:
+                if rschema.inlined and rel.neged(strict=True):
+                    # if relation is inlined, can't be invariant if that
+                    # variable is used anywhere else.
+                    # see 'Any P WHERE NOT N ecrit_par P, N eid 512':
+                    # sql for 'NOT N ecrit_par P' is 'N.ecrit_par is NULL' so P
+                    # can use N.ecrit_par as principal
+                    if (stinfo['selected'] or len(stinfo['relations']) > 1):
+                        break
+                joins.add( (rel, role) )
+        else:
+            # if there is at least one ambigous relation and no other to
+            # restrict types, can't be invariant since we need to filter out
+            # other types
+            if not annotator.is_ambiguous(var):
+                invariant = True
+        stinfo['invariant'] = invariant
+        if invariant and joins:
+            # remember rqlst/solutions analyze information
+            # we have to select a kindof "main" relation which will "extrajoins"
+            # the other
+            # priority should be given to relation which are not in inner queries
+            # (eg exists)
+            try:
+                stinfo['principal'] = principal = _select_principal(var.scope, joins)
+                if getrschema(principal.r_type).inlined:
+                    # the scope of the lhs variable must be equal or outer to the
+                    # rhs variable's scope (since it's retrieved from lhs's table)
+                    sstinfo = principal.children[0].variable.stinfo
+                    sstinfo['scope'] = common_parent(sstinfo['scope'], stinfo['scope']).scope
+            except CantSelectPrincipal:
+                stinfo['invariant'] = False
+    # see unittest_rqlannotation. test_has_text_security_cache_bug
+    # XXX probably more to do, but yet that work without more...
+    for col_alias in rqlst.aliases.values():
+        if col_alias.stinfo.get('ftirels'):
+            has_text_query = True
+    return has_text_query
+
+
+
+class CantSelectPrincipal(Exception):
+    """raised when no 'principal' relation can be found for an invariant
+    variable
+    """
+
+def _select_principal(scope, relations, _sort=lambda x:x):
+ """given a list of rqlst relations, select one which will be used to
+ represent an invariant variable (e.g. using on extremity of the relation
+ instead of the variable's type table
+ """
+ # _sort argument is there for test
+ diffscope_rels = {}
+ ored_rels = set()
+ diffscope_rels = set()
+ for rel, role in _sort(relations):
+ # note: only eid and has_text among all final relations may be there
+ if rel.r_type in ('eid', 'identity'):
+ continue
+ if rel.optional is not None and len(relations) > 1:
+ if role == 'subject' and rel.optional == 'right':
+ continue
+ if role == 'object' and rel.optional == 'left':
+ continue
+ if rel.ored(traverse_scope=True):
+ ored_rels.add(rel)
+ elif rel.scope is scope:
+ return rel
+ elif not rel.neged(traverse_scope=True):
+ diffscope_rels.add(rel)
+ if len(ored_rels) > 1:
+ ored_rels_copy = tuple(ored_rels)
+ for rel1 in ored_rels_copy:
+ for rel2 in ored_rels_copy:
+ if rel1 is rel2:
+ continue
+ if isinstance(common_parent(rel1, rel2), Or):
+ ored_rels.discard(rel1)
+ ored_rels.discard(rel2)
+ for rel in _sort(ored_rels):
+ if rel.scope is scope:
+ return rel
+ diffscope_rels.add(rel)
+ # if DISTINCT query, can use variable from a different scope as principal
+ # since introduced duplicates will be removed
+ if scope.stmt.distinct and diffscope_rels:
+ return next(iter(_sort(diffscope_rels)))
+ # XXX could use a relation from a different scope if it can't generate
+ # duplicates, so we should have to check cardinality
+ raise CantSelectPrincipal()
+
+def _select_main_var(relations):
+    """given a list of rqlst relations, select one which will be used as main
+    relation for the rhs variable
+
+    raise BadRQLQuery when no relation qualifies
+    """
+    principal = None
+    others = []
+    # sort for test predictability
+    for rel in sorted(relations, key=lambda x: (x.children[0].name, x.r_type)):
+        # only equality relation with a variable as rhs may be principal
+        if rel.operator() not in ('=', 'IS') \
+               or not isinstance(rel.children[1].children[0], VariableRef) or rel.neged(strict=True):
+            continue
+        if rel.optional:
+            # keep optional relations as a fallback only
+            others.append(rel)
+            continue
+        if rel.scope is rel.stmt:
+            # top-level relation: best candidate, use it right away
+            return rel
+        principal = rel
+    if principal is None:
+        if others:
+            return others[0]
+        raise BadRQLQuery('unable to find principal in %s' % ', '.join(
+            r.as_string() for r in relations))
+    return principal
+
+
+def set_qdata(getrschema, union, noinvariant):
+    """recursive function to set querier data on variables in the syntax tree
+
+    sets `_q_invariant` on each variable: True when the annotation phase
+    marked it invariant, unless it belongs to `noinvariant` (except when its
+    principal is a has_text relation)
+    """
+    for select in union.children:
+        for subquery in select.with_:
+            set_qdata(getrschema, subquery.query, noinvariant)
+        for var in select.defined_vars.values():
+            if var.stinfo['invariant']:
+                if var in noinvariant and not var.stinfo['principal'].r_type == 'has_text':
+                    var._q_invariant = False
+                else:
+                    var._q_invariant = True
+            else:
+                var._q_invariant = False
+
+
+class SQLGenAnnotator(object):
+    """annotates rql syntax trees with information later used by the SQL
+    generator (variable invariance, principal relations, full-text info)
+    """
+    def __init__(self, schema):
+        self.schema = schema
+        # domain of non final entity types, used as the default possible
+        # types set for unrestricted variables
+        self.nfdomain = frozenset(eschema.type for eschema in schema.entities()
+                                  if not eschema.final)
+
+    def annotate(self, rqlst):
+        """add information to the rql syntax tree to help sources to do their
+        job (read sql generation)
+
+        a variable is tagged as invariant if:
+        * it's a non final variable
+        * it's not used as lhs in any final or inlined relation
+        * there is no type restriction on this variable (either explicit in the
+        syntax tree or because a solution for this variable has been removed
+        due to security filtering)
+        """
+        #assert rqlst.TYPE == 'select', rqlst
+        rqlst.has_text_query = self._annotate_union(rqlst)
+
+    def _annotate_union(self, union):
+        # annotate each Select of the union, return True if any involves a
+        # full-text query
+        has_text_query = False
+        for select in union.children:
+            if _annotate_select(self, select):
+                has_text_query = True
+        return has_text_query
+
+    def is_ambiguous(self, var):
+        # ignore has_text relation when we know it will be used as principal.
+        # This is expected by the rql2sql generator which will use the `entities`
+        # table to filter out by type if necessary, This optimisation is very
+        # interesting in multi-sources cases, as it may avoid a costly query
+        # on sources to get all entities of a given type to achieve this, while
+        # we have all the necessary information.
+        root = var.stmt.root # Union node
+        # rel.scope -> Select or Exists node, so add .parent to get Union from
+        # Select node
+        rels = [rel for rel in var.stinfo['relations'] if rel.scope.parent is root]
+        if len(rels) == 1 and rels[0].r_type == 'has_text':
+            return False
+        try:
+            # ambiguity data is computed once per statement and cached on it
+            data = var.stmt._deamb_data
+        except AttributeError:
+            data = var.stmt._deamb_data = IsAmbData(self.schema, self.nfdomain)
+            data.compute(var.stmt)
+        return data.is_ambiguous(var)
+
+
+class IsAmbData(object):
+    """helper computing which variables of a rql statement remain ambiguous
+    (i.e. restricted to more than their possible types) once relation
+    constraints have been propagated
+    """
+    def __init__(self, schema, nfdomain):
+        self.schema = schema
+        # shortcuts
+        self.rschema = schema.rschema
+        self.eschema = schema.eschema
+        # domain for non final variables
+        self.nfdomain = nfdomain
+        # {var: possible solutions set}
+        self.varsols = {}
+        # set of ambiguous variables
+        self.ambiguousvars = set()
+        # remember if a variable has been deambiguified by another to avoid
+        # doing the opposite
+        self.deambification_map = {}
+        # not invariant variables (access to final.inlined relation)
+        self.not_invariants = set()
+
+    def is_ambiguous(self, var):
+        """return True if the variable is (still) considered ambiguous"""
+        return var in self.ambiguousvars
+
+    def restrict(self, var, restricted_domain):
+        """intersect the variable's candidate types with `restricted_domain`;
+        the variable stops being ambiguous once its candidates match its
+        statically possible types
+        """
+        self.varsols[var] &= restricted_domain
+        if var in self.ambiguousvars and self.varsols[var] == var.stinfo['possibletypes']:
+            self.ambiguousvars.remove(var)
+
+    def compute(self, rqlst):
+        """propagate relation constraints over the statement's variables,
+        iterating until no more variable can be deambiguified
+        """
+        # set domains for each variable
+        for varname, var in rqlst.defined_vars.items():
+            if var.stinfo['uidrel'] is not None or \
+                   self.eschema(rqlst.solutions[0][varname]).final:
+                # uid-restricted or final variable: types are already known
+                ptypes = var.stinfo['possibletypes']
+            else:
+                ptypes = set(self.nfdomain)
+                self.ambiguousvars.add(var)
+            self.varsols[var] = ptypes
+        if not self.ambiguousvars:
+            return
+        # apply relation restriction
+        self.maydeambrels = maydeambrels = {}
+        for rel in rqlst.iget_nodes(Relation):
+            if rel.r_type == 'eid' or rel.is_types_restriction():
+                continue
+            lhs, rhs = rel.get_variable_parts()
+            if isinstance(lhs, VariableRef) or isinstance(rhs, VariableRef):
+                rschema = self.rschema(rel.r_type)
+                if rschema.inlined or rschema.final:
+                    # subject of a final/inlined relation can't be invariant
+                    self.not_invariants.add(lhs.variable)
+                self.set_rel_constraint(lhs, rel, rschema.subjects)
+                self.set_rel_constraint(rhs, rel, rschema.objects)
+        # try to deambiguify more variables by considering other variables'type
+        modified = True
+        while modified and self.ambiguousvars:
+            modified = False
+            for var in self.ambiguousvars.copy():
+                try:
+                    for rel in (var.stinfo['relations'] & maydeambrels[var]):
+                        if self.deambiguifying_relation(var, rel):
+                            modified = True
+                            break
+                except KeyError:
+                    # no relation to deambiguify
+                    continue
+
+    def _debug_print(self):
+        # development helper: dump the current deambification state
+        print('varsols', dict((x, sorted(str(v) for v in values))
+                              for x, values in self.varsols.items()))
+        print('ambiguous vars', sorted(self.ambiguousvars))
+
+    def set_rel_constraint(self, term, rel, etypes_func):
+        """restrict an ambiguous variable appearing as `term` of `rel` using
+        the relation's allowed subject/object types (`etypes_func`)
+        """
+        if isinstance(term, VariableRef) and self.is_ambiguous(term.variable):
+            var = term.variable
+            if len(var.stinfo['relations']) == 1 \
+                   or rel.scope is var.scope or rel.r_type == 'identity':
+                self.restrict(var, frozenset(etypes_func()))
+                try:
+                    self.maydeambrels[var].add(rel)
+                except KeyError:
+                    self.maydeambrels[var] = set((rel,))
+
+    def deambiguifying_relation(self, var, rel):
+        """try to deambiguify `var` using the other end of `rel`;
+        return True on success
+        """
+        lhs, rhs = rel.get_variable_parts()
+        onlhs = var is getattr(lhs, 'variable', None)
+        other = onlhs and rhs or lhs
+        otheretypes = None
+        # XXX isinstance(other.variable, Variable) to skip column alias
+        if isinstance(other, VariableRef) and isinstance(other.variable, Variable):
+            deambiguifier = other.variable
+            if not var is self.deambification_map.get(deambiguifier):
+                if var.stinfo['typerel'] is None:
+                    otheretypes = deambiguifier.stinfo['possibletypes']
+                elif not self.is_ambiguous(deambiguifier):
+                    otheretypes = self.varsols[deambiguifier]
+                elif deambiguifier in self.not_invariants:
+                    # we know variable won't be invariant, try to use
+                    # it to deambguify the current variable
+                    otheretypes = self.varsols[deambiguifier]
+            if deambiguifier.stinfo['typerel'] is None:
+                # if deambiguifier has no type restriction using 'is',
+                # don't record it
+                deambiguifier = None
+        elif isinstance(other, Constant) and other.uidtype:
+            otheretypes = (other.uidtype,)
+            deambiguifier = None
+        if otheretypes is not None:
+            # to restrict, we must check that for all type in othertypes,
+            # possible types on the other end of the relation are matching
+            # variable's possible types
+            rschema = self.rschema(rel.r_type)
+            if onlhs:
+                rtypefunc = rschema.subjects
+            else:
+                rtypefunc = rschema.objects
+            for otheretype in otheretypes:
+                reltypes = frozenset(rtypefunc(otheretype))
+                if var.stinfo['possibletypes'] != reltypes:
+                    return False
+            self.restrict(var, var.stinfo['possibletypes'])
+            self.deambification_map[var] = deambiguifier
+            return True
+        return False
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/server/schema2sql.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/server/schema2sql.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,300 @@
+# copyright 2004-2015 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of cubicweb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
+"""write a schema as sql"""
+
+__docformat__ = "restructuredtext en"
+
+from hashlib import md5
+
+from six import string_types
+from six.moves import range
+
+from yams.constraints import (SizeConstraint, UniqueConstraint, Attribute,
+ NOW, TODAY)
+
+# default are usually not handled at the sql level. If you want them, set
+# SET_DEFAULT to True
+SET_DEFAULT = False
+
+def rschema_has_table(rschema, skip_relations):
+    """Return True if the given relation schema should have a table in the
+    database, i.e. it is neither final, inlined, computed (rule) nor
+    explicitly skipped
+    """
+    return not (rschema.final or rschema.inlined or rschema.rule or rschema.type in skip_relations)
+
+
+def schema2sql(dbhelper, schema, skip_entities=(), skip_relations=(), prefix=''):
+    """return a string of SQL statements creating the tables necessary to
+    store the objects corresponding to the given schema
+
+    :param prefix: optional prefix prepended to table and column names
+    """
+    output = []
+    w = output.append
+    for etype in sorted(schema.entities()):
+        eschema = schema.eschema(etype)
+        if eschema.final or eschema.type in skip_entities:
+            continue
+        w(eschema2sql(dbhelper, eschema, skip_relations, prefix=prefix))
+    for rtype in sorted(schema.relations()):
+        rschema = schema.rschema(rtype)
+        if rschema_has_table(rschema, skip_relations):
+            w(rschema2sql(rschema))
+    return '\n'.join(output)
+
+
+def dropschema2sql(dbhelper, schema, skip_entities=(), skip_relations=(), prefix=''):
+    """return a string of SQL statements dropping the tables that store the
+    objects corresponding to the given schema
+    """
+    output = []
+    w = output.append
+    for etype in sorted(schema.entities()):
+        eschema = schema.eschema(etype)
+        if eschema.final or eschema.type in skip_entities:
+            continue
+        stmts = dropeschema2sql(dbhelper, eschema, skip_relations, prefix=prefix)
+        for stmt in stmts:
+            w(stmt)
+    for rtype in sorted(schema.relations()):
+        rschema = schema.rschema(rtype)
+        if rschema_has_table(rschema, skip_relations):
+            w(droprschema2sql(rschema))
+    return '\n'.join(output)
+
+
+def eschema_attrs(eschema, skip_relations):
+    """return the list of (rschema, attrschema) couples backing the entity
+    type's columns: final attributes first, then inlined relations (for
+    which attrschema is None)
+    """
+    attrs = [attrdef for attrdef in eschema.attribute_definitions()
+             if not attrdef[0].type in skip_relations]
+    attrs += [(rschema, None)
+              for rschema in eschema.subject_relations()
+              if not rschema.final and rschema.inlined]
+    return attrs
+
+def unique_index_name(eschema, columns):
+    """return a deterministic name for a multi-column unique index, based
+    on a hash of the entity type and the sorted column names
+    """
+    return u'unique_%s' % md5((eschema.type +
+                               ',' +
+                               ','.join(sorted(columns))).encode('ascii')).hexdigest()
+
+def iter_unique_index_names(eschema):
+    """yield (columns, index name) for each 'unique together' constraint of
+    the given entity schema
+    """
+    for columns in eschema._unique_together or ():
+        yield columns, unique_index_name(eschema, columns)
+
+def dropeschema2sql(dbhelper, eschema, skip_relations=(), prefix=''):
+    """return a list of SQL statements dropping an entity type's table and
+    the artefacts backing its multi-column unique indices
+    """
+    # not necessary to drop indexes, that's implictly done when
+    # dropping the table, but we need to drop SQLServer views used to
+    # create multicol unique indices
+    statements = []
+    tablename = prefix + eschema.type
+    if eschema._unique_together is not None:
+        for columns, index_name in iter_unique_index_names(eschema):
+            cols = ['%s%s' % (prefix, col) for col in columns]
+            sqls = dbhelper.sqls_drop_multicol_unique_index(tablename, cols, index_name)
+            statements += sqls
+    statements += ['DROP TABLE %s;' % (tablename)]
+    return statements
+
+
+def eschema2sql(dbhelper, eschema, skip_relations=(), prefix=''):
+    """return a string of SQL statements creating an entity type's table,
+    including check constraints and indexes
+    """
+    output = []
+    w = output.append
+    table = prefix + eschema.type
+    w('CREATE TABLE %s(' % (table))
+    attrs = eschema_attrs(eschema, skip_relations)
+    # XXX handle objectinline physical mode
+    for i in range(len(attrs)):
+        rschema, attrschema = attrs[i]
+        if attrschema is not None:
+            sqltype = aschema2sql(dbhelper, eschema, rschema, attrschema,
+                                  indent=' ')
+        else: # inline relation
+            sqltype = 'integer REFERENCES entities (eid)'
+        if i == len(attrs) - 1:
+            # last column definition: no trailing comma
+            w(' %s%s %s' % (prefix, rschema.type, sqltype))
+        else:
+            w(' %s%s %s,' % (prefix, rschema.type, sqltype))
+    for rschema, aschema in attrs:
+        if aschema is None: # inline relation
+            continue
+        attr = rschema.type
+        rdef = rschema.rdef(eschema.type, aschema.type)
+        for constraint in rdef.constraints:
+            cstrname, check = check_constraint(eschema, aschema, attr, constraint, dbhelper, prefix=prefix)
+            if cstrname is not None:
+                w(', CONSTRAINT %s CHECK(%s)' % (cstrname, check))
+    w(');')
+    # create indexes
+    for i in range(len(attrs)):
+        rschema, attrschema = attrs[i]
+        # inlined relation columns are always indexed, attribute columns only
+        # when their definition asks for it
+        if attrschema is None or eschema.rdef(rschema).indexed:
+            w(dbhelper.sql_create_index(table, prefix + rschema.type))
+    for columns, index_name in iter_unique_index_names(eschema):
+        cols = ['%s%s' % (prefix, col) for col in columns]
+        sqls = dbhelper.sqls_create_multicol_unique_index(table, cols, index_name)
+        for sql in sqls:
+            w(sql)
+    w('')
+    return '\n'.join(output)
+
+def as_sql(value, dbhelper, prefix):
+    """return the SQL form of a yams constraint boundary value: attribute
+    references become (prefixed) column names, NOW/TODAY become the
+    backend's current timestamp/date expressions, anything else is returned
+    as-is
+    """
+    if isinstance(value, Attribute):
+        return prefix + value.attr
+    elif isinstance(value, TODAY):
+        return dbhelper.sql_current_date()
+    elif isinstance(value, NOW):
+        return dbhelper.sql_current_timestamp()
+    else:
+        # XXX more quoting for literals?
+        return value
+
+def check_constraint(eschema, aschema, attr, constraint, dbhelper, prefix=''):
+    """return a (constraint name, SQL check expression) couple for the given
+    yams constraint, or (None, None) if the constraint type can't be
+    expressed as a SQL CHECK
+    """
+    # XXX should find a better name
+    # name is derived from a hash so it is stable for a given constraint
+    cstrname = 'cstr' + md5((eschema.type + attr + constraint.type() +
+                             (constraint.serialize() or '')).encode('ascii')).hexdigest()
+    if constraint.type() == 'BoundaryConstraint':
+        value = as_sql(constraint.boundary, dbhelper, prefix)
+        return cstrname, '%s%s %s %s' % (prefix, attr, constraint.operator, value)
+    elif constraint.type() == 'IntervalBoundConstraint':
+        condition = []
+        if constraint.minvalue is not None:
+            value = as_sql(constraint.minvalue, dbhelper, prefix)
+            condition.append('%s%s >= %s' % (prefix, attr, value))
+        if constraint.maxvalue is not None:
+            value = as_sql(constraint.maxvalue, dbhelper, prefix)
+            condition.append('%s%s <= %s' % (prefix, attr, value))
+        return cstrname, ' AND '.join(condition)
+    elif constraint.type() == 'StaticVocabularyConstraint':
+        sample = next(iter(constraint.vocabulary()))
+        if not isinstance(sample, string_types):
+            values = ', '.join(str(word) for word in constraint.vocabulary())
+        else:
+            # XXX better quoting?
+            values = ', '.join("'%s'" % word.replace("'", "''") for word in constraint.vocabulary())
+        return cstrname, '%s%s IN (%s)' % (prefix, attr, values)
+    return None, None
+
+def aschema2sql(dbhelper, eschema, rschema, aschema, creating=True, indent=''):
+    """return the SQL type definition string for an attribute column
+    (type, optional DEFAULT, PRIMARY KEY / NOT NULL modifiers)
+
+    :param creating: False when the result is used to ALTER an existing
+      column, in which case key/null modifiers are omitted
+    """
+    attr = rschema.type
+    rdef = rschema.rdef(eschema.type, aschema.type)
+    sqltype = type_from_rdef(dbhelper, rdef, creating)
+    if SET_DEFAULT:
+        default = eschema.default(attr)
+        if default is not None:
+            if aschema.type == 'Boolean':
+                sqltype += ' DEFAULT %s' % dbhelper.boolean_value(default)
+            elif aschema.type == 'String':
+                sqltype += ' DEFAULT %r' % str(default)
+            elif aschema.type in ('Int', 'BigInt', 'Float'):
+                sqltype += ' DEFAULT %s' % default
+            # XXX ignore default for other type
+            # this is expected for NOW / TODAY
+    if creating:
+        if rdef.uid:
+            sqltype += ' PRIMARY KEY REFERENCES entities (eid)'
+        elif rdef.cardinality[0] == '1':
+            # don't set NOT NULL if backend isn't able to change it later
+            if dbhelper.alter_column_support:
+                sqltype += ' NOT NULL'
+    # else we're getting sql type to alter a column, we don't want key / indexes
+    # / null modifiers
+    return sqltype
+
+
+def type_from_rdef(dbhelper, rdef, creating=True):
+    """return a sql type string corresponding to the relation definition,
+    honouring size and unique constraints
+    """
+    constraints = list(rdef.constraints)
+    unique, sqltype = False, None
+    if rdef.object.type == 'String':
+        for constraint in constraints:
+            if isinstance(constraint, SizeConstraint):
+                if constraint.max is not None:
+                    # fall back to varchar when the backend's type mapping has
+                    # no dedicated entry for size-constrained strings
+                    size_constrained_string = dbhelper.TYPE_MAPPING.get(
+                        'SizeConstrainedString', 'varchar(%s)')
+                    sqltype = size_constrained_string % constraint.max
+            elif isinstance(constraint, UniqueConstraint):
+                unique = True
+    if sqltype is None:
+        sqltype = sql_type(dbhelper, rdef)
+    # UNIQUE is only emitted at table creation time
+    if creating and unique:
+        sqltype += ' UNIQUE'
+    return sqltype
+
+
+def sql_type(dbhelper, rdef):
+    """return the backend SQL type for a relation definition, resolving
+    callable entries of the backend's type mapping
+    """
+    sqltype = dbhelper.TYPE_MAPPING[rdef.object]
+    if callable(sqltype):
+        sqltype = sqltype(rdef)
+    return sqltype
+
+
+# SQL template for the table (and its indexes) backing a non-inlined
+# relation type; %(table)s is the '<rtype>_relation' table name
+_SQL_SCHEMA = """
+CREATE TABLE %(table)s (
+  eid_from INTEGER NOT NULL REFERENCES entities (eid),
+  eid_to INTEGER NOT NULL REFERENCES entities (eid),
+  CONSTRAINT %(table)s_p_key PRIMARY KEY(eid_from, eid_to)
+);
+
+CREATE INDEX %(table)s_from_idx ON %(table)s(eid_from);
+CREATE INDEX %(table)s_to_idx ON %(table)s(eid_to);"""
+
+
+def rschema2sql(rschema):
+    """return the SQL statements creating the table and indexes backing a
+    (non-computed) relation type
+    """
+    assert not rschema.rule
+    return _SQL_SCHEMA % {'table': '%s_relation' % rschema.type}
+
+
+def droprschema2sql(rschema):
+    """return sql to drop a relation type's table"""
+    # not necessary to drop indexes, that's implictly done when dropping
+    # the table
+    return 'DROP TABLE %s_relation;' % rschema.type
+
+
+def grant_schema(schema, user, set_owner=True, skip_entities=(), prefix=''):
+    """return a string of SQL statements granting `user` access (and,
+    optionally, ownership) on all the tables backing the given schema
+    """
+    output = []
+    w = output.append
+    for etype in sorted(schema.entities()):
+        eschema = schema.eschema(etype)
+        if eschema.final or etype in skip_entities:
+            continue
+        w(grant_eschema(eschema, user, set_owner, prefix=prefix))
+    for rtype in sorted(schema.relations()):
+        rschema = schema.rschema(rtype)
+        if rschema_has_table(rschema, skip_relations=()): # XXX skip_relations should be specified
+            w(grant_rschema(rschema, user, set_owner))
+    return '\n'.join(output)
+
+
+def grant_eschema(eschema, user, set_owner=True, prefix=''):
+    """return a string of SQL statements granting `user` access (and,
+    optionally, ownership) on an entity type's table
+    """
+    output = []
+    w = output.append
+    etype = eschema.type
+    if set_owner:
+        w('ALTER TABLE %s%s OWNER TO %s;' % (prefix, etype, user))
+    w('GRANT ALL ON %s%s TO %s;' % (prefix, etype, user))
+    return '\n'.join(output)
+
+
+def grant_rschema(rschema, user, set_owner=True):
+    """return a string of SQL statements granting `user` access (and,
+    optionally, ownership) on a relation type's table
+    """
+    output = []
+    if set_owner:
+        output.append('ALTER TABLE %s_relation OWNER TO %s;' % (rschema.type, user))
+    output.append('GRANT ALL ON %s_relation TO %s;' % (rschema.type, user))
+    return '\n'.join(output)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/server/schemaserial.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/server/schemaserial.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,656 @@
+# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""functions for schema / permissions (de)serialization using RQL"""
+from __future__ import print_function
+
+__docformat__ = "restructuredtext en"
+
+import os
+import json
+import sys
+
+from six import PY2, text_type, string_types
+
+from logilab.common.shellutils import ProgressBar, DummyProgressBar
+
+from yams import BadSchemaDefinition, schema as schemamod, buildobjs as ybo
+
+from cubicweb import Binary
+from cubicweb.schema import (KNOWN_RPROPERTIES, CONSTRAINTS, ETYPE_NAME_MAP,
+ VIRTUAL_RTYPES)
+from cubicweb.server import sqlutils, schema2sql as y2sql
+
+
+def group_mapping(cnx, interactive=True):
+ """create a group mapping from an rql cursor
+
+ A group mapping has standard group names as key (managers, owners at least)
+ and the actual CWGroup entity's eid as associated value.
+    In interactive mode (the default), missing groups' eids will be prompted
+ from the user.
+ """
+ res = {}
+ for eid, name in cnx.execute('Any G, N WHERE G is CWGroup, G name N',
+ build_descr=False):
+ res[name] = eid
+ if not interactive:
+ return res
+ missing = [g for g in ('owners', 'managers', 'users', 'guests') if not g in res]
+ if missing:
+ print('some native groups are missing but the following groups have been found:')
+ print('\n'.join('* %s (%s)' % (n, eid) for n, eid in res.items()))
+ print()
+ print('enter the eid of a to group to map to each missing native group')
+ print('or just type enter to skip permissions granted to a group')
+ for group in missing:
+ while True:
+ value = raw_input('eid for group %s: ' % group).strip()
+ if not value:
+ continue
+ try:
+ eid = int(value)
+ except ValueError:
+ print('eid should be an integer')
+ continue
+ for eid_ in res.values():
+ if eid == eid_:
+ break
+ else:
+ print('eid is not a group eid')
+ continue
+ res[name] = eid
+ break
+ return res
+
+def cstrtype_mapping(cnx):
+ """cached constraint types mapping"""
+ map = dict(cnx.execute('Any T, X WHERE X is CWConstraintType, X name T'))
+ return map
+
+# schema / perms deserialization ##############################################
+
+def deserialize_schema(schema, cnx):
+ """return a schema according to information stored in an rql database
+ as CWRType and CWEType entities
+ """
+ repo = cnx.repo
+ dbhelper = repo.system_source.dbhelper
+
+ # Computed Rtype
+ with cnx.ensure_cnx_set:
+ tables = set(t.lower() for t in dbhelper.list_tables(cnx.cnxset.cu))
+ has_computed_relations = 'cw_cwcomputedrtype' in tables
+ # computed attribute
+ try:
+ cnx.system_sql("SELECT cw_formula FROM cw_CWAttribute")
+ has_computed_attributes = True
+ except Exception:
+ cnx.rollback()
+ has_computed_attributes = False
+
+ # XXX bw compat (3.6 migration)
+ sqlcu = cnx.system_sql("SELECT * FROM cw_CWRType WHERE cw_name='symetric'")
+ if sqlcu.fetchall():
+ sql = dbhelper.sql_rename_col('cw_CWRType', 'cw_symetric', 'cw_symmetric',
+ dbhelper.TYPE_MAPPING['Boolean'], True)
+ sqlcu.execute(sql)
+ sqlcu.execute("UPDATE cw_CWRType SET cw_name='symmetric' WHERE cw_name='symetric'")
+ cnx.commit()
+ ertidx = {}
+ copiedeids = set()
+ permsidx = deserialize_ertype_permissions(cnx)
+ schema.reading_from_database = True
+ # load every entity types
+ for eid, etype, desc in cnx.execute(
+ 'Any X, N, D WHERE X is CWEType, X name N, X description D',
+ build_descr=False):
+ # base types are already in the schema, skip them
+ if etype in schemamod.BASE_TYPES:
+ # just set the eid
+ eschema = schema.eschema(etype)
+ eschema.eid = eid
+ ertidx[eid] = etype
+ continue
+ if etype in ETYPE_NAME_MAP:
+ needcopy = False
+ netype = ETYPE_NAME_MAP[etype]
+ # can't use write rql queries at this point, use raw sql
+ sqlexec = cnx.system_sql
+ if sqlexec('SELECT 1 FROM %(p)sCWEType WHERE %(p)sname=%%(n)s'
+ % {'p': sqlutils.SQL_PREFIX}, {'n': netype}).fetchone():
+ # the new type already exists, we should copy (eg make existing
+ # instances of the old type instances of the new type)
+ assert etype.lower() != netype.lower()
+ needcopy = True
+ else:
+ # the new type doesn't exist, we should rename
+ sqlexec('UPDATE %(p)sCWEType SET %(p)sname=%%(n)s WHERE %(p)seid=%%(x)s'
+ % {'p': sqlutils.SQL_PREFIX}, {'x': eid, 'n': netype})
+ if etype.lower() != netype.lower():
+ alter_table_sql = dbhelper.sql_rename_table(sqlutils.SQL_PREFIX+etype,
+ sqlutils.SQL_PREFIX+netype)
+ sqlexec(alter_table_sql)
+ sqlexec('UPDATE entities SET type=%(n)s WHERE type=%(x)s',
+ {'x': etype, 'n': netype})
+ cnx.commit(False)
+ tocleanup = [eid]
+ tocleanup += (eid for eid, cached in repo._type_source_cache.items()
+ if etype == cached[0])
+ repo.clear_caches(tocleanup)
+ cnx.commit(False)
+ if needcopy:
+ ertidx[eid] = netype
+ copiedeids.add(eid)
+ # copy / CWEType entity removal expected to be done through
+ # rename_entity_type in a migration script
+ continue
+ etype = netype
+ ertidx[eid] = etype
+ eschema = schema.add_entity_type(
+ ybo.EntityType(name=etype, description=desc, eid=eid))
+ set_perms(eschema, permsidx)
+ # load inheritance relations
+ for etype, stype in cnx.execute(
+ 'Any XN, ETN WHERE X is CWEType, X name XN, X specializes ET, ET name ETN',
+ build_descr=False):
+ etype = ETYPE_NAME_MAP.get(etype, etype)
+ stype = ETYPE_NAME_MAP.get(stype, stype)
+ schema.eschema(etype)._specialized_type = stype
+ schema.eschema(stype)._specialized_by.append(etype)
+ if has_computed_relations:
+ rset = cnx.execute(
+ 'Any X, N, R, D WHERE X is CWComputedRType, X name N, '
+ 'X rule R, X description D')
+ for eid, rule_name, rule, description in rset.rows:
+ rtype = ybo.ComputedRelation(name=rule_name, rule=rule, eid=eid,
+ description=description)
+ rschema = schema.add_relation_type(rtype)
+ set_perms(rschema, permsidx)
+ # load every relation types
+ for eid, rtype, desc, sym, il, ftc in cnx.execute(
+ 'Any X,N,D,S,I,FTC WHERE X is CWRType, X name N, X description D, '
+ 'X symmetric S, X inlined I, X fulltext_container FTC', build_descr=False):
+ ertidx[eid] = rtype
+ rschema = schema.add_relation_type(
+ ybo.RelationType(name=rtype, description=desc,
+ symmetric=bool(sym), inlined=bool(il),
+ fulltext_container=ftc, eid=eid))
+ # remains to load every relation definitions (ie relations and attributes)
+ cstrsidx = deserialize_rdef_constraints(cnx)
+ pendingrdefs = []
+ # closure to factorize common code of attribute/relation rdef addition
+ def _add_rdef(rdefeid, seid, reid, oeid, **kwargs):
+ rdef = ybo.RelationDefinition(ertidx[seid], ertidx[reid], ertidx[oeid],
+ constraints=cstrsidx.get(rdefeid, ()),
+ eid=rdefeid, **kwargs)
+ if seid in copiedeids or oeid in copiedeids:
+ # delay addition of this rdef. We'll insert them later if needed. We
+ # have to do this because:
+ #
+ # * on etype renaming, we want relation of the old entity type being
+ # redirected to the new type during migration
+ #
+ # * in the case of a copy, we've to take care that rdef already
+ # existing in the schema are not overwritten by a redirected one,
+ # since we want correct eid on them (redirected rdef will be
+ # removed in rename_entity_type)
+ pendingrdefs.append(rdef)
+ else:
+ # add_relation_def return a RelationDefinitionSchema if it has been
+ # actually added (can be None on duplicated relation definitions,
+        # e.g. if the relation type is marked as being symmetric)
+ rdefs = schema.add_relation_def(rdef)
+ if rdefs is not None:
+ ertidx[rdefeid] = rdefs
+ set_perms(rdefs, permsidx)
+ # Get the type parameters for additional base types.
+ try:
+ extra_props = dict(cnx.execute('Any X, XTP WHERE X is CWAttribute, '
+ 'X extra_props XTP'))
+ except Exception:
+ cnx.critical('Previous CRITICAL notification about extra_props is not '
+ 'a problem if you are migrating to cubicweb 3.17')
+ extra_props = {} # not yet in the schema (introduced by 3.17 migration)
+
+ # load attributes
+ rql = ('Any X,SE,RT,OE,CARD,ORD,DESC,IDX,FTIDX,I18N,DFLT%(fm)s '
+ 'WHERE X is CWAttribute, X relation_type RT, X cardinality CARD,'
+ ' X ordernum ORD, X indexed IDX, X description DESC, '
+ ' X internationalizable I18N, X defaultval DFLT,%(fmsnip)s'
+ ' X fulltextindexed FTIDX, X from_entity SE, X to_entity OE')
+ if has_computed_attributes:
+ rql = rql % {'fm': ',FM', 'fmsnip': 'X formula FM,'}
+ else:
+ rql = rql % {'fm': '', 'fmsnip': ''}
+ for values in cnx.execute(rql, build_descr=False):
+ attrs = dict(zip(
+ ('rdefeid', 'seid', 'reid', 'oeid', 'cardinality',
+ 'order', 'description', 'indexed', 'fulltextindexed',
+ 'internationalizable', 'default', 'formula'), values))
+ typeparams = extra_props.get(attrs['rdefeid'])
+ attrs.update(json.loads(typeparams.getvalue().decode('ascii')) if typeparams else {})
+ default = attrs['default']
+ if default is not None:
+ if isinstance(default, Binary):
+ # while migrating from 3.17 to 3.18, we still have to
+ # handle String defaults
+ attrs['default'] = default.unzpickle()
+ _add_rdef(**attrs)
+ # load relations
+ for values in cnx.execute(
+ 'Any X,SE,RT,OE,CARD,ORD,DESC,C WHERE X is CWRelation, X relation_type RT,'
+ 'X cardinality CARD, X ordernum ORD, X description DESC, '
+ 'X from_entity SE, X to_entity OE, X composite C', build_descr=False):
+ rdefeid, seid, reid, oeid, card, ord, desc, comp = values
+ _add_rdef(rdefeid, seid, reid, oeid,
+ cardinality=card, description=desc, order=ord,
+ composite=comp)
+ for rdef in pendingrdefs:
+ try:
+ rdefs = schema.add_relation_def(rdef)
+ except BadSchemaDefinition:
+ continue
+ if rdefs is not None:
+ set_perms(rdefs, permsidx)
+ unique_togethers = {}
+ rset = cnx.execute(
+ 'Any X,E,R WHERE '
+ 'X is CWUniqueTogetherConstraint, '
+ 'X constraint_of E, X relations R', build_descr=False)
+ for values in rset:
+ uniquecstreid, eeid, releid = values
+ eschema = schema.schema_by_eid(eeid)
+ relations = unique_togethers.setdefault(uniquecstreid, (eschema, []))
+ rel = ertidx[releid]
+ if isinstance(rel, schemamod.RelationDefinitionSchema):
+ # not yet migrated 3.9 database ('relations' target type changed
+ # to CWRType in 3.10)
+ rtype = rel.rtype.type
+ else:
+ rtype = str(rel)
+ relations[1].append(rtype)
+ for eschema, unique_together in unique_togethers.values():
+ eschema._unique_together.append(tuple(sorted(unique_together)))
+ schema.infer_specialization_rules()
+ cnx.commit()
+ schema.finalize()
+ schema.reading_from_database = False
+
+
+def deserialize_ertype_permissions(cnx):
+    """return set of action:groups associations for the given
+ entity or relation schema with its eid, according to schema's
+ permissions stored in the database as [read|add|delete|update]_permission
+ relations between CWEType/CWRType and CWGroup entities
+ """
+ res = {}
+ for action in ('read', 'add', 'update', 'delete'):
+ rql = 'Any E,N WHERE G is CWGroup, G name N, E %s_permission G' % action
+ for eid, gname in cnx.execute(rql, build_descr=False):
+ res.setdefault(eid, {}).setdefault(action, []).append(gname)
+ rql = ('Any E,X,EXPR,V WHERE X is RQLExpression, X expression EXPR, '
+ 'E %s_permission X, X mainvars V' % action)
+ for eid, expreid, expr, mainvars in cnx.execute(rql, build_descr=False):
+ # we don't know yet if it's a rql expr for an entity or a relation,
+ # so append a tuple to differentiate from groups and so we'll be
+ # able to instantiate it later
+ res.setdefault(eid, {}).setdefault(action, []).append( (expr, mainvars, expreid) )
+ return res
+
+def deserialize_rdef_constraints(cnx):
+ """return the list of relation definition's constraints as instances"""
+ res = {}
+ for rdefeid, ceid, ct, val in cnx.execute(
+ 'Any E, X,TN,V WHERE E constrained_by X, X is CWConstraint, '
+ 'X cstrtype T, T name TN, X value V', build_descr=False):
+ cstr = CONSTRAINTS[ct].deserialize(val)
+ cstr.eid = ceid
+ res.setdefault(rdefeid, []).append(cstr)
+ return res
+
+def set_perms(erschema, permsidx):
+ """set permissions on the given erschema according to the permission
+ definition dictionary as built by deserialize_ertype_permissions for a
+ given erschema's eid
+ """
+ # reset erschema permissions here to avoid getting yams default anyway
+ erschema.permissions = dict((action, ()) for action in erschema.ACTIONS)
+ try:
+ thispermsdict = permsidx[erschema.eid]
+ except KeyError:
+ return
+ for action, somethings in thispermsdict.items():
+ erschema.permissions[action] = tuple(
+ isinstance(p, tuple) and erschema.rql_expression(*p) or p
+ for p in somethings)
+
+
+# schema / perms serialization ################################################
+
+def serialize_schema(cnx, schema):
+ """synchronize schema and permissions in the database according to
+ current schema
+ """
+ _title = '-> storing the schema in the database '
+ print(_title, end=' ')
+ execute = cnx.execute
+ eschemas = schema.entities()
+ pb_size = (len(eschemas + schema.relations())
+ + len(CONSTRAINTS)
+ + len([x for x in eschemas if x.specializes()]))
+ if sys.stdout.isatty():
+ pb = ProgressBar(pb_size, title=_title)
+ else:
+ pb = DummyProgressBar()
+ groupmap = group_mapping(cnx, interactive=False)
+ # serialize all entity types, assuring CWEType is serialized first for proper
+ # is / is_instance_of insertion
+ eschemas.remove(schema.eschema('CWEType'))
+ eschemas.insert(0, schema.eschema('CWEType'))
+ for eschema in eschemas:
+ execschemarql(execute, eschema, eschema2rql(eschema, groupmap))
+ pb.update()
+ # serialize constraint types
+ cstrtypemap = {}
+ rql = 'INSERT CWConstraintType X: X name %(ct)s'
+ for cstrtype in CONSTRAINTS:
+ cstrtypemap[cstrtype] = execute(rql, {'ct': text_type(cstrtype)},
+ build_descr=False)[0][0]
+ pb.update()
+ # serialize relations
+ for rschema in schema.relations():
+ # skip virtual relations such as eid, has_text and identity
+ if rschema in VIRTUAL_RTYPES:
+ pb.update()
+ continue
+ if rschema.rule:
+ execschemarql(execute, rschema, crschema2rql(rschema, groupmap))
+ pb.update()
+ continue
+ execschemarql(execute, rschema, rschema2rql(rschema, addrdef=False))
+ if rschema.symmetric:
+ rdefs = [rdef for k, rdef in rschema.rdefs.items()
+ if (rdef.subject, rdef.object) == k]
+ else:
+ rdefs = rschema.rdefs.values()
+ for rdef in rdefs:
+ execschemarql(execute, rdef,
+ rdef2rql(rdef, cstrtypemap, groupmap))
+ pb.update()
+ # serialize unique_together constraints
+ for eschema in eschemas:
+ if eschema._unique_together:
+ execschemarql(execute, eschema, uniquetogether2rqls(eschema))
+ # serialize yams inheritance relationships
+ for rql, kwargs in specialize2rql(schema):
+ execute(rql, kwargs, build_descr=False)
+ pb.update()
+ print()
+
+
+# high level serialization functions
+
+def execschemarql(execute, schema, rqls):
+ for rql, kwargs in rqls:
+ kwargs['x'] = schema.eid
+ rset = execute(rql, kwargs, build_descr=False)
+ if schema.eid is None:
+ schema.eid = rset[0][0]
+ else:
+ assert rset
+
+def erschema2rql(erschema, groupmap):
+ if isinstance(erschema, schemamod.EntitySchema):
+ return eschema2rql(erschema, groupmap=groupmap)
+ return rschema2rql(erschema, groupmap=groupmap)
+
+def specialize2rql(schema):
+ for eschema in schema.entities():
+ if eschema.final:
+ continue
+ for rql, kwargs in eschemaspecialize2rql(eschema):
+ yield rql, kwargs
+
+# etype serialization
+
+def eschema2rql(eschema, groupmap=None):
+ """return a list of rql insert statements to enter an entity schema
+ in the database as an CWEType entity
+ """
+ relations, values = eschema_relations_values(eschema)
+ # NOTE: 'specializes' relation can't be inserted here since there's no
+ # way to make sure the parent type is inserted before the child type
+ yield 'INSERT CWEType X: %s' % ','.join(relations) , values
+ # entity permissions
+ if groupmap is not None:
+ for rql, args in _erperms2rql(eschema, groupmap):
+ yield rql, args
+
+def eschema_relations_values(eschema):
+ values = _ervalues(eschema)
+ relations = ['X %s %%(%s)s' % (attr, attr) for attr in sorted(values)]
+ return relations, values
+
+def eschemaspecialize2rql(eschema):
+ specialized_type = eschema.specializes()
+ if specialized_type:
+ values = {'x': eschema.eid, 'et': specialized_type.eid}
+ yield 'SET X specializes ET WHERE X eid %(x)s, ET eid %(et)s', values
+
+def uniquetogether2rqls(eschema):
+ rql_args = []
+ # robustness against duplicated CWUniqueTogetherConstraint (pre 3.18)
+ columnset = set()
+ for columns in eschema._unique_together:
+ if columns in columnset:
+ print('schemaserial: skipping duplicate unique together %r %r' %
+ (eschema.type, columns))
+ continue
+ columnset.add(columns)
+ rql, args = _uniquetogether2rql(eschema, columns)
+ args['name'] = y2sql.unique_index_name(eschema, columns)
+ rql_args.append((rql, args))
+ return rql_args
+
+def _uniquetogether2rql(eschema, unique_together):
+ relations = []
+ restrictions = []
+ substs = {}
+ for i, name in enumerate(unique_together):
+ rschema = eschema.schema.rschema(name)
+ rtype = 'T%d' % i
+ substs[rtype] = text_type(rschema.type)
+ relations.append('C relations %s' % rtype)
+ restrictions.append('%(rtype)s name %%(%(rtype)s)s' % {'rtype': rtype})
+ relations = ', '.join(relations)
+ restrictions = ', '.join(restrictions)
+ rql = ('INSERT CWUniqueTogetherConstraint C: C name %%(name)s, C constraint_of X, %s '
+ 'WHERE X eid %%(x)s, %s')
+ return rql % (relations, restrictions), substs
+
+
+def _ervalues(erschema):
+ try:
+ type_ = text_type(erschema.type)
+ except UnicodeDecodeError as e:
+ raise Exception("can't decode %s [was %s]" % (erschema.type, e))
+ try:
+ desc = text_type(erschema.description) or u''
+ except UnicodeDecodeError as e:
+ raise Exception("can't decode %s [was %s]" % (erschema.description, e))
+ return {
+ 'name': type_,
+ 'final': erschema.final,
+ 'description': desc,
+ }
+
+# rtype serialization
+
+def rschema2rql(rschema, cstrtypemap=None, addrdef=True, groupmap=None):
+ """generate rql insert statements to enter a relation schema
+ in the database as an CWRType entity
+ """
+ if rschema.type == 'has_text':
+ return
+ relations, values = rschema_relations_values(rschema)
+ yield 'INSERT CWRType X: %s' % ','.join(relations), values
+ if addrdef:
+ assert cstrtypemap
+ # sort for testing purpose
+ for rdef in sorted(rschema.rdefs.values(),
+ key=lambda x: (x.subject, x.object)):
+ for rql, values in rdef2rql(rdef, cstrtypemap, groupmap):
+ yield rql, values
+
+def rschema_relations_values(rschema):
+ values = _ervalues(rschema)
+ values['final'] = rschema.final
+ values['symmetric'] = rschema.symmetric
+ values['inlined'] = rschema.inlined
+ if PY2 and isinstance(rschema.fulltext_container, str):
+ values['fulltext_container'] = unicode(rschema.fulltext_container)
+ else:
+ values['fulltext_container'] = rschema.fulltext_container
+ relations = ['X %s %%(%s)s' % (attr, attr) for attr in sorted(values)]
+ return relations, values
+
+def crschema2rql(crschema, groupmap):
+ relations, values = crschema_relations_values(crschema)
+ yield 'INSERT CWComputedRType X: %s' % ','.join(relations), values
+ if groupmap:
+ for rql, args in _erperms2rql(crschema, groupmap):
+ yield rql, args
+
+def crschema_relations_values(crschema):
+ values = _ervalues(crschema)
+ values['rule'] = text_type(crschema.rule)
+ # XXX why oh why?
+ del values['final']
+ relations = ['X %s %%(%s)s' % (attr, attr) for attr in sorted(values)]
+ return relations, values
+
+# rdef serialization
+
+def rdef2rql(rdef, cstrtypemap, groupmap=None):
+ # don't serialize inferred relations
+ if rdef.infered:
+ return
+ relations, values = _rdef_values(rdef)
+ relations.append('X relation_type ER,X from_entity SE,X to_entity OE')
+ values.update({'se': rdef.subject.eid, 'rt': rdef.rtype.eid, 'oe': rdef.object.eid})
+ if rdef.final:
+ etype = 'CWAttribute'
+ else:
+ etype = 'CWRelation'
+ yield 'INSERT %s X: %s WHERE SE eid %%(se)s,ER eid %%(rt)s,OE eid %%(oe)s' % (
+ etype, ','.join(relations), ), values
+ for rql, values in constraints2rql(cstrtypemap, rdef.constraints):
+ yield rql, values
+ # no groupmap means "no security insertion"
+ if groupmap:
+ for rql, args in _erperms2rql(rdef, groupmap):
+ yield rql, args
+
+_IGNORED_PROPS = ['eid', 'constraints', 'uid', 'infered', 'permissions']
+
+def _rdef_values(rdef):
+ amap = {'order': 'ordernum', 'default': 'defaultval'}
+ values = {}
+ extra = {}
+ for prop in rdef.rproperty_defs(rdef.object):
+ if prop in _IGNORED_PROPS:
+ continue
+ value = getattr(rdef, prop)
+ if prop not in KNOWN_RPROPERTIES:
+ extra[prop] = value
+ continue
+ # XXX type cast really necessary?
+ if prop in ('indexed', 'fulltextindexed', 'internationalizable'):
+ value = bool(value)
+ elif prop == 'ordernum':
+ value = int(value)
+ elif PY2 and isinstance(value, str):
+ value = unicode(value)
+ if value is not None and prop == 'default':
+ value = Binary.zpickle(value)
+ values[amap.get(prop, prop)] = value
+ if extra:
+ values['extra_props'] = Binary(json.dumps(extra).encode('ascii'))
+ relations = ['X %s %%(%s)s' % (attr, attr) for attr in sorted(values)]
+ return relations, values
+
+def constraints2rql(cstrtypemap, constraints, rdefeid=None):
+ for constraint in constraints:
+ values = {'ct': cstrtypemap[constraint.type()],
+ 'value': text_type(constraint.serialize()),
+ 'x': rdefeid} # when not specified, will have to be set by the caller
+ yield 'INSERT CWConstraint X: X value %(value)s, X cstrtype CT, EDEF constrained_by X WHERE \
+CT eid %(ct)s, EDEF eid %(x)s', values
+
+
+def _erperms2rql(erschema, groupmap):
+ """return rql insert statements to enter the entity or relation
+ schema's permissions in the database as
+ [read|add|delete|update]_permission relations between CWEType/CWRType
+ and CWGroup entities
+ """
+ for action in erschema.ACTIONS:
+ try:
+ grantedto = erschema.action_permissions(action)
+ except KeyError:
+            # may occur when modifying persistent schema
+ continue
+ for group_or_rqlexpr in grantedto:
+ if isinstance(group_or_rqlexpr, string_types):
+ # group
+ try:
+ yield ('SET X %s_permission Y WHERE Y eid %%(g)s, X eid %%(x)s' % action,
+ {'g': groupmap[group_or_rqlexpr]})
+ except KeyError:
+ print("WARNING: group %s used in permissions for %s was ignored because it doesn't exist."
+ " You may want to add it into a precreate.py file" % (group_or_rqlexpr, erschema))
+ continue
+ else:
+ # rqlexpr
+ rqlexpr = group_or_rqlexpr
+ yield ('INSERT RQLExpression E: E expression %%(e)s, E exprtype %%(t)s, '
+ 'E mainvars %%(v)s, X %s_permission E WHERE X eid %%(x)s' % action,
+ {'e': text_type(rqlexpr.expression),
+ 'v': text_type(','.join(sorted(rqlexpr.mainvars))),
+ 't': text_type(rqlexpr.__class__.__name__)})
+
+# update functions
+
+def updateeschema2rql(eschema, eid):
+ relations, values = eschema_relations_values(eschema)
+ values['x'] = eid
+ yield 'SET %s WHERE X eid %%(x)s' % ','.join(relations), values
+
+def updaterschema2rql(rschema, eid):
+ if rschema.rule:
+ yield ('SET X rule %(r)s WHERE X eid %(x)s',
+ {'x': eid, 'r': text_type(rschema.rule)})
+ else:
+ relations, values = rschema_relations_values(rschema)
+ values['x'] = eid
+ yield 'SET %s WHERE X eid %%(x)s' % ','.join(relations), values
+
+def updaterdef2rql(rdef, eid):
+ relations, values = _rdef_values(rdef)
+ values['x'] = eid
+ yield 'SET %s WHERE X eid %%(x)s' % ','.join(relations), values
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/server/serverconfig.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/server/serverconfig.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,350 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb.  If not, see <http://www.gnu.org/licenses/>.
+"""server.serverconfig definition"""
+from __future__ import print_function
+
+__docformat__ = "restructuredtext en"
+
+import sys
+from os.path import join, exists
+
+from six.moves import StringIO
+
+import logilab.common.configuration as lgconfig
+from logilab.common.decorators import cached
+
+from cubicweb.toolsutils import read_config, restrict_perms_to_user
+from cubicweb.cwconfig import CONFIGURATIONS, CubicWebConfiguration
+from cubicweb.server import SOURCE_TYPES
+
+
+USER_OPTIONS = (
+ ('login', {'type' : 'string',
+ 'default': 'admin',
+ 'help': "cubicweb manager account's login "
+ '(this user will be created)',
+ 'level': 0,
+ }),
+ ('password', {'type' : 'password',
+ 'default': lgconfig.REQUIRED,
+ 'help': "cubicweb manager account's password",
+ 'level': 0,
+ }),
+ )
+
+class SourceConfiguration(lgconfig.Configuration):
+ def __init__(self, appconfig, options):
+ self.appconfig = appconfig # has to be done before super call
+ super(SourceConfiguration, self).__init__(options=options)
+
+ # make Method('default_instance_id') usable in db option defs (in native.py)
+ def default_instance_id(self):
+ return self.appconfig.appid
+
+ def input_option(self, option, optdict, inputlevel):
+ try:
+ dbdriver = self['db-driver']
+ except lgconfig.OptionError:
+ pass
+ else:
+ if dbdriver == 'sqlite':
+ if option in ('db-user', 'db-password'):
+ return
+ if option == 'db-name':
+ optdict = optdict.copy()
+ optdict['help'] = 'path to the sqlite database'
+ optdict['default'] = join(self.appconfig.appdatahome,
+ self.appconfig.appid + '.sqlite')
+ super(SourceConfiguration, self).input_option(option, optdict, inputlevel)
+
+
+
+def ask_source_config(appconfig, type, inputlevel=0):
+ options = SOURCE_TYPES[type].options
+ sconfig = SourceConfiguration(appconfig, options=options)
+ sconfig.input_config(inputlevel=inputlevel)
+ return sconfig
+
+def generate_source_config(sconfig, encoding=sys.stdin.encoding):
+ """serialize a repository source configuration as text"""
+ stream = StringIO()
+ optsbysect = list(sconfig.options_by_section())
+ assert len(optsbysect) == 1, (
+ 'all options for a source should be in the same group, got %s'
+ % [x[0] for x in optsbysect])
+ lgconfig.ini_format(stream, optsbysect[0][1], encoding)
+ return stream.getvalue()
+
+
+class ServerConfiguration(CubicWebConfiguration):
+ """standalone RQL server"""
+ name = 'repository'
+
+ cubicweb_appobject_path = CubicWebConfiguration.cubicweb_appobject_path | set(['sobjects', 'hooks'])
+ cube_appobject_path = CubicWebConfiguration.cube_appobject_path | set(['sobjects', 'hooks'])
+
+ options = lgconfig.merge_options((
+ # ctl configuration
+ ('host',
+ {'type' : 'string',
+ 'default': None,
+ 'help': 'host name if not correctly detectable through gethostname',
+ 'group': 'main', 'level': 1,
+ }),
+ ('pid-file',
+ {'type' : 'string',
+ 'default': lgconfig.Method('default_pid_file'),
+ 'help': 'repository\'s pid file',
+ 'group': 'main', 'level': 2,
+ }),
+ ('uid',
+ {'type' : 'string',
+ 'default': None,
+ 'help': 'if this option is set, use the specified user to start \
+the repository rather than the user running the command',
+ 'group': 'main', 'level': (CubicWebConfiguration.mode == 'installed') and 0 or 1,
+ }),
+ ('cleanup-session-time',
+ {'type' : 'time',
+ 'default': '24h',
+ 'help': 'duration of inactivity after which a session '
+ 'will be closed, to limit memory consumption (avoid sessions that '
+ 'never expire and cause memory leak when http-session-time is 0, or '
+ 'because of bad client that never closes their connection). '
+ 'So notice that even if http-session-time is 0 and the user don\'t '
+ 'close his browser, he will have to reauthenticate after this time '
+ 'of inactivity. Default to 24h.',
+ 'group': 'main', 'level': 3,
+ }),
+ ('connections-pool-size',
+ {'type' : 'int',
+ 'default': 4,
+ 'help': 'size of the connections pool. Each source supporting multiple \
+connections will have this number of opened connections.',
+ 'group': 'main', 'level': 3,
+ }),
+ ('rql-cache-size',
+ {'type' : 'int',
+ 'default': 3000,
+ 'help': 'size of the parsed rql cache size.',
+ 'group': 'main', 'level': 3,
+ }),
+ ('undo-enabled',
+ {'type' : 'yn', 'default': False,
+ 'help': 'enable undo support',
+ 'group': 'main', 'level': 3,
+ }),
+ ('keep-transaction-lifetime',
+ {'type' : 'int', 'default': 7,
+ 'help': 'number of days during which transaction records should be \
+kept (hence undoable).',
+ 'group': 'main', 'level': 3,
+ }),
+ ('multi-sources-etypes',
+ {'type' : 'csv', 'default': (),
+ 'help': 'defines which entity types from this repository are used \
+by some other instances. You should set this properly for these instances to \
+detect updates / deletions.',
+ 'group': 'main', 'level': 3,
+ }),
+
+ ('delay-full-text-indexation',
+ {'type' : 'yn', 'default': False,
+ 'help': 'When full text indexation of entity has a too important cost'
+ ' to be done when entity are added/modified by users, activate this '
+ 'option and setup a job using cubicweb-ctl db-rebuild-fti on your '
+ 'system (using cron for instance).',
+ 'group': 'main', 'level': 3,
+ }),
+
+ # email configuration
+ ('default-recipients-mode',
+ {'type' : 'choice',
+ 'choices' : ('default-dest-addrs', 'users', 'none'),
+ 'default': 'default-dest-addrs',
+ 'help': 'when a notification should be sent with no specific rules \
+to find recipients, recipients will be found according to this mode. Available \
+modes are "default-dest-addrs" (emails specified in the configuration \
+variable with the same name), "users" (every users which has activated \
+account with an email set), "none" (no notification).',
+ 'group': 'email', 'level': 2,
+ }),
+ ('default-dest-addrs',
+ {'type' : 'csv',
+ 'default': (),
+ 'help': 'comma separated list of email addresses that will be used \
+as default recipient when an email is sent and the notification has no \
+specific recipient rules.',
+ 'group': 'email', 'level': 2,
+ }),
+ ('supervising-addrs',
+ {'type' : 'csv',
+ 'default': (),
+ 'help': 'comma separated list of email addresses that will be \
+notified of every changes.',
+ 'group': 'email', 'level': 2,
+ }),
+ ('zmq-address-sub',
+ {'type' : 'csv',
+ 'default' : (),
+ 'help': ('List of ZMQ addresses to subscribe to (requires pyzmq) '
+ '(of the form `tcp://:`)'),
+ 'group': 'zmq', 'level': 1,
+ }),
+ ('zmq-address-pub',
+ {'type' : 'string',
+ 'default' : None,
+ 'help': ('ZMQ address to use for publishing (requires pyzmq) '
+ '(of the form `tcp://:`)'),
+ 'group': 'zmq', 'level': 1,
+ }),
+ ) + CubicWebConfiguration.options)
+
+ # should we init the connections pool (eg connect to sources). This is
+ # usually necessary...
+ init_cnxset_pool = True
+
+ # read the schema from the database
+ read_instance_schema = True
+ # set this to true to get a minimal repository, for instance to get cubes
+ # information on commands such as i18ninstance, db-restore, etc...
+ quick_start = False
+ # check user's state at login time
+ consider_user_state = True
+
+ # should some hooks be deactivated during [pre|post]create script execution
+ free_wheel = False
+
+ # list of enables sources when sources restriction is necessary
+ # (eg repository initialization at least)
+ enabled_sources = None
+
+ def bootstrap_cubes(self):
+ from logilab.common.textutils import splitstrip
+ with open(join(self.apphome, 'bootstrap_cubes')) as f:
+ for line in f:
+ line = line.strip()
+ if not line or line.startswith('#'):
+ continue
+ self.init_cubes(self.expand_cubes(splitstrip(line)))
+ break
+ else:
+ # no cubes
+ self.init_cubes(())
+
+ def write_bootstrap_cubes_file(self, cubes):
+ stream = open(join(self.apphome, 'bootstrap_cubes'), 'w')
+ stream.write('# this is a generated file only used for bootstraping\n')
+ stream.write('# you should not have to edit this\n')
+ stream.write('%s\n' % ','.join(cubes))
+ stream.close()
+
+ def sources_file(self):
+ return join(self.apphome, 'sources')
+
+ # this method has to be cached since when the server is running using a
+ # restricted user, this user usually doesn't have access to the sources
+ # configuration file (#16102)
+ @cached
+ def read_sources_file(self):
+ """return a dictionary of values found in the sources file"""
+ return read_config(self.sources_file(), raise_if_unreadable=True)
+
+ @property
+ def system_source_config(self):
+ return self.read_sources_file()['system']
+
+ @property
+ def default_admin_config(self):
+ return self.read_sources_file()['admin']
+
+ def source_enabled(self, source):
+ if self.sources_mode is not None:
+ if 'migration' in self.sources_mode:
+ assert len(self.sources_mode) == 1
+ if source.connect_for_migration:
+ return True
+ print('not connecting to source', source.uri, 'during migration')
+ return False
+ if 'all' in self.sources_mode:
+ assert len(self.sources_mode) == 1
+ return True
+ return source.uri in self.sources_mode
+ if self.quick_start:
+ return source.uri == 'system'
+ return (not source.disabled and (
+ not self.enabled_sources or source.uri in self.enabled_sources))
+
+ def write_sources_file(self, sourcescfg):
+ """serialize the repository's sources configuration into an INI-like file"""
+ sourcesfile = self.sources_file()
+ if exists(sourcesfile):
+ import shutil
+ shutil.copy(sourcesfile, sourcesfile + '.bak')
+ stream = open(sourcesfile, 'w')
+ for section in ('admin', 'system'):
+ sconfig = sourcescfg[section]
+ if isinstance(sconfig, dict):
+ # get a Configuration object
+ assert section == 'system', '%r is not system' % section
+ _sconfig = SourceConfiguration(
+ self, options=SOURCE_TYPES['native'].options)
+ for attr, val in sconfig.items():
+ try:
+ _sconfig.set_option(attr, val)
+ except lgconfig.OptionError:
+ # skip adapter, may be present on pre 3.10 instances
+ if attr != 'adapter':
+ self.error('skip unknown option %s in sources file' % attr)
+ sconfig = _sconfig
+ stream.write('[%s]\n%s\n' % (section, generate_source_config(sconfig)))
+ restrict_perms_to_user(sourcesfile)
+
+ def load_schema(self, expand_cubes=False, **kwargs):
+ from cubicweb.schema import CubicWebSchemaLoader
+ if expand_cubes:
+ # in case some new dependencies have been introduced, we have to
+ # reinitialize cubes so the full filesystem schema is read
+ origcubes = self.cubes()
+ self._cubes = None
+ self.init_cubes(self.expand_cubes(origcubes))
+ schema = CubicWebSchemaLoader().load(self, **kwargs)
+ if expand_cubes:
+ # restore original value
+ self._cubes = origcubes
+ return schema
+
+ def load_bootstrap_schema(self):
+ from cubicweb.schema import BootstrapSchemaLoader
+ schema = BootstrapSchemaLoader().load(self)
+ schema.name = 'bootstrap'
+ return schema
+
+ sources_mode = None
+ def set_sources_mode(self, sources):
+ self.sources_mode = sources
+
+ def migration_handler(self, schema=None, interactive=True,
+ cnx=None, repo=None, connect=True, verbosity=None):
+ """return a migration handler instance"""
+ from cubicweb.server.migractions import ServerMigrationHelper
+ if verbosity is None:
+ verbosity = getattr(self, 'verbosity', 0)
+ return ServerMigrationHelper(self, schema, interactive=interactive,
+ cnx=cnx, repo=repo, connect=connect,
+ verbosity=verbosity)
diff -r 058bb3dc685f -r 0b59724cb3f2 cubicweb/server/serverctl.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/cubicweb/server/serverctl.py Sat Jan 16 13:48:51 2016 +0100
@@ -0,0 +1,1100 @@
+# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see .
+"""cubicweb-ctl commands and command handlers specific to the repository"""
+from __future__ import print_function
+
+__docformat__ = 'restructuredtext en'
+
+# *ctl modules should limit the number of imports so they can be imported as
+# quickly as possible (for cubicweb-ctl reactivity, necessary for instance for
+# usable bash completion). So import locally in command helpers.
+import sys
+import os
+from contextlib import contextmanager
+import logging
+import subprocess
+
+from six import string_types
+from six.moves import input
+
+from logilab.common import nullobject
+from logilab.common.configuration import Configuration, merge_options
+from logilab.common.shellutils import ASK, generate_password
+
+from logilab.database import get_db_helper, get_connection
+
+from cubicweb import AuthenticationError, ExecutionError, ConfigurationError
+from cubicweb.toolsutils import Command, CommandHandler, underline_title
+from cubicweb.cwctl import CWCTL, check_options_consistency, ConfigureInstanceCommand
+from cubicweb.server import SOURCE_TYPES
+from cubicweb.server.serverconfig import (
+ USER_OPTIONS, ServerConfiguration, SourceConfiguration,
+ ask_source_config, generate_source_config)
+
+# utility functions ###########################################################
+
+def source_cnx(source, dbname=None, special_privs=False, interactive=True):
+ """open and return a connection to the system database defined in the
+ given server.serverconfig
+ """
+ from getpass import getpass
+ dbhost = source.get('db-host')
+ if dbname is None:
+ dbname = source['db-name']
+ driver = source['db-driver']
+ dbhelper = get_db_helper(driver)
+ if interactive:
+ print('-> connecting to %s database' % driver, end=' ')
+ if dbhost:
+ print('%s@%s' % (dbname, dbhost), end=' ')
+ else:
+ print(dbname, end=' ')
+ if dbhelper.users_support:
+ if not interactive or (not special_privs and source.get('db-user')):
+ user = source.get('db-user', os.environ.get('USER', ''))
+ if interactive:
+ print('as', user)
+ password = source.get('db-password')
+ else:
+ print()
+ if special_privs:
+ print('WARNING')
+ print ('the user will need the following special access rights '
+ 'on the database:')
+ print(special_privs)
+ print()
+ default_user = source.get('db-user', os.environ.get('USER', ''))
+ user = input('Connect as user ? [%r]: ' % default_user)
+ user = user.strip() or default_user
+ if user == source.get('db-user'):
+ password = source.get('db-password')
+ else:
+ password = getpass('password: ')
+ else:
+ user = password = None
+ extra_args = source.get('db-extra-arguments')
+ extra = extra_args and {'extra_args': extra_args} or {}
+ cnx = get_connection(driver, dbhost, dbname, user, password=password,
+ port=source.get('db-port'),
+ schema=source.get('db-namespace'),
+ **extra)
+ try:
+ cnx.logged_user = user
+ except AttributeError:
+ # C object, __slots__
+ from logilab.database import _SimpleConnectionWrapper
+ cnx = _SimpleConnectionWrapper(cnx)
+ cnx.logged_user = user
+ return cnx
+
+def system_source_cnx(source, dbms_system_base=False,
+ special_privs='CREATE/DROP DATABASE', interactive=True):
+ """shortcut to get a connection to the instance system database
+ defined in the given config. If is True,
+ connect to the dbms system database instead (for task such as
+ create/drop the instance database)
+ """
+ if dbms_system_base:
+ system_db = get_db_helper(source['db-driver']).system_database()
+ return source_cnx(source, system_db, special_privs=special_privs,
+ interactive=interactive)
+ return source_cnx(source, special_privs=special_privs,
+ interactive=interactive)
+
+def _db_sys_cnx(source, special_privs, interactive=True):
+ """return a connection on the RDMS system table (to create/drop a user or a
+ database)
+ """
+ import logilab.common as lgp
+ lgp.USE_MX_DATETIME = False
+ driver = source['db-driver']
+ helper = get_db_helper(driver)
+ # connect on the dbms system base to create our base
+ cnx = system_source_cnx(source, True, special_privs=special_privs,
+ interactive=interactive)
+ # disable autocommit (isolation_level(1)) because DROP and
+ # CREATE DATABASE can't be executed in a transaction
+ set_isolation_level = getattr(cnx, 'set_isolation_level', None)
+ if set_isolation_level is not None:
+ # set_isolation_level() is psycopg specific
+ set_isolation_level(0)
+ return cnx
+
+def repo_cnx(config):
+ """return a in-memory repository and a repoapi connection to it"""
+ from cubicweb import repoapi
+ from cubicweb.server.utils import manager_userpasswd
+ try:
+ login = config.default_admin_config['login']
+ pwd = config.default_admin_config['password']
+ except KeyError:
+ login, pwd = manager_userpasswd()
+ while True:
+ try:
+ repo = repoapi.get_repository(config=config)
+ cnx = repoapi.connect(repo, login, password=pwd)
+ return repo, cnx
+ except AuthenticationError:
+ print('-> Error: wrong user/password.')
+ # reset cubes else we'll have an assertion error on next retry
+ config._cubes = None
+ login, pwd = manager_userpasswd()
+
+
+# repository specific command handlers ########################################
+
+class RepositoryCreateHandler(CommandHandler):
+ cmdname = 'create'
+ cfgname = 'repository'
+
+ def bootstrap(self, cubes, automatic=False, inputlevel=0):
+ """create an instance by copying files from the given cube and by asking
+ information necessary to build required configuration files
+ """
+ config = self.config
+ if not automatic:
+ print(underline_title('Configuring the repository'))
+ config.input_config('email', inputlevel)
+ print('\n'+underline_title('Configuring the sources'))
+ sourcesfile = config.sources_file()
+ # hack to make Method('default_instance_id') usable in db option defs
+ # (in native.py)
+ sconfig = SourceConfiguration(config,
+ options=SOURCE_TYPES['native'].options)
+ if not automatic:
+ sconfig.input_config(inputlevel=inputlevel)
+ print()
+ sourcescfg = {'system': sconfig}
+ if automatic:
+ # XXX modify a copy
+ password = generate_password()
+ print('-> set administrator account to admin / %s' % password)
+ USER_OPTIONS[1][1]['default'] = password
+ sconfig = Configuration(options=USER_OPTIONS)
+ else:
+ sconfig = Configuration(options=USER_OPTIONS)
+ sconfig.input_config(inputlevel=inputlevel)
+ sourcescfg['admin'] = sconfig
+ config.write_sources_file(sourcescfg)
+ # remember selected cubes for later initialization of the database
+ config.write_bootstrap_cubes_file(cubes)
+
+ def postcreate(self, automatic=False, inputlevel=0):
+ if automatic:
+ CWCTL.run(['db-create', '--automatic', self.config.appid])
+ elif ASK.confirm('Run db-create to create the system database ?'):
+ CWCTL.run(['db-create', '--config-level', str(inputlevel),
+ self.config.appid])
+ else:
+ print('-> nevermind, you can do it later with '
+ '"cubicweb-ctl db-create %s".' % self.config.appid)
+
+
+@contextmanager
+def db_transaction(source, privilege):
+ """Open a transaction to the instance database"""
+ cnx = system_source_cnx(source, special_privs=privilege)
+ cursor = cnx.cursor()
+ try:
+ yield cursor
+ except:
+ cnx.rollback()
+ cnx.close()
+ raise
+ else:
+ cnx.commit()
+ cnx.close()
+
+
+@contextmanager
+def db_sys_transaction(source, privilege):
+ """Open a transaction to the system database"""
+ cnx = _db_sys_cnx(source, privilege)
+ cursor = cnx.cursor()
+ try:
+ yield cursor
+ except:
+ cnx.rollback()
+ cnx.close()
+ raise
+ else:
+ cnx.commit()
+ cnx.close()
+
+
+class RepositoryDeleteHandler(CommandHandler):
+ cmdname = 'delete'
+ cfgname = 'repository'
+
+ def _drop_namespace(self, source):
+ db_namespace = source.get('db-namespace')
+ with db_transaction(source, privilege='DROP SCHEMA') as cursor:
+ helper = get_db_helper(source['db-driver'])
+ helper.drop_schema(cursor, db_namespace)
+ print('-> database schema %s dropped' % db_namespace)
+
+ def _drop_database(self, source):
+ dbname = source['db-name']
+ if source['db-driver'] == 'sqlite':
+ print('deleting database file %(db-name)s' % source)
+ os.unlink(source['db-name'])
+ print('-> database %(db-name)s dropped.' % source)
+ else:
+ helper = get_db_helper(source['db-driver'])
+ with db_sys_transaction(source, privilege='DROP DATABASE') as cursor:
+ print('dropping database %(db-name)s' % source)
+ cursor.execute('DROP DATABASE "%(db-name)s"' % source)
+ print('-> database %(db-name)s dropped.' % source)
+
+ def _drop_user(self, source):
+ user = source['db-user'] or None
+ if user is not None:
+ with db_sys_transaction(source, privilege='DROP USER') as cursor:
+ print('dropping user %s' % user)
+ cursor.execute('DROP USER %s' % user)
+
+ def _cleanup_steps(self, source):
+ # 1/ delete namespace if used
+ db_namespace = source.get('db-namespace')
+ if db_namespace:
+ yield ('Delete database namespace "%s"' % db_namespace,
+ self._drop_namespace, True)
+ # 2/ delete database
+ yield ('Delete database "%(db-name)s"' % source,
+ self._drop_database, True)
+ # 3/ delete user
+ helper = get_db_helper(source['db-driver'])
+ if source['db-user'] and helper.users_support:
+ # XXX should check we are not connected as user
+ yield ('Delete user "%(db-user)s"' % source,
+ self._drop_user, False)
+
+ def cleanup(self):
+ """remove instance's configuration and database"""
+ source = self.config.system_source_config
+ for msg, step, default in self._cleanup_steps(source):
+ if ASK.confirm(msg, default_is_yes=default):
+ try:
+ step(source)
+ except Exception as exc:
+ print('ERROR', exc)
+ if ASK.confirm('An error occurred. Continue anyway?',
+ default_is_yes=False):
+ continue
+ raise ExecutionError(str(exc))
+
+
+# repository specific commands ################################################
+
+def createdb(helper, source, dbcnx, cursor, **kwargs):
+ if dbcnx.logged_user != source['db-user']:
+ helper.create_database(cursor, source['db-name'], source['db-user'],
+ source['db-encoding'], **kwargs)
+ else:
+ helper.create_database(cursor, source['db-name'],
+ dbencoding=source['db-encoding'], **kwargs)
+
+
+class CreateInstanceDBCommand(Command):
+ """Create the system database of an instance (run after 'create').
+
+ You will be prompted for a login / password to use to connect to
+ the system database. The given user should have almost all rights
+ on the database (ie a super user on the DBMS allowed to create
+ database, users, languages...).
+
+
+ the identifier of the instance to initialize.
+ """
+ name = 'db-create'
+ arguments = ''
+ min_args = max_args = 1
+ options = (
+ ('automatic',
+ {'short': 'a', 'action' : 'store_true',
+ 'default': False,
+ 'help': 'automatic mode: never ask and use default answer to every '
+ 'question. this may require that your login match a database super '
+ 'user (allowed to create database & all).',
+ }),
+ ('config-level',
+ {'short': 'l', 'type' : 'int', 'metavar': '',
+ 'default': 0,
+ 'help': 'configuration level (0..2): 0 will ask for essential '
+ 'configuration parameters only while 2 will ask for all parameters',
+ }),
+ ('create-db',
+ {'short': 'c', 'type': 'yn', 'metavar': '',
+ 'default': True,
+ 'help': 'create the database (yes by default)'
+ }),
+ )
+
+ def run(self, args):
+ """run the command with its specific arguments"""
+ check_options_consistency(self.config)
+ automatic = self.get('automatic')
+ appid = args.pop()
+ config = ServerConfiguration.config_for(appid)
+ source = config.system_source_config
+ dbname = source['db-name']
+ driver = source['db-driver']
+ helper = get_db_helper(driver)
+ if driver == 'sqlite':
+ if os.path.exists(dbname) and (
+ automatic or
+ ASK.confirm('Database %s already exists. Drop it?' % dbname)):
+ os.unlink(dbname)
+ elif self.config.create_db:
+ print('\n'+underline_title('Creating the system database'))
+ # connect on the dbms system base to create our base
+ dbcnx = _db_sys_cnx(source, 'CREATE/DROP DATABASE and / or USER',
+ interactive=not automatic)
+ cursor = dbcnx.cursor()
+ try:
+ if helper.users_support:
+ user = source['db-user']
+ if not helper.user_exists(cursor, user) and (automatic or \
+ ASK.confirm('Create db user %s ?' % user, default_is_yes=False)):
+ helper.create_user(source['db-user'], source.get('db-password'))
+ print('-> user %s created.' % user)
+ if dbname in helper.list_databases(cursor):
+ if automatic or ASK.confirm('Database %s already exists -- do you want to drop it ?' % dbname):
+ cursor.execute('DROP DATABASE "%s"' % dbname)
+ else:
+ print('you may want to run "cubicweb-ctl db-init '
+ '--drop %s" manually to continue.' % config.appid)
+ return
+ createdb(helper, source, dbcnx, cursor)
+ dbcnx.commit()
+ print('-> database %s created.' % dbname)
+ except BaseException:
+ dbcnx.rollback()
+ raise
+ cnx = system_source_cnx(source, special_privs='CREATE LANGUAGE/SCHEMA',
+ interactive=not automatic)
+ cursor = cnx.cursor()
+ helper.init_fti_extensions(cursor)
+ namespace = source.get('db-namespace')
+ if namespace and ASK.confirm('Create schema %s in database %s ?'
+ % (namespace, dbname)):
+ helper.create_schema(cursor, namespace)
+ cnx.commit()
+ # postgres specific stuff
+ if driver == 'postgres':
+ # install plpythonu/plpgsql languages
+ langs = ('plpythonu', 'plpgsql')
+ for extlang in langs:
+ if automatic or ASK.confirm('Create language %s ?' % extlang):
+ try:
+ helper.create_language(cursor, extlang)
+ except Exception as exc:
+ print('-> ERROR:', exc)
+ print('-> could not create language %s, some stored procedures might be unusable' % extlang)
+ cnx.rollback()
+ else:
+ cnx.commit()
+ print('-> database for instance %s created and necessary extensions installed.' % appid)
+ print()
+ if automatic:
+ CWCTL.run(['db-init', '--automatic', '--config-level', '0',
+ config.appid])
+ elif ASK.confirm('Run db-init to initialize the system database ?'):
+ CWCTL.run(['db-init', '--config-level',
+ str(self.config.config_level), config.appid])
+ else:
+ print('-> nevermind, you can do it later with '
+ '"cubicweb-ctl db-init %s".' % config.appid)
+
+
+class InitInstanceCommand(Command):
+ """Initialize the system database of an instance (run after 'db-create').
+
+ Notice this will be done using user specified in the sources files, so this
+ user should have the create tables grant permissions on the database.
+
+
+ the identifier of the instance to initialize.
+ """
+ name = 'db-init'
+ arguments = ''
+ min_args = max_args = 1
+ options = (
+ ('automatic',
+ {'short': 'a', 'action' : 'store_true',
+ 'default': False,
+ 'help': 'automatic mode: never ask and use default answer to every '
+ 'question.',
+ }),
+ ('config-level',
+ {'short': 'l', 'type': 'int', 'default': 0,
+ 'help': 'level threshold for questions asked when configuring '
+ 'another source'
+ }),
+ ('drop',
+ {'short': 'd', 'action': 'store_true',
+ 'default': False,
+ 'help': 'insert drop statements to remove previously existant '
+ 'tables, indexes... (no by default)'
+ }),
+ )
+
+ def run(self, args):
+ check_options_consistency(self.config)
+ print('\n'+underline_title('Initializing the system database'))
+ from cubicweb.server import init_repository
+ appid = args[0]
+ config = ServerConfiguration.config_for(appid)
+ try:
+ system = config.system_source_config
+ extra_args = system.get('db-extra-arguments')
+ extra = extra_args and {'extra_args': extra_args} or {}
+ get_connection(
+ system['db-driver'], database=system['db-name'],
+ host=system.get('db-host'), port=system.get('db-port'),
+ user=system.get('db-user') or '', password=system.get('db-password') or '',
+ schema=system.get('db-namespace'), **extra)
+ except Exception as ex:
+ raise ConfigurationError(
+ 'You seem to have provided wrong connection information in '\
+ 'the %s file. Resolve this first (error: %s).'
+ % (config.sources_file(), str(ex).strip()))
+ init_repository(config, drop=self.config.drop)
+ if not self.config.automatic:
+ while ASK.confirm('Enter another source ?', default_is_yes=False):
+ CWCTL.run(['source-add', '--config-level',
+ str(self.config.config_level), config.appid])
+
+
+class AddSourceCommand(Command):
+ """Add a data source to an instance.
+
+
+ the identifier of the instance to initialize.
+ """
+ name = 'source-add'
+ arguments = ''
+ min_args = max_args = 1
+ options = (
+ ('config-level',
+ {'short': 'l', 'type': 'int', 'default': 1,
+ 'help': 'level threshold for questions asked when configuring another source'
+ }),
+ )
+
+ def run(self, args):
+ appid = args[0]
+ config = ServerConfiguration.config_for(appid)
+ repo, cnx = repo_cnx(config)
+ repo.hm.call_hooks('server_maintenance', repo=repo)
+ try:
+ with cnx:
+ used = set(n for n, in cnx.execute('Any SN WHERE S is CWSource, S name SN'))
+ cubes = repo.get_cubes()
+ while True:
+ type = input('source type (%s): '
+ % ', '.join(sorted(SOURCE_TYPES)))
+ if type not in SOURCE_TYPES:
+ print('-> unknown source type, use one of the available types.')
+ continue
+ sourcemodule = SOURCE_TYPES[type].module
+ if not sourcemodule.startswith('cubicweb.'):
+ # module names look like cubes.mycube.themodule
+ sourcecube = SOURCE_TYPES[type].module.split('.', 2)[1]
+ # if the source adapter is coming from an external component,
+ # ensure it's specified in used cubes
+ if not sourcecube in cubes:
+ print ('-> this source type require the %s cube which is '
+ 'not used by the instance.')
+ continue
+ break
+ while True:
+ parser = input('parser type (%s): '
+ % ', '.join(sorted(repo.vreg['parsers'])))
+ if parser in repo.vreg['parsers']:
+ break
+ print('-> unknown parser identifier, use one of the available types.')
+ while True:
+ sourceuri = input('source identifier (a unique name used to '
+ 'tell sources apart): ').strip()
+ if not sourceuri:
+ print('-> mandatory.')
+ else:
+ sourceuri = unicode(sourceuri, sys.stdin.encoding)
+ if sourceuri in used:
+ print('-> uri already used, choose another one.')
+ else:
+ break
+ url = input('source URL (leave empty for none): ').strip()
+ url = unicode(url) if url else None
+ # XXX configurable inputlevel
+ sconfig = ask_source_config(config, type, inputlevel=self.config.config_level)
+ cfgstr = unicode(generate_source_config(sconfig), sys.stdin.encoding)
+ cnx.create_entity('CWSource', name=sourceuri, type=unicode(type),
+ config=cfgstr, parser=unicode(parser), url=unicode(url))
+ cnx.commit()
+ finally:
+ repo.hm.call_hooks('server_shutdown')
+
+
+class GrantUserOnInstanceCommand(Command):
+ """Grant a database user on a repository system database.
+
+
+ the identifier of the instance
+
+ the database's user requiring grant access
+ """
+ name = 'db-grant-user'
+ arguments = ''
+ min_args = max_args = 2
+ options = (
+ ('set-owner',
+ {'short': 'o', 'type' : 'yn', 'metavar' : '',
+ 'default' : False,
+ 'help': 'Set the user as tables owner if yes (no by default).'}
+ ),
+ )
+ def run(self, args):
+ """run the command with its specific arguments"""
+ from cubicweb.server.sqlutils import sqlexec, sqlgrants
+ appid, user = args
+ config = ServerConfiguration.config_for(appid)
+ source = config.system_source_config
+ set_owner = self.config.set_owner
+ cnx = system_source_cnx(source, special_privs='GRANT')
+ cursor = cnx.cursor()
+ schema = config.load_schema()
+ try:
+ sqlexec(sqlgrants(schema, source['db-driver'], user,
+ set_owner=set_owner), cursor)
+ except Exception as ex:
+ cnx.rollback()
+ import traceback
+ traceback.print_exc()
+ print('-> an error occurred:', ex)
+ else:
+ cnx.commit()
+ print('-> rights granted to %s on instance %s.' % (appid, user))
+
+
+class ResetAdminPasswordCommand(Command):
+ """Reset the administrator password.
+
+
+ the identifier of the instance
+ """
+ name = 'reset-admin-pwd'
+ arguments = ''
+ min_args = max_args = 1
+ options = (
+ ('password',
+ {'short': 'p', 'type' : 'string', 'metavar' : '',
+ 'default' : None,
+ 'help': 'Use this password instead of prompt for one.\n'
+ '/!\ THIS IS AN INSECURE PRACTICE /!\ \n'
+ 'the password will appear in shell history'}
+ ),
+ )
+
+ def run(self, args):
+ """run the command with its specific arguments"""
+ from cubicweb.server.utils import crypt_password, manager_userpasswd
+ appid = args[0]
+ config = ServerConfiguration.config_for(appid)
+ sourcescfg = config.read_sources_file()
+ try:
+ adminlogin = sourcescfg['admin']['login']
+ except KeyError:
+ print('-> Error: could not get cubicweb administrator login.')
+ sys.exit(1)
+ cnx = source_cnx(sourcescfg['system'])
+ driver = sourcescfg['system']['db-driver']
+ dbhelper = get_db_helper(driver)
+ cursor = cnx.cursor()
+ # check admin exists
+ cursor.execute("SELECT * FROM cw_CWUser WHERE cw_login=%(l)s",
+ {'l': adminlogin})
+ if not cursor.fetchall():
+ print("-> error: admin user %r specified in sources doesn't exist "
+ "in the database" % adminlogin)
+ print(" fix your sources file before running this command")
+ cnx.close()
+ sys.exit(1)
+ if self.config.password is None:
+ # ask for a new password
+ msg = 'new password for %s' % adminlogin
+ _, pwd = manager_userpasswd(adminlogin, confirm=True, passwdmsg=msg)
+ else:
+ pwd = self.config.password
+ try:
+ cursor.execute("UPDATE cw_CWUser SET cw_upassword=%(p)s WHERE cw_login=%(l)s",
+ {'p': dbhelper.binary_value(crypt_password(pwd)), 'l': adminlogin})
+ sconfig = Configuration(options=USER_OPTIONS)
+ sconfig['login'] = adminlogin
+ sconfig['password'] = pwd
+ sourcescfg['admin'] = sconfig
+ config.write_sources_file(sourcescfg)
+ except Exception as ex:
+ cnx.rollback()
+ import traceback
+ traceback.print_exc()
+ print('-> an error occurred:', ex)
+ else:
+ cnx.commit()
+ print('-> password reset, sources file regenerated.')
+ cnx.close()
+
+
+
+def _remote_dump(host, appid, output, sudo=False):
+ # XXX generate unique/portable file name
+ from datetime import date
+ filename = '%s-%s.tgz' % (appid, date.today().strftime('%Y-%m-%d'))
+ dmpcmd = 'cubicweb-ctl db-dump -o /tmp/%s %s' % (filename, appid)
+ if sudo:
+ dmpcmd = 'sudo %s' % (dmpcmd)
+ dmpcmd = 'ssh -t %s "%s"' % (host, dmpcmd)
+ print(dmpcmd)
+ if os.system(dmpcmd):
+ raise ExecutionError('Error while dumping the database')
+ if output is None:
+ output = filename
+ cmd = 'scp %s:/tmp/%s %s' % (host, filename, output)
+ print(cmd)
+ if os.system(cmd):
+ raise ExecutionError('Error while retrieving the dump at /tmp/%s' % filename)
+ rmcmd = 'ssh -t %s "rm -f /tmp/%s"' % (host, filename)
+ print(rmcmd)
+ if os.system(rmcmd) and not ASK.confirm(
+ 'An error occurred while deleting remote dump at /tmp/%s. '
+ 'Continue anyway?' % filename):
+ raise ExecutionError('Error while deleting remote dump at /tmp/%s' % filename)
+
+
+def _local_dump(appid, output, format='native'):
+ config = ServerConfiguration.config_for(appid)
+ config.quick_start = True
+ mih = config.migration_handler(verbosity=1)
+ mih.backup_database(output, askconfirm=False, format=format)
+ mih.shutdown()
+
+def _local_restore(appid, backupfile, drop, format='native'):
+ config = ServerConfiguration.config_for(appid)
+ config.verbosity = 1 # else we won't be asked for confirmation on problems
+ config.quick_start = True
+ mih = config.migration_handler(connect=False, verbosity=1)
+ mih.restore_database(backupfile, drop, askconfirm=False, format=format)
+ repo = mih.repo
+ # version of the database
+ dbversions = repo.get_versions()
+ mih.shutdown()
+ if not dbversions:
+ print("bad or missing version information in the database, don't upgrade file system")
+ return
+ # version of installed software
+ eversion = dbversions['cubicweb']
+ status = instance_status(config, eversion, dbversions)
+ # * database version > installed software
+ if status == 'needsoftupgrade':
+ print("** The database of %s is more recent than the installed software!" % config.appid)
+ print("** Upgrade your software, then migrate the database by running the command")
+ print("** 'cubicweb-ctl upgrade %s'" % config.appid)
+ return
+ # * database version < installed software, an upgrade will be necessary
+ # anyway, just rewrite vc.conf and warn user he has to upgrade
+ elif status == 'needapplupgrade':
+ print("** The database of %s is older than the installed software." % config.appid)
+ print("** Migrate the database by running the command")
+ print("** 'cubicweb-ctl upgrade %s'" % config.appid)
+ return
+ # * database version = installed software, database version = instance fs version
+ # ok!
+
+def instance_status(config, cubicwebapplversion, vcconf):
+ cubicwebversion = config.cubicweb_version()
+ if cubicwebapplversion > cubicwebversion:
+ return 'needsoftupgrade'
+ if cubicwebapplversion < cubicwebversion:
+ return 'needapplupgrade'
+ for cube in config.cubes():
+ try:
+ softversion = config.cube_version(cube)
+ except ConfigurationError:
+ print('-> Error: no cube version information for %s, please check that the cube is installed.' % cube)
+ continue
+ try:
+ applversion = vcconf[cube]
+ except KeyError:
+ print('-> Error: no cube version information for %s in version configuration.' % cube)
+ continue
+ if softversion == applversion:
+ continue
+ if softversion > applversion:
+ return 'needsoftupgrade'
+ elif softversion < applversion:
+ return 'needapplupgrade'
+ return None
+
+
+class DBDumpCommand(Command):
+ """Backup the system database of an instance.
+
+
+ the identifier of the instance to backup
+ format [[user@]host:]appname
+ """
+ name = 'db-dump'
+ arguments = ''
+ min_args = max_args = 1
+ options = (
+ ('output',
+ {'short': 'o', 'type' : 'string', 'metavar' : '',
+ 'default' : None,
+ 'help': 'Specify the backup file where the backup will be stored.'}
+ ),
+ ('sudo',
+ {'short': 's', 'action' : 'store_true',
+ 'default' : False,
+ 'help': 'Use sudo on the remote host.'}
+ ),
+ ('format',
+ {'short': 'f', 'default': 'native', 'type': 'choice',
+ 'choices': ('native', 'portable'),
+ 'help': '"native" format uses db backend utilities to dump the database. '
+ '"portable" format uses a database independent format'}
+ ),
+ )
+
+ def run(self, args):
+ appid = args[0]
+ if ':' in appid:
+ host, appid = appid.split(':')
+ _remote_dump(host, appid, self.config.output, self.config.sudo)
+ else:
+ _local_dump(appid, self.config.output, format=self.config.format)
+
+
+
+
+class DBRestoreCommand(Command):
+    """Restore the system database of an instance.
+
+    <instance>
+      the identifier of the instance to restore
+    """
+    name = 'db-restore'
+    arguments = '<instance> <backupfile>'
+    min_args = max_args = 2
+
+    options = (
+        ('no-drop',
+         {'short': 'n', 'action' : 'store_true', 'default' : False,
+          'help': 'for some reason the database doesn\'t exist and so '
+                  'should not be dropped.'}
+         ),
+        ('format',
+         {'short': 'f', 'default': 'native', 'type': 'choice',
+          'choices': ('native', 'portable'),
+          'help': 'the format used when dumping the database'}),
+        )
+
+    def run(self, args):
+        appid, backupfile = args
+        if self.config.format == 'portable':
+            # we need to ensure a DB exist before restoring from portable format
+            if not self.config.no_drop:
+                try:
+                    CWCTL.run(['db-create', '--automatic', appid])
+                except SystemExit as exc:
+                    # continue if the command exited with status 0 (success)
+                    if exc.code:
+                        raise
+        _local_restore(appid, backupfile,
+                       drop=not self.config.no_drop,
+                       format=self.config.format)
+        if self.config.format == 'portable':
+            try:  # portable dumps don't carry the fti; rebuild it after restore
+                CWCTL.run(['db-rebuild-fti', appid])
+            except SystemExit as exc:
+                if exc.code:
+                    raise
+
+
+class DBCopyCommand(Command):
+    """Copy the system database of an instance (backup and restore).
+
+    <src-instance>
+      the identifier of the instance to backup
+      format [[user@]host:]appname
+
+    <dest-instance>
+      the identifier of the instance to restore
+    """
+    name = 'db-copy'
+    arguments = '<src-instance> <dest-instance>'
+    min_args = max_args = 2
+    options = (
+        ('no-drop',
+         {'short': 'n', 'action' : 'store_true',
+          'default' : False,
+          'help': 'For some reason the database doesn\'t exist and so '
+                  'should not be dropped.'}
+         ),
+        ('keep-dump',
+         {'short': 'k', 'action' : 'store_true',
+          'default' : False,
+          'help': 'Specify that the dump file should not be automatically removed.'}
+         ),
+        ('sudo',
+         {'short': 's', 'action' : 'store_true',
+          'default' : False,
+          'help': 'Use sudo on the remote host.'}
+         ),
+        ('format',
+         {'short': 'f', 'default': 'native', 'type': 'choice',
+          'choices': ('native', 'portable'),
+          'help': '"native" format uses db backend utilities to dump the database. '
+                  '"portable" format uses a database independent format'}
+         ),
+        )
+
+    def run(self, args):
+        import tempfile
+        srcappid, destappid = args
+        fd, output = tempfile.mkstemp()
+        os.close(fd)  # only the path is needed; the dump helpers reopen it
+        if ':' in srcappid:  # remote source: [user@]host:appname
+            host, srcappid = srcappid.split(':')
+            _remote_dump(host, srcappid, output, self.config.sudo)
+        else:
+            _local_dump(srcappid, output, format=self.config.format)
+        _local_restore(destappid, output, not self.config.no_drop,
+                       self.config.format)
+        if self.config.keep_dump:
+            print('-> you can get the dump file at', output)
+        else:
+            os.remove(output)
+
+
+class CheckRepositoryCommand(Command):
+    """Check integrity of the system database of an instance.
+
+    <instance>
+      the identifier of the instance to check
+    """
+    name = 'db-check'
+    arguments = '<instance>'
+    min_args = max_args = 1
+    options = (
+        ('checks',
+         {'short': 'c', 'type' : 'csv', 'metavar' : '<check list>',
+          'default' : ('entities', 'relations',
+                       'mandatory_relations', 'mandatory_attributes',
+                       'metadata', 'schema', 'text_index'),
+          'help': 'Comma separated list of check to run. By default run all \
+checks, i.e. entities, relations, mandatory_relations, mandatory_attributes, \
+metadata, text_index and schema.'}
+         ),
+
+        ('autofix',
+         {'short': 'a', 'type' : 'yn', 'metavar' : '<yes or no>',
+          'default' : False,
+          'help': 'Automatically correct integrity problems if this option \
+is set to "y" or "yes", else only display them'}
+         ),
+        ('reindex',
+         {'short': 'r', 'type' : 'yn', 'metavar' : '<yes or no>',
+          'default' : False,
+          'help': 're-indexes the database for full text search if this \
+option is set to "y" or "yes" (may be long for large database).'}
+         ),
+        ('force',
+         {'short': 'f', 'action' : 'store_true',
+          'default' : False,
+          'help': 'don\'t check instance is up to date.'}
+         ),
+
+        )
+
+    def run(self, args):
+        from cubicweb.server.checkintegrity import check
+        appid = args[0]
+        config = ServerConfiguration.config_for(appid)
+        config.repairing = self.config.force  # skip the up-to-date check
+        repo, _cnx = repo_cnx(config)
+        with repo.internal_cnx() as cnx:
+            check(repo, cnx,
+                  self.config.checks,
+                  self.config.reindex,
+                  self.config.autofix)
+
+
+class RebuildFTICommand(Command):
+    """Rebuild the full-text index of the system database of an instance.
+
+    <instance> [etype(s)]
+      the identifier of the instance to rebuild
+
+    If no etype is specified, cubicweb will reindex everything, otherwise
+    only specified etypes will be considered.
+    """
+    name = 'db-rebuild-fti'
+    arguments = '<instance>'
+    min_args = 1
+
+    def run(self, args):
+        from cubicweb.server.checkintegrity import reindex_entities
+        appid = args.pop(0)
+        etypes = args or None  # remaining args are etypes; None means "all"
+        config = ServerConfiguration.config_for(appid)
+        repo, cnx = repo_cnx(config)
+        with cnx:
+            reindex_entities(repo.schema, cnx, etypes=etypes)
+            cnx.commit()
+
+
+class SynchronizeSourceCommand(Command):
+ """Force a source synchronization.
+
+
+ the identifier of the instance
+