--- a/cubicweb/devtools/testlib.py Mon Jun 06 21:17:33 2016 +0200
+++ b/cubicweb/devtools/testlib.py Mon Jun 06 15:28:35 2016 +0200
@@ -69,7 +69,7 @@
@classproperty
@cached
- def datadir(cls): # pylint: disable=E0213
+ def datadir(cls): # pylint: disable=E0213
"""helper attribute holding the standard test's data directory
"""
mod = sys.modules[cls.__module__]
@@ -889,7 +889,6 @@
return req, self.session
def assertAuthSuccess(self, req, origsession, nbsessions=1):
- sh = self.app.session_handler
session = self.app.get_session(req)
cnx = repoapi.Connection(session)
req.set_cnx(cnx)
@@ -927,7 +926,7 @@
'text/x-vcard': None,
'text/calendar': None,
'image/png': None,
- }
+ }
# maps vid : validator name (override content_type_validators)
vid_validators = dict((vid, htmlparser.VALMAP[valkey])
for vid, valkey in VIEW_VALIDATORS.items())
@@ -1055,7 +1054,7 @@
# the line number
content = u'\n'.join(line_template % (idx + 1, line)
for idx, line in enumerate(content)
- if line_context_filter(idx+1, position))
+ if line_context_filter(idx + 1, position))
msg += u'\nfor content:\n%s' % content
exc = AssertionError(msg)
exc.__traceback__ = tcbk
--- a/cubicweb/entities/authobjs.py Mon Jun 06 21:17:33 2016 +0200
+++ b/cubicweb/entities/authobjs.py Mon Jun 06 15:28:35 2016 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -132,7 +132,7 @@
groups = frozenset((groups,))
elif isinstance(groups, (tuple, list)):
groups = frozenset(groups)
- return len(groups & self.groups) # XXX return the resulting set instead of its size
+ return len(groups & self.groups) # XXX return the resulting set instead of its size
def is_in_group(self, group):
"""convience / shortcut method to test if the user belongs to `group`
@@ -141,9 +141,8 @@
def is_anonymous(self):
""" checks if user is an anonymous user"""
- #FIXME on the web-side anonymous user is detected according
- # to config['anonymous-user'], we don't have this info on
- # the server side.
+ # FIXME on the web-side anonymous user is detected according to config['anonymous-user'],
+ # we don't have this info on the server side.
return self.groups == frozenset(('guests', ))
def owns(self, eid):
@@ -162,7 +161,7 @@
if self.firstname and self.surname:
return self._cw._('%(firstname)s %(surname)s') % {
- 'firstname': self.firstname, 'surname' : self.surname}
+ 'firstname': self.firstname, 'surname': self.surname}
if self.firstname:
return self.firstname
return self.login
--- a/cubicweb/entity.py Mon Jun 06 21:17:33 2016 +0200
+++ b/cubicweb/entity.py Mon Jun 06 15:28:35 2016 +0200
@@ -338,7 +338,6 @@
return
else:
visited.add(eschema.type)
- _fetchattrs = []
for attr in sorted(fetchattrs):
try:
rschema = eschema.subjrels[attr]
@@ -366,7 +365,7 @@
# later information here, systematically add it.
rel.change_optional('right')
targettypes = rschema.objects(eschema.type)
- vreg = user._cw.vreg # XXX user._cw.vreg iiiirk
+ vreg = user._cw.vreg # XXX user._cw.vreg iiiirk
etypecls = vreg['etypes'].etype_class(targettypes[0])
if len(targettypes) > 1:
# find fetch_attrs common to all destination types
--- a/cubicweb/hooks/syncsession.py Mon Jun 06 21:17:33 2016 +0200
+++ b/cubicweb/hooks/syncsession.py Mon Jun 06 15:28:35 2016 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -18,8 +18,8 @@
"""Core hooks: synchronize living session on persistent data changes"""
__docformat__ = "restructuredtext en"
+
from cubicweb import _
-
from cubicweb import UnknownProperty, BadConnectionId, validation_error
from cubicweb.predicates import is_instance
from cubicweb.server import hook
@@ -55,7 +55,7 @@
class _DeleteGroupOp(_GroupOperation):
- """synchronize user when a in_group relation has been deleted"""
+ """Synchronize user when a in_group relation has been deleted"""
def postcommit_event(self):
"""the observed connections set has been commited"""
@@ -67,7 +67,8 @@
class _AddGroupOp(_GroupOperation):
- """synchronize user when a in_group relation has been added"""
+ """Synchronize user when a in_group relation has been added"""
+
def postcommit_event(self):
"""the observed connections set has been commited"""
groups = self.cnxuser.groups
@@ -79,6 +80,7 @@
class SyncInGroupHook(SyncSessionHook):
+ """Watch addition/removal of in_group relation to synchronize living sessions accordingly"""
__regid__ = 'syncingroup'
__select__ = SyncSessionHook.__select__ & hook.match_rtype('in_group')
events = ('after_delete_relation', 'after_add_relation')
@@ -99,11 +101,10 @@
hook.Operation.__init__(self, cnx)
def postcommit_event(self):
- """the observed connections set has been commited"""
try:
self.cnx.repo.close(self.sessionid)
except BadConnectionId:
- pass # already closed
+ pass # already closed
class CloseDeletedUserSessionsHook(SyncSessionHook):
@@ -112,7 +113,6 @@
events = ('after_delete_entity',)
def __call__(self):
- """modify user permission, need to update users"""
for session in get_user_sessions(self._cw.repo, self.entity.eid):
_DelUserOp(self._cw, session.sessionid)
@@ -211,11 +211,10 @@
events = ('before_delete_entity',)
def __call__(self):
- eid = self.entity.eid
cnx = self._cw
for eidfrom, rtype, eidto in cnx.transaction_data.get('pendingrelations', ()):
if rtype == 'for_user' and eidfrom == self.entity.eid:
- # if for_user was set, delete has already been handled
+ # if for_user was set, delete already handled by hook on for_user deletion
break
else:
_DelCWPropertyOp(cnx, cwpropdict=cnx.vreg['propertyvalues'],
@@ -233,7 +232,7 @@
if not cnx.entity_metas(eidfrom)['type'] == 'CWProperty':
return
key, value = cnx.execute('Any K,V WHERE P eid %(x)s,P pkey K,P value V',
- {'x': eidfrom})[0]
+ {'x': eidfrom})[0]
if cnx.vreg.property_info(key)['sitewide']:
msg = _("site-wide property can't be set for user")
raise validation_error(eidfrom, {('for_user', 'subject'): msg})
@@ -248,8 +247,7 @@
def __call__(self):
cnx = self._cw
- key = cnx.execute('Any K WHERE P eid %(x)s, P pkey K',
- {'x': self.eidfrom})[0][0]
+ key = cnx.execute('Any K WHERE P eid %(x)s, P pkey K', {'x': self.eidfrom})[0][0]
cnx.transaction_data.setdefault('pendingrelations', []).append(
(self.eidfrom, self.rtype, self.eidto))
for session in get_user_sessions(cnx.repo, self.eidto):
--- a/cubicweb/hooks/test/unittest_syncsession.py Mon Jun 06 21:17:33 2016 +0200
+++ b/cubicweb/hooks/test/unittest_syncsession.py Mon Jun 06 15:28:35 2016 +0200
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -27,6 +27,7 @@
from cubicweb import ValidationError
from cubicweb.devtools.testlib import CubicWebTC
+
class CWPropertyHooksTC(CubicWebTC):
def test_unexistant_cwproperty(self):
--- a/cubicweb/req.py Mon Jun 06 21:17:33 2016 +0200
+++ b/cubicweb/req.py Mon Jun 06 15:28:35 2016 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -23,7 +23,9 @@
from datetime import time, datetime, timedelta
from six import PY2, PY3, text_type
-from six.moves.urllib.parse import parse_qs, parse_qsl, quote as urlquote, unquote as urlunquote, urlsplit, urlunsplit
+from six.moves.urllib.parse import (parse_qs, parse_qsl,
+ quote as urlquote, unquote as urlunquote,
+ urlsplit, urlunsplit)
from logilab.common.decorators import cached
from logilab.common.deprecation import deprecated
@@ -38,9 +40,11 @@
ONESECOND = timedelta(0, 1, 0)
CACHE_REGISTRY = {}
+
class FindEntityError(Exception):
"""raised when find_one_entity() can not return one and only one entity"""
+
class Cache(dict):
def __init__(self):
super(Cache, self).__init__()
@@ -59,13 +63,13 @@
:attribute vreg.schema: the instance's schema
:attribute vreg.config: the instance's configuration
"""
- is_request = True # False for repository session
+ is_request = True # False for repository session
def __init__(self, vreg):
self.vreg = vreg
try:
encoding = vreg.property_value('ui.encoding')
- except Exception: # no vreg or property not registered
+ except Exception: # no vreg or property not registered
encoding = 'utf-8'
self.encoding = encoding
# cache result of execution for (rql expr / eids),
@@ -117,10 +121,12 @@
def etype_rset(self, etype, size=1):
"""return a fake result set for a particular entity type"""
- rset = ResultSet([('A',)]*size, '%s X' % etype,
- description=[(etype,)]*size)
+ rset = ResultSet([('A',)] * size, '%s X' % etype,
+ description=[(etype,)] * size)
+
def get_entity(row, col=0, etype=etype, req=self, rset=rset):
return req.vreg['etypes'].etype_class(etype)(req, rset, row, col)
+
rset.get_entity = get_entity
rset.req = self
return rset
@@ -255,7 +261,7 @@
if _now > cache.latest_cache_lookup + ONESECOND:
ecache = self.execute(
'Any C,T WHERE C is CWCache, C name %(name)s, C timestamp T',
- {'name':cachename}).get_entity(0,0)
+ {'name': cachename}).get_entity(0, 0)
cache.latest_cache_lookup = _now
if not ecache.valid(cache.cache_creation_date):
cache.clear()
@@ -330,7 +336,7 @@
quoted = quoted.encode(self.encoding)
try:
return unicode(urlunquote(quoted), self.encoding)
- except UnicodeDecodeError: # might occurs on manually typed URLs
+ except UnicodeDecodeError:  # might occur on manually typed URLs
return unicode(urlunquote(quoted), 'iso-8859-1')
def url_parse_qsl(self, querystring):
@@ -344,10 +350,9 @@
for key, val in parse_qsl(querystring):
try:
yield unicode(key, self.encoding), unicode(val, self.encoding)
- except UnicodeDecodeError: # might occurs on manually typed URLs
+ except UnicodeDecodeError:  # might occur on manually typed URLs
yield unicode(key, 'iso-8859-1'), unicode(val, 'iso-8859-1')
-
def rebuild_url(self, url, **newparams):
"""return the given url with newparams inserted. If any new params
is already specified in the url, it's overriden by the new value
@@ -410,12 +415,11 @@
else:
initargs.update(kwargs)
try:
- view = self.vreg[__registry].select(__vid, self, rset=rset, **initargs)
+ view = self.vreg[__registry].select(__vid, self, rset=rset, **initargs)
except NoSelectableObject:
if __fallback_oid is None:
raise
- view = self.vreg[__registry].select(__fallback_oid, self,
- rset=rset, **initargs)
+ view = self.vreg[__registry].select(__fallback_oid, self, rset=rset, **initargs)
return view.render(w=w, **kwargs)
def printable_value(self, attrtype, value, props=None, displaytime=True,
@@ -474,8 +478,8 @@
elif etype == 'Time':
format = self.property_value('ui.time-format')
try:
- # (adim) I can't find a way to parse a Time with a custom format
- date = strptime(value, format) # this returns a DateTime
+ # (adim) I can't find a way to parse a time with a custom format
+ date = strptime(value, format) # this returns a datetime
return time(date.hour, date.minute, date.second)
except ValueError:
raise ValueError(self._('can\'t parse %(value)r (expected %(format)s)')
--- a/cubicweb/server/migractions.py Mon Jun 06 21:17:33 2016 +0200
+++ b/cubicweb/server/migractions.py Mon Jun 06 15:28:35 2016 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -39,7 +39,6 @@
from datetime import datetime
from glob import glob
from copy import copy
-from warnings import warn
from contextlib import contextmanager
from six import PY2, text_type
@@ -69,6 +68,7 @@
__regid__ = 'cw.migration.clear_group_mapping'
__select__ = hook.Hook.__select__ & is_instance('CWGroup')
events = ('after_add_entity', 'after_update_entity',)
+
def __call__(self):
clear_cache(self.mih, 'group_mapping')
self.mih._synchronized.clear()
@@ -77,7 +77,7 @@
def mih_register(cls, repo):
# may be already registered in tests (e.g. unittest_migractions at
# least)
- if not cls.__regid__ in repo.vreg['after_add_entity_hooks']:
+ if cls.__regid__ not in repo.vreg['after_add_entity_hooks']:
repo.vreg.register(ClearGroupMap)
@@ -176,7 +176,7 @@
def cmd_process_script(self, migrscript, funcname=None, *args, **kwargs):
try:
return super(ServerMigrationHelper, self).cmd_process_script(
- migrscript, funcname, *args, **kwargs)
+ migrscript, funcname, *args, **kwargs)
except ExecutionError as err:
sys.stderr.write("-> %s\n" % err)
except BaseException:
@@ -206,7 +206,7 @@
elif askconfirm and not self.confirm('Backup %s database?' % config.appid):
print('-> no backup done.')
return
- open(backupfile,'w').close() # kinda lock
+ open(backupfile, 'w').close()  # kinda lock
os.chmod(backupfile, 0o600)
# backup
source = repo.system_source
@@ -235,7 +235,7 @@
# call hooks
repo.hm.call_hooks('server_backup', repo=repo, timestamp=timestamp)
# done
- print('-> backup file', backupfile)
+ print('-> backup file', backupfile)
finally:
shutil.rmtree(tmpdir)
@@ -257,7 +257,7 @@
for name in bkup.getnames():
if name[0] in '/.':
raise ExecutionError('Security check failed, path starts with "/" or "."')
- bkup.close() # XXX seek error if not close+open !?!
+ bkup.close() # XXX seek error if not close+open !?!
bkup = tarfile.open(backupfile, 'r|gz')
bkup.extractall(path=tmpdir)
bkup.close()
@@ -303,8 +303,8 @@
'schema': self.repo.get_schema(),
'cnx': self.cnx,
'fsschema': self.fs_schema,
- 'session' : self.cnx,
- 'repo' : self.repo,
+ 'session': self.cnx,
+ 'repo': self.repo,
})
return context
@@ -391,7 +391,7 @@
for geid, gname in self.rqlexec('Any G, GN WHERE T %s G, G name GN, '
'T eid %%(x)s' % perm, {'x': teid},
ask_confirm=False):
- if not gname in newgroups:
+ if gname not in newgroups:
if not confirm or self.confirm('Remove %s permission of %s to %s?'
% (action, erschema, gname)):
self.rqlexec('DELETE T %s G WHERE G eid %%(x)s, T eid %s'
@@ -414,7 +414,7 @@
for expreid, expression in self.rqlexec('Any E, EX WHERE T %s E, E expression EX, '
'T eid %s' % (perm, teid),
ask_confirm=False):
- if not expression in newexprs:
+ if expression not in newexprs:
if not confirm or self.confirm('Remove %s expression for %s permission of %s?'
% (expression, action, erschema)):
# deleting the relation will delete the expression entity
@@ -458,7 +458,7 @@
if syncprops:
assert reporschema.eid, reporschema
self.rqlexecall(ss.updaterschema2rql(rschema, reporschema.eid),
- ask_confirm=self.verbosity>=2)
+ ask_confirm=self.verbosity >= 2)
if rschema.rule:
if syncperms:
self._synchronize_permissions(rschema, reporschema.eid)
@@ -492,7 +492,7 @@
try:
eschema = self.fs_schema.eschema(etype)
except KeyError:
- return # XXX somewhat unexpected, no?...
+ return # XXX somewhat unexpected, no?...
if syncprops:
repospschema = repoeschema.specializes()
espschema = eschema.specializes()
@@ -513,17 +513,17 @@
if rschema in VIRTUAL_RTYPES:
continue
if role == 'subject':
- if not rschema in repoeschema.subject_relations():
+ if rschema not in repoeschema.subject_relations():
continue
subjtypes, objtypes = [etype], targettypes
- else: # role == 'object'
- if not rschema in repoeschema.object_relations():
+ else: # role == 'object'
+ if rschema not in repoeschema.object_relations():
continue
subjtypes, objtypes = targettypes, [etype]
self._synchronize_rschema(rschema, syncrdefs=False,
syncprops=syncprops, syncperms=syncperms)
- if rschema.rule: # rdef for computed rtype are infered hence should not be
- # synchronized
+ if rschema.rule: # rdef for computed rtype are infered hence should not be
+ # synchronized
continue
reporschema = self.repo.schema.rschema(rschema)
for subj in subjtypes:
@@ -532,7 +532,7 @@
continue
self._synchronize_rdef_schema(subj, rschema, obj,
syncprops=syncprops, syncperms=syncperms)
- if syncprops: # need to process __unique_together__ after rdefs were processed
+ if syncprops: # need to process __unique_together__ after rdefs were processed
# mappings from constraint name to columns
# filesystem (fs) and repository (repo) wise
fs = {}
@@ -592,7 +592,7 @@
self._synchronized.add((objtype, rschema, subjtype))
rdef = rschema.rdef(subjtype, objtype)
if rdef.infered:
- return # don't try to synchronize infered relation defs
+ return # don't try to synchronize infered relation defs
repordef = reporschema.rdef(subjtype, objtype)
confirm = self.verbosity >= 2
if syncprops:
@@ -619,7 +619,7 @@
self.rqlexecall(ss.constraints2rql(cstrtype_map, newconstraints,
repordef.eid),
ask_confirm=confirm)
- if syncperms and not rschema in VIRTUAL_RTYPES:
+ if syncperms and rschema not in VIRTUAL_RTYPES:
self._synchronize_permissions(rdef, repordef.eid)
# base actions ############################################################
@@ -630,7 +630,7 @@
self.commit()
def cmd_add_cube(self, cube, update_database=True):
- self.cmd_add_cubes( (cube,), update_database)
+ self.cmd_add_cubes((cube,), update_database)
def cmd_add_cubes(self, cubes, update_database=True):
"""update_database is telling if the database schema should be updated
@@ -642,11 +642,11 @@
if not newcubes:
return
for cube in newcubes:
- self.cmd_set_property('system.version.'+cube,
+ self.cmd_set_property('system.version.' + cube,
self.config.cube_version(cube))
# ensure added cube is in config cubes
# XXX worth restoring on error?
- if not cube in self.config._cubes:
+ if cube not in self.config._cubes:
self.config._cubes += (cube,)
if not update_database:
self.commit()
@@ -658,17 +658,16 @@
self.update_context('fsschema', self.fs_schema)
new = set()
# execute pre-create files
- driver = self.repo.system_source.dbdriver
for cube in reversed(newcubes):
self.cmd_install_custom_sql_scripts(cube)
self.cmd_exec_event_script('precreate', cube)
# add new entity and relation types
for rschema in newcubes_schema.relations():
- if not rschema in self.repo.schema:
+ if rschema not in self.repo.schema:
self.cmd_add_relation_type(rschema.type)
new.add(rschema.type)
toadd = [eschema for eschema in newcubes_schema.entities()
- if not eschema in self.repo.schema]
+ if eschema not in self.repo.schema]
for eschema in order_eschemas(toadd):
self.cmd_add_entity_type(eschema.type)
new.add(eschema.type)
@@ -705,11 +704,10 @@
self.cmd_exec_event_script('preremove', cube)
# remove cubes'entity and relation types
for rschema in fsschema.relations():
- if not rschema in removedcubes_schema and rschema in reposchema:
+ if rschema not in removedcubes_schema and rschema in reposchema:
self.cmd_drop_relation_type(rschema.type)
toremove = [eschema for eschema in fsschema.entities()
- if not eschema in removedcubes_schema
- and eschema in reposchema]
+ if eschema not in removedcubes_schema and eschema in reposchema]
for eschema in reversed(order_eschemas(toremove)):
self.cmd_drop_entity_type(eschema.type)
for rschema in fsschema.relations():
@@ -718,14 +716,14 @@
# other cubes
for fromtype, totype in rschema.rdefs:
if (fromtype, totype) not in removedcubes_schema[rschema.type].rdefs and \
- (fromtype, totype) in reposchema[rschema.type].rdefs:
+ (fromtype, totype) in reposchema[rschema.type].rdefs:
self.cmd_drop_relation_definition(
str(fromtype), rschema.type, str(totype))
# execute post-remove files
for cube in reversed(removedcubes):
self.cmd_exec_event_script('postremove', cube)
self.rqlexec('DELETE CWProperty X WHERE X pkey %(pk)s',
- {'pk': u'system.version.'+cube}, ask_confirm=False)
+ {'pk': u'system.version.' + cube}, ask_confirm=False)
self.commit()
# schema migration actions ################################################
@@ -768,7 +766,7 @@
card = eschema.rdef(newname).cardinality[0]
if card == '1':
rql += ', NOT X %s NULL' % oldname
- self.rqlexec(rql, ask_confirm=self.verbosity>=2)
+ self.rqlexec(rql, ask_confirm=self.verbosity >= 2)
# XXX if both attributes fulltext indexed, should skip fti rebuild
# XXX if old attribute was fti indexed but not the new one old value
# won't be removed from the index (this occurs on other kind of
@@ -811,9 +809,9 @@
# ignore those meta relations, they will be automatically added
if rschema.type in META_RTYPES:
continue
- if not attrschema.type in instschema:
+ if attrschema.type not in instschema:
self.cmd_add_entity_type(attrschema.type, False, False)
- if not rschema.type in instschema:
+ if rschema.type not in instschema:
# need to add the relation type and to commit to get it
# actually in the schema
self.cmd_add_relation_type(rschema.type, False, commit=True)
@@ -834,7 +832,7 @@
ask_confirm=confirm)
for rschema, tschemas, role in spschema.relation_definitions(True):
for tschema in tschemas:
- if not tschema in instschema:
+ if tschema not in instschema:
continue
if role == 'subject':
subjschema = spschema
@@ -867,7 +865,7 @@
# ignore relations where the targeted type is not in the
# current instance schema
targettype = targetschema.type
- if not targettype in instschema and targettype != etype:
+ if targettype not in instschema and targettype != etype:
continue
if not rtypeadded:
# need to add the relation type and to commit to get it
@@ -892,7 +890,7 @@
targettype = targetschema.type
# don't check targettype != etype since in this case the
# relation has already been added as a subject relation
- if not targettype in instschema:
+ if targettype not in instschema:
continue
if not rtypeadded:
# need to add the relation type and to commit to get it
@@ -918,7 +916,7 @@
# XXX what if we delete an entity type which is specialized by other types
# unregister the entity from CWEType
self.rqlexec('DELETE CWEType X WHERE X name %(etype)s', {'etype': etype},
- ask_confirm=self.verbosity>=2)
+ ask_confirm=self.verbosity >= 2)
if commit:
self.commit()
@@ -935,13 +933,12 @@
# if merging two existing entity types
if newname in schema:
assert oldname in ETYPE_NAME_MAP, \
- '%s should be mapped to %s in ETYPE_NAME_MAP' % (oldname,
- newname)
+ '%s should be mapped to %s in ETYPE_NAME_MAP' % (oldname, newname)
if attrs is None:
attrs = ','.join(SQL_PREFIX + rschema.type
for rschema in schema[newname].subject_relations()
if (rschema.final or rschema.inlined)
- and not rschema in PURE_VIRTUAL_RTYPES)
+ and rschema not in PURE_VIRTUAL_RTYPES)
else:
attrs += ('eid', 'creation_date', 'modification_date', 'cwuri')
attrs = ','.join(SQL_PREFIX + attr for attr in attrs)
@@ -968,10 +965,10 @@
# delete relations using SQL to avoid relations content removal
# triggered by schema synchronization hooks.
for rdeftype in ('CWRelation', 'CWAttribute'):
- thispending = set( (eid for eid, in self.sqlexec(
+ thispending = set((eid for eid, in self.sqlexec(
'SELECT cw_eid FROM cw_%s WHERE cw_from_entity=%%(eid)s OR '
' cw_to_entity=%%(eid)s' % rdeftype,
- {'eid': oldeid}, ask_confirm=False)) )
+ {'eid': oldeid}, ask_confirm=False)))
# we should add deleted eids into pending eids else we may
# get some validation error on commit since integrity hooks
# may think some required relation is missing... This also ensure
@@ -1009,7 +1006,7 @@
# elif simply renaming an entity type
else:
self.rqlexec('SET ET name %(newname)s WHERE ET is CWEType, ET name %(on)s',
- {'newname' : text_type(newname), 'on' : oldname},
+ {'newname': text_type(newname), 'on': oldname},
ask_confirm=False)
if commit:
self.commit()
@@ -1050,14 +1047,14 @@
# symmetric relations appears twice
if (subj, obj) in done:
continue
- done.add( (subj, obj) )
+ done.add((subj, obj))
self.cmd_add_relation_definition(subj, rtype, obj)
if rtype in META_RTYPES:
# if the relation is in META_RTYPES, ensure we're adding it for
# all entity types *in the persistent schema*, not only those in
# the fs schema
for etype in self.repo.schema.entities():
- if not etype in self.fs_schema:
+ if etype not in self.fs_schema:
# get sample object type and rproperties
objtypes = rschema.objects()
assert len(objtypes) == 1, objtypes
@@ -1078,9 +1075,9 @@
any hooks called.
"""
self.rqlexec('DELETE CWRType X WHERE X name %r' % rtype,
- ask_confirm=self.verbosity>=2)
+ ask_confirm=self.verbosity >= 2)
self.rqlexec('DELETE CWComputedRType X WHERE X name %r' % rtype,
- ask_confirm=self.verbosity>=2)
+ ask_confirm=self.verbosity >= 2)
if commit:
self.commit()
@@ -1100,7 +1097,7 @@
self.cmd_add_relation_type(newname, commit=True)
if not self.repo.schema[oldname].rule:
self.rqlexec('SET X %s Y WHERE X %s Y' % (newname, oldname),
- ask_confirm=self.verbosity>=2)
+ ask_confirm=self.verbosity >= 2)
self.cmd_drop_relation_type(oldname, commit=commit)
def cmd_add_relation_definition(self, subjtype, rtype, objtype, commit=True):
@@ -1111,7 +1108,7 @@
if rschema.rule:
raise ExecutionError('Cannot add a relation definition for a '
'computed relation (%s)' % rschema)
- if not rtype in self.repo.schema:
+ if rtype not in self.repo.schema:
self.cmd_add_relation_type(rtype, addrdef=False, commit=True)
if (subjtype, objtype) in self.repo.schema.rschema(rtype).rdefs:
print('warning: relation %s %s %s is already known, skip addition' % (
@@ -1131,7 +1128,7 @@
for attr in ('rtype', 'subject', 'object'):
schemaobj = getattr(rdef, attr)
if getattr(schemaobj, 'eid', None) is None:
- schemaobj.eid = self.repo.schema[schemaobj].eid
+ schemaobj.eid = self.repo.schema[schemaobj].eid
assert schemaobj.eid is not None, schemaobj
return rdef
@@ -1153,7 +1150,7 @@
rql = ('DELETE %s X WHERE X from_entity FE, FE name "%s",'
'X relation_type RT, RT name "%s", X to_entity TE, TE name "%s"')
self.rqlexec(rql % (etype, subjtype, rtype, objtype),
- ask_confirm=self.verbosity>=2)
+ ask_confirm=self.verbosity >= 2)
if commit:
self.commit()
@@ -1194,8 +1191,8 @@
else:
for etype in self.repo.schema.entities():
if etype.eid is None:
- # not yet added final etype (thing to BigInt defined in
- # yams though 3.13 migration not done yet)
+ # not yet added final etype (thing to BigInt defined in
+ # yams though 3.13 migration not done yet)
continue
self._synchronize_eschema(etype, syncrdefs=syncrdefs,
syncprops=syncprops, syncperms=syncperms)
@@ -1223,7 +1220,7 @@
if PY2 and isinstance(v, str):
kwargs[k] = unicode(v)
rql = 'SET %s WHERE %s' % (','.join(values), ','.join(restriction))
- self.rqlexec(rql, kwargs, ask_confirm=self.verbosity>=2)
+ self.rqlexec(rql, kwargs, ask_confirm=self.verbosity >= 2)
if commit:
self.commit()
@@ -1240,26 +1237,26 @@
oldvalue = constr.max
if oldvalue == size:
return
- if oldvalue is None and not size is None:
+ if oldvalue is None and size is not None:
ceid = self.rqlexec('INSERT CWConstraint C: C value %(v)s, C cstrtype CT '
'WHERE CT name "SizeConstraint"',
{'v': SizeConstraint(size).serialize()},
- ask_confirm=self.verbosity>=2)[0][0]
+ ask_confirm=self.verbosity >= 2)[0][0]
self.rqlexec('SET X constrained_by C WHERE X from_entity S, X relation_type R, '
'S name "%s", R name "%s", C eid %s' % (etype, rtype, ceid),
- ask_confirm=self.verbosity>=2)
- elif not oldvalue is None:
- if not size is None:
+ ask_confirm=self.verbosity >= 2)
+ elif oldvalue is not None:
+ if size is not None:
self.rqlexec('SET C value %%(v)s WHERE X from_entity S, X relation_type R,'
'X constrained_by C, C cstrtype CT, CT name "SizeConstraint",'
'S name "%s", R name "%s"' % (etype, rtype),
{'v': text_type(SizeConstraint(size).serialize())},
- ask_confirm=self.verbosity>=2)
+ ask_confirm=self.verbosity >= 2)
else:
self.rqlexec('DELETE X constrained_by C WHERE X from_entity S, X relation_type R,'
'X constrained_by C, C cstrtype CT, CT name "SizeConstraint",'
'S name "%s", R name "%s"' % (etype, rtype),
- ask_confirm=self.verbosity>=2)
+ ask_confirm=self.verbosity >= 2)
# cleanup unused constraints
self.rqlexec('DELETE CWConstraint C WHERE NOT X constrained_by C')
if commit:
@@ -1294,8 +1291,10 @@
**kwargs)
if not isinstance(wfof, (list, tuple)):
wfof = (wfof,)
+
def _missing_wf_rel(etype):
return 'missing workflow relations, see make_workflowable(%s)' % etype
+
for etype in wfof:
eschema = self.repo.schema[etype]
etype = text_type(etype)
@@ -1472,7 +1471,7 @@
ask_confirm=False):
"""rql action"""
if not isinstance(rql, (tuple, list)):
- rql = ( (rql, kwargs), )
+ rql = ((rql, kwargs),)
res = None
execute = self.cnx.execute
for rql, kwargs in rql:
--- a/cubicweb/server/repository.py Mon Jun 06 21:17:33 2016 +0200
+++ b/cubicweb/server/repository.py Mon Jun 06 15:28:35 2016 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -29,7 +29,6 @@
__docformat__ = "restructuredtext en"
-import threading
from warnings import warn
from itertools import chain
from time import time, localtime, strftime
@@ -51,10 +50,13 @@
from cubicweb.server import ShuttingDown, utils, hook, querier, sources
from cubicweb.server.session import Session, InternalManager
-NO_CACHE_RELATIONS = set( [('owned_by', 'object'),
- ('created_by', 'object'),
- ('cw_source', 'object'),
- ])
+
+NO_CACHE_RELATIONS = set([
+ ('owned_by', 'object'),
+ ('created_by', 'object'),
+ ('cw_source', 'object'),
+])
+
def prefill_entity_caches(entity):
cnx = entity._cw
@@ -74,6 +76,7 @@
continue
entity.cw_set_relation_cache(rtype, 'object', cnx.empty_rset())
+
def del_existing_rel_if_needed(cnx, eidfrom, rtype, eidto):
"""delete existing relation when adding a new one if card is 1 or ?
@@ -120,7 +123,7 @@
eschema = entity.e_schema
for attr in entity.cw_edited:
rschema = eschema.subjrels[attr]
- if not rschema.final: # inlined relation
+ if not rschema.final: # inlined relation
value = entity.cw_edited[attr]
relations.append((attr, value))
cnx.update_rel_cache_add(entity.eid, attr, value)
@@ -128,7 +131,7 @@
if rdef.cardinality[1] in '1?' and activeintegrity:
with cnx.security_enabled(read=False):
cnx.execute('DELETE X %s Y WHERE Y eid %%(y)s' % attr,
- {'x': entity.eid, 'y': value})
+ {'x': entity.eid, 'y': value})
return relations
@@ -168,7 +171,7 @@
self._running_threads = []
# initial schema, should be build or replaced latter
self.schema = schema.CubicWebSchema(config.appid)
- self.vreg.schema = self.schema # until actual schema is loaded...
+ self.vreg.schema = self.schema # until actual schema is loaded...
# shutdown flag
self.shutting_down = False
# sources (additional sources info in the system database)
@@ -186,6 +189,7 @@
self.init_cnxset_pool()
# the hooks manager
self.hm = hook.HooksManager(self.vreg)
+
# registry hook to fix user class on registry reload
@onevent('after-registry-reload', self)
def fix_user_classes(self):
@@ -247,7 +251,8 @@
# 4. close initialization connection set and reopen fresh ones for
# proper initialization
self._get_cnxset().close(True)
- self.cnxsets = [] # list of available cnxsets (can't iterate on a Queue)
+ # list of available cnxsets (can't iterate on a Queue)
+ self.cnxsets = []
for i in range(config['connections-pool-size']):
self.cnxsets.append(self.system_source.wrapped_connection())
self._cnxsets_pool.put_nowait(self.cnxsets[-1])
@@ -256,15 +261,14 @@
def init_sources_from_database(self):
self.sources_by_eid = {}
- if self.config.quick_start \
- or not 'CWSource' in self.schema: # # 3.10 migration
+ if self.config.quick_start or 'CWSource' not in self.schema: # 3.10 migration
self.system_source.init_creating()
return
with self.internal_cnx() as cnx:
# FIXME: sources should be ordered (add_entity priority)
for sourceent in cnx.execute(
- 'Any S, SN, SA, SC WHERE S is_instance_of CWSource, '
- 'S name SN, S type SA, S config SC').entities():
+ 'Any S, SN, SA, SC WHERE S is_instance_of CWSource, '
+ 'S name SN, S type SA, S config SC').entities():
if sourceent.name == 'system':
self.system_source.eid = sourceent.eid
self.sources_by_eid[sourceent.eid] = self.system_source
@@ -349,8 +353,9 @@
# register a task to cleanup expired session
self.cleanup_session_time = self.config['cleanup-session-time'] or 60 * 60 * 24
assert self.cleanup_session_time > 0
- cleanup_session_interval = min(60*60, self.cleanup_session_time / 3)
- assert self._tasks_manager is not None, "This Repository is not intended to be used as a server"
+ cleanup_session_interval = min(60 * 60, self.cleanup_session_time / 3)
+ assert self._tasks_manager is not None, \
+ "This Repository is not intended to be used as a server"
self._tasks_manager.add_looping_task(cleanup_session_interval,
self.clean_sessions)
@@ -365,7 +370,8 @@
XXX __init__ or in external codes (various server managers).
"""
self._prepare_startup()
- assert self._tasks_manager is not None, "This Repository is not intended to be used as a server"
+ assert self._tasks_manager is not None,\
+ "This Repository is not intended to be used as a server"
self._tasks_manager.start()
def looping_task(self, interval, func, *args):
@@ -374,14 +380,14 @@
looping tasks can only be registered during repository initialization,
once done this method will fail.
"""
- assert self._tasks_manager is not None, "This Repository is not intended to be used as a server"
+ assert self._tasks_manager is not None,\
+ "This Repository is not intended to be used as a server"
self._tasks_manager.add_looping_task(interval, func, *args)
def threaded_task(self, func):
"""start function in a separated thread"""
utils.RepoThread(func, self._running_threads).start()
- #@locked
def _get_cnxset(self):
try:
return self._cnxsets_pool.get(True, timeout=5)
@@ -430,8 +436,8 @@
hits, misses = self.system_source.cache_hit, self.system_source.cache_miss
self.info('sql cache hit/miss: %s/%s (%s%% hits)', hits, misses,
(hits * 100) / (hits + misses))
- nocache = self.system_source.no_cache
- self.info('sql cache usage: %s/%s (%s%%)', hits+ misses, nocache,
+ nocache = self.system_source.no_cache
+ self.info('sql cache usage: %s/%s (%s%%)', hits + misses, nocache,
((hits + misses) * 100) / (hits + misses + nocache))
except ZeroDivisionError:
pass
@@ -458,7 +464,7 @@
eid = self.check_auth_info(cnx, login, authinfo)
cwuser = self._build_user(cnx, eid)
if self.config.consider_user_state and \
- not cwuser.cw_adapt_to('IWorkflowable').state in cwuser.AUTHENTICABLE_STATES:
+ not cwuser.cw_adapt_to('IWorkflowable').state in cwuser.AUTHENTICABLE_STATES:
raise AuthenticationError('user is not in authenticable state')
return cwuser
@@ -480,7 +486,7 @@
# public (dbapi) interface ################################################
@deprecated("[3.19] use _cw.call_service('repo_stats')")
- def stats(self): # XXX restrict to managers session?
+ def stats(self): # XXX restrict to managers session?
"""Return a dictionary containing some statistics about the repository
resources usage.
@@ -548,8 +554,8 @@
vcconf = {}
with self.internal_cnx() as cnx:
for pk, version in cnx.execute(
- 'Any K,V WHERE P is CWProperty, P value V, P pkey K, '
- 'P pkey ~="system.version.%"', build_descr=False):
+ 'Any K,V WHERE P is CWProperty, P value V, P pkey K, '
+ 'P pkey ~="system.version.%"', build_descr=False):
cube = pk.split('.')[-1]
# XXX cubicweb migration
if cube in CW_MIGRATION_MAP:
@@ -692,8 +698,7 @@
connections have all hooks beside security enabled.
"""
with Session(InternalManager(), self).new_cnx() as cnx:
- cnx.user._cw = cnx # XXX remove when "vreg = user._cw.vreg"
- # hack in entity.py is gone
+ cnx.user._cw = cnx # XXX remove when "vreg = user._cw.vreg" hack in entity.py is gone
with cnx.security_enabled(read=False, write=False):
yield cnx
@@ -732,12 +737,12 @@
rqlcache = self.querier._rql_cache
for eid in eids:
try:
- etype, extid, auri = etcache.pop(int(eid)) # may be a string in some cases
- rqlcache.pop( ('%s X WHERE X eid %s' % (etype, eid),), None)
+ etype, extid, auri = etcache.pop(int(eid)) # may be a string in some cases
+ rqlcache.pop(('%s X WHERE X eid %s' % (etype, eid),), None)
extidcache.pop(extid, None)
except KeyError:
etype = None
- rqlcache.pop( ('Any X WHERE X eid %s' % eid,), None)
+ rqlcache.pop(('Any X WHERE X eid %s' % eid,), None)
self.system_source.clear_eid_cache(eid, etype)
def type_from_eid(self, eid, cnx):
@@ -846,7 +851,7 @@
with cnx.running_hooks_ops():
for rschema, _, role in entities[0].e_schema.relation_definitions():
if rschema.rule:
- continue # computed relation
+ continue # computed relation
rtype = rschema.type
if rtype in schema.VIRTUAL_RTYPES or rtype in pendingrtypes:
continue
@@ -861,9 +866,10 @@
except ValidationError:
raise
except Unauthorized:
- self.exception('Unauthorized exception while cascading delete for entity %s. '
- 'RQL: %s.\nThis should not happen since security is disabled here.',
- entities, rql)
+ self.exception(
+ 'Unauthorized exception while cascading delete for entity %s. '
+ 'RQL: %s.\nThis should not happen since security is disabled here.',
+ entities, rql)
raise
except Exception:
if self.config.mode == 'test':
@@ -891,7 +897,7 @@
the entity instance
"""
entity = edited.entity
- entity._cw_is_saved = False # entity has an eid but is not yet saved
+ entity._cw_is_saved = False # entity has an eid but is not yet saved
# init edited_attributes before calling before_add_entity hooks
entity.cw_edited = edited
source = self.system_source
@@ -920,9 +926,9 @@
# call hooks for inlined relations
for attr, value in relations:
self.hm.call_hooks('before_add_relation', cnx,
- eidfrom=entity.eid, rtype=attr, eidto=value)
+ eidfrom=entity.eid, rtype=attr, eidto=value)
self.hm.call_hooks('after_add_relation', cnx,
- eidfrom=entity.eid, rtype=attr, eidto=value)
+ eidfrom=entity.eid, rtype=attr, eidto=value)
return entity.eid
def glob_update_entity(self, cnx, edited):
@@ -954,7 +960,7 @@
# inlined relation
previous_value = entity.related(attr) or None
if previous_value is not None:
- previous_value = previous_value[0][0] # got a result set
+ previous_value = previous_value[0][0] # got a result set
if previous_value == entity.cw_attr_cache[attr]:
previous_value = None
else:
@@ -996,7 +1002,6 @@
if orig_edited is not None:
entity.cw_edited = orig_edited
-
def glob_delete_entities(self, cnx, eids):
"""delete a list of entities and all related entities from the repository"""
# mark eids as being deleted in cnx info and setup cache update
@@ -1009,7 +1014,7 @@
eids = frozenset(eids)
eids = eids - op._container
op._container |= eids
- data_by_etype = {} # values are [list of entities]
+ data_by_etype = {} # values are [list of entities]
#
# WARNING: the way this dictionary is populated is heavily optimized
# and does not use setdefault on purpose. Unless a new release
@@ -1026,7 +1031,7 @@
source = self.system_source
for etype, entities in data_by_etype.items():
if server.DEBUG & server.DBG_REPO:
- print('DELETE entities', etype, [entity.eid for entity in entities])
+ print('DELETE entities', etype, [e.eid for e in entities])
self.hm.call_hooks('before_delete_entity', cnx, entities=entities)
self._delete_cascade_multi(cnx, entities)
source.delete_entities(cnx, entities)
@@ -1111,13 +1116,11 @@
self.hm.call_hooks('after_delete_relation', cnx,
eidfrom=subject, rtype=rtype, eidto=object)
-
-
-
# these are overridden by set_log_methods below
# only defining here to prevent pylint from complaining
info = warning = error = critical = exception = debug = lambda msg, *a, **kw: None
+
from logging import getLogger
from cubicweb import set_log_methods
set_log_methods(Repository, getLogger('cubicweb.repository'))
--- a/cubicweb/server/session.py Mon Jun 06 21:17:33 2016 +0200
+++ b/cubicweb/server/session.py Mon Jun 06 15:28:35 2016 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2015 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -20,12 +20,13 @@
__docformat__ = "restructuredtext en"
+import functools
import sys
from time import time
from uuid import uuid4
from warnings import warn
-import functools
from contextlib import contextmanager
+from logging import getLogger
from six import text_type
@@ -33,7 +34,8 @@
from logilab.common.textutils import unormalize
from logilab.common.registry import objectify_predicate
-from cubicweb import QueryError, schema, server, ProgrammingError
+from cubicweb import QueryError, ProgrammingError, schema, server
+from cubicweb import set_log_methods
from cubicweb.req import RequestSessionBase
from cubicweb.utils import make_uid
from cubicweb.rqlrewrite import RQLRewriter
@@ -50,6 +52,7 @@
NO_UNDO_TYPES.add('cw_source')
# XXX rememberme,forgotpwd,apycot,vcsfile
+
@objectify_predicate
def is_user_session(cls, req, **kwargs):
"""return 1 when session is not internal.
@@ -57,6 +60,7 @@
This predicate can only be used repository side only. """
return not req.is_internal_session
+
@objectify_predicate
def is_internal_session(cls, req, **kwargs):
"""return 1 when session is not internal.
@@ -64,6 +68,7 @@
This predicate can only be used repository side only. """
return req.is_internal_session
+
@objectify_predicate
def repairing(cls, req, **kwargs):
"""return 1 when repository is running in repair mode"""
@@ -72,7 +77,7 @@
@deprecated('[3.17] use <object>.allow/deny_all_hooks_but instead')
def hooks_control(obj, mode, *categories):
- assert mode in (HOOKS_ALLOW_ALL, HOOKS_DENY_ALL)
+ assert mode in (HOOKS_ALLOW_ALL, HOOKS_DENY_ALL)
if mode == HOOKS_ALLOW_ALL:
return obj.allow_all_hooks_but(*categories)
elif mode == HOOKS_DENY_ALL:
@@ -132,6 +137,7 @@
def security_enabled(obj, *args, **kwargs):
return obj.security_enabled(*args, **kwargs)
+
class _security_enabled(object):
"""context manager to control security w/ session.execute,
@@ -165,7 +171,8 @@
HOOKS_ALLOW_ALL = object()
HOOKS_DENY_ALL = object()
-DEFAULT_SECURITY = object() # evaluated to true by design
+DEFAULT_SECURITY = object() # evaluated to true by design
+
class SessionClosedError(RuntimeError):
pass
@@ -177,7 +184,7 @@
def check_open(cnx, *args, **kwargs):
if not cnx._open:
raise ProgrammingError('Closed Connection: %s'
- % cnx.connectionid)
+ % cnx.connectionid)
return func(cnx, *args, **kwargs)
return check_open
@@ -275,15 +282,14 @@
#: (None, 'precommit', 'postcommit', 'uncommitable')
self.commit_state = None
- ### hook control attribute
+ # hook control attribute
self.hooks_mode = HOOKS_ALLOW_ALL
self.disabled_hook_cats = set()
self.enabled_hook_cats = set()
self.pruned_hooks_cache = {}
-
- ### security control attributes
- self._read_security = DEFAULT_SECURITY # handled by a property
+ # security control attributes
+ self._read_security = DEFAULT_SECURITY # handled by a property
self.write_security = DEFAULT_SECURITY
# undo control
@@ -304,11 +310,6 @@
self._set_user(session.user)
@_open_only
- def source_defs(self):
- """Return the definition of sources used by the repository."""
- return self.session.repo.source_defs()
-
- @_open_only
def get_schema(self):
"""Return the schema currently used by the repository."""
return self.session.repo.source_defs()
@@ -381,13 +382,13 @@
# life cycle handling ####################################################
def __enter__(self):
- assert self._open is None # first opening
+ assert self._open is None # first opening
self._open = True
self.cnxset = self.repo._get_cnxset()
return self
def __exit__(self, exctype=None, excvalue=None, tb=None):
- assert self._open # actually already open
+ assert self._open # actually already open
self.rollback()
self._open = False
self.cnxset.cnxset_freed()
@@ -487,8 +488,7 @@
# XXX not using _open_only because before at creation time. _set_user
# call this function to cache the Connection user.
if entity.cw_etype != 'CWUser' and not self._open:
- raise ProgrammingError('Closed Connection: %s'
- % self.connectionid)
+ raise ProgrammingError('Closed Connection: %s' % self.connectionid)
ecache = self.transaction_data.setdefault('ecache', {})
ecache.setdefault(entity.eid, entity)
@@ -526,7 +526,7 @@
You may use this in hooks when you know both eids of the relation you
want to add.
"""
- self.add_relations([(rtype, [(fromeid, toeid)])])
+ self.add_relations([(rtype, [(fromeid, toeid)])])
@_open_only
def add_relations(self, relations):
@@ -555,7 +555,6 @@
for edited in edited_entities.values():
self.repo.glob_update_entity(self, edited)
-
@_open_only
def delete_relation(self, fromeid, rtype, toeid):
"""provide direct access to the repository method to delete a relation.
@@ -606,7 +605,7 @@
rset = rset.copy()
entities = list(entities)
rset.rows.append([targeteid])
- if not isinstance(rset.description, list): # else description not set
+ if not isinstance(rset.description, list): # else description not set
rset.description = list(rset.description)
rset.description.append([self.entity_metas(targeteid)['type']])
targetentity = self.entity_from_eid(targeteid)
@@ -640,7 +639,7 @@
rset = rset.copy()
entities = list(entities)
del rset.rows[idx]
- if isinstance(rset.description, list): # else description not set
+ if isinstance(rset.description, list): # else description not set
del rset.description[idx]
del entities[idx]
rset.rowcount -= 1
@@ -696,11 +695,11 @@
if self.hooks_mode is HOOKS_DENY_ALL:
enabledcats = self.enabled_hook_cats
changes = enabledcats & categories
- enabledcats -= changes # changes is small hence faster
+ enabledcats -= changes # changes is small hence faster
else:
disabledcats = self.disabled_hook_cats
changes = categories - disabledcats
- disabledcats |= changes # changes is small hence faster
+ disabledcats |= changes # changes is small hence faster
return tuple(changes)
@_open_only
@@ -716,11 +715,11 @@
if self.hooks_mode is HOOKS_DENY_ALL:
enabledcats = self.enabled_hook_cats
changes = categories - enabledcats
- enabledcats |= changes # changes is small hence faster
+ enabledcats |= changes # changes is small hence faster
else:
disabledcats = self.disabled_hook_cats
changes = disabledcats & categories
- disabledcats -= changes # changes is small hence faster
+ disabledcats -= changes # changes is small hence faster
return tuple(changes)
@_open_only
@@ -788,7 +787,7 @@
etype, extid, source = self.repo.type_and_source_from_eid(eid, self)
metas = {'type': etype, 'source': source, 'extid': extid}
if asdict:
- metas['asource'] = metas['source'] # XXX pre 3.19 client compat
+ metas['asource'] = metas['source'] # XXX pre 3.19 client compat
return metas
return etype, source, extid
@@ -966,9 +965,11 @@
This is to be used by session"""
args = {}
+
@deprecated('[3.19] use a Connection object instead')
def attr_from_cnx(session):
return getattr(session._cnx, attr_name)
+
args['fget'] = attr_from_cnx
if writable:
@deprecated('[3.19] use a Connection object instead')
@@ -1001,7 +1002,7 @@
def __init__(self, user, repo, _id=None):
self.sessionid = _id or make_uid(unormalize(user.login))
- self.user = user # XXX repoapi: deprecated and store only a login.
+ self.user = user # XXX repoapi: deprecated and store only a login.
self.repo = repo
self._timestamp = Timestamp()
self.data = {}
@@ -1054,9 +1055,10 @@
self._timestamp.touch()
local_perm_cache = cnx_attr('local_perm_cache')
+
@local_perm_cache.setter
def local_perm_cache(self, value):
- #base class assign an empty dict:-(
+ # base class assign an empty dict:-(
assert value == {}
pass
@@ -1078,8 +1080,7 @@
# these are overridden by set_log_methods below
# only defining here to prevent pylint from complaining
- info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
-
+ info = warning = error = critical = exception = debug = lambda msg, *a, **kw: None
class InternalManager(object):
@@ -1128,7 +1129,6 @@
return self._IEmailable
return None
-from logging import getLogger
-from cubicweb import set_log_methods
+
set_log_methods(Session, getLogger('cubicweb.session'))
set_log_methods(Connection, getLogger('cubicweb.session'))
--- a/cubicweb/server/test/unittest_security.py Mon Jun 06 21:17:33 2016 +0200
+++ b/cubicweb/server/test/unittest_security.py Mon Jun 06 15:28:35 2016 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -37,6 +37,7 @@
hash = _CRYPTO_CTX.encrypt('oldpassword', scheme='des_crypt')
self.create_user(cnx, u'oldpassword', password=Binary(hash.encode('ascii')))
+
class LowLevelSecurityFunctionTC(BaseSecurityTC):
def test_check_relation_read_access(self):
@@ -82,7 +83,7 @@
"""
with self.repo.internal_cnx() as cnx:
oldhash = cnx.system_sql("SELECT cw_upassword FROM cw_CWUser "
- "WHERE cw_login = 'oldpassword'").fetchone()[0]
+ "WHERE cw_login = 'oldpassword'").fetchone()[0]
oldhash = self.repo.system_source.binary_to_str(oldhash)
session = self.repo.new_session('oldpassword', password='oldpassword')
session.close()
@@ -115,10 +116,11 @@
self.hijack_source_execute()
cnx.execute('Any U WHERE NOT A todo_by U, A is Affaire')
self.assertEqual(self.query[0][1].as_string(),
- 'Any U WHERE NOT EXISTS(A todo_by U), A is Affaire')
+ 'Any U WHERE NOT EXISTS(A todo_by U), A is Affaire')
cnx.execute('Any U WHERE NOT EXISTS(A todo_by U), A is Affaire')
self.assertEqual(self.query[0][1].as_string(),
- 'Any U WHERE NOT EXISTS(A todo_by U), A is Affaire')
+ 'Any U WHERE NOT EXISTS(A todo_by U), A is Affaire')
+
class SecurityTC(BaseSecurityTC):
--- a/cubicweb/test/unittest_schema.py Mon Jun 06 21:17:33 2016 +0200
+++ b/cubicweb/test/unittest_schema.py Mon Jun 06 15:28:35 2016 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -17,23 +17,21 @@
# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""unit tests for module cubicweb.schema"""
-import sys
-from os.path import join, isabs, basename, dirname
+from os.path import join, dirname
from logilab.common.testlib import TestCase, unittest_main
from rql import RQLSyntaxError
from yams import ValidationError, BadSchemaDefinition
-from yams.constraints import SizeConstraint, StaticVocabularyConstraint
from yams.buildobjs import (RelationDefinition, EntityType, RelationType,
- Int, String, SubjectRelation, ComputedRelation)
+ Int, String, ComputedRelation)
from yams.reader import fill_schema
from cubicweb.schema import (
- CubicWebSchema, CubicWebEntitySchema, CubicWebSchemaLoader,
+ CubicWebSchema, CubicWebSchemaLoader,
RQLConstraint, RQLUniqueConstraint, RQLVocabularyConstraint,
- RQLExpression, ERQLExpression, RRQLExpression,
+ ERQLExpression, RRQLExpression,
normalize_expression, order_eschemas, guess_rrqlexpr_mainvars,
build_schema_from_namespace)
from cubicweb.devtools import TestServerConfiguration as TestConfiguration
@@ -44,18 +42,18 @@
# build a dummy schema ########################################################
-PERSONNE_PERMISSIONS = {
- 'read': ('managers', 'users', 'guests'),
+PERSONNE_PERMISSIONS = {
+ 'read': ('managers', 'users', 'guests'),
'update': ('managers', 'owners'),
- 'add': ('managers', ERQLExpression('X travaille S, S owned_by U')),
+ 'add': ('managers', ERQLExpression('X travaille S, S owned_by U')),
'delete': ('managers', 'owners',),
- }
+}
CONCERNE_PERMISSIONS = {
- 'read': ('managers', 'users', 'guests'),
- 'add': ('managers', RRQLExpression('U has_update_permission S')),
+ 'read': ('managers', 'users', 'guests'),
+ 'add': ('managers', RRQLExpression('U has_update_permission S')),
'delete': ('managers', RRQLExpression('O owned_by U')),
- }
+}
schema = CubicWebSchema('Test Schema')
enote = schema.add_entity_type(EntityType('Note'))
@@ -83,7 +81,7 @@
('Personne concerne Affaire'),
('Personne concerne Societe'),
('Affaire concerne Societe'),
- )
+)
done = {}
for rel in RELS:
_from, _type, _to = rel.split()
@@ -96,6 +94,7 @@
else:
schema.add_relation_def(RelationDefinition(_from, _type, _to))
+
class CubicWebSchemaTC(TestCase):
def test_rql_constraints_inheritance(self):
@@ -132,94 +131,111 @@
def test_erqlexpression(self):
self.assertRaises(RQLSyntaxError, ERQLExpression, '1')
expr = ERQLExpression('X travaille S, S owned_by U')
- self.assertEqual(str(expr), 'Any X WHERE X travaille S, S owned_by U, X eid %(x)s, U eid %(u)s')
+ self.assertEqual(
+ str(expr),
+ 'Any X WHERE X travaille S, S owned_by U, X eid %(x)s, U eid %(u)s')
expr = ERQLExpression('X foo S, S bar U, X baz XE, S quux SE HAVING XE > SE')
- self.assertEqual(str(expr), 'Any X WHERE X foo S, S bar U, X baz XE, S quux SE, X eid %(x)s, U eid %(u)s HAVING XE > SE')
+ self.assertEqual(
+ str(expr),
+ 'Any X WHERE X foo S, S bar U, X baz XE, S quux SE, X eid %(x)s, '
+ 'U eid %(u)s HAVING XE > SE')
def test_rrqlexpression(self):
self.assertRaises(Exception, RRQLExpression, '1')
self.assertRaises(RQLSyntaxError, RRQLExpression, 'O X Y')
expr = RRQLExpression('U has_update_permission O')
- self.assertEqual(str(expr), 'Any O,U WHERE U has_update_permission O, O eid %(o)s, U eid %(u)s')
+ self.assertEqual(
+ str(expr),
+ 'Any O,U WHERE U has_update_permission O, O eid %(o)s, U eid %(u)s')
+
loader = CubicWebSchemaLoader()
config = TestConfiguration('data', __file__)
config.bootstrap_cubes()
+
class SchemaReaderClassTest(TestCase):
def test_order_eschemas(self):
schema = loader.load(config)
self.assertEqual(order_eschemas([schema['Note'], schema['SubNote']]),
- [schema['Note'], schema['SubNote']])
+ [schema['Note'], schema['SubNote']])
self.assertEqual(order_eschemas([schema['SubNote'], schema['Note']]),
- [schema['Note'], schema['SubNote']])
+ [schema['Note'], schema['SubNote']])
def test_knownValues_load_schema(self):
schema = loader.load(config)
self.assertIsInstance(schema, CubicWebSchema)
self.assertEqual(schema.name, 'data')
entities = sorted([str(e) for e in schema.entities()])
- expected_entities = ['Ami', 'BaseTransition', 'BigInt', 'Bookmark', 'Boolean', 'Bytes', 'Card',
- 'Date', 'Datetime', 'Decimal',
- 'CWCache', 'CWComputedRType', 'CWConstraint',
- 'CWConstraintType', 'CWDataImport', 'CWEType',
- 'CWAttribute', 'CWGroup', 'EmailAddress',
- 'CWRelation', 'CWPermission', 'CWProperty', 'CWRType',
- 'CWSource', 'CWSourceHostConfig', 'CWSourceSchemaConfig',
- 'CWUniqueTogetherConstraint', 'CWUser',
- 'ExternalUri', 'FakeFile', 'Float', 'Int', 'Interval', 'Note',
- 'Password', 'Personne', 'Produit',
- 'RQLExpression', 'Reference',
- 'Service', 'Societe', 'State', 'StateFull', 'String', 'SubNote', 'SubWorkflowExitPoint',
- 'Tag', 'TZDatetime', 'TZTime', 'Time', 'Transition', 'TrInfo',
- 'Usine',
- 'Workflow', 'WorkflowTransition']
+ expected_entities = [
+ 'Ami', 'BaseTransition', 'BigInt', 'Bookmark', 'Boolean', 'Bytes', 'Card',
+ 'Date', 'Datetime', 'Decimal',
+ 'CWCache', 'CWComputedRType', 'CWConstraint',
+ 'CWConstraintType', 'CWDataImport', 'CWEType',
+ 'CWAttribute', 'CWGroup', 'EmailAddress',
+ 'CWRelation', 'CWPermission', 'CWProperty', 'CWRType',
+ 'CWSource', 'CWSourceHostConfig', 'CWSourceSchemaConfig',
+ 'CWUniqueTogetherConstraint', 'CWUser',
+ 'ExternalUri', 'FakeFile', 'Float', 'Int', 'Interval', 'Note',
+ 'Password', 'Personne', 'Produit',
+ 'RQLExpression', 'Reference',
+ 'Service', 'Societe', 'State', 'StateFull', 'String', 'SubNote', 'SubWorkflowExitPoint',
+ 'Tag', 'TZDatetime', 'TZTime', 'Time', 'Transition', 'TrInfo',
+ 'Usine',
+ 'Workflow', 'WorkflowTransition',
+ ]
self.assertListEqual(sorted(expected_entities), entities)
relations = sorted([str(r) for r in schema.relations()])
- expected_relations = ['actionnaire', 'add_permission', 'address', 'alias', 'allowed_transition', 'associe',
- 'bookmarked_by', 'by_transition', 'buddies',
+ expected_relations = [
+ 'actionnaire', 'add_permission', 'address', 'alias', 'allowed_transition', 'associe',
+ 'bookmarked_by', 'by_transition', 'buddies',
- 'cardinality', 'comment', 'comment_format',
- 'composite', 'condition', 'config', 'connait',
- 'constrained_by', 'constraint_of',
- 'content', 'content_format', 'contrat_exclusif',
- 'created_by', 'creation_date', 'cstrtype', 'custom_workflow',
- 'cwuri', 'cw_for_source', 'cw_import_of', 'cw_host_config_of', 'cw_schema', 'cw_source',
+ 'cardinality', 'comment', 'comment_format',
+ 'composite', 'condition', 'config', 'connait',
+ 'constrained_by', 'constraint_of',
+ 'content', 'content_format', 'contrat_exclusif',
+ 'created_by', 'creation_date', 'cstrtype', 'custom_workflow',
+ 'cwuri', 'cw_for_source', 'cw_import_of', 'cw_host_config_of', 'cw_schema', 'cw_source',
- 'data', 'data_encoding', 'data_format', 'data_name', 'default_workflow', 'defaultval', 'delete_permission',
- 'description', 'description_format', 'destination_state', 'dirige',
+ 'data', 'data_encoding', 'data_format', 'data_name', 'default_workflow', 'defaultval',
+ 'delete_permission', 'description', 'description_format', 'destination_state',
+ 'dirige',
- 'ean', 'ecrit_par', 'eid', 'end_timestamp', 'evaluee', 'expression', 'exprtype', 'extra_props',
+ 'ean', 'ecrit_par', 'eid', 'end_timestamp', 'evaluee', 'expression', 'exprtype',
+ 'extra_props',
- 'fabrique_par', 'final', 'firstname', 'for_user', 'formula', 'fournit',
- 'from_entity', 'from_state', 'fulltext_container', 'fulltextindexed',
+ 'fabrique_par', 'final', 'firstname', 'for_user', 'formula', 'fournit',
+ 'from_entity', 'from_state', 'fulltext_container', 'fulltextindexed',
- 'has_group_permission', 'has_text',
- 'identity', 'in_group', 'in_state', 'in_synchronization', 'indexed',
- 'initial_state', 'inlined', 'internationalizable', 'is', 'is_instance_of',
+ 'has_group_permission', 'has_text',
+ 'identity', 'in_group', 'in_state', 'in_synchronization', 'indexed',
+ 'initial_state', 'inlined', 'internationalizable', 'is', 'is_instance_of',
- 'label', 'last_login_time', 'latest_retrieval', 'lieu', 'log', 'login',
+ 'label', 'last_login_time', 'latest_retrieval', 'lieu', 'log', 'login',
- 'mainvars', 'match_host', 'modification_date',
+ 'mainvars', 'match_host', 'modification_date',
+
+ 'name', 'nom',
- 'name', 'nom',
+ 'options', 'ordernum', 'owned_by',
- 'options', 'ordernum', 'owned_by',
+ 'parser', 'path', 'pkey', 'prefered_form', 'prenom', 'primary_email',
- 'parser', 'path', 'pkey', 'prefered_form', 'prenom', 'primary_email',
+ 'read_permission', 'relation_type', 'relations', 'require_group', 'rule',
- 'read_permission', 'relation_type', 'relations', 'require_group', 'rule',
+ 'specializes', 'start_timestamp', 'state_of', 'status', 'subworkflow',
+ 'subworkflow_exit', 'subworkflow_state', 'surname', 'symmetric', 'synopsis',
- 'specializes', 'start_timestamp', 'state_of', 'status', 'subworkflow', 'subworkflow_exit', 'subworkflow_state', 'surname', 'symmetric', 'synopsis',
-
- 'tags', 'timestamp', 'title', 'to_entity', 'to_state', 'transition_of', 'travaille', 'type',
+ 'tags', 'timestamp', 'title', 'to_entity', 'to_state', 'transition_of', 'travaille',
+ 'type',
- 'upassword', 'update_permission', 'url', 'uri', 'use_email',
+ 'upassword', 'update_permission', 'url', 'uri', 'use_email',
- 'value',
+ 'value',
- 'wf_info_for', 'wikiid', 'workflow_of', 'tr_count']
+ 'wf_info_for', 'wikiid', 'workflow_of', 'tr_count',
+ ]
self.assertListEqual(sorted(expected_relations), relations)
@@ -236,7 +252,7 @@
'use_email'])
rels = sorted(r.type for r in eschema.object_relations())
self.assertListEqual(rels, ['bookmarked_by', 'buddies', 'created_by', 'for_user',
- 'identity', 'owned_by', 'wf_info_for'])
+ 'identity', 'owned_by', 'wf_info_for'])
rschema = schema.rschema('relation_type')
properties = rschema.rdef('CWAttribute', 'CWRType')
self.assertEqual(properties.cardinality, '1*')
@@ -255,13 +271,13 @@
schema = loader.load(config)
aschema = schema['TrInfo'].rdef('comment')
self.assertEqual(aschema.get_groups('read'),
- set(('managers', 'users', 'guests')))
+ set(('managers', 'users', 'guests')))
self.assertEqual(aschema.get_rqlexprs('read'),
- ())
+ ())
self.assertEqual(aschema.get_groups('update'),
- set(('managers',)))
+ set(('managers',)))
self.assertEqual([x.expression for x in aschema.get_rqlexprs('update')],
- ['U has_update_permission X'])
+ ['U has_update_permission X'])
def test_nonregr_allowed_type_names(self):
schema = CubicWebSchema('Test Schema')
@@ -290,12 +306,12 @@
class works_for(RelationDefinition):
subject = 'Person'
- object = 'Company'
+ object = 'Company'
cardinality = '?*'
class Company(EntityType):
- total_salary = Int(formula='Any SUM(SA) GROUPBY X WHERE '
- 'P works_for X, P salary SA')
+ total_salary = Int(formula='Any SUM(SA) GROUPBY X WHERE P works_for X, P salary SA')
+
good_schema = build_schema_from_namespace(vars().items())
rdef = good_schema['Company'].rdef('total_salary')
# ensure 'X is Company' is added to the rqlst to avoid ambiguities, see #4901163
@@ -306,12 +322,12 @@
{'add': (), 'update': (),
'read': ('managers', 'users', 'guests')})
- class Company(EntityType):
+ class Company(EntityType): # noqa
total_salary = String(formula='Any SUM(SA) GROUPBY X WHERE '
'P works_for X, P salary SA')
with self.assertRaises(BadSchemaDefinition) as exc:
- bad_schema = build_schema_from_namespace(vars().items())
+ build_schema_from_namespace(vars().items())
self.assertEqual(str(exc.exception),
'computed attribute total_salary on Company: '
@@ -326,7 +342,7 @@
name = String()
class Company(EntityType):
- name = String()
+ name = String()
class Service(EntityType):
name = String()
@@ -355,14 +371,14 @@
schema = build_schema_from_namespace(vars().items())
# check object/subject type
- self.assertEqual([('Person','Service')],
+ self.assertEqual([('Person', 'Service')],
list(schema['produces_and_buys'].rdefs.keys()))
- self.assertEqual([('Person','Service')],
+ self.assertEqual([('Person', 'Service')],
list(schema['produces_and_buys2'].rdefs.keys()))
self.assertCountEqual([('Company', 'Service'), ('Person', 'Service')],
list(schema['reproduce'].rdefs.keys()))
# check relation definitions are marked infered
- rdef = schema['produces_and_buys'].rdefs[('Person','Service')]
+ rdef = schema['produces_and_buys'].rdefs[('Person', 'Service')]
self.assertTrue(rdef.infered)
# and have no add/delete permissions
self.assertEqual(rdef.permissions,
@@ -419,30 +435,32 @@
"can't use RRQLExpression on attribute ToTo.attr[String], use an ERQLExpression")
def test_rqlexpr_on_computedrel(self):
- self._test('rqlexpr_on_computedrel.py',
- "can't use rql expression for read permission of relation Subject computed Object")
+ self._test(
+ 'rqlexpr_on_computedrel.py',
+ "can't use rql expression for read permission of relation Subject computed Object")
class NormalizeExpressionTC(TestCase):
def test(self):
self.assertEqual(normalize_expression('X bla Y,Y blur Z , Z zigoulou X '),
- 'X bla Y, Y blur Z, Z zigoulou X')
+ 'X bla Y, Y blur Z, Z zigoulou X')
self.assertEqual(normalize_expression('X bla Y, Y name "x,y"'),
- 'X bla Y, Y name "x,y"')
+ 'X bla Y, Y name "x,y"')
class RQLExpressionTC(TestCase):
def test_comparison(self):
self.assertEqual(ERQLExpression('X is CWUser', 'X', 0),
- ERQLExpression('X is CWUser', 'X', 0))
+ ERQLExpression('X is CWUser', 'X', 0))
self.assertNotEqual(ERQLExpression('X is CWUser', 'X', 0),
- ERQLExpression('X is CWGroup', 'X', 0))
+ ERQLExpression('X is CWGroup', 'X', 0))
class GuessRrqlExprMainVarsTC(TestCase):
def test_exists(self):
- mainvars = guess_rrqlexpr_mainvars(normalize_expression('NOT EXISTS(O team_competition C, C level < 3, C concerns S)'))
+ mainvars = guess_rrqlexpr_mainvars(normalize_expression(
+ 'NOT EXISTS(O team_competition C, C level < 3, C concerns S)'))
self.assertEqual(mainvars, set(['S', 'O']))
@@ -454,7 +472,7 @@
self.assertRaises(ValidationError,
cstr.repo_check, cnx, 1, 'rel', anoneid)
self.assertEqual(cstr.repo_check(cnx, 1, cnx.user.eid),
- None) # no validation error, constraint checked
+ None) # no validation error, constraint checked
class WorkflowShemaTC(CubicWebTC):
@@ -543,8 +561,10 @@
('transition_of', 'BaseTransition', 'Workflow', 'object'),
('transition_of', 'Transition', 'Workflow', 'object'),
('transition_of', 'WorkflowTransition', 'Workflow', 'object')],
- 'WorkflowTransition': [('condition', 'WorkflowTransition', 'RQLExpression', 'subject'),
- ('subworkflow_exit', 'WorkflowTransition', 'SubWorkflowExitPoint', 'subject')]
+ 'WorkflowTransition': [
+ ('condition', 'WorkflowTransition', 'RQLExpression', 'subject'),
+ ('subworkflow_exit', 'WorkflowTransition', 'SubWorkflowExitPoint', 'subject')
+ ]
}
def test_composite_entities(self):
--- a/cubicweb/web/application.py Mon Jun 06 21:17:33 2016 +0200
+++ b/cubicweb/web/application.py Mon Jun 06 15:28:35 2016 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.
@@ -19,30 +19,27 @@
__docformat__ = "restructuredtext en"
+import contextlib
+import json
import sys
from time import clock, time
from contextlib import contextmanager
from warnings import warn
-import json
from six import text_type, binary_type
from six.moves import http_client
-from logilab.common.deprecation import deprecated
-
from rql import BadRQLQuery
-from cubicweb import set_log_methods, cwvreg
+from cubicweb import set_log_methods
from cubicweb import (
- ValidationError, Unauthorized, Forbidden,
- AuthenticationError, NoSelectableObject,
- CW_EVENT_MANAGER)
+ CW_EVENT_MANAGER, ValidationError, Unauthorized, Forbidden,
+ AuthenticationError, NoSelectableObject)
from cubicweb.repoapi import anonymous_cnx
-from cubicweb.web import LOGGER, component, cors
+from cubicweb.web import cors
from cubicweb.web import (
- StatusResponse, DirectResponse, Redirect, NotFound, LogOut,
+ LOGGER, StatusResponse, DirectResponse, Redirect, NotFound, LogOut,
RemoteCallFailed, InvalidSession, RequestError, PublishException)
-
from cubicweb.web.request import CubicWebRequestBase
# make session manager available through a global variable so the debug view can
@@ -62,7 +59,6 @@
req.set_cnx(orig_cnx)
-
class CookieSessionHandler(object):
"""a session handler using a cookie to store the session identifier"""
@@ -122,7 +118,7 @@
try:
sessionid = str(cookie[sessioncookie].value)
session = self.get_session_by_id(req, sessionid)
- except (KeyError, InvalidSession): # no valid session cookie
+ except (KeyError, InvalidSession): # no valid session cookie
session = self.open_session(req)
return session
@@ -151,7 +147,8 @@
# these are overridden by set_log_methods below
# only defining here to prevent pylint from complaining
- info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
+ info = warning = error = critical = exception = debug = lambda msg, *a, **kw: None
+
class CubicWebPublisher(object):
"""the publisher is a singleton hold by the web frontend, and is responsible
@@ -206,19 +203,25 @@
accessed path
"""
def wrap_set_cnx(func):
+
def wrap_execute(cnx):
orig_execute = cnx.execute
+
def execute(rql, kwargs=None, build_descr=True):
tstart, cstart = time(), clock()
rset = orig_execute(rql, kwargs, build_descr=build_descr)
cnx.executed_queries.append((rql, kwargs, time() - tstart, clock() - cstart))
return rset
+
return execute
+
def set_cnx(cnx):
func(cnx)
cnx.execute = wrap_execute(cnx)
cnx.executed_queries = []
+
return set_cnx
+
req.set_cnx = wrap_set_cnx(req.set_cnx)
try:
return self.main_handle_request(req, path)
@@ -227,7 +230,7 @@
if cnx:
with self._logfile_lock:
try:
- result = ['\n'+'*'*80]
+ result = ['\n' + '*' * 80]
result.append(req.url())
result += ['%s %s -- (%.3f sec, %.3f CPU sec)' % q
for q in cnx.executed_queries]
@@ -237,7 +240,6 @@
except Exception:
self.exception('error while logging queries')
-
def main_handle_request(self, req, path):
"""Process an http request
@@ -255,7 +257,7 @@
if req.authmode == 'http':
# activate realm-based auth
realm = self.vreg.config['realm']
- req.set_header('WWW-Authenticate', [('Basic', {'realm' : realm })], raw=False)
+ req.set_header('WWW-Authenticate', [('Basic', {'realm': realm})], raw=False)
content = b''
try:
try:
@@ -264,22 +266,18 @@
cnx = repoapi.Connection(session)
req.set_cnx(cnx)
except AuthenticationError:
- # Keep the dummy session set at initialisation.
- # such session with work to an some extend but raise an
- # AuthenticationError on any database access.
- import contextlib
+            # Keep the dummy session set at initialisation. Such a session will work to some
+            # extent but raise an AuthenticationError on any database access.
+ # XXX We want to clean up this approach in the future. But several cubes like
+ # registration or forgotten password rely on this principle.
@contextlib.contextmanager
def dummy():
yield
cnx = dummy()
- # XXX We want to clean up this approach in the future. But
- # several cubes like registration or forgotten password rely on
- # this principle.
-
# nested try to allow LogOut to delegate logic to AuthenticationError
# handler
try:
- ### Try to generate the actual request content
+ # Try to generate the actual request content
with cnx:
content = self.core_handle(req, path)
# Handle user log-out
@@ -330,7 +328,6 @@
assert isinstance(content, binary_type)
return content
-
def core_handle(self, req, path):
"""method called by the main publisher to process <path>
@@ -354,7 +351,7 @@
tstart = clock()
commited = False
try:
- ### standard processing of the request
+ # standard processing of the request
try:
# apply CORS sanity checks
cors.process_request(req, self.vreg.config)
@@ -384,7 +381,7 @@
commited = True
if txuuid is not None:
req.data['last_undoable_transaction'] = txuuid
- ### error case
+ # error case
except NotFound as ex:
result = self.notfound_content(req)
req.status_out = ex.status
@@ -393,18 +390,20 @@
except RemoteCallFailed as ex:
result = self.ajax_error_handler(req, ex)
except Unauthorized as ex:
- req.data['errmsg'] = req._('You\'re not authorized to access this page. '
- 'If you think you should, please contact the site administrator.')
+ req.data['errmsg'] = req._(
+ 'You\'re not authorized to access this page. '
+ 'If you think you should, please contact the site administrator.')
req.status_out = http_client.FORBIDDEN
result = self.error_handler(req, ex, tb=False)
except Forbidden as ex:
- req.data['errmsg'] = req._('This action is forbidden. '
- 'If you think it should be allowed, please contact the site administrator.')
+ req.data['errmsg'] = req._(
+ 'This action is forbidden. '
+ 'If you think it should be allowed, please contact the site administrator.')
req.status_out = http_client.FORBIDDEN
result = self.error_handler(req, ex, tb=False)
except (BadRQLQuery, RequestError) as ex:
result = self.error_handler(req, ex, tb=False)
- ### pass through exception
+ # pass through exception
except DirectResponse:
if req.cnx:
req.cnx.commit()
@@ -412,7 +411,7 @@
except (AuthenticationError, LogOut):
# the rollback is handled in the finally
raise
- ### Last defense line
+ # Last defense line
except BaseException as ex:
req.status_out = http_client.INTERNAL_SERVER_ERROR
result = self.error_handler(req, ex, tb=True)
@@ -421,7 +420,7 @@
try:
req.cnx.rollback()
except Exception:
- pass # ignore rollback error at this point
+ pass # ignore rollback error at this point
self.add_undo_link_to_msg(req)
self.debug('query %s executed in %s sec', req.relative_path(), clock() - tstart)
return result
@@ -441,7 +440,7 @@
return b''
def validation_error_handler(self, req, ex):
- ex.translate(req._) # translate messages using ui language
+ ex.translate(req._) # translate messages using ui language
if '__errorurl' in req.form:
forminfo = {'error': ex,
'values': req.form,
@@ -486,8 +485,8 @@
def add_undo_link_to_msg(self, req):
txuuid = req.data.get('last_undoable_transaction')
if txuuid is not None:
- msg = u'<span class="undo">[<a href="%s">%s</a>]</span>' %(
- req.build_url('undo', txuuid=txuuid), req._('undo'))
+ msg = u'<span class="undo">[<a href="%s">%s</a>]</span>' % (
+ req.build_url('undo', txuuid=txuuid), req._('undo'))
req.append_to_redirect_message(msg)
def ajax_error_handler(self, req, ex):
@@ -498,7 +497,7 @@
if req.status_out < 400:
# don't overwrite it if it's already set
req.status_out = status
- json_dumper = getattr(ex, 'dumps', lambda : json.dumps({'reason': text_type(ex)}))
+ json_dumper = getattr(ex, 'dumps', lambda: json.dumps({'reason': text_type(ex)}))
return json_dumper().encode('utf-8')
# special case handling
@@ -525,7 +524,8 @@
# these are overridden by set_log_methods below
# only defining here to prevent pylint from complaining
- info = warning = error = critical = exception = debug = lambda msg,*a,**kw: None
+ info = warning = error = critical = exception = debug = lambda msg, *a, **kw: None
+
set_log_methods(CubicWebPublisher, LOGGER)
set_log_methods(CookieSessionHandler, LOGGER)
--- a/cubicweb/web/cors.py Mon Jun 06 21:17:33 2016 +0200
+++ b/cubicweb/web/cors.py Mon Jun 06 15:28:35 2016 +0200
@@ -1,5 +1,20 @@
-# -*- coding: utf-8 -*-
-# copyright 2014 Logilab, PARIS
+# copyright 2014-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
+#
+# This file is part of CubicWeb.
+#
+# CubicWeb is free software: you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation, either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# CubicWeb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License along
+# with CubicWeb. If not, see <http://www.gnu.org/licenses/>.
"""A set of utility functions to handle CORS requests
--- a/cubicweb/web/request.py Mon Jun 06 21:17:33 2016 +0200
+++ b/cubicweb/web/request.py Mon Jun 06 15:28:35 2016 +0200
@@ -944,7 +944,7 @@
def __bool__(self):
return False
-
+
__nonzero__ = __bool__
class _MockAnonymousSession(object):
--- a/cubicweb/web/test/unittest_form.py Mon Jun 06 21:17:33 2016 +0200
+++ b/cubicweb/web/test/unittest_form.py Mon Jun 06 15:28:35 2016 +0200
@@ -1,4 +1,4 @@
-# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
+# copyright 2003-2016 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of CubicWeb.